
Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (189 commits)
  drm/radeon/kms: fix warning about cur_placement being uninitialised.
  drm/ttm: Print debug information on memory manager when eviction fails
  drm: Add memory manager debug function
  drm/radeon/kms: restore surface registers on resume.
  drm/radeon/kms/r600/r700: fallback gracefully on ucode failure
  drm/ttm: Initialize eviction placement in case the driver callback doesn't
  drm/radeon/kms: cleanup structure and module if initialization fails
  drm/radeon/kms: actualy set the eviction placements we choose
  drm/radeon/kms: Fix NULL ptr dereference
  drm/radeon/kms/avivo: add support for new pll selection algo
  drm/radeon/kms/avivo: fix some bugs in the display bandwidth setup
  drm/radeon/kms: fix return value from fence function.
  drm/radeon: Remove tests for -ERESTART from the TTM code.
  drm/ttm: Have the TTM code return -ERESTARTSYS instead of -ERESTART.
  drm/radeon/kms: Convert radeon to new TTM validation API (V2)
  drm/ttm: Rework validation & memory space allocation (V3)
  drm: Add search/get functions to get a block in a specific range
  drm/radeon/kms: fix avivo tiling regression since radeon object rework
  drm/i915: Remove a debugging printk from hangcheck
  drm/radeon/kms: make sure i2c id matches
  ...
Linus Torvalds, 15 years ago
commit 3ef884b4c0
100 changed files with 9601 additions and 2702 deletions
    1. drivers/char/agp/intel-agp.c (+54 -49)
    2. drivers/gpu/drm/Makefile (+1 -1)
    3. drivers/gpu/drm/drm_crtc.c (+175 -1)
    4. drivers/gpu/drm/drm_crtc_helper.c (+4 -1)
    5. drivers/gpu/drm/drm_dp_i2c_helper.c (+6 -70)
    6. drivers/gpu/drm/drm_drv.c (+36 -6)
    7. drivers/gpu/drm/drm_edid.c (+204 -124)
    8. drivers/gpu/drm/drm_fb_helper.c (+16 -7)
    9. drivers/gpu/drm/drm_fops.c (+110 -2)
   10. drivers/gpu/drm/drm_irq.c (+122 -8)
   11. drivers/gpu/drm/drm_mm.c (+109 -1)
   12. drivers/gpu/drm/drm_modes.c (+27 -1)
   13. drivers/gpu/drm/drm_stub.c (+15 -0)
   14. drivers/gpu/drm/i915/Makefile (+1 -1)
   15. drivers/gpu/drm/i915/dvo_ch7017.c (+5 -4)
   16. drivers/gpu/drm/i915/dvo_ch7xxx.c (+9 -7)
   17. drivers/gpu/drm/i915/dvo_ivch.c (+19 -18)
   18. drivers/gpu/drm/i915/dvo_sil164.c (+10 -10)
   19. drivers/gpu/drm/i915/dvo_tfp410.c (+18 -16)
   20. drivers/gpu/drm/i915/i915_debugfs.c (+116 -4)
   21. drivers/gpu/drm/i915/i915_dma.c (+28 -12)
   22. drivers/gpu/drm/i915/i915_drv.c (+1 -0)
   23. drivers/gpu/drm/i915/i915_drv.h (+57 -23)
   24. drivers/gpu/drm/i915/i915_gem.c (+92 -22)
   25. drivers/gpu/drm/i915/i915_gem_tiling.c (+3 -3)
   26. drivers/gpu/drm/i915/i915_irq.c (+99 -64)
   27. drivers/gpu/drm/i915/i915_opregion.c (+81 -11)
   28. drivers/gpu/drm/i915/i915_reg.h (+47 -24)
   29. drivers/gpu/drm/i915/i915_suspend.c (+44 -42)
   30. drivers/gpu/drm/i915/intel_bios.c (+120 -17)
   31. drivers/gpu/drm/i915/intel_bios.h (+17 -0)
   32. drivers/gpu/drm/i915/intel_crt.c (+11 -39)
   33. drivers/gpu/drm/i915/intel_display.c (+314 -236)
   34. drivers/gpu/drm/i915/intel_dp.c (+137 -25)
   35. drivers/gpu/drm/i915/intel_drv.h (+44 -0)
   36. drivers/gpu/drm/i915/intel_fb.c (+4 -3)
   37. drivers/gpu/drm/i915/intel_hdmi.c (+52 -3)
   38. drivers/gpu/drm/i915/intel_i2c.c (+20 -1)
   39. drivers/gpu/drm/i915/intel_lvds.c (+118 -22)
   40. drivers/gpu/drm/i915/intel_overlay.c (+1416 -0)
   41. drivers/gpu/drm/i915/intel_sdvo.c (+4 -10)
   42. drivers/gpu/drm/i915/intel_tv.c (+47 -11)
   43. drivers/gpu/drm/radeon/Makefile (+1 -1)
   44. drivers/gpu/drm/radeon/atom.c (+29 -4)
   45. drivers/gpu/drm/radeon/atom.h (+2 -0)
   46. drivers/gpu/drm/radeon/atombios.h (+1 -1)
   47. drivers/gpu/drm/radeon/atombios_crtc.c (+41 -18)
   48. drivers/gpu/drm/radeon/atombios_dp.c (+790 -0)
   49. drivers/gpu/drm/radeon/r100.c (+197 -48)
   50. drivers/gpu/drm/radeon/r100_track.h (+5 -5)
   51. drivers/gpu/drm/radeon/r300.c (+23 -10)
   52. drivers/gpu/drm/radeon/r420.c (+13 -12)
   53. drivers/gpu/drm/radeon/r500_reg.h (+2 -0)
   54. drivers/gpu/drm/radeon/r520.c (+6 -2)
   55. drivers/gpu/drm/radeon/r600.c (+1077 -56)
   56. drivers/gpu/drm/radeon/r600_blit_kms.c (+21 -13)
   57. drivers/gpu/drm/radeon/r600d.h (+210 -2)
   58. drivers/gpu/drm/radeon/radeon.h (+103 -62)
   59. drivers/gpu/drm/radeon/radeon_asic.h (+70 -0)
   60. drivers/gpu/drm/radeon/radeon_atombios.c (+246 -86)
   61. drivers/gpu/drm/radeon/radeon_benchmark.c (+26 -10)
   62. drivers/gpu/drm/radeon/radeon_clocks.c (+21 -2)
   63. drivers/gpu/drm/radeon/radeon_combios.c (+353 -140)
   64. drivers/gpu/drm/radeon/radeon_connectors.c (+175 -19)
   65. drivers/gpu/drm/radeon/radeon_cp.c (+4 -41)
   66. drivers/gpu/drm/radeon/radeon_cs.c (+7 -6)
   67. drivers/gpu/drm/radeon/radeon_device.c (+51 -11)
   68. drivers/gpu/drm/radeon/radeon_display.c (+133 -12)
   69. drivers/gpu/drm/radeon/radeon_drv.c (+4 -0)
   70. drivers/gpu/drm/radeon/radeon_drv.h (+0 -1)
   71. drivers/gpu/drm/radeon/radeon_encoders.c (+190 -86)
   72. drivers/gpu/drm/radeon/radeon_fb.c (+47 -25)
   73. drivers/gpu/drm/radeon/radeon_fence.c (+6 -41)
   74. drivers/gpu/drm/radeon/radeon_fixed.h (+17 -0)
   75. drivers/gpu/drm/radeon/radeon_gart.c (+23 -19)
   76. drivers/gpu/drm/radeon/radeon_gem.c (+55 -49)
   77. drivers/gpu/drm/radeon/radeon_i2c.c (+141 -41)
   78. drivers/gpu/drm/radeon/radeon_irq_kms.c (+58 -3)
   79. drivers/gpu/drm/radeon/radeon_kms.c (+20 -22)
   80. drivers/gpu/drm/radeon/radeon_legacy_crtc.c (+76 -28)
   81. drivers/gpu/drm/radeon/radeon_legacy_encoders.c (+66 -59)
   82. drivers/gpu/drm/radeon/radeon_mode.h (+129 -20)
   83. drivers/gpu/drm/radeon/radeon_object.c (+224 -336)
   84. drivers/gpu/drm/radeon/radeon_object.h (+145 -12)
   85. drivers/gpu/drm/radeon/radeon_pm.c (+3 -3)
   86. drivers/gpu/drm/radeon/radeon_reg.h (+39 -21)
   87. drivers/gpu/drm/radeon/radeon_ring.c (+42 -25)
   88. drivers/gpu/drm/radeon/radeon_test.c (+32 -23)
   89. drivers/gpu/drm/radeon/radeon_ttm.c (+52 -42)
   90. drivers/gpu/drm/radeon/rs400.c (+9 -8)
   91. drivers/gpu/drm/radeon/rs600.c (+200 -36)
   92. drivers/gpu/drm/radeon/rs600d.h (+90 -22)
   93. drivers/gpu/drm/radeon/rs690.c (+38 -19)
   94. drivers/gpu/drm/radeon/rv515.c (+13 -11)
   95. drivers/gpu/drm/radeon/rv770.c (+58 -21)
   96. drivers/gpu/drm/ttm/Makefile (+2 -1)
   97. drivers/gpu/drm/ttm/ttm_bo.c (+283 -262)
   98. drivers/gpu/drm/ttm/ttm_bo_util.c (+1 -0)
   99. drivers/gpu/drm/ttm/ttm_bo_vm.c (+1 -6)
  100. drivers/gpu/drm/ttm/ttm_execbuf_util.c (+117 -0)

+ 54 - 49
drivers/char/agp/intel-agp.c

@@ -36,10 +36,10 @@
 #define PCI_DEVICE_ID_INTEL_82965GME_IG     0x2A12
 #define PCI_DEVICE_ID_INTEL_82945GME_HB     0x27AC
 #define PCI_DEVICE_ID_INTEL_82945GME_IG     0x27AE
-#define PCI_DEVICE_ID_INTEL_IGDGM_HB        0xA010
-#define PCI_DEVICE_ID_INTEL_IGDGM_IG        0xA011
-#define PCI_DEVICE_ID_INTEL_IGDG_HB         0xA000
-#define PCI_DEVICE_ID_INTEL_IGDG_IG         0xA001
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB        0xA010
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG        0xA011
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_HB         0xA000
+#define PCI_DEVICE_ID_INTEL_PINEVIEW_IG         0xA001
 #define PCI_DEVICE_ID_INTEL_G33_HB          0x29C0
 #define PCI_DEVICE_ID_INTEL_G33_IG          0x29C2
 #define PCI_DEVICE_ID_INTEL_Q35_HB          0x29B0
@@ -50,20 +50,20 @@
 #define PCI_DEVICE_ID_INTEL_B43_IG          0x2E42
 #define PCI_DEVICE_ID_INTEL_GM45_HB         0x2A40
 #define PCI_DEVICE_ID_INTEL_GM45_IG         0x2A42
-#define PCI_DEVICE_ID_INTEL_IGD_E_HB        0x2E00
-#define PCI_DEVICE_ID_INTEL_IGD_E_IG        0x2E02
+#define PCI_DEVICE_ID_INTEL_EAGLELAKE_HB        0x2E00
+#define PCI_DEVICE_ID_INTEL_EAGLELAKE_IG        0x2E02
 #define PCI_DEVICE_ID_INTEL_Q45_HB          0x2E10
 #define PCI_DEVICE_ID_INTEL_Q45_IG          0x2E12
 #define PCI_DEVICE_ID_INTEL_G45_HB          0x2E20
 #define PCI_DEVICE_ID_INTEL_G45_IG          0x2E22
 #define PCI_DEVICE_ID_INTEL_G41_HB          0x2E30
 #define PCI_DEVICE_ID_INTEL_G41_IG          0x2E32
-#define PCI_DEVICE_ID_INTEL_IGDNG_D_HB	    0x0040
-#define PCI_DEVICE_ID_INTEL_IGDNG_D_IG	    0x0042
-#define PCI_DEVICE_ID_INTEL_IGDNG_M_HB	    0x0044
-#define PCI_DEVICE_ID_INTEL_IGDNG_MA_HB	    0x0062
-#define PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB    0x006a
-#define PCI_DEVICE_ID_INTEL_IGDNG_M_IG	    0x0046
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB	    0x0040
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG	    0x0042
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB	    0x0044
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB	    0x0062
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB    0x006a
+#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG	    0x0046
 
 /* cover 915 and 945 variants */
 #define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
@@ -83,22 +83,22 @@
 #define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB)
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
 
-#define IS_IGD (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDGM_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDG_HB)
+#define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
 
-#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGD_E_HB || \
+#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
 		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MA_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB)
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
+		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB)
 
 extern int agp_memory_reserved;
 
@@ -178,6 +178,7 @@ static struct _intel_private {
 	 * popup and for the GTT.
 	 */
 	int gtt_entries;			/* i830+ */
+	int gtt_total_size;
 	union {
 		void __iomem *i9xx_flush_page;
 		void *i8xx_flush_page;
@@ -653,7 +654,7 @@ static void intel_i830_init_gtt_entries(void)
 			size = 512;
 		}
 		size += 4; /* add in BIOS popup space */
-	} else if (IS_G33 && !IS_IGD) {
+	} else if (IS_G33 && !IS_PINEVIEW) {
 	/* G33's GTT size defined in gmch_ctrl */
 		switch (gmch_ctrl & G33_PGETBL_SIZE_MASK) {
 		case G33_PGETBL_SIZE_1M:
@@ -669,7 +670,7 @@ static void intel_i830_init_gtt_entries(void)
 			size = 512;
 		}
 		size += 4;
-	} else if (IS_G4X || IS_IGD) {
+	} else if (IS_G4X || IS_PINEVIEW) {
 		/* On 4 series hardware, GTT stolen is separate from graphics
 		 * stolen, ignore it in stolen gtt entries counting.  However,
 		 * 4KB of the stolen memory doesn't get mapped to the GTT.
@@ -1153,7 +1154,7 @@ static int intel_i915_configure(void)
 	readl(intel_private.registers+I810_PGETBL_CTL);	/* PCI Posting. */
 
 	if (agp_bridge->driver->needs_scratch_page) {
-		for (i = intel_private.gtt_entries; i < current_size->num_entries; i++) {
+		for (i = intel_private.gtt_entries; i < intel_private.gtt_total_size; i++) {
 			writel(agp_bridge->scratch_page, intel_private.gtt+i);
 		}
 		readl(intel_private.gtt+i-1);	/* PCI Posting. */
@@ -1308,6 +1309,8 @@ static int intel_i915_create_gatt_table(struct agp_bridge_data *bridge)
 	if (!intel_private.gtt)
 		return -ENOMEM;
 
+	intel_private.gtt_total_size = gtt_map_size / 4;
+
 	temp &= 0xfff80000;
 
 	intel_private.registers = ioremap(temp, 128 * 4096);
@@ -1352,15 +1355,15 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
 {
 	switch (agp_bridge->dev->device) {
 	case PCI_DEVICE_ID_INTEL_GM45_HB:
-	case PCI_DEVICE_ID_INTEL_IGD_E_HB:
+	case PCI_DEVICE_ID_INTEL_EAGLELAKE_HB:
 	case PCI_DEVICE_ID_INTEL_Q45_HB:
 	case PCI_DEVICE_ID_INTEL_G45_HB:
 	case PCI_DEVICE_ID_INTEL_G41_HB:
 	case PCI_DEVICE_ID_INTEL_B43_HB:
-	case PCI_DEVICE_ID_INTEL_IGDNG_D_HB:
-	case PCI_DEVICE_ID_INTEL_IGDNG_M_HB:
-	case PCI_DEVICE_ID_INTEL_IGDNG_MA_HB:
-	case PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB:
+	case PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB:
+	case PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB:
+	case PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB:
+	case PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB:
 		*gtt_offset = *gtt_size = MB(2);
 		break;
 	default:
@@ -1395,6 +1398,8 @@ static int intel_i965_create_gatt_table(struct agp_bridge_data *bridge)
 	if (!intel_private.gtt)
 		return -ENOMEM;
 
+	intel_private.gtt_total_size = gtt_size / 4;
+
 	intel_private.registers = ioremap(temp, 128 * 4096);
 	if (!intel_private.registers) {
 		iounmap(intel_private.gtt);
@@ -2340,14 +2345,14 @@ static const struct intel_driver_description {
 		NULL, &intel_g33_driver },
 	{ PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, 0, "Q33",
 		NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_IGDGM_HB, PCI_DEVICE_ID_INTEL_IGDGM_IG, 0, "IGD",
+	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, 0, "Pineview",
 		NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_IGDG_HB, PCI_DEVICE_ID_INTEL_IGDG_IG, 0, "IGD",
+	{ PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, 0, "Pineview",
 		NULL, &intel_g33_driver },
 	{ PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG, 0,
-	    "Mobile Intel® GM45 Express", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_IGD_E_HB, PCI_DEVICE_ID_INTEL_IGD_E_IG, 0,
-	    "Intel Integrated Graphics Device", NULL, &intel_i965_driver },
+	    "GM45", NULL, &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG, 0,
+	    "Eaglelake", NULL, &intel_i965_driver },
 	{ PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG, 0,
 	    "Q45/Q43", NULL, &intel_i965_driver },
 	{ PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG, 0,
@@ -2356,14 +2361,14 @@ static const struct intel_driver_description {
 	    "B43", NULL, &intel_i965_driver },
 	{ PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG, 0,
 	    "G41", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_IGDNG_D_HB, PCI_DEVICE_ID_INTEL_IGDNG_D_IG, 0,
-	    "IGDNG/D", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_IGDNG_M_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
-	    "IGDNG/M", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_IGDNG_MA_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
-	    "IGDNG/MA", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
-	    "IGDNG/MC2", NULL, &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG, 0,
+	    "Ironlake/D", NULL, &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
+	    "Ironlake/M", NULL, &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
+	    "Ironlake/MA", NULL, &intel_i965_driver },
+	{ PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG, 0,
+	    "Ironlake/MC2", NULL, &intel_i965_driver },
 	{ 0, 0, 0, NULL, NULL, NULL }
 };
 
@@ -2545,8 +2550,8 @@ static struct pci_device_id agp_intel_pci_table[] = {
 	ID(PCI_DEVICE_ID_INTEL_82945G_HB),
 	ID(PCI_DEVICE_ID_INTEL_82945GM_HB),
 	ID(PCI_DEVICE_ID_INTEL_82945GME_HB),
-	ID(PCI_DEVICE_ID_INTEL_IGDGM_HB),
-	ID(PCI_DEVICE_ID_INTEL_IGDG_HB),
+	ID(PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB),
+	ID(PCI_DEVICE_ID_INTEL_PINEVIEW_HB),
 	ID(PCI_DEVICE_ID_INTEL_82946GZ_HB),
 	ID(PCI_DEVICE_ID_INTEL_82G35_HB),
 	ID(PCI_DEVICE_ID_INTEL_82965Q_HB),
@@ -2557,15 +2562,15 @@ static struct pci_device_id agp_intel_pci_table[] = {
 	ID(PCI_DEVICE_ID_INTEL_Q35_HB),
 	ID(PCI_DEVICE_ID_INTEL_Q33_HB),
 	ID(PCI_DEVICE_ID_INTEL_GM45_HB),
-	ID(PCI_DEVICE_ID_INTEL_IGD_E_HB),
+	ID(PCI_DEVICE_ID_INTEL_EAGLELAKE_HB),
 	ID(PCI_DEVICE_ID_INTEL_Q45_HB),
 	ID(PCI_DEVICE_ID_INTEL_G45_HB),
 	ID(PCI_DEVICE_ID_INTEL_G41_HB),
 	ID(PCI_DEVICE_ID_INTEL_B43_HB),
-	ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB),
-	ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB),
-	ID(PCI_DEVICE_ID_INTEL_IGDNG_MA_HB),
-	ID(PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB),
+	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
+	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
+	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
+	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
 	{ }
 };
 

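A note on the gtt_total_size change above: each GTT entry is a 4-byte PTE mapping one 4 KiB page, so dividing the mapped GTT size by 4 gives the table's true entry count, and the scratch-page fill loop now runs over that rather than the aperture-derived num_entries. A rough sketch of the arithmetic; the 256 KiB figure is only an illustration, not a value from the driver:

	/* Hypothetical example: a 256 KiB GTT mapping covers a 256 MiB aperture. */
	unsigned int gtt_map_size = 256 * 1024;     /* bytes of GTT mapped  */
	unsigned int entries = gtt_map_size / 4;    /* 65536 4-byte PTEs    */
	unsigned long aperture = entries * 4096UL;  /* 256 MiB of GTT space */
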
+ 1 - 1
drivers/gpu/drm/Makefile

@@ -15,7 +15,7 @@ drm-y       :=	drm_auth.o drm_bufs.o drm_cache.o \
 
 drm-$(CONFIG_COMPAT) += drm_ioc32.o
 
-drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o
+drm_kms_helper-y := drm_fb_helper.o drm_crtc_helper.o drm_dp_i2c_helper.o
 
 obj-$(CONFIG_DRM_KMS_HELPER) += drm_kms_helper.o
 

+ 175 - 1
drivers/gpu/drm/drm_crtc.c

@@ -125,6 +125,15 @@ static struct drm_prop_enum_list drm_tv_subconnector_enum_list[] =
 DRM_ENUM_NAME_FN(drm_get_tv_subconnector_name,
 		 drm_tv_subconnector_enum_list)
 
+static struct drm_prop_enum_list drm_dirty_info_enum_list[] = {
+	{ DRM_MODE_DIRTY_OFF,      "Off"      },
+	{ DRM_MODE_DIRTY_ON,       "On"       },
+	{ DRM_MODE_DIRTY_ANNOTATE, "Annotate" },
+};
+
+DRM_ENUM_NAME_FN(drm_get_dirty_info_name,
+		 drm_dirty_info_enum_list)
+
 struct drm_conn_prop_enum_list {
 	int type;
 	char *name;
@@ -247,7 +256,8 @@ static void drm_mode_object_put(struct drm_device *dev,
 	mutex_unlock(&dev->mode_config.idr_mutex);
 }
 
-void *drm_mode_object_find(struct drm_device *dev, uint32_t id, uint32_t type)
+struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
+		uint32_t id, uint32_t type)
 {
 	struct drm_mode_object *obj = NULL;
 
@@ -801,6 +811,36 @@ int drm_mode_create_dithering_property(struct drm_device *dev)
 }
 EXPORT_SYMBOL(drm_mode_create_dithering_property);
 
+/**
+ * drm_mode_create_dirty_property - create dirty property
+ * @dev: DRM device
+ *
+ * Called by a driver the first time it's needed, must be attached to desired
+ * connectors.
+ */
+int drm_mode_create_dirty_info_property(struct drm_device *dev)
+{
+	struct drm_property *dirty_info;
+	int i;
+
+	if (dev->mode_config.dirty_info_property)
+		return 0;
+
+	dirty_info =
+		drm_property_create(dev, DRM_MODE_PROP_ENUM |
+				    DRM_MODE_PROP_IMMUTABLE,
+				    "dirty",
+				    ARRAY_SIZE(drm_dirty_info_enum_list));
+	for (i = 0; i < ARRAY_SIZE(drm_dirty_info_enum_list); i++)
+		drm_property_add_enum(dirty_info, i,
+				      drm_dirty_info_enum_list[i].type,
+				      drm_dirty_info_enum_list[i].name);
+	dev->mode_config.dirty_info_property = dirty_info;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_mode_create_dirty_info_property);
+
 /**
  * drm_mode_config_init - initialize DRM mode_configuration structure
  * @dev: DRM device
@@ -1753,6 +1793,71 @@ out:
 	return ret;
 }
 
+int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
+			   void *data, struct drm_file *file_priv)
+{
+	struct drm_clip_rect __user *clips_ptr;
+	struct drm_clip_rect *clips = NULL;
+	struct drm_mode_fb_dirty_cmd *r = data;
+	struct drm_mode_object *obj;
+	struct drm_framebuffer *fb;
+	unsigned flags;
+	int num_clips;
+	int ret = 0;
+
+	mutex_lock(&dev->mode_config.mutex);
+	obj = drm_mode_object_find(dev, r->fb_id, DRM_MODE_OBJECT_FB);
+	if (!obj) {
+		DRM_ERROR("invalid framebuffer id\n");
+		ret = -EINVAL;
+		goto out_err1;
+	}
+	fb = obj_to_fb(obj);
+
+	num_clips = r->num_clips;
+	clips_ptr = (struct drm_clip_rect *)(unsigned long)r->clips_ptr;
+
+	if (!num_clips != !clips_ptr) {
+		ret = -EINVAL;
+		goto out_err1;
+	}
+
+	flags = DRM_MODE_FB_DIRTY_FLAGS & r->flags;
+
+	/* If userspace annotates copy, clips must come in pairs */
+	if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY && (num_clips % 2)) {
+		ret = -EINVAL;
+		goto out_err1;
+	}
+
+	if (num_clips && clips_ptr) {
+		clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+		if (!clips) {
+			ret = -ENOMEM;
+			goto out_err1;
+		}
+
+		ret = copy_from_user(clips, clips_ptr,
+				     num_clips * sizeof(*clips));
+		if (ret)
+			goto out_err2;
+	}
+
+	if (fb->funcs->dirty) {
+		ret = fb->funcs->dirty(fb, flags, r->color, clips, num_clips);
+	} else {
+		ret = -ENOSYS;
+		goto out_err2;
+	}
+
+out_err2:
+	kfree(clips);
+out_err1:
+	mutex_unlock(&dev->mode_config.mutex);
+	return ret;
+}
+
+
 /**
  * drm_fb_release - remove and free the FBs on this file
  * @filp: file * from the ioctl
@@ -2478,3 +2583,72 @@ out:
 	mutex_unlock(&dev->mode_config.mutex);
 	return ret;
 }
+
+int drm_mode_page_flip_ioctl(struct drm_device *dev,
+			     void *data, struct drm_file *file_priv)
+{
+	struct drm_mode_crtc_page_flip *page_flip = data;
+	struct drm_mode_object *obj;
+	struct drm_crtc *crtc;
+	struct drm_framebuffer *fb;
+	struct drm_pending_vblank_event *e = NULL;
+	unsigned long flags;
+	int ret = -EINVAL;
+
+	if (page_flip->flags & ~DRM_MODE_PAGE_FLIP_FLAGS ||
+	    page_flip->reserved != 0)
+		return -EINVAL;
+
+	mutex_lock(&dev->mode_config.mutex);
+	obj = drm_mode_object_find(dev, page_flip->crtc_id, DRM_MODE_OBJECT_CRTC);
+	if (!obj)
+		goto out;
+	crtc = obj_to_crtc(obj);
+
+	if (crtc->funcs->page_flip == NULL)
+		goto out;
+
+	obj = drm_mode_object_find(dev, page_flip->fb_id, DRM_MODE_OBJECT_FB);
+	if (!obj)
+		goto out;
+	fb = obj_to_fb(obj);
+
+	if (page_flip->flags & DRM_MODE_PAGE_FLIP_EVENT) {
+		ret = -ENOMEM;
+		spin_lock_irqsave(&dev->event_lock, flags);
+		if (file_priv->event_space < sizeof e->event) {
+			spin_unlock_irqrestore(&dev->event_lock, flags);
+			goto out;
+		}
+		file_priv->event_space -= sizeof e->event;
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+
+		e = kzalloc(sizeof *e, GFP_KERNEL);
+		if (e == NULL) {
+			spin_lock_irqsave(&dev->event_lock, flags);
+			file_priv->event_space += sizeof e->event;
+			spin_unlock_irqrestore(&dev->event_lock, flags);
+			goto out;
+		}
+
+		e->event.base.type = DRM_EVENT_FLIP_COMPLETE;
+		e->event.base.length = sizeof e->event;
+		e->event.user_data = page_flip->user_data;
+		e->base.event = &e->event.base;
+		e->base.file_priv = file_priv;
+		e->base.destroy =
+			(void (*) (struct drm_pending_event *)) kfree;
+	}
+
+	ret = crtc->funcs->page_flip(crtc, fb, e);
+	if (ret) {
+		spin_lock_irqsave(&dev->event_lock, flags);
+		file_priv->event_space += sizeof e->event;
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		kfree(e);
+	}
+
+out:
+	mutex_unlock(&dev->mode_config.mutex);
+	return ret;
+}
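
For context on the two ioctls added above, here is a hedged userspace sketch of driving the new page-flip path and consuming its completion event. It assumes libdrm-era headers (drm.h and drm_mode.h on the include path) and a DRM fd opened elsewhere; error handling is trimmed:

	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <drm.h>
	#include <drm_mode.h>

	/* Sketch: flip crtc_id to fb_id, then wait for the completion event.
	 * crtc_id and fb_id come from earlier modeset/addfb calls. */
	static int flip_and_wait(int fd, uint32_t crtc_id, uint32_t fb_id)
	{
		struct drm_mode_crtc_page_flip flip;
		char buf[4096];
		ssize_t len, i;

		memset(&flip, 0, sizeof(flip));
		flip.crtc_id = crtc_id;
		flip.fb_id = fb_id;
		flip.flags = DRM_MODE_PAGE_FLIP_EVENT;	/* queue DRM_EVENT_FLIP_COMPLETE */

		if (ioctl(fd, DRM_IOCTL_MODE_PAGE_FLIP, &flip) < 0)
			return -1;

		/* Events arrive on the fd as length-prefixed struct drm_event records. */
		len = read(fd, buf, sizeof(buf));
		for (i = 0; i + (ssize_t)sizeof(struct drm_event) <= len; ) {
			struct drm_event *e = (struct drm_event *)&buf[i];

			if (e->type == DRM_EVENT_FLIP_COMPLETE)
				return 0;
			i += e->length;
		}
		return -1;
	}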

+ 4 - 1
drivers/gpu/drm/drm_crtc_helper.c

@@ -109,7 +109,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
 
 	count = (*connector_funcs->get_modes)(connector);
 	if (!count) {
-		count = drm_add_modes_noedid(connector, 800, 600);
+		count = drm_add_modes_noedid(connector, 1024, 768);
 		if (!count)
 			return 0;
 	}
@@ -1020,6 +1020,9 @@ bool drm_helper_initial_config(struct drm_device *dev)
 {
 	int count = 0;
 
+	/* disable all the possible outputs/crtcs before entering KMS mode */
+	drm_helper_disable_unused_functions(dev);
+
 	drm_fb_helper_parse_command_line(dev);
 
 	count = drm_helper_probe_connector_modes(dev,

+ 6 - 70
drivers/gpu/drm/i915/intel_dp_i2c.c → drivers/gpu/drm/drm_dp_i2c_helper.c

@@ -28,84 +28,20 @@
 #include <linux/errno.h>
 #include <linux/sched.h>
 #include <linux/i2c.h>
-#include "intel_dp.h"
+#include "drm_dp_helper.h"
 #include "drmP.h"
 
 /* Run a single AUX_CH I2C transaction, writing/reading data as necessary */
-
-#define MODE_I2C_START	1
-#define MODE_I2C_WRITE	2
-#define MODE_I2C_READ	4
-#define MODE_I2C_STOP	8
-
 static int
 i2c_algo_dp_aux_transaction(struct i2c_adapter *adapter, int mode,
 			    uint8_t write_byte, uint8_t *read_byte)
 {
 	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
-	uint16_t address = algo_data->address;
-	uint8_t msg[5];
-	uint8_t reply[2];
-	int msg_bytes;
-	int reply_bytes;
 	int ret;
-
-	/* Set up the command byte */
-	if (mode & MODE_I2C_READ)
-		msg[0] = AUX_I2C_READ << 4;
-	else
-		msg[0] = AUX_I2C_WRITE << 4;
-
-	if (!(mode & MODE_I2C_STOP))
-		msg[0] |= AUX_I2C_MOT << 4;
-
-	msg[1] = address >> 8;
-	msg[2] = address;
-
-	switch (mode) {
-	case MODE_I2C_WRITE:
-		msg[3] = 0;
-		msg[4] = write_byte;
-		msg_bytes = 5;
-		reply_bytes = 1;
-		break;
-	case MODE_I2C_READ:
-		msg[3] = 0;
-		msg_bytes = 4;
-		reply_bytes = 2;
-		break;
-	default:
-		msg_bytes = 3;
-		reply_bytes = 1;
-		break;
-	}
-
-	for (;;) {
-		ret = (*algo_data->aux_ch)(adapter,
-					   msg, msg_bytes,
-					   reply, reply_bytes);
-		if (ret < 0) {
-			DRM_DEBUG("aux_ch failed %d\n", ret);
-			return ret;
-		}
-		switch (reply[0] & AUX_I2C_REPLY_MASK) {
-		case AUX_I2C_REPLY_ACK:
-			if (mode == MODE_I2C_READ) {
-				*read_byte = reply[1];
-			}
-			return reply_bytes - 1;
-		case AUX_I2C_REPLY_NACK:
-			DRM_DEBUG("aux_ch nack\n");
-			return -EREMOTEIO;
-		case AUX_I2C_REPLY_DEFER:
-			DRM_DEBUG("aux_ch defer\n");
-			udelay(100);
-			break;
-		default:
-			DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]);
-			return -EREMOTEIO;
-		}
-	}
+	
+	ret = (*algo_data->aux_ch)(adapter, mode,
+				   write_byte, read_byte);
+	return ret;
 }
 
 /*
@@ -224,7 +160,7 @@ i2c_algo_dp_aux_xfer(struct i2c_adapter *adapter,
 	if (ret >= 0)
 		ret = num;
 	i2c_algo_dp_aux_stop(adapter, reading);
-	DRM_DEBUG("dp_aux_xfer return %d\n", ret);
+	DRM_DEBUG_KMS("dp_aux_xfer return %d\n", ret);
 	return ret;
 }
 

+ 36 - 6
drivers/gpu/drm/drm_drv.c

@@ -145,6 +145,8 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, DRM_MASTER|DRM_CONTROL_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb, DRM_MASTER|DRM_CONTROL_ALLOW),
 	DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_PAGE_FLIP, drm_mode_page_flip_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_IOCTL_MODE_DIRTYFB, drm_mode_dirtyfb_ioctl, DRM_MASTER|DRM_CONTROL_ALLOW)
 };
 
 #define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )
@@ -365,6 +367,29 @@ static void __exit drm_core_exit(void)
 module_init(drm_core_init);
 module_exit(drm_core_exit);
 
+/**
+ * Copy and IOCTL return string to user space
+ */
+static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
+{
+	int len;
+
+	/* don't overflow userbuf */
+	len = strlen(value);
+	if (len > *buf_len)
+		len = *buf_len;
+
+	/* let userspace know exact length of driver value (which could be
+	 * larger than the userspace-supplied buffer) */
+	*buf_len = strlen(value);
+
+	/* finally, try filling in the userbuf */
+	if (len && buf)
+		if (copy_to_user(buf, value, len))
+			return -EFAULT;
+	return 0;
+}
+
 /**
  * Get version information
  *
@@ -380,16 +405,21 @@ static int drm_version(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv)
 {
 	struct drm_version *version = data;
-	int len;
+	int err;
 
 	version->version_major = dev->driver->major;
 	version->version_minor = dev->driver->minor;
 	version->version_patchlevel = dev->driver->patchlevel;
-	DRM_COPY(version->name, dev->driver->name);
-	DRM_COPY(version->date, dev->driver->date);
-	DRM_COPY(version->desc, dev->driver->desc);
-
-	return 0;
+	err = drm_copy_field(version->name, &version->name_len,
+			dev->driver->name);
+	if (!err)
+		err = drm_copy_field(version->date, &version->date_len,
+				dev->driver->date);
+	if (!err)
+		err = drm_copy_field(version->desc, &version->desc_len,
+				dev->driver->desc);
+
+	return err;
 }
 
 /**

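The drm_copy_field() helper above reports the driver string's exact length back through *buf_len even when it truncates, which is what enables the classic two-call pattern for DRM_IOCTL_VERSION. A hedged userspace sketch of that pattern (assumes drm.h on the include path; cleanup on the error paths is elided):

	#include <stdlib.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm.h>

	/* First call with NULL buffers learns the lengths; second call fills them. */
	static int get_version(int fd, struct drm_version *v)
	{
		memset(v, 0, sizeof(*v));
		if (ioctl(fd, DRM_IOCTL_VERSION, v) < 0)	/* lengths only */
			return -1;

		v->name = malloc(v->name_len + 1);
		v->date = malloc(v->date_len + 1);
		v->desc = malloc(v->desc_len + 1);
		if (!v->name || !v->date || !v->desc)
			return -1;

		if (ioctl(fd, DRM_IOCTL_VERSION, v) < 0)	/* now the strings */
			return -1;

		v->name[v->name_len] = '\0';
		v->date[v->date_len] = '\0';
		v->desc[v->desc_len] = '\0';
		return 0;
	}
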
+ 204 - 124
drivers/gpu/drm/drm_edid.c

@@ -123,18 +123,20 @@ static const u8 edid_header[] = {
  */
 static bool edid_is_valid(struct edid *edid)
 {
-	int i;
+	int i, score = 0;
 	u8 csum = 0;
 	u8 *raw_edid = (u8 *)edid;
 
-	if (memcmp(edid->header, edid_header, sizeof(edid_header)))
-		goto bad;
-	if (edid->version != 1) {
-		DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
+	for (i = 0; i < sizeof(edid_header); i++)
+		if (raw_edid[i] == edid_header[i])
+			score++;
+
+	if (score == 8) ;
+	else if (score >= 6) {
+		DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
+		memcpy(raw_edid, edid_header, sizeof(edid_header));
+	} else
 		goto bad;
-	}
-	if (edid->revision > 4)
-		DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
 
 	for (i = 0; i < EDID_LENGTH; i++)
 		csum += raw_edid[i];
@@ -143,6 +145,14 @@ static bool edid_is_valid(struct edid *edid)
 		goto bad;
 	}
 
+	if (edid->version != 1) {
+		DRM_ERROR("EDID has major version %d, instead of 1\n", edid->version);
+		goto bad;
+	}
+
+	if (edid->revision > 4)
+		DRM_DEBUG("EDID minor > 4, assuming backward compatibility\n");
+
 	return 1;
 
 bad:
@@ -481,16 +491,17 @@ static struct drm_display_mode drm_dmt_modes[] = {
 		   3048, 3536, 0, 1600, 1603, 1609, 1682, 0,
 		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
 };
+static const int drm_num_dmt_modes =
+	sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
 
 static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
 			int hsize, int vsize, int fresh)
 {
-	int i, count;
+	int i;
 	struct drm_display_mode *ptr, *mode;
 
-	count = sizeof(drm_dmt_modes) / sizeof(struct drm_display_mode);
 	mode = NULL;
-	for (i = 0; i < count; i++) {
+	for (i = 0; i < drm_num_dmt_modes; i++) {
 		ptr = &drm_dmt_modes[i];
 		if (hsize == ptr->hdisplay &&
 			vsize == ptr->vdisplay &&
@@ -834,8 +845,165 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
 	return modes;
 }
 
+/*
+ * XXX fix this for:
+ * - GTF secondary curve formula
+ * - EDID 1.4 range offsets
+ * - CVT extended bits
+ */
+static bool
+mode_in_range(struct drm_display_mode *mode, struct detailed_timing *timing)
+{
+	struct detailed_data_monitor_range *range;
+	int hsync, vrefresh;
+
+	range = &timing->data.other_data.data.range;
+
+	hsync = drm_mode_hsync(mode);
+	vrefresh = drm_mode_vrefresh(mode);
+
+	if (hsync < range->min_hfreq_khz || hsync > range->max_hfreq_khz)
+		return false;
+
+	if (vrefresh < range->min_vfreq || vrefresh > range->max_vfreq)
+		return false;
+
+	if (range->pixel_clock_mhz && range->pixel_clock_mhz != 0xff) {
+		/* be forgiving since it's in units of 10MHz */
+		int max_clock = range->pixel_clock_mhz * 10 + 9;
+		max_clock *= 1000;
+		if (mode->clock > max_clock)
+			return false;
+	}
+
+	return true;
+}
+
+/*
+ * XXX If drm_dmt_modes ever regrows the CVT-R modes (and it will) this will
+ * need to account for them.
+ */
+static int drm_gtf_modes_for_range(struct drm_connector *connector,
+				   struct detailed_timing *timing)
+{
+	int i, modes = 0;
+	struct drm_display_mode *newmode;
+	struct drm_device *dev = connector->dev;
+
+	for (i = 0; i < drm_num_dmt_modes; i++) {
+		if (mode_in_range(drm_dmt_modes + i, timing)) {
+			newmode = drm_mode_duplicate(dev, &drm_dmt_modes[i]);
+			if (newmode) {
+				drm_mode_probed_add(connector, newmode);
+				modes++;
+			}
+		}
+	}
+
+	return modes;
+}
+
+static int drm_cvt_modes(struct drm_connector *connector,
+			 struct detailed_timing *timing)
+{
+	int i, j, modes = 0;
+	struct drm_display_mode *newmode;
+	struct drm_device *dev = connector->dev;
+	struct cvt_timing *cvt;
+	const int rates[] = { 60, 85, 75, 60, 50 };
+
+	for (i = 0; i < 4; i++) {
+		int width, height;
+		cvt = &(timing->data.other_data.data.cvt[i]);
+
+		height = (cvt->code[0] + ((cvt->code[1] & 0xf0) << 8) + 1) * 2;
+		switch (cvt->code[1] & 0xc0) {
+		case 0x00:
+			width = height * 4 / 3;
+			break;
+		case 0x40:
+			width = height * 16 / 9;
+			break;
+		case 0x80:
+			width = height * 16 / 10;
+			break;
+		case 0xc0:
+			width = height * 15 / 9;
+			break;
+		}
+
+		for (j = 1; j < 5; j++) {
+			if (cvt->code[2] & (1 << j)) {
+				newmode = drm_cvt_mode(dev, width, height,
+						       rates[j], j == 0,
+						       false, false);
+				if (newmode) {
+					drm_mode_probed_add(connector, newmode);
+					modes++;
+				}
+			}
+		}
+	}
+
+	return modes;
+}
+
+static int add_detailed_modes(struct drm_connector *connector,
+			      struct detailed_timing *timing,
+			      struct edid *edid, u32 quirks, int preferred)
+{
+	int i, modes = 0;
+	struct detailed_non_pixel *data = &timing->data.other_data;
+	int timing_level = standard_timing_level(edid);
+	int gtf = (edid->features & DRM_EDID_FEATURE_DEFAULT_GTF);
+	struct drm_display_mode *newmode;
+	struct drm_device *dev = connector->dev;
+
+	if (timing->pixel_clock) {
+		newmode = drm_mode_detailed(dev, edid, timing, quirks);
+		if (!newmode)
+			return 0;
+
+		if (preferred)
+			newmode->type |= DRM_MODE_TYPE_PREFERRED;
+
+		drm_mode_probed_add(connector, newmode);
+		return 1;
+	}
+
+	/* other timing types */
+	switch (data->type) {
+	case EDID_DETAIL_MONITOR_RANGE:
+		if (gtf)
+			modes += drm_gtf_modes_for_range(connector, timing);
+		break;
+	case EDID_DETAIL_STD_MODES:
+		/* Six modes per detailed section */
+		for (i = 0; i < 6; i++) {
+			struct std_timing *std;
+			struct drm_display_mode *newmode;
+
+			std = &data->data.timings[i];
+			newmode = drm_mode_std(dev, std, edid->revision,
+					       timing_level);
+			if (newmode) {
+				drm_mode_probed_add(connector, newmode);
+				modes++;
+			}
+		}
+		break;
+	case EDID_DETAIL_CVT_3BYTE:
+		modes += drm_cvt_modes(connector, timing);
+		break;
+	default:
+		break;
+	}
+
+	return modes;
+}
+
 /**
- * add_detailed_modes - get detailed mode info from EDID data
+ * add_detailed_info - get detailed mode info from EDID data
  * @connector: attached connector
  * @edid: EDID block to scan
  * @quirks: quirks to apply
@@ -846,67 +1014,24 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
 static int add_detailed_info(struct drm_connector *connector,
 			     struct edid *edid, u32 quirks)
 {
-	struct drm_device *dev = connector->dev;
-	int i, j, modes = 0;
-	int timing_level;
-
-	timing_level = standard_timing_level(edid);
+	int i, modes = 0;
 
 	for (i = 0; i < EDID_DETAILED_TIMINGS; i++) {
 		struct detailed_timing *timing = &edid->detailed_timings[i];
-		struct detailed_non_pixel *data = &timing->data.other_data;
-		struct drm_display_mode *newmode;
-
-		/* X server check is version 1.1 or higher */
-		if (edid->version == 1 && edid->revision >= 1 &&
-		    !timing->pixel_clock) {
-			/* Other timing or info */
-			switch (data->type) {
-			case EDID_DETAIL_MONITOR_SERIAL:
-				break;
-			case EDID_DETAIL_MONITOR_STRING:
-				break;
-			case EDID_DETAIL_MONITOR_RANGE:
-				/* Get monitor range data */
-				break;
-			case EDID_DETAIL_MONITOR_NAME:
-				break;
-			case EDID_DETAIL_MONITOR_CPDATA:
-				break;
-			case EDID_DETAIL_STD_MODES:
-				for (j = 0; j < 6; i++) {
-					struct std_timing *std;
-					struct drm_display_mode *newmode;
-
-					std = &data->data.timings[j];
-					newmode = drm_mode_std(dev, std,
-							       edid->revision,
-							       timing_level);
-					if (newmode) {
-						drm_mode_probed_add(connector, newmode);
-						modes++;
-					}
-				}
-				break;
-			default:
-				break;
-			}
-		} else {
-			newmode = drm_mode_detailed(dev, edid, timing, quirks);
-			if (!newmode)
-				continue;
+		int preferred = (i == 0) && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING);
 
-			/* First detailed mode is preferred */
-			if (i == 0 && (edid->features & DRM_EDID_FEATURE_PREFERRED_TIMING))
-				newmode->type |= DRM_MODE_TYPE_PREFERRED;
-			drm_mode_probed_add(connector, newmode);
+		/* In 1.0, only timings are allowed */
+		if (!timing->pixel_clock && edid->version == 1 &&
+			edid->revision == 0)
+			continue;
 
-			modes++;
-		}
+		modes += add_detailed_modes(connector, timing, edid, quirks,
+					    preferred);
 	}
 
 	return modes;
 }
+
 /**
  * add_detailed_mode_eedid - get detailed mode info from addtional timing
  * 			EDID block
@@ -920,12 +1045,9 @@ static int add_detailed_info(struct drm_connector *connector,
 static int add_detailed_info_eedid(struct drm_connector *connector,
 			     struct edid *edid, u32 quirks)
 {
-	struct drm_device *dev = connector->dev;
-	int i, j, modes = 0;
+	int i, modes = 0;
 	char *edid_ext = NULL;
 	struct detailed_timing *timing;
-	struct detailed_non_pixel *data;
-	struct drm_display_mode *newmode;
 	int edid_ext_num;
 	int start_offset, end_offset;
 	int timing_level;
@@ -976,51 +1098,7 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
 	for (i = start_offset; i < end_offset;
 			i += sizeof(struct detailed_timing)) {
 		timing = (struct detailed_timing *)(edid_ext + i);
-		data = &timing->data.other_data;
-		/* Detailed mode timing */
-		if (timing->pixel_clock) {
-			newmode = drm_mode_detailed(dev, edid, timing, quirks);
-			if (!newmode)
-				continue;
-
-			drm_mode_probed_add(connector, newmode);
-
-			modes++;
-			continue;
-		}
-
-		/* Other timing or info */
-		switch (data->type) {
-		case EDID_DETAIL_MONITOR_SERIAL:
-			break;
-		case EDID_DETAIL_MONITOR_STRING:
-			break;
-		case EDID_DETAIL_MONITOR_RANGE:
-			/* Get monitor range data */
-			break;
-		case EDID_DETAIL_MONITOR_NAME:
-			break;
-		case EDID_DETAIL_MONITOR_CPDATA:
-			break;
-		case EDID_DETAIL_STD_MODES:
-			/* Five modes per detailed section */
-			for (j = 0; j < 5; i++) {
-				struct std_timing *std;
-				struct drm_display_mode *newmode;
-
-				std = &data->data.timings[j];
-				newmode = drm_mode_std(dev, std,
-						       edid->revision,
-						       timing_level);
-				if (newmode) {
-					drm_mode_probed_add(connector, newmode);
-					modes++;
-				}
-			}
-			break;
-		default:
-			break;
-		}
+		modes += add_detailed_modes(connector, timing, edid, quirks, 0);
 	}
 
 	return modes;
@@ -1066,19 +1144,19 @@ static int drm_ddc_read_edid(struct drm_connector *connector,
 			     struct i2c_adapter *adapter,
 			     char *buf, int len)
 {
-	int ret;
+	int i;
 
-	ret = drm_do_probe_ddc_edid(adapter, buf, len);
-	if (ret != 0) {
-		goto end;
-	}
-	if (!edid_is_valid((struct edid *)buf)) {
-		dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
-			 drm_get_connector_name(connector));
-		ret = -1;
+	for (i = 0; i < 4; i++) {
+		if (drm_do_probe_ddc_edid(adapter, buf, len))
+			return -1;
+		if (edid_is_valid((struct edid *)buf))
+			return 0;
 	}
-end:
-	return ret;
+
+	/* repeated checksum failures; warn, but carry on */
+	dev_warn(&connector->dev->pdev->dev, "%s: EDID invalid.\n",
+		 drm_get_connector_name(connector));
+	return -1;
 }
 
 /**
@@ -1296,6 +1374,8 @@ int drm_add_modes_noedid(struct drm_connector *connector,
 					ptr->vdisplay > vdisplay)
 				continue;
 		}
+		if (drm_mode_vrefresh(ptr) > 61)
+			continue;
 		mode = drm_mode_duplicate(dev, ptr);
 		if (mode) {
 			drm_mode_probed_add(connector, mode);

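One concrete number for the pixel-clock check in mode_in_range() above: range->pixel_clock_mhz is stored in units of 10 MHz, so a descriptor value of 15 gives max_clock = (15 * 10 + 9) * 1000 = 159000 kHz, i.e. any mode clock up to 159 MHz passes, which is what the "be forgiving" comment is about.
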
+ 16 - 7
drivers/gpu/drm/drm_fb_helper.c

@@ -373,11 +373,9 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
 					mutex_unlock(&dev->mode_config.mutex);
 				}
 			}
-			if (dpms_mode == DRM_MODE_DPMS_OFF) {
-				mutex_lock(&dev->mode_config.mutex);
-				crtc_funcs->dpms(crtc, dpms_mode);
-				mutex_unlock(&dev->mode_config.mutex);
-			}
+			mutex_lock(&dev->mode_config.mutex);
+			crtc_funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+			mutex_unlock(&dev->mode_config.mutex);
 		}
 	}
 }
@@ -385,18 +383,23 @@ static void drm_fb_helper_off(struct fb_info *info, int dpms_mode)
 int drm_fb_helper_blank(int blank, struct fb_info *info)
 {
 	switch (blank) {
+	/* Display: On; HSync: On, VSync: On */
 	case FB_BLANK_UNBLANK:
 		drm_fb_helper_on(info);
 		break;
+	/* Display: Off; HSync: On, VSync: On */
 	case FB_BLANK_NORMAL:
-		drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
+		drm_fb_helper_off(info, DRM_MODE_DPMS_ON);
 		break;
+	/* Display: Off; HSync: Off, VSync: On */
 	case FB_BLANK_HSYNC_SUSPEND:
 		drm_fb_helper_off(info, DRM_MODE_DPMS_STANDBY);
 		break;
+	/* Display: Off; HSync: On, VSync: Off */
 	case FB_BLANK_VSYNC_SUSPEND:
 		drm_fb_helper_off(info, DRM_MODE_DPMS_SUSPEND);
 		break;
+	/* Display: Off; HSync: Off, VSync: Off */
 	case FB_BLANK_POWERDOWN:
 		drm_fb_helper_off(info, DRM_MODE_DPMS_OFF);
 		break;
@@ -905,8 +908,13 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
 
 	if (new_fb) {
 		info->var.pixclock = 0;
-		if (register_framebuffer(info) < 0)
+		ret = fb_alloc_cmap(&info->cmap, modeset->crtc->gamma_size, 0);
+		if (ret)
+			return ret;
+		if (register_framebuffer(info) < 0) {
+			fb_dealloc_cmap(&info->cmap);
 			return -EINVAL;
+		}
 	} else {
 		drm_fb_helper_set_par(info);
 	}
@@ -936,6 +944,7 @@ void drm_fb_helper_free(struct drm_fb_helper *helper)
 		unregister_sysrq_key('v', &sysrq_drm_fb_helper_restore_op);
 	}
 	drm_fb_helper_crtc_free(helper);
+	fb_dealloc_cmap(&helper->fb->fbdev->cmap);
 }
 EXPORT_SYMBOL(drm_fb_helper_free);
 

+ 110 - 2
drivers/gpu/drm/drm_fops.c

@@ -257,6 +257,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 
 	INIT_LIST_HEAD(&priv->lhead);
 	INIT_LIST_HEAD(&priv->fbs);
+	INIT_LIST_HEAD(&priv->event_list);
+	init_waitqueue_head(&priv->event_wait);
+	priv->event_space = 4096; /* set aside 4k for event buffer */
 
 	if (dev->driver->driver_features & DRIVER_GEM)
 		drm_gem_open(dev, priv);
@@ -297,6 +300,18 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 				goto out_free;
 			}
 		}
+		mutex_lock(&dev->struct_mutex);
+		if (dev->driver->master_set) {
+			ret = dev->driver->master_set(dev, priv, true);
+			if (ret) {
+				/* drop both references if this fails */
+				drm_master_put(&priv->minor->master);
+				drm_master_put(&priv->master);
+				mutex_unlock(&dev->struct_mutex);
+				goto out_free;
+			}
+		}
+		mutex_unlock(&dev->struct_mutex);
 	} else {
 		/* get a reference to the master */
 		priv->master = drm_master_get(priv->minor->master);
@@ -413,6 +428,30 @@ static void drm_master_release(struct drm_device *dev, struct file *filp)
 	}
 }
 
+static void drm_events_release(struct drm_file *file_priv)
+{
+	struct drm_device *dev = file_priv->minor->dev;
+	struct drm_pending_event *e, *et;
+	struct drm_pending_vblank_event *v, *vt;
+	unsigned long flags;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+
+	/* Remove pending flips */
+	list_for_each_entry_safe(v, vt, &dev->vblank_event_list, base.link)
+		if (v->base.file_priv == file_priv) {
+			list_del(&v->base.link);
+			drm_vblank_put(dev, v->pipe);
+			v->base.destroy(&v->base);
+		}
+
+	/* Remove unconsumed events */
+	list_for_each_entry_safe(e, et, &file_priv->event_list, link)
+		e->destroy(e);
+
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
 /**
  * Release file.
  *
@@ -451,6 +490,8 @@ int drm_release(struct inode *inode, struct file *filp)
 	if (file_priv->minor->master)
 		drm_master_release(dev, filp);
 
+	drm_events_release(file_priv);
+
 	if (dev->driver->driver_features & DRIVER_GEM)
 		drm_gem_release(dev, file_priv);
 
@@ -504,6 +545,8 @@ int drm_release(struct inode *inode, struct file *filp)
 
 		if (file_priv->minor->master == file_priv->master) {
 			/* drop the reference held my the minor */
+			if (dev->driver->master_drop)
+				dev->driver->master_drop(dev, file_priv, true);
 			drm_master_put(&file_priv->minor->master);
 		}
 	}
@@ -544,9 +587,74 @@ int drm_release(struct inode *inode, struct file *filp)
 }
 EXPORT_SYMBOL(drm_release);
 
-/** No-op. */
+static bool
+drm_dequeue_event(struct drm_file *file_priv,
+		  size_t total, size_t max, struct drm_pending_event **out)
+{
+	struct drm_device *dev = file_priv->minor->dev;
+	struct drm_pending_event *e;
+	unsigned long flags;
+	bool ret = false;
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+
+	*out = NULL;
+	if (list_empty(&file_priv->event_list))
+		goto out;
+	e = list_first_entry(&file_priv->event_list,
+			     struct drm_pending_event, link);
+	if (e->event->length + total > max)
+		goto out;
+
+	file_priv->event_space += e->event->length;
+	list_del(&e->link);
+	*out = e;
+	ret = true;
+
+out:
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+	return ret;
+}
+
+ssize_t drm_read(struct file *filp, char __user *buffer,
+		 size_t count, loff_t *offset)
+{
+	struct drm_file *file_priv = filp->private_data;
+	struct drm_pending_event *e;
+	size_t total;
+	ssize_t ret;
+
+	ret = wait_event_interruptible(file_priv->event_wait,
+				       !list_empty(&file_priv->event_list));
+	if (ret < 0)
+		return ret;
+
+	total = 0;
+	while (drm_dequeue_event(file_priv, total, count, &e)) {
+		if (copy_to_user(buffer + total,
+				 e->event, e->event->length)) {
+			total = -EFAULT;
+			break;
+		}
+
+		total += e->event->length;
+		e->destroy(e);
+	}
+
+	return total;
+}
+EXPORT_SYMBOL(drm_read);
+
 unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait)
 {
-	return 0;
+	struct drm_file *file_priv = filp->private_data;
+	unsigned int mask = 0;
+
+	poll_wait(filp, &file_priv->event_wait, wait);
+
+	if (!list_empty(&file_priv->event_list))
+		mask |= POLLIN | POLLRDNORM;
+
+	return mask;
 }
 EXPORT_SYMBOL(drm_poll);
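
With drm_poll() and drm_read() in place, the DRM fd behaves like an ordinary stream of length-prefixed struct drm_event records. A hedged consumer sketch (process is a hypothetical callback, not part of the API; assumes drm.h on the include path):

	#include <poll.h>
	#include <unistd.h>
	#include <drm.h>

	/* Sketch: block until events are pending, then walk the records. */
	static void drain_drm_events(int fd, void (*process)(struct drm_event *))
	{
		struct pollfd pfd = { .fd = fd, .events = POLLIN };
		char buf[4096];
		ssize_t len, i;

		if (poll(&pfd, 1, -1) <= 0 || !(pfd.revents & POLLIN))
			return;

		len = read(fd, buf, sizeof(buf));	/* one read may return several events */
		for (i = 0; i + (ssize_t)sizeof(struct drm_event) <= len; ) {
			struct drm_event *e = (struct drm_event *)&buf[i];

			process(e);
			i += e->length;		/* length includes the header */
		}
	}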

+ 122 - 8
drivers/gpu/drm/drm_irq.c

@@ -429,15 +429,21 @@ int drm_vblank_get(struct drm_device *dev, int crtc)
 
 	spin_lock_irqsave(&dev->vbl_lock, irqflags);
 	/* Going from 0->1 means we have to enable interrupts again */
-	if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
-	    !dev->vblank_enabled[crtc]) {
-		ret = dev->driver->enable_vblank(dev, crtc);
-		DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
-		if (ret)
+	if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1) {
+		if (!dev->vblank_enabled[crtc]) {
+			ret = dev->driver->enable_vblank(dev, crtc);
+			DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
+			if (ret)
+				atomic_dec(&dev->vblank_refcount[crtc]);
+			else {
+				dev->vblank_enabled[crtc] = 1;
+				drm_update_vblank_count(dev, crtc);
+			}
+		}
+	} else {
+		if (!dev->vblank_enabled[crtc]) {
 			atomic_dec(&dev->vblank_refcount[crtc]);
-		else {
-			dev->vblank_enabled[crtc] = 1;
-			drm_update_vblank_count(dev, crtc);
+			ret = -EINVAL;
 		}
 	}
 	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
@@ -464,6 +470,18 @@ void drm_vblank_put(struct drm_device *dev, int crtc)
 }
 EXPORT_SYMBOL(drm_vblank_put);
 
+void drm_vblank_off(struct drm_device *dev, int crtc)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&dev->vbl_lock, irqflags);
+	DRM_WAKEUP(&dev->vbl_queue[crtc]);
+	dev->vblank_enabled[crtc] = 0;
+	dev->last_vblank[crtc] = dev->driver->get_vblank_counter(dev, crtc);
+	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+}
+EXPORT_SYMBOL(drm_vblank_off);
+
 /**
  * drm_vblank_pre_modeset - account for vblanks across mode sets
  * @dev: DRM device
@@ -550,6 +568,63 @@ out:
 	return ret;
 }
 
+static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
+				  union drm_wait_vblank *vblwait,
+				  struct drm_file *file_priv)
+{
+	struct drm_pending_vblank_event *e;
+	struct timeval now;
+	unsigned long flags;
+	unsigned int seq;
+
+	e = kzalloc(sizeof *e, GFP_KERNEL);
+	if (e == NULL)
+		return -ENOMEM;
+
+	e->pipe = pipe;
+	e->event.base.type = DRM_EVENT_VBLANK;
+	e->event.base.length = sizeof e->event;
+	e->event.user_data = vblwait->request.signal;
+	e->base.event = &e->event.base;
+	e->base.file_priv = file_priv;
+	e->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+
+	do_gettimeofday(&now);
+	spin_lock_irqsave(&dev->event_lock, flags);
+
+	if (file_priv->event_space < sizeof e->event) {
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+		kfree(e);
+		return -ENOMEM;
+	}
+
+	file_priv->event_space -= sizeof e->event;
+	seq = drm_vblank_count(dev, pipe);
+	if ((vblwait->request.type & _DRM_VBLANK_NEXTONMISS) &&
+	    (seq - vblwait->request.sequence) <= (1 << 23)) {
+		vblwait->request.sequence = seq + 1;
+		vblwait->reply.sequence = vblwait->request.sequence;
+	}
+
+	DRM_DEBUG("event on vblank count %d, current %d, crtc %d\n",
+		  vblwait->request.sequence, seq, pipe);
+
+	e->event.sequence = vblwait->request.sequence;
+	if ((seq - vblwait->request.sequence) <= (1 << 23)) {
+		e->event.tv_sec = now.tv_sec;
+		e->event.tv_usec = now.tv_usec;
+		drm_vblank_put(dev, e->pipe);
+		list_add_tail(&e->base.link, &e->base.file_priv->event_list);
+		wake_up_interruptible(&e->base.file_priv->event_wait);
+	} else {
+		list_add_tail(&e->base.link, &dev->vblank_event_list);
+	}
+
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+
+	return 0;
+}
+
 /**
  * Wait for VBLANK.
  *
@@ -609,6 +684,9 @@ int drm_wait_vblank(struct drm_device *dev, void *data,
 		goto done;
 	}
 
+	if (flags & _DRM_VBLANK_EVENT)
+		return drm_queue_vblank_event(dev, crtc, vblwait, file_priv);
+
 	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
 	    (seq - vblwait->request.sequence) <= (1<<23)) {
 		vblwait->request.sequence = seq + 1;
@@ -641,6 +719,38 @@ done:
 	return ret;
 }
 
+void drm_handle_vblank_events(struct drm_device *dev, int crtc)
+{
+	struct drm_pending_vblank_event *e, *t;
+	struct timeval now;
+	unsigned long flags;
+	unsigned int seq;
+
+	do_gettimeofday(&now);
+	seq = drm_vblank_count(dev, crtc);
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+
+	list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
+		if (e->pipe != crtc)
+			continue;
+		if ((seq - e->event.sequence) > (1<<23))
+			continue;
+
+		DRM_DEBUG("vblank event on %d, current %d\n",
+			  e->event.sequence, seq);
+
+		e->event.sequence = seq;
+		e->event.tv_sec = now.tv_sec;
+		e->event.tv_usec = now.tv_usec;
+		drm_vblank_put(dev, e->pipe);
+		list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+		wake_up_interruptible(&e->base.file_priv->event_wait);
+	}
+
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
 /**
  * drm_handle_vblank - handle a vblank event
  * @dev: DRM device
@@ -651,7 +761,11 @@ done:
  */
 void drm_handle_vblank(struct drm_device *dev, int crtc)
 {
+	if (!dev->num_crtcs)
+		return;
+
 	atomic_inc(&dev->_vblank_count[crtc]);
 	DRM_WAKEUP(&dev->vbl_queue[crtc]);
+	drm_handle_vblank_events(dev, crtc);
 }
 EXPORT_SYMBOL(drm_handle_vblank);
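
To exercise the queueing path above from userspace, a wait-vblank request carries the new _DRM_VBLANK_EVENT flag, so the ioctl returns immediately and the completion arrives later as a DRM_EVENT_VBLANK record on the fd. A hedged sketch for pipe 0 (error handling elided):

	#include <string.h>
	#include <sys/ioctl.h>
	#include <drm.h>

	static int request_vblank_event(int fd, void *user_data)
	{
		union drm_wait_vblank vbl;

		memset(&vbl, 0, sizeof(vbl));
		vbl.request.type = _DRM_VBLANK_RELATIVE | _DRM_VBLANK_EVENT;
		vbl.request.sequence = 1;	/* the next vblank on this pipe */
		/* request.signal is echoed back as user_data in the event */
		vbl.request.signal = (unsigned long)user_data;

		return ioctl(fd, DRM_IOCTL_WAIT_VBLANK, &vbl);
	}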

+ 109 - 1
drivers/gpu/drm/drm_mm.c

@@ -226,6 +226,44 @@ struct drm_mm_node *drm_mm_get_block_generic(struct drm_mm_node *node,
 }
 EXPORT_SYMBOL(drm_mm_get_block_generic);
 
+struct drm_mm_node *drm_mm_get_block_range_generic(struct drm_mm_node *node,
+						unsigned long size,
+						unsigned alignment,
+						unsigned long start,
+						unsigned long end,
+						int atomic)
+{
+	struct drm_mm_node *align_splitoff = NULL;
+	unsigned tmp = 0;
+	unsigned wasted = 0;
+
+	if (node->start < start)
+		wasted += start - node->start;
+	if (alignment)
+		tmp = ((node->start + wasted) % alignment);
+
+	if (tmp)
+		wasted += alignment - tmp;
+	if (wasted) {
+		align_splitoff = drm_mm_split_at_start(node, wasted, atomic);
+		if (unlikely(align_splitoff == NULL))
+			return NULL;
+	}
+
+	if (node->size == size) {
+		list_del_init(&node->fl_entry);
+		node->free = 0;
+	} else {
+		node = drm_mm_split_at_start(node, size, atomic);
+	}
+
+	if (align_splitoff)
+		drm_mm_put_block(align_splitoff);
+
+	return node;
+}
+EXPORT_SYMBOL(drm_mm_get_block_range_generic);
+
 /*
  * Put a block. Merge with the previous and / or next block if they are free.
  * Otherwise add to the free stack.
@@ -331,6 +369,56 @@ struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
 }
 EXPORT_SYMBOL(drm_mm_search_free);
 
+struct drm_mm_node *drm_mm_search_free_in_range(const struct drm_mm *mm,
+						unsigned long size,
+						unsigned alignment,
+						unsigned long start,
+						unsigned long end,
+						int best_match)
+{
+	struct list_head *list;
+	const struct list_head *free_stack = &mm->fl_entry;
+	struct drm_mm_node *entry;
+	struct drm_mm_node *best;
+	unsigned long best_size;
+	unsigned wasted;
+
+	best = NULL;
+	best_size = ~0UL;
+
+	list_for_each(list, free_stack) {
+		entry = list_entry(list, struct drm_mm_node, fl_entry);
+		wasted = 0;
+
+		if (entry->size < size)
+			continue;
+
+		if (entry->start > end || (entry->start+entry->size) < start)
+			continue;
+
+		if (entry->start < start)
+			wasted += start - entry->start;
+
+		if (alignment) {
+			register unsigned tmp = (entry->start + wasted) % alignment;
+			if (tmp)
+				wasted += alignment - tmp;
+		}
+
+		if (entry->size >= size + wasted) {
+			if (!best_match)
+				return entry;
+			if (size < best_size) {
+				best = entry;
+				best_size = entry->size;
+			}
+		}
+	}
+
+	return best;
+}
+EXPORT_SYMBOL(drm_mm_search_free_in_range);
+
 int drm_mm_clean(struct drm_mm * mm)
 {
 	struct list_head *head = &mm->ml_entry;
@@ -381,6 +469,26 @@ void drm_mm_takedown(struct drm_mm * mm)
 }
 EXPORT_SYMBOL(drm_mm_takedown);
 
+void drm_mm_debug_table(struct drm_mm *mm, const char *prefix)
+{
+	struct drm_mm_node *entry;
+	int total_used = 0, total_free = 0, total = 0;
+
+	list_for_each_entry(entry, &mm->ml_entry, ml_entry) {
+		printk(KERN_DEBUG "%s 0x%08lx-0x%08lx: %8ld: %s\n",
+			prefix, entry->start, entry->start + entry->size,
+			entry->size, entry->free ? "free" : "used");
+		total += entry->size;
+		if (entry->free)
+			total_free += entry->size;
+		else
+			total_used += entry->size;
+	}
+	printk(KERN_DEBUG "%s total: %d, used %d free %d\n", prefix, total,
+		total_used, total_free);
+}
+EXPORT_SYMBOL(drm_mm_debug_table);
+
 #if defined(CONFIG_DEBUG_FS)
 int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
 {
@@ -395,7 +503,7 @@ int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm)
 		else
 			total_used += entry->size;
 	}
-	seq_printf(m, "total: %d, used %d free %d\n", total, total_free, total_used);
+	seq_printf(m, "total: %d, used %d free %d\n", total, total_used, total_free);
 	return 0;
 }
 EXPORT_SYMBOL(drm_mm_dump_table);
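
The new range-restricted entry points pair up the same way as the existing search/get split. A minimal kernel-side sketch using only the functions added above (the 64 KiB size and 4 KiB alignment are illustrative, not values from any driver):

	#include "drmP.h"

	/* Sketch: carve a 64 KiB block, 4 KiB aligned, out of [start, end). */
	static struct drm_mm_node *alloc_in_range(struct drm_mm *mm,
						  unsigned long start,
						  unsigned long end)
	{
		struct drm_mm_node *free_node;

		free_node = drm_mm_search_free_in_range(mm, 64 * 1024, 4096,
							start, end, 0);
		if (!free_node)
			return NULL;

		return drm_mm_get_block_range_generic(free_node, 64 * 1024, 4096,
						      start, end, 0);
	}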

+ 27 - 1
drivers/gpu/drm/drm_modes.c

@@ -553,6 +553,32 @@ int drm_mode_height(struct drm_display_mode *mode)
 }
 EXPORT_SYMBOL(drm_mode_height);
 
+/** drm_mode_hsync - get the hsync of a mode
+ * @mode: mode
+ *
+ * LOCKING:
+ * None.
+ *
+ * Return @modes's hsync rate in kHz, rounded to the nearest int.
+ */
+int drm_mode_hsync(struct drm_display_mode *mode)
+{
+	unsigned int calc_val;
+
+	if (mode->hsync)
+		return mode->hsync;
+
+	if (mode->htotal < 0)
+		return 0;
+
+	calc_val = (mode->clock * 1000) / mode->htotal; /* hsync in Hz */
+	calc_val += 500;				/* round to 1000Hz */
+	calc_val /= 1000;				/* truncate to kHz */
+
+	return calc_val;
+}
+EXPORT_SYMBOL(drm_mode_hsync);
+
 /**
  * drm_mode_vrefresh - get the vrefresh of a mode
  * @mode: mode
@@ -560,7 +586,7 @@ EXPORT_SYMBOL(drm_mode_height);
  * LOCKING:
  * None.
  *
- * Return @mode's vrefresh rate or calculate it if necessary.
+ * Return @mode's vrefresh rate in Hz or calculate it if necessary.
  *
  * FIXME: why is this needed?  shouldn't vrefresh be set already?
  *

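Worked numbers for the rounding in drm_mode_hsync(), using the standard 1024x768@60 timing (pixel clock 65000 kHz, htotal 1344):

	(65000 * 1000) / 1344 = 48363 Hz
	(48363 + 500) / 1000  = 48 kHz   (true rate 48.363 kHz)
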
+ 15 - 0
drivers/gpu/drm/drm_stub.c

@@ -128,6 +128,7 @@ struct drm_master *drm_master_get(struct drm_master *master)
 	kref_get(&master->refcount);
 	return master;
 }
+EXPORT_SYMBOL(drm_master_get);
 
 static void drm_master_destroy(struct kref *kref)
 {
@@ -170,10 +171,13 @@ void drm_master_put(struct drm_master **master)
 	kref_put(&(*master)->refcount, drm_master_destroy);
 	*master = NULL;
 }
+EXPORT_SYMBOL(drm_master_put);
 
 int drm_setmaster_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv)
 {
+	int ret = 0;
+
 	if (file_priv->is_master)
 		return 0;
 
@@ -188,6 +192,13 @@ int drm_setmaster_ioctl(struct drm_device *dev, void *data,
 		mutex_lock(&dev->struct_mutex);
 		file_priv->minor->master = drm_master_get(file_priv->master);
 		file_priv->is_master = 1;
+		if (dev->driver->master_set) {
+			ret = dev->driver->master_set(dev, file_priv, false);
+			if (unlikely(ret != 0)) {
+				file_priv->is_master = 0;
+				drm_master_put(&file_priv->minor->master);
+			}
+		}
 		mutex_unlock(&dev->struct_mutex);
 	}
 
@@ -204,6 +215,8 @@ int drm_dropmaster_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 
 	mutex_lock(&dev->struct_mutex);
+	if (dev->driver->master_drop)
+		dev->driver->master_drop(dev, file_priv, false);
 	drm_master_put(&file_priv->minor->master);
 	file_priv->is_master = 0;
 	mutex_unlock(&dev->struct_mutex);
@@ -220,9 +233,11 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
 	INIT_LIST_HEAD(&dev->ctxlist);
 	INIT_LIST_HEAD(&dev->vmalist);
 	INIT_LIST_HEAD(&dev->maplist);
+	INIT_LIST_HEAD(&dev->vblank_event_list);
 
 	spin_lock_init(&dev->count_lock);
 	spin_lock_init(&dev->drw_lock);
+	spin_lock_init(&dev->event_lock);
 	init_timer(&dev->timer);
 	mutex_init(&dev->struct_mutex);
 	mutex_init(&dev->ctxlist_mutex);

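With drm_master_get()/drm_master_put() exported and the master_set and
master_drop callbacks in place, a driver can react when userspace acquires
or drops DRM master; note how a non-zero master_set return above unwinds
the SET_MASTER ioctl. A sketch of the driver-side wiring, all foo_* names
illustrative:

static int foo_master_set(struct drm_device *dev,
			  struct drm_file *file_priv, bool from_open)
{
	/* claim whatever hardware state the new master needs;
	 * returning non-zero fails the SET_MASTER ioctl */
	return 0;
}

static void foo_master_drop(struct drm_device *dev,
			    struct drm_file *file_priv, bool from_release)
{
	/* quiesce master-only hardware state */
}

static struct drm_driver foo_driver = {
	/* ... other fields ... */
	.master_set = foo_master_set,
	.master_drop = foo_master_drop,
};
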
+ 1 - 1
drivers/gpu/drm/i915/Makefile

@@ -15,7 +15,6 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
 	  intel_lvds.o \
 	  intel_bios.o \
 	  intel_dp.o \
-	  intel_dp_i2c.o \
 	  intel_hdmi.o \
 	  intel_sdvo.o \
 	  intel_modes.o \
@@ -23,6 +22,7 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
 	  intel_fb.o \
 	  intel_tv.o \
 	  intel_dvo.o \
+	  intel_overlay.o \
 	  dvo_ch7xxx.o \
 	  dvo_ch7017.o \
 	  dvo_ivch.o \

+ 5 - 4
drivers/gpu/drm/i915/dvo_ch7017.c

@@ -249,7 +249,8 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
 	if (val != CH7017_DEVICE_ID_VALUE &&
 	    val != CH7018_DEVICE_ID_VALUE &&
 	    val != CH7019_DEVICE_ID_VALUE) {
-		DRM_DEBUG("ch701x not detected, got %d: from %s Slave %d.\n",
+		DRM_DEBUG_KMS("ch701x not detected, got %d: from %s "
+				"Slave %d.\n",
 			  val, i2cbus->adapter.name,dvo->slave_addr);
 		goto fail;
 	}
@@ -284,7 +285,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
 	uint8_t horizontal_active_pixel_output, vertical_active_line_output;
 	uint8_t active_input_line_output;
 
-	DRM_DEBUG("Registers before mode setting\n");
+	DRM_DEBUG_KMS("Registers before mode setting\n");
 	ch7017_dump_regs(dvo);
 
 	/* LVDS PLL settings from page 75 of 7017-7017ds.pdf*/
@@ -346,7 +347,7 @@ static void ch7017_mode_set(struct intel_dvo_device *dvo,
 	/* Turn the LVDS back on with new settings. */
 	ch7017_write(dvo, CH7017_LVDS_POWER_DOWN, lvds_power_down);
 
-	DRM_DEBUG("Registers after mode setting\n");
+	DRM_DEBUG_KMS("Registers after mode setting\n");
 	ch7017_dump_regs(dvo);
 }
 
@@ -386,7 +387,7 @@ static void ch7017_dump_regs(struct intel_dvo_device *dvo)
 #define DUMP(reg)					\
 do {							\
 	ch7017_read(dvo, reg, &val);			\
-	DRM_DEBUG(#reg ": %02x\n", val);		\
+	DRM_DEBUG_KMS(#reg ": %02x\n", val);		\
 } while (0)
 
 	DUMP(CH7017_HORIZONTAL_ACTIVE_PIXEL_INPUT);

+ 9 - 7
drivers/gpu/drm/i915/dvo_ch7xxx.c

@@ -152,7 +152,7 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 	};
 
 	if (!ch7xxx->quiet) {
-		DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
 			  addr, i2cbus->adapter.name, dvo->slave_addr);
 	}
 	return false;
@@ -179,7 +179,7 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 		return true;
 
 	if (!ch7xxx->quiet) {
-		DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
 			  addr, i2cbus->adapter.name, dvo->slave_addr);
 	}
 
@@ -207,7 +207,8 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
 
 	name = ch7xxx_get_id(vendor);
 	if (!name) {
-		DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
+		DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
+				"slave %d.\n",
 			  vendor, adapter->name, dvo->slave_addr);
 		goto out;
 	}
@@ -217,13 +218,14 @@ static bool ch7xxx_init(struct intel_dvo_device *dvo,
 		goto out;
 
 	if (device != CH7xxx_DID) {
-		DRM_DEBUG("ch7xxx not detected; got 0x%02x from %s slave %d.\n",
+		DRM_DEBUG_KMS("ch7xxx not detected; got 0x%02x from %s "
+				"slave %d.\n",
 			  vendor, adapter->name, dvo->slave_addr);
 		goto out;
 	}
 
 	ch7xxx->quiet = false;
-	DRM_DEBUG("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n",
+	DRM_DEBUG_KMS("Detected %s chipset, vendor/device ID 0x%02x/0x%02x\n",
 		  name, vendor, device);
 	return true;
 out:
@@ -315,8 +317,8 @@ static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
 
 	for (i = 0; i < CH7xxx_NUM_REGS; i++) {
 		if ((i % 8) == 0 )
-			DRM_DEBUG("\n %02X: ", i);
-		DRM_DEBUG("%02X ", ch7xxx->mode_reg.regs[i]);
+			DRM_LOG_KMS("\n %02X: ", i);
+		DRM_LOG_KMS("%02X ", ch7xxx->mode_reg.regs[i]);
 	}
 }
 

+ 19 - 18
drivers/gpu/drm/i915/dvo_ivch.c

@@ -202,7 +202,8 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
 	};
 
 	if (!priv->quiet) {
-		DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+		DRM_DEBUG_KMS("Unable to read register 0x%02x from "
+				"%s:%02x.\n",
 			  addr, i2cbus->adapter.name, dvo->slave_addr);
 	}
 	return false;
@@ -230,7 +231,7 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
 		return true;
 
 	if (!priv->quiet) {
-		DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
 			  addr, i2cbus->adapter.name, dvo->slave_addr);
 	}
 
@@ -261,7 +262,7 @@ static bool ivch_init(struct intel_dvo_device *dvo,
 	 * the address it's responding on.
 	 */
 	if ((temp & VR00_BASE_ADDRESS_MASK) != dvo->slave_addr) {
-		DRM_DEBUG("ivch detect failed due to address mismatch "
+		DRM_DEBUG_KMS("ivch detect failed due to address mismatch "
 			  "(%d vs %d)\n",
 			  (temp & VR00_BASE_ADDRESS_MASK), dvo->slave_addr);
 		goto out;
@@ -367,41 +368,41 @@ static void ivch_dump_regs(struct intel_dvo_device *dvo)
 	uint16_t val;
 
 	ivch_read(dvo, VR00, &val);
-	DRM_DEBUG("VR00: 0x%04x\n", val);
+	DRM_LOG_KMS("VR00: 0x%04x\n", val);
 	ivch_read(dvo, VR01, &val);
-	DRM_DEBUG("VR01: 0x%04x\n", val);
+	DRM_LOG_KMS("VR01: 0x%04x\n", val);
 	ivch_read(dvo, VR30, &val);
-	DRM_DEBUG("VR30: 0x%04x\n", val);
+	DRM_LOG_KMS("VR30: 0x%04x\n", val);
 	ivch_read(dvo, VR40, &val);
-	DRM_DEBUG("VR40: 0x%04x\n", val);
+	DRM_LOG_KMS("VR40: 0x%04x\n", val);
 
 	/* GPIO registers */
 	ivch_read(dvo, VR80, &val);
-	DRM_DEBUG("VR80: 0x%04x\n", val);
+	DRM_LOG_KMS("VR80: 0x%04x\n", val);
 	ivch_read(dvo, VR81, &val);
-	DRM_DEBUG("VR81: 0x%04x\n", val);
+	DRM_LOG_KMS("VR81: 0x%04x\n", val);
 	ivch_read(dvo, VR82, &val);
-	DRM_DEBUG("VR82: 0x%04x\n", val);
+	DRM_LOG_KMS("VR82: 0x%04x\n", val);
 	ivch_read(dvo, VR83, &val);
-	DRM_DEBUG("VR83: 0x%04x\n", val);
+	DRM_LOG_KMS("VR83: 0x%04x\n", val);
 	ivch_read(dvo, VR84, &val);
-	DRM_DEBUG("VR84: 0x%04x\n", val);
+	DRM_LOG_KMS("VR84: 0x%04x\n", val);
 	ivch_read(dvo, VR85, &val);
-	DRM_DEBUG("VR85: 0x%04x\n", val);
+	DRM_LOG_KMS("VR85: 0x%04x\n", val);
 	ivch_read(dvo, VR86, &val);
-	DRM_DEBUG("VR86: 0x%04x\n", val);
+	DRM_LOG_KMS("VR86: 0x%04x\n", val);
 	ivch_read(dvo, VR87, &val);
-	DRM_DEBUG("VR87: 0x%04x\n", val);
+	DRM_LOG_KMS("VR87: 0x%04x\n", val);
 	ivch_read(dvo, VR88, &val);
-	DRM_DEBUG("VR88: 0x%04x\n", val);
+	DRM_LOG_KMS("VR88: 0x%04x\n", val);
 
 	/* Scratch register 0 - AIM Panel type */
 	ivch_read(dvo, VR8E, &val);
-	DRM_DEBUG("VR8E: 0x%04x\n", val);
+	DRM_LOG_KMS("VR8E: 0x%04x\n", val);
 
 	/* Scratch register 1 - Status register */
 	ivch_read(dvo, VR8F, &val);
-	DRM_DEBUG("VR8F: 0x%04x\n", val);
+	DRM_LOG_KMS("VR8F: 0x%04x\n", val);
 }
 
 static void ivch_save(struct intel_dvo_device *dvo)

+ 10 - 10
drivers/gpu/drm/i915/dvo_sil164.c

@@ -105,7 +105,7 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 	};
 
 	if (!sil->quiet) {
-		DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
 			  addr, i2cbus->adapter.name, dvo->slave_addr);
 	}
 	return false;
@@ -131,7 +131,7 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 		return true;
 
 	if (!sil->quiet) {
-		DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
 			  addr, i2cbus->adapter.name, dvo->slave_addr);
 	}
 
@@ -158,7 +158,7 @@ static bool sil164_init(struct intel_dvo_device *dvo,
 		goto out;
 
 	if (ch != (SIL164_VID & 0xff)) {
-		DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
+		DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n",
 			  ch, adapter->name, dvo->slave_addr);
 		goto out;
 	}
@@ -167,13 +167,13 @@ static bool sil164_init(struct intel_dvo_device *dvo,
 		goto out;
 
 	if (ch != (SIL164_DID & 0xff)) {
-		DRM_DEBUG("sil164 not detected got %d: from %s Slave %d.\n",
+		DRM_DEBUG_KMS("sil164 not detected got %d: from %s Slave %d.\n",
 			  ch, adapter->name, dvo->slave_addr);
 		goto out;
 	}
 	sil->quiet = false;
 
-	DRM_DEBUG("init sil164 dvo controller successfully!\n");
+	DRM_DEBUG_KMS("init sil164 dvo controller successfully!\n");
 	return true;
 
 out:
@@ -241,15 +241,15 @@ static void sil164_dump_regs(struct intel_dvo_device *dvo)
 	uint8_t val;
 
 	sil164_readb(dvo, SIL164_FREQ_LO, &val);
-	DRM_DEBUG("SIL164_FREQ_LO: 0x%02x\n", val);
+	DRM_LOG_KMS("SIL164_FREQ_LO: 0x%02x\n", val);
 	sil164_readb(dvo, SIL164_FREQ_HI, &val);
-	DRM_DEBUG("SIL164_FREQ_HI: 0x%02x\n", val);
+	DRM_LOG_KMS("SIL164_FREQ_HI: 0x%02x\n", val);
 	sil164_readb(dvo, SIL164_REG8, &val);
-	DRM_DEBUG("SIL164_REG8: 0x%02x\n", val);
+	DRM_LOG_KMS("SIL164_REG8: 0x%02x\n", val);
 	sil164_readb(dvo, SIL164_REG9, &val);
-	DRM_DEBUG("SIL164_REG9: 0x%02x\n", val);
+	DRM_LOG_KMS("SIL164_REG9: 0x%02x\n", val);
 	sil164_readb(dvo, SIL164_REGC, &val);
-	DRM_DEBUG("SIL164_REGC: 0x%02x\n", val);
+	DRM_LOG_KMS("SIL164_REGC: 0x%02x\n", val);
 }
 
 static void sil164_save(struct intel_dvo_device *dvo)

+ 18 - 16
drivers/gpu/drm/i915/dvo_tfp410.c

@@ -130,7 +130,7 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 	};
 
 	if (!tfp->quiet) {
-		DRM_DEBUG("Unable to read register 0x%02x from %s:%02x.\n",
+		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
 			  addr, i2cbus->adapter.name, dvo->slave_addr);
 	}
 	return false;
@@ -156,7 +156,7 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 		return true;
 
 	if (!tfp->quiet) {
-		DRM_DEBUG("Unable to write register 0x%02x to %s:%d.\n",
+		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
 			  addr, i2cbus->adapter.name, dvo->slave_addr);
 	}
 
@@ -191,13 +191,15 @@ static bool tfp410_init(struct intel_dvo_device *dvo,
 	tfp->quiet = true;
 
 	if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) {
-		DRM_DEBUG("tfp410 not detected got VID %X: from %s Slave %d.\n",
+		DRM_DEBUG_KMS("tfp410 not detected got VID %X: from %s "
+				"Slave %d.\n",
 			  id, adapter->name, dvo->slave_addr);
 		goto out;
 	}
 
 	if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) {
-		DRM_DEBUG("tfp410 not detected got DID %X: from %s Slave %d.\n",
+		DRM_DEBUG_KMS("tfp410 not detected got DID %X: from %s "
+				"Slave %d.\n",
 			  id, adapter->name, dvo->slave_addr);
 		goto out;
 	}
@@ -262,33 +264,33 @@ static void tfp410_dump_regs(struct intel_dvo_device *dvo)
 	uint8_t val, val2;
 
 	tfp410_readb(dvo, TFP410_REV, &val);
-	DRM_DEBUG("TFP410_REV: 0x%02X\n", val);
+	DRM_LOG_KMS("TFP410_REV: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_CTL_1, &val);
-	DRM_DEBUG("TFP410_CTL1: 0x%02X\n", val);
+	DRM_LOG_KMS("TFP410_CTL1: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_CTL_2, &val);
-	DRM_DEBUG("TFP410_CTL2: 0x%02X\n", val);
+	DRM_LOG_KMS("TFP410_CTL2: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_CTL_3, &val);
-	DRM_DEBUG("TFP410_CTL3: 0x%02X\n", val);
+	DRM_LOG_KMS("TFP410_CTL3: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_USERCFG, &val);
-	DRM_DEBUG("TFP410_USERCFG: 0x%02X\n", val);
+	DRM_LOG_KMS("TFP410_USERCFG: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_DE_DLY, &val);
-	DRM_DEBUG("TFP410_DE_DLY: 0x%02X\n", val);
+	DRM_LOG_KMS("TFP410_DE_DLY: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_DE_CTL, &val);
-	DRM_DEBUG("TFP410_DE_CTL: 0x%02X\n", val);
+	DRM_LOG_KMS("TFP410_DE_CTL: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_DE_TOP, &val);
-	DRM_DEBUG("TFP410_DE_TOP: 0x%02X\n", val);
+	DRM_LOG_KMS("TFP410_DE_TOP: 0x%02X\n", val);
 	tfp410_readb(dvo, TFP410_DE_CNT_LO, &val);
 	tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2);
-	DRM_DEBUG("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
+	DRM_LOG_KMS("TFP410_DE_CNT: 0x%02X%02X\n", val2, val);
 	tfp410_readb(dvo, TFP410_DE_LIN_LO, &val);
 	tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2);
-	DRM_DEBUG("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
+	DRM_LOG_KMS("TFP410_DE_LIN: 0x%02X%02X\n", val2, val);
 	tfp410_readb(dvo, TFP410_H_RES_LO, &val);
 	tfp410_readb(dvo, TFP410_H_RES_HI, &val2);
-	DRM_DEBUG("TFP410_H_RES: 0x%02X%02X\n", val2, val);
+	DRM_LOG_KMS("TFP410_H_RES: 0x%02X%02X\n", val2, val);
 	tfp410_readb(dvo, TFP410_V_RES_LO, &val);
 	tfp410_readb(dvo, TFP410_V_RES_HI, &val2);
-	DRM_DEBUG("TFP410_V_RES: 0x%02X%02X\n", val2, val);
+	DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val);
 }
 
 static void tfp410_save(struct intel_dvo_device *dvo)

+ 116 - 4
drivers/gpu/drm/i915/i915_debugfs.c

@@ -27,6 +27,7 @@
  */
 
 #include <linux/seq_file.h>
+#include <linux/debugfs.h>
 #include "drmP.h"
 #include "drm.h"
 #include "i915_drm.h"
@@ -96,13 +97,14 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 	{
 		struct drm_gem_object *obj = obj_priv->obj;
 
-		seq_printf(m, "    %p: %s %8zd %08x %08x %d %s",
+		seq_printf(m, "    %p: %s %8zd %08x %08x %d%s%s",
 			   obj,
 			   get_pin_flag(obj_priv),
 			   obj->size,
 			   obj->read_domains, obj->write_domain,
 			   obj_priv->last_rendering_seqno,
-			   obj_priv->dirty ? "dirty" : "");
+			   obj_priv->dirty ? " dirty" : "",
+			   obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
 
 		if (obj->name)
 			seq_printf(m, " (name: %d)", obj->name);
@@ -160,7 +162,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 
-	if (!IS_IGDNG(dev)) {
+	if (!IS_IRONLAKE(dev)) {
 		seq_printf(m, "Interrupt enable:    %08x\n",
 			   I915_READ(IER));
 		seq_printf(m, "Interrupt identity:  %08x\n",
@@ -412,6 +414,109 @@ static int i915_registers_info(struct seq_file *m, void *data) {
 	return 0;
 }
 
+static int
+i915_wedged_open(struct inode *inode,
+		 struct file *filp)
+{
+	filp->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t
+i915_wedged_read(struct file *filp,
+		 char __user *ubuf,
+		 size_t max,
+		 loff_t *ppos)
+{
+	struct drm_device *dev = filp->private_data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	char buf[80];
+	int len;
+
+	len = snprintf(buf, sizeof (buf),
+		       "wedged :  %d\n",
+		       atomic_read(&dev_priv->mm.wedged));
+
+	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
+}
+
+static ssize_t
+i915_wedged_write(struct file *filp,
+		  const char __user *ubuf,
+		  size_t cnt,
+		  loff_t *ppos)
+{
+	struct drm_device *dev = filp->private_data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	char buf[20];
+	int val = 1;
+
+	if (cnt > 0) {
+		if (cnt > sizeof (buf) - 1)
+			return -EINVAL;
+
+		if (copy_from_user(buf, ubuf, cnt))
+			return -EFAULT;
+		buf[cnt] = 0;
+
+		val = simple_strtoul(buf, NULL, 0);
+	}
+
+	DRM_INFO("Manually setting wedged to %d\n", val);
+
+	atomic_set(&dev_priv->mm.wedged, val);
+	if (val) {
+		DRM_WAKEUP(&dev_priv->irq_queue);
+		queue_work(dev_priv->wq, &dev_priv->error_work);
+	}
+
+	return cnt;
+}
+
+static const struct file_operations i915_wedged_fops = {
+	.owner = THIS_MODULE,
+	.open = i915_wedged_open,
+	.read = i915_wedged_read,
+	.write = i915_wedged_write,
+};
+
+/* As the drm_debugfs_init() routines are called before dev->dev_private is
+ * allocated, we need to hook into the minor for release. */
+static int
+drm_add_fake_info_node(struct drm_minor *minor,
+		       struct dentry *ent,
+		       const void *key)
+{
+	struct drm_info_node *node;
+
+	node = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
+	if (node == NULL) {
+		debugfs_remove(ent);
+		return -ENOMEM;
+	}
+
+	node->minor = minor;
+	node->dent = ent;
+	node->info_ent = (void *) key;
+	list_add(&node->list, &minor->debugfs_nodes.list);
+
+	return 0;
+}
+
+static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
+{
+	struct drm_device *dev = minor->dev;
+	struct dentry *ent;
+
+	ent = debugfs_create_file("i915_wedged",
+				  S_IRUGO | S_IWUSR,
+				  root, dev,
+				  &i915_wedged_fops);
+	if (IS_ERR(ent))
+		return PTR_ERR(ent);
+
+	return drm_add_fake_info_node(minor, ent, &i915_wedged_fops);
+}
 
 static struct drm_info_list i915_debugfs_list[] = {
 	{"i915_regs", i915_registers_info, 0},
@@ -432,6 +537,12 @@ static struct drm_info_list i915_debugfs_list[] = {
 
 int i915_debugfs_init(struct drm_minor *minor)
 {
+	int ret;
+
+	ret = i915_wedged_create(minor->debugfs_root, minor);
+	if (ret)
+		return ret;
+
 	return drm_debugfs_create_files(i915_debugfs_list,
 					I915_DEBUGFS_ENTRIES,
 					minor->debugfs_root, minor);
@@ -441,7 +552,8 @@ void i915_debugfs_cleanup(struct drm_minor *minor)
 {
 	drm_debugfs_remove_files(i915_debugfs_list,
 				 I915_DEBUGFS_ENTRIES, minor);
+	drm_debugfs_remove_files((struct drm_info_list *) &i915_wedged_fops,
+				 1, minor);
 }
 
 #endif /* CONFIG_DEBUG_FS */
-

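Because i915_wedged is writable, a test harness can trip the error path on
demand: the write handler above wakes irq_queue waiters and queues
error_work. A minimal C sketch, assuming debugfs is mounted at the
conventional /sys/kernel/debug and the device is DRM minor 0:

static void foo_force_wedged(void)
{
	int fd = open("/sys/kernel/debug/dri/0/i915_wedged", O_RDWR);
	char buf[80];

	if (fd < 0)
		return;
	write(fd, "1", 1);			/* mark the GPU wedged */
	pread(fd, buf, sizeof(buf) - 1, 0);	/* reads back "wedged :  1" */
	close(fd);
}
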
+ 28 - 12
drivers/gpu/drm/i915/i915_dma.c

@@ -807,6 +807,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_NUM_FENCES_AVAIL:
 		value = dev_priv->num_fence_regs - dev_priv->fence_reg_start;
 		break;
+	case I915_PARAM_HAS_OVERLAY:
+		value = dev_priv->overlay ? 1 : 0;
+		break;
+	case I915_PARAM_HAS_PAGEFLIPPING:
+		value = 1;
+		break;
 	default:
 		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
 					param->param);
@@ -962,7 +968,7 @@ static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
 	 * Some of the preallocated space is taken by the GTT
 	 * and popup.  GTT is 1K per MB of aperture size, and popup is 4K.
 	 */
-	if (IS_G4X(dev) || IS_IGD(dev) || IS_IGDNG(dev))
+	if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev))
 		overhead = 4096;
 	else
 		overhead = (*aperture_size / 1024) + 4096;
@@ -1048,7 +1054,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
 	int gtt_offset, gtt_size;
 
 	if (IS_I965G(dev)) {
-		if (IS_G4X(dev) || IS_IGDNG(dev)) {
+		if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
 			gtt_offset = 2*1024*1024;
 			gtt_size = 2*1024*1024;
 		} else {
@@ -1070,7 +1076,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
 
 	entry = *(volatile u32 *)(gtt + (gtt_addr / 1024));
 
-	DRM_DEBUG("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
+	DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
 
 	/* Mask out these reserved bits on this hardware. */
 	if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) ||
@@ -1096,7 +1102,7 @@ static unsigned long i915_gtt_to_phys(struct drm_device *dev,
 	phys =(entry & PTE_ADDRESS_MASK) |
 		((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));
 
-	DRM_DEBUG("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
+	DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
 
 	return phys;
 }
@@ -1306,7 +1312,7 @@ static void i915_get_mem_freq(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 tmp;
 
-	if (!IS_IGD(dev))
+	if (!IS_PINEVIEW(dev))
 		return;
 
 	tmp = I915_READ(CLKCFG);
@@ -1413,7 +1419,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	if (ret)
 		goto out_iomapfree;
 
-	dev_priv->wq = create_workqueue("i915");
+	dev_priv->wq = create_singlethread_workqueue("i915");
 	if (dev_priv->wq == NULL) {
 		DRM_ERROR("Failed to create our workqueue.\n");
 		ret = -ENOMEM;
@@ -1434,7 +1440,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
-	if (IS_G4X(dev) || IS_IGDNG(dev)) {
+	if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
 		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
 	}
@@ -1489,9 +1495,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	}
 
 	/* Must be done after probing outputs */
-	/* FIXME: verify on IGDNG */
-	if (!IS_IGDNG(dev))
-		intel_opregion_init(dev, 0);
+	intel_opregion_init(dev, 0);
 
 	setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
 		    (unsigned long) dev);
@@ -1525,6 +1529,15 @@ int i915_driver_unload(struct drm_device *dev)
 	}
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		/*
+		 * free the memory space allocated for the child device
+		 * config parsed from VBT
+		 */
+		if (dev_priv->child_dev && dev_priv->child_dev_num) {
+			kfree(dev_priv->child_dev);
+			dev_priv->child_dev = NULL;
+			dev_priv->child_dev_num = 0;
+		}
 		drm_irq_uninstall(dev);
 		vga_client_register(dev->pdev, NULL, NULL, NULL);
 	}
@@ -1535,8 +1548,7 @@ int i915_driver_unload(struct drm_device *dev)
 	if (dev_priv->regs != NULL)
 		iounmap(dev_priv->regs);
 
-	if (!IS_IGDNG(dev))
-		intel_opregion_free(dev, 0);
+	intel_opregion_free(dev, 0);
 
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 		intel_modeset_cleanup(dev);
@@ -1548,6 +1560,8 @@ int i915_driver_unload(struct drm_device *dev)
 		mutex_unlock(&dev->struct_mutex);
 		drm_mm_takedown(&dev_priv->vram);
 		i915_gem_lastclose(dev);
+
+		intel_cleanup_overlay(dev);
 	}
 
 	pci_dev_put(dev_priv->bridge_dev);
@@ -1656,6 +1670,8 @@ struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_I915_GEM_GET_APERTURE, i915_gem_get_aperture_ioctl, 0),
 	DRM_IOCTL_DEF(DRM_I915_GET_PIPE_FROM_CRTC_ID, intel_get_pipe_from_crtc_id, 0),
 	DRM_IOCTL_DEF(DRM_I915_GEM_MADVISE, i915_gem_madvise_ioctl, 0),
+	DRM_IOCTL_DEF(DRM_I915_OVERLAY_PUT_IMAGE, intel_overlay_put_image, DRM_MASTER|DRM_CONTROL_ALLOW),
+	DRM_IOCTL_DEF(DRM_I915_OVERLAY_ATTRS, intel_overlay_attrs, DRM_MASTER|DRM_CONTROL_ALLOW),
 };
 
 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);

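Userspace should gate use of the two new overlay ioctls on the matching
getparam. A sketch with plain ioctl() against an open DRM file descriptor,
using the drm_i915_getparam layout from i915_drm.h:

static int foo_has_overlay(int fd)
{
	struct drm_i915_getparam gp;
	int has_overlay = 0;

	gp.param = I915_PARAM_HAS_OVERLAY;
	gp.value = &has_overlay;
	if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) != 0)
		return 0;	/* old kernel: no overlay ioctls */
	return has_overlay;
}
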
+ 1 - 0
drivers/gpu/drm/i915/i915_drv.c

@@ -333,6 +333,7 @@ static struct drm_driver driver = {
 		 .mmap = drm_gem_mmap,
 		 .poll = drm_poll,
 		 .fasync = drm_fasync,
+		 .read = drm_read,
 #ifdef CONFIG_COMPAT
 		 .compat_ioctl = i915_compat_ioctl,
 #endif

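Routing .read to drm_read is what delivers the new vblank events to i915
clients. A sketch of the consuming side, assuming the type/length
drm_event framing introduced with this series; handle_vblank() is
hypothetical:

static void foo_drain_events(int fd)
{
	char buf[1024];
	ssize_t len = read(fd, buf, sizeof(buf));
	char *p = buf;

	while (len > 0 && p < buf + len) {
		struct drm_event *e = (struct drm_event *) p;

		if (e->type == DRM_EVENT_VBLANK)
			handle_vblank((struct drm_event_vblank *) e);
		p += e->length;
	}
}
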
+ 57 - 23
drivers/gpu/drm/i915/i915_drv.h

@@ -170,6 +170,8 @@ struct drm_i915_display_funcs {
 	/* clock gating init */
 };
 
+struct intel_overlay;
+
 typedef struct drm_i915_private {
 	struct drm_device *dev;
 
@@ -187,6 +189,7 @@ typedef struct drm_i915_private {
 	unsigned int status_gfx_addr;
 	drm_local_map_t hws_map;
 	struct drm_gem_object *hws_obj;
+	struct drm_gem_object *pwrctx;
 
 	struct resource mch_res;
 
@@ -206,11 +209,13 @@ typedef struct drm_i915_private {
 	/** Cached value of IMR to avoid reads in updating the bitfield */
 	u32 irq_mask_reg;
 	u32 pipestat[2];
-	/** splitted irq regs for graphics and display engine on IGDNG,
+	/** split irq regs for graphics and display engine on Ironlake,
 	    irq_mask_reg is still used for display irq. */
 	u32 gt_irq_mask_reg;
 	u32 gt_irq_enable_reg;
 	u32 de_irq_enable_reg;
+	u32 pch_irq_mask_reg;
+	u32 pch_irq_enable_reg;
 
 	u32 hotplug_supported_mask;
 	struct work_struct hotplug_work;
@@ -240,6 +245,9 @@ typedef struct drm_i915_private {
 
 	struct intel_opregion opregion;
 
+	/* overlay */
+	struct intel_overlay *overlay;
+
 	/* LVDS info */
 	int backlight_duty_cycle;  /* restore backlight to this value */
 	bool panel_wants_dither;
@@ -258,7 +266,7 @@ typedef struct drm_i915_private {
 
 	struct notifier_block lid_notifier;
 
-	int crt_ddc_bus; /* -1 = unknown, else GPIO to use for CRT DDC */
+	int crt_ddc_bus; /* 0 = unknown, else GPIO to use for CRT DDC */
 	struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
 	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
 	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
@@ -280,6 +288,7 @@ typedef struct drm_i915_private {
 	u32 saveDSPBCNTR;
 	u32 saveDSPARB;
 	u32 saveRENDERSTANDBY;
+	u32 savePWRCTXA;
 	u32 saveHWS;
 	u32 savePIPEACONF;
 	u32 savePIPEBCONF;
@@ -374,8 +383,6 @@ typedef struct drm_i915_private {
 	u32 saveFDI_RXA_IMR;
 	u32 saveFDI_RXB_IMR;
 	u32 saveCACHE_MODE_0;
-	u32 saveD_STATE;
-	u32 saveDSPCLK_GATE_D;
 	u32 saveMI_ARB_STATE;
 	u32 saveSWF0[16];
 	u32 saveSWF1[16];
@@ -539,13 +546,21 @@ typedef struct drm_i915_private {
 	/* indicate whether the LVDS_BORDER should be enabled or not */
 	unsigned int lvds_border_bits;
 
+	struct drm_crtc *plane_to_crtc_mapping[2];
+	struct drm_crtc *pipe_to_crtc_mapping[2];
+	wait_queue_head_t pending_flip_queue;
+
 	/* Reclocking support */
 	bool render_reclock_avail;
 	bool lvds_downclock_avail;
+	/* indicates the reduced downclock for LVDS */
+	int lvds_downclock;
 	struct work_struct idle_work;
 	struct timer_list idle_timer;
 	bool busy;
 	u16 orig_clock;
+	int child_dev_num;
+	struct child_device_config *child_dev;
 } drm_i915_private_t;
 
 /** driver private structure attached to each drm_gem_object */
@@ -638,6 +653,13 @@ struct drm_i915_gem_object {
 	 * Advice: are the backing pages purgeable?
 	 */
 	int madv;
+
+	/**
+	 * Number of crtcs where this object is currently the fb, but
+	 * will be page flipped away on the next vblank.  When it
+	 * reaches 0, dev_priv->pending_flip_queue will be woken up.
+	 */
+	atomic_t pending_flip;
 };
 
 /**
@@ -738,6 +760,8 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 void
 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 
+void intel_enable_asle(struct drm_device *dev);
+
 
 /* i915_mem.c */
 extern int i915_mem_alloc(struct drm_device *dev, void *data,
@@ -813,6 +837,9 @@ void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
 int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 		     unsigned long end);
 int i915_gem_idle(struct drm_device *dev);
+uint32_t i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
+			  uint32_t flush_domains);
+int i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
 				      int write);
@@ -824,6 +851,7 @@ void i915_gem_free_all_phys_object(struct drm_device *dev);
 int i915_gem_object_get_pages(struct drm_gem_object *obj);
 void i915_gem_object_put_pages(struct drm_gem_object *obj);
 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
+void i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
 
 void i915_gem_shrinker_init(void);
 void i915_gem_shrinker_exit(void);
@@ -863,11 +891,13 @@ extern int i915_restore_state(struct drm_device *dev);
 extern int intel_opregion_init(struct drm_device *dev, int resume);
 extern void intel_opregion_free(struct drm_device *dev, int suspend);
 extern void opregion_asle_intr(struct drm_device *dev);
+extern void ironlake_opregion_gse_intr(struct drm_device *dev);
 extern void opregion_enable_asle(struct drm_device *dev);
 #else
 static inline int intel_opregion_init(struct drm_device *dev, int resume) { return 0; }
 static inline void intel_opregion_free(struct drm_device *dev, int suspend) { return; }
 static inline void opregion_asle_intr(struct drm_device *dev) { return; }
+static inline void ironlake_opregion_gse_intr(struct drm_device *dev) { return; }
 static inline void opregion_enable_asle(struct drm_device *dev) { return; }
 #endif
 
@@ -955,8 +985,8 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 #define IS_I830(dev) ((dev)->pci_device == 0x3577)
 #define IS_845G(dev) ((dev)->pci_device == 0x2562)
 #define IS_I85X(dev) ((dev)->pci_device == 0x3582)
-#define IS_I855(dev) ((dev)->pci_device == 0x3582)
 #define IS_I865G(dev) ((dev)->pci_device == 0x2572)
+#define IS_I8XX(dev) (IS_I830(dev) || IS_845G(dev) || IS_I85X(dev) || IS_I865G(dev))
 
 #define IS_I915G(dev) ((dev)->pci_device == 0x2582 || (dev)->pci_device == 0x258a)
 #define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
@@ -990,47 +1020,51 @@ extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller);
 		     (dev)->pci_device == 0x2E42 || \
 		     IS_GM45(dev))
 
-#define IS_IGDG(dev) ((dev)->pci_device == 0xa001)
-#define IS_IGDGM(dev) ((dev)->pci_device == 0xa011)
-#define IS_IGD(dev) (IS_IGDG(dev) || IS_IGDGM(dev))
+#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
+#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW(dev) (IS_PINEVIEW_G(dev) || IS_PINEVIEW_M(dev))
 
 #define IS_G33(dev)    ((dev)->pci_device == 0x29C2 ||	\
 			(dev)->pci_device == 0x29B2 ||	\
 			(dev)->pci_device == 0x29D2 ||  \
-			(IS_IGD(dev)))
+			(IS_PINEVIEW(dev)))
 
-#define IS_IGDNG_D(dev)	((dev)->pci_device == 0x0042)
-#define IS_IGDNG_M(dev)	((dev)->pci_device == 0x0046)
-#define IS_IGDNG(dev)	(IS_IGDNG_D(dev) || IS_IGDNG_M(dev))
+#define IS_IRONLAKE_D(dev)	((dev)->pci_device == 0x0042)
+#define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
+#define IS_IRONLAKE(dev)	(IS_IRONLAKE_D(dev) || IS_IRONLAKE_M(dev))
 
 #define IS_I9XX(dev) (IS_I915G(dev) || IS_I915GM(dev) || IS_I945G(dev) || \
 		      IS_I945GM(dev) || IS_I965G(dev) || IS_G33(dev) || \
-		      IS_IGDNG(dev))
+		      IS_IRONLAKE(dev))
 
 #define IS_MOBILE(dev) (IS_I830(dev) || IS_I85X(dev) || IS_I915GM(dev) || \
 			IS_I945GM(dev) || IS_I965GM(dev) || IS_GM45(dev) || \
-			IS_IGD(dev) || IS_IGDNG_M(dev))
+			IS_PINEVIEW(dev) || IS_IRONLAKE_M(dev))
 
 #define I915_NEED_GFX_HWS(dev) (IS_G33(dev) || IS_GM45(dev) || IS_G4X(dev) || \
-				IS_IGDNG(dev))
+				IS_IRONLAKE(dev))
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
  * rows, which changed the alignment requirements and fence programming.
  */
 #define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
 						      IS_I915GM(dev)))
-#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_IGDNG(dev))
-#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_IGDNG(dev))
-#define SUPPORTS_EDP(dev)		(IS_IGDNG_M(dev))
+#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(IS_I9XX(dev) && !IS_PINEVIEW(dev))
+#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_IRONLAKE(dev))
+#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_IRONLAKE(dev))
+#define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
+#define SUPPORTS_TV(dev)		(IS_I9XX(dev) && IS_MOBILE(dev) && \
+					!IS_IRONLAKE(dev) && !IS_PINEVIEW(dev))
 #define I915_HAS_HOTPLUG(dev) (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev) || IS_I965G(dev))
 /* dsparb controlled by hw only */
-#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IGDNG(dev))
+#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
 
-#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IGDNG(dev))
-#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IGDNG(dev))
+#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev))
+#define HAS_PIPE_CXSR(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
 #define I915_HAS_FBC(dev) (IS_MOBILE(dev) && \
 			   (IS_I9XX(dev) || IS_GM45(dev)) && \
-			   !IS_IGD(dev) && \
-			   !IS_IGDNG(dev))
+			   !IS_PINEVIEW(dev) && \
+			   !IS_IRONLAKE(dev))
+#define I915_HAS_RC6(dev) (IS_I965GM(dev) || IS_GM45(dev) || IS_IRONLAKE_M(dev))
 
 #define PRIMARY_RINGBUFFER_SIZE         (128*1024)
 

+ 92 - 22
drivers/gpu/drm/i915/i915_gem.c

@@ -1288,6 +1288,7 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
 	list->hash.key = list->file_offset_node->start;
 	if (drm_ht_insert_item(&mm->offset_hash, &list->hash)) {
 		DRM_ERROR("failed to add to map hash\n");
+		ret = -ENOMEM;
 		goto out_free_mm;
 	}
 
@@ -1583,7 +1584,7 @@ i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
  *
  * Returned sequence numbers are nonzero on success.
  */
-static uint32_t
+uint32_t
 i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 		 uint32_t flush_domains)
 {
@@ -1617,7 +1618,7 @@ i915_add_request(struct drm_device *dev, struct drm_file *file_priv,
 	OUT_RING(MI_USER_INTERRUPT);
 	ADVANCE_LP_RING();
 
-	DRM_DEBUG("%d\n", seqno);
+	DRM_DEBUG_DRIVER("%d\n", seqno);
 
 	request->seqno = seqno;
 	request->emitted_jiffies = jiffies;
@@ -1820,12 +1821,8 @@ i915_gem_retire_work_handler(struct work_struct *work)
 	mutex_unlock(&dev->struct_mutex);
 }
 
-/**
- * Waits for a sequence number to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-static int
-i915_wait_request(struct drm_device *dev, uint32_t seqno)
+int
+i915_do_wait_request(struct drm_device *dev, uint32_t seqno, int interruptible)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 ier;
@@ -1837,7 +1834,7 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
 		return -EIO;
 
 	if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
-		if (IS_IGDNG(dev))
+		if (IS_IRONLAKE(dev))
 			ier = I915_READ(DEIER) | I915_READ(GTIER);
 		else
 			ier = I915_READ(IER);
@@ -1852,10 +1849,15 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
 
 		dev_priv->mm.waiting_gem_seqno = seqno;
 		i915_user_irq_get(dev);
-		ret = wait_event_interruptible(dev_priv->irq_queue,
-					       i915_seqno_passed(i915_get_gem_seqno(dev),
-								 seqno) ||
-					       atomic_read(&dev_priv->mm.wedged));
+		if (interruptible)
+			ret = wait_event_interruptible(dev_priv->irq_queue,
+				i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
+				atomic_read(&dev_priv->mm.wedged));
+		else
+			wait_event(dev_priv->irq_queue,
+				i915_seqno_passed(i915_get_gem_seqno(dev), seqno) ||
+				atomic_read(&dev_priv->mm.wedged));
+
 		i915_user_irq_put(dev);
 		dev_priv->mm.waiting_gem_seqno = 0;
 
@@ -1879,6 +1881,16 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno)
 	return ret;
 }
 
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+static int
+i915_wait_request(struct drm_device *dev, uint32_t seqno)
+{
+	return i915_do_wait_request(dev, seqno, 1);
+}
+
 static void
 i915_gem_flush(struct drm_device *dev,
 	       uint32_t invalidate_domains,
@@ -1947,7 +1959,7 @@ i915_gem_flush(struct drm_device *dev,
 #endif
 		BEGIN_LP_RING(2);
 		OUT_RING(cmd);
-		OUT_RING(0); /* noop */
+		OUT_RING(MI_NOOP);
 		ADVANCE_LP_RING();
 	}
 }
@@ -2760,6 +2772,22 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
 					    old_write_domain);
 }
 
+void
+i915_gem_object_flush_write_domain(struct drm_gem_object *obj)
+{
+	switch (obj->write_domain) {
+	case I915_GEM_DOMAIN_GTT:
+		i915_gem_object_flush_gtt_write_domain(obj);
+		break;
+	case I915_GEM_DOMAIN_CPU:
+		i915_gem_object_flush_cpu_write_domain(obj);
+		break;
+	default:
+		i915_gem_object_flush_gpu_write_domain(obj);
+		break;
+	}
+}
+
 /**
  * Moves a single object to the GTT read, and possibly write domain.
  *
@@ -3525,6 +3553,41 @@ i915_gem_check_execbuffer (struct drm_i915_gem_execbuffer *exec,
 	return 0;
 }
 
+static int
+i915_gem_wait_for_pending_flip(struct drm_device *dev,
+			       struct drm_gem_object **object_list,
+			       int count)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv;
+	DEFINE_WAIT(wait);
+	int i, ret = 0;
+
+	for (;;) {
+		prepare_to_wait(&dev_priv->pending_flip_queue,
+				&wait, TASK_INTERRUPTIBLE);
+		for (i = 0; i < count; i++) {
+			obj_priv = object_list[i]->driver_private;
+			if (atomic_read(&obj_priv->pending_flip) > 0)
+				break;
+		}
+		if (i == count)
+			break;
+
+		if (!signal_pending(current)) {
+			mutex_unlock(&dev->struct_mutex);
+			schedule();
+			mutex_lock(&dev->struct_mutex);
+			continue;
+		}
+		ret = -ERESTARTSYS;
+		break;
+	}
+	finish_wait(&dev_priv->pending_flip_queue, &wait);
+
+	return ret;
+}
+
 int
 i915_gem_execbuffer(struct drm_device *dev, void *data,
 		    struct drm_file *file_priv)
@@ -3540,7 +3603,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	int ret, ret2, i, pinned = 0;
 	uint64_t exec_offset;
 	uint32_t seqno, flush_domains, reloc_index;
-	int pin_tries;
+	int pin_tries, flips;
 
 #if WATCH_EXEC
 	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
@@ -3552,8 +3615,8 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 	/* Copy in the exec list from userland */
-	exec_list = drm_calloc_large(sizeof(*exec_list), args->buffer_count);
-	object_list = drm_calloc_large(sizeof(*object_list), args->buffer_count);
+	exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
+	object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
 	if (exec_list == NULL || object_list == NULL) {
 		DRM_ERROR("Failed to allocate exec or object list "
 			  "for %d buffers\n",
@@ -3598,20 +3661,19 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 	i915_verify_inactive(dev, __FILE__, __LINE__);
 
 	if (atomic_read(&dev_priv->mm.wedged)) {
-		DRM_ERROR("Execbuf while wedged\n");
 		mutex_unlock(&dev->struct_mutex);
 		ret = -EIO;
 		goto pre_mutex_err;
 	}
 
 	if (dev_priv->mm.suspended) {
-		DRM_ERROR("Execbuf while VT-switched.\n");
 		mutex_unlock(&dev->struct_mutex);
 		ret = -EBUSY;
 		goto pre_mutex_err;
 	}
 
 	/* Look up object handles */
+	flips = 0;
 	for (i = 0; i < args->buffer_count; i++) {
 		object_list[i] = drm_gem_object_lookup(dev, file_priv,
 						       exec_list[i].handle);
@@ -3630,6 +3692,14 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
 			goto err;
 		}
 		obj_priv->in_execbuffer = true;
+		flips += atomic_read(&obj_priv->pending_flip);
+	}
+
+	if (flips > 0) {
+		ret = i915_gem_wait_for_pending_flip(dev, object_list,
+						     args->buffer_count);
+		if (ret)
+			goto err;
 	}
 
 	/* Pin and relocate */
@@ -4356,7 +4426,7 @@ i915_gem_init_hws(struct drm_device *dev)
 	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
 	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
 	I915_READ(HWS_PGA); /* posting read */
-	DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
+	DRM_DEBUG_DRIVER("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
 
 	return 0;
 }
@@ -4614,8 +4684,8 @@ i915_gem_load(struct drm_device *dev)
 			for (i = 0; i < 8; i++)
 				I915_WRITE(FENCE_REG_945_8 + (i * 4), 0);
 	}
-
 	i915_gem_detect_bit_6_swizzle(dev);
+	init_waitqueue_head(&dev_priv->pending_flip_queue);
 }
 
 /*
@@ -4790,7 +4860,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
 	user_data = (char __user *) (uintptr_t) args->data_ptr;
 	obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
 
-	DRM_DEBUG("obj_addr %p, %lld\n", obj_addr, args->size);
+	DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
 	ret = copy_from_user(obj_addr, user_data, args->size);
 	if (ret)
 		return -EFAULT;

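i915_add_request() and i915_do_wait_request() are now non-static so other
files (the new overlay code among them) can emit and wait on requests,
with an explicit interruptible flag. A sketch of an uninterruptible wait;
foo_sync_object() is illustrative:

static int foo_sync_object(struct drm_device *dev, struct drm_gem_object *obj)
{
	uint32_t seqno;

	/* emit a request covering obj's pending GPU write, then wait
	 * for it without risking an -ERESTARTSYS unwind */
	seqno = i915_add_request(dev, NULL, obj->write_domain);
	if (seqno == 0)
		return -ENOMEM;	/* sequence numbers are nonzero on success */
	return i915_do_wait_request(dev, seqno, 0);
}
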
+ 3 - 3
drivers/gpu/drm/i915/i915_gem_tiling.c

@@ -121,7 +121,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
 				     0,   pcibios_align_resource,
 				     dev_priv->bridge_dev);
 	if (ret) {
-		DRM_DEBUG("failed bus alloc: %d\n", ret);
+		DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
 		dev_priv->mch_res.start = 0;
 		goto out;
 	}
@@ -209,8 +209,8 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
 	bool need_disable;
 
-	if (IS_IGDNG(dev)) {
-		/* On IGDNG whatever DRAM config, GPU always do
+	if (IS_IRONLAKE(dev)) {
+		/* On Ironlake, whatever the DRAM config, the GPU always uses the
 		 * same swizzling setup.
 		 */
 		swizzle_x = I915_BIT_6_SWIZZLE_9_10;

+ 99 - 64
drivers/gpu/drm/i915/i915_irq.c

@@ -43,10 +43,13 @@
  * we leave them always unmasked in IMR and then control enabling them through
  * PIPESTAT alone.
  */
-#define I915_INTERRUPT_ENABLE_FIX (I915_ASLE_INTERRUPT |		 \
-				   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
-				   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT | \
-				   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
+#define I915_INTERRUPT_ENABLE_FIX			\
+	(I915_ASLE_INTERRUPT |				\
+	 I915_DISPLAY_PIPE_A_EVENT_INTERRUPT |		\
+	 I915_DISPLAY_PIPE_B_EVENT_INTERRUPT |		\
+	 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |	\
+	 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT |	\
+	 I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT)
 
 /** Interrupts that we mask and unmask at runtime. */
 #define I915_INTERRUPT_ENABLE_VAR (I915_USER_INTERRUPT)
@@ -61,7 +64,7 @@
 					 DRM_I915_VBLANK_PIPE_B)
 
 void
-igdng_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
+ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
 	if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
 		dev_priv->gt_irq_mask_reg &= ~mask;
@@ -71,7 +74,7 @@ igdng_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
 }
 
 static inline void
-igdng_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
+ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
 	if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
 		dev_priv->gt_irq_mask_reg |= mask;
@@ -82,7 +85,7 @@ igdng_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
 
 /* For display hotplug interrupt */
 void
-igdng_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
 	if ((dev_priv->irq_mask_reg & mask) != 0) {
 		dev_priv->irq_mask_reg &= ~mask;
@@ -92,7 +95,7 @@ igdng_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 }
 
 static inline void
-igdng_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
+ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
 	if ((dev_priv->irq_mask_reg & mask) != mask) {
 		dev_priv->irq_mask_reg |= mask;
@@ -156,6 +159,20 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
 	}
 }
 
+/**
+ * intel_enable_asle - enable ASLE interrupt for OpRegion
+ */
+void intel_enable_asle(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+	if (IS_IRONLAKE(dev))
+		ironlake_enable_display_irq(dev_priv, DE_GSE);
+	else
+		i915_enable_pipestat(dev_priv, 1,
+				     I915_LEGACY_BLC_EVENT_ENABLE);
+}
+
 /**
  * i915_pipe_enabled - check if a pipe is enabled
  * @dev: DRM device
@@ -191,7 +208,8 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
 	low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
 
 	if (!i915_pipe_enabled(dev, pipe)) {
-		DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe);
+		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
+				"pipe %d\n", pipe);
 		return 0;
 	}
 
@@ -220,7 +238,8 @@ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
 	int reg = pipe ? PIPEB_FRMCOUNT_GM45 : PIPEA_FRMCOUNT_GM45;
 
 	if (!i915_pipe_enabled(dev, pipe)) {
-		DRM_DEBUG("trying to get vblank count for disabled pipe %d\n", pipe);
+		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
+					"pipe %d\n", pipe);
 		return 0;
 	}
 
@@ -250,12 +269,12 @@ static void i915_hotplug_work_func(struct work_struct *work)
 	drm_sysfs_hotplug_event(dev);
 }
 
-irqreturn_t igdng_irq_handler(struct drm_device *dev)
+irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	int ret = IRQ_NONE;
-	u32 de_iir, gt_iir, de_ier;
-	u32 new_de_iir, new_gt_iir;
+	u32 de_iir, gt_iir, de_ier, pch_iir;
+	u32 new_de_iir, new_gt_iir, new_pch_iir;
 	struct drm_i915_master_private *master_priv;
 
 	/* disable master interrupt before clearing iir  */
@@ -265,13 +284,18 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
 
 	de_iir = I915_READ(DEIIR);
 	gt_iir = I915_READ(GTIIR);
+	pch_iir = I915_READ(SDEIIR);
 
 	for (;;) {
-		if (de_iir == 0 && gt_iir == 0)
+		if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
 			break;
 
 		ret = IRQ_HANDLED;
 
+		/* clear the PCH hotplug event before clearing the CPU irq */
+		I915_WRITE(SDEIIR, pch_iir);
+		new_pch_iir = I915_READ(SDEIIR);
+
 		I915_WRITE(DEIIR, de_iir);
 		new_de_iir = I915_READ(DEIIR);
 		I915_WRITE(GTIIR, gt_iir);
@@ -291,8 +315,18 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
 			DRM_WAKEUP(&dev_priv->irq_queue);
 		}
 
+		if (de_iir & DE_GSE)
+			ironlake_opregion_gse_intr(dev);
+
+		/* check event from PCH */
+		if ((de_iir & DE_PCH_EVENT) &&
+			(pch_iir & SDE_HOTPLUG_MASK)) {
+			queue_work(dev_priv->wq, &dev_priv->hotplug_work);
+		}
+
 		de_iir = new_de_iir;
 		gt_iir = new_gt_iir;
+		pch_iir = new_pch_iir;
 	}
 
 	I915_WRITE(DEIER, de_ier);
@@ -317,19 +351,19 @@ static void i915_error_work_func(struct work_struct *work)
 	char *reset_event[] = { "RESET=1", NULL };
 	char *reset_done_event[] = { "ERROR=0", NULL };
 
-	DRM_DEBUG("generating error event\n");
+	DRM_DEBUG_DRIVER("generating error event\n");
 	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
 
 	if (atomic_read(&dev_priv->mm.wedged)) {
 		if (IS_I965G(dev)) {
-			DRM_DEBUG("resetting chip\n");
+			DRM_DEBUG_DRIVER("resetting chip\n");
 			kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
 			if (!i965_reset(dev, GDRST_RENDER)) {
 				atomic_set(&dev_priv->mm.wedged, 0);
 				kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
 			}
 		} else {
-			printk("reboot required\n");
+			DRM_DEBUG_DRIVER("reboot required\n");
 		}
 	}
 }
@@ -355,7 +389,7 @@ static void i915_capture_error_state(struct drm_device *dev)
 
 	error = kmalloc(sizeof(*error), GFP_ATOMIC);
 	if (!error) {
-		DRM_DEBUG("out ot memory, not capturing error state\n");
+		DRM_DEBUG_DRIVER("out ot memory, not capturing error state\n");
 		goto out;
 	}
 
@@ -512,7 +546,6 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
 		/*
 		 * Wakeup waiting processes so they don't hang
 		 */
-		printk("i915: Waking up sleeping processes\n");
 		DRM_WAKEUP(&dev_priv->irq_queue);
 	}
 
@@ -535,8 +568,8 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 
 	atomic_inc(&dev_priv->irq_received);
 
-	if (IS_IGDNG(dev))
-		return igdng_irq_handler(dev);
+	if (IS_IRONLAKE(dev))
+		return ironlake_irq_handler(dev);
 
 	iir = I915_READ(IIR);
 
@@ -568,14 +601,14 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 		 */
 		if (pipea_stats & 0x8000ffff) {
 			if (pipea_stats &  PIPE_FIFO_UNDERRUN_STATUS)
-				DRM_DEBUG("pipe a underrun\n");
+				DRM_DEBUG_DRIVER("pipe a underrun\n");
 			I915_WRITE(PIPEASTAT, pipea_stats);
 			irq_received = 1;
 		}
 
 		if (pipeb_stats & 0x8000ffff) {
 			if (pipeb_stats &  PIPE_FIFO_UNDERRUN_STATUS)
-				DRM_DEBUG("pipe b underrun\n");
+				DRM_DEBUG_DRIVER("pipe b underrun\n");
 			I915_WRITE(PIPEBSTAT, pipeb_stats);
 			irq_received = 1;
 		}
@@ -591,7 +624,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 		    (iir & I915_DISPLAY_PORT_INTERRUPT)) {
 			u32 hotplug_status = I915_READ(PORT_HOTPLUG_STAT);
 
-			DRM_DEBUG("hotplug event received, stat 0x%08x\n",
+			DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x\n",
 				  hotplug_status);
 			if (hotplug_status & dev_priv->hotplug_supported_mask)
 				queue_work(dev_priv->wq,
@@ -599,27 +632,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 
 			I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
 			I915_READ(PORT_HOTPLUG_STAT);
-
-			/* EOS interrupts occurs */
-			if (IS_IGD(dev) &&
-				(hotplug_status & CRT_EOS_INT_STATUS)) {
-				u32 temp;
-
-				DRM_DEBUG("EOS interrupt occurs\n");
-				/* status is already cleared */
-				temp = I915_READ(ADPA);
-				temp &= ~ADPA_DAC_ENABLE;
-				I915_WRITE(ADPA, temp);
-
-				temp = I915_READ(PORT_HOTPLUG_EN);
-				temp &= ~CRT_EOS_INT_EN;
-				I915_WRITE(PORT_HOTPLUG_EN, temp);
-
-				temp = I915_READ(PORT_HOTPLUG_STAT);
-				if (temp & CRT_EOS_INT_STATUS)
-					I915_WRITE(PORT_HOTPLUG_STAT,
-						CRT_EOS_INT_STATUS);
-			}
 		}
 
 		I915_WRITE(IIR, iir);
@@ -641,14 +653,22 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 			mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
 		}
 
+		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT)
+			intel_prepare_page_flip(dev, 0);
+
+		if (iir & I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT)
+			intel_prepare_page_flip(dev, 1);
+
 		if (pipea_stats & vblank_status) {
 			vblank++;
 			drm_handle_vblank(dev, 0);
+			intel_finish_page_flip(dev, 0);
 		}
 
 		if (pipeb_stats & vblank_status) {
 			vblank++;
 			drm_handle_vblank(dev, 1);
+			intel_finish_page_flip(dev, 1);
 		}
 
 		if ((pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS) ||
@@ -684,7 +704,7 @@ static int i915_emit_irq(struct drm_device * dev)
 
 	i915_kernel_lost_context(dev);
 
-	DRM_DEBUG("\n");
+	DRM_DEBUG_DRIVER("\n");
 
 	dev_priv->counter++;
 	if (dev_priv->counter > 0x7FFFFFFFUL)
@@ -709,8 +729,8 @@ void i915_user_irq_get(struct drm_device *dev)
 
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
 	if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1)) {
-		if (IS_IGDNG(dev))
-			igdng_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+		if (IS_IRONLAKE(dev))
+			ironlake_enable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
 		else
 			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
 	}
@@ -725,8 +745,8 @@ void i915_user_irq_put(struct drm_device *dev)
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
 	BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
 	if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0)) {
-		if (IS_IGDNG(dev))
-			igdng_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
+		if (IS_IRONLAKE(dev))
+			ironlake_disable_graphics_irq(dev_priv, GT_USER_INTERRUPT);
 		else
 			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
 	}
@@ -749,7 +769,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 	struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 	int ret = 0;
 
-	DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
+	DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
 		  READ_BREADCRUMB(dev_priv));
 
 	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
@@ -832,7 +852,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
 	if (!(pipeconf & PIPEACONF_ENABLE))
 		return -EINVAL;
 
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		return 0;
 
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
@@ -854,7 +874,7 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
 
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		return;
 
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
@@ -868,7 +888,7 @@ void i915_enable_interrupt (struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (!IS_IGDNG(dev))
+	if (!IS_IRONLAKE(dev))
 		opregion_enable_asle(dev);
 	dev_priv->irq_enabled = 1;
 }
@@ -976,7 +996,7 @@ void i915_hangcheck_elapsed(unsigned long data)
 
 /* drm_dma.h hooks
 */
-static void igdng_irq_preinstall(struct drm_device *dev)
+static void ironlake_irq_preinstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
@@ -992,14 +1012,21 @@ static void igdng_irq_preinstall(struct drm_device *dev)
 	I915_WRITE(GTIMR, 0xffffffff);
 	I915_WRITE(GTIER, 0x0);
 	(void) I915_READ(GTIER);
+
+	/* south display irq */
+	I915_WRITE(SDEIMR, 0xffffffff);
+	I915_WRITE(SDEIER, 0x0);
+	(void) I915_READ(SDEIER);
 }
 
-static int igdng_irq_postinstall(struct drm_device *dev)
+static int ironlake_irq_postinstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	/* enable kind of interrupts always enabled */
-	u32 display_mask = DE_MASTER_IRQ_CONTROL /*| DE_PCH_EVENT */;
+	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT;
 	u32 render_mask = GT_USER_INTERRUPT;
+	u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
+			   SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
 
 	dev_priv->irq_mask_reg = ~display_mask;
 	dev_priv->de_irq_enable_reg = display_mask;
@@ -1019,6 +1046,14 @@ static int igdng_irq_postinstall(struct drm_device *dev)
 	I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
 	(void) I915_READ(GTIER);
 
+	dev_priv->pch_irq_mask_reg = ~hotplug_mask;
+	dev_priv->pch_irq_enable_reg = hotplug_mask;
+
+	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
+	I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg);
+	I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
+	(void) I915_READ(SDEIER);
+
 	return 0;
 }
 
@@ -1031,8 +1066,8 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
 	INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
 	INIT_WORK(&dev_priv->error_work, i915_error_work_func);
 
-	if (IS_IGDNG(dev)) {
-		igdng_irq_preinstall(dev);
+	if (IS_IRONLAKE(dev)) {
+		ironlake_irq_preinstall(dev);
 		return;
 	}
 
@@ -1059,8 +1094,8 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
 
 	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
 
-	if (IS_IGDNG(dev))
-		return igdng_irq_postinstall(dev);
+	if (IS_IRONLAKE(dev))
+		return ironlake_irq_postinstall(dev);
 
 	/* Unmask the interrupts that we always want on. */
 	dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
@@ -1120,7 +1155,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
 	return 0;
 }
 
-static void igdng_irq_uninstall(struct drm_device *dev)
+static void ironlake_irq_uninstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	I915_WRITE(HWSTAM, 0xffffffff);
@@ -1143,8 +1178,8 @@ void i915_driver_irq_uninstall(struct drm_device * dev)
 
 	dev_priv->vblank_pipe = 0;
 
-	if (IS_IGDNG(dev)) {
-		igdng_irq_uninstall(dev);
+	if (IS_IRONLAKE(dev)) {
+		ironlake_irq_uninstall(dev);
 		return;
 	}
 

+ 81 - 11
drivers/gpu/drm/i915/i915_opregion.c

@@ -118,6 +118,10 @@ struct opregion_asle {
 #define ASLE_BACKLIGHT_FAIL    (2<<12)
 #define ASLE_PFIT_FAIL         (2<<14)
 #define ASLE_PWM_FREQ_FAIL     (2<<16)
+#define ASLE_ALS_ILLUM_FAILED	(1<<10)
+#define ASLE_BACKLIGHT_FAILED	(1<<12)
+#define ASLE_PFIT_FAILED	(1<<14)
+#define ASLE_PWM_FREQ_FAILED	(1<<16)
 
 /* ASLE backlight brightness to set */
 #define ASLE_BCLP_VALID                (1<<31)
@@ -163,7 +167,7 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 	if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE))
 		pci_write_config_dword(dev->pdev, PCI_LBPC, bclp);
 	else {
-		if (IS_IGD(dev)) {
+		if (IS_PINEVIEW(dev)) {
 			blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
 			max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >> 
 					BACKLIGHT_MODULATION_FREQ_SHIFT;
@@ -224,7 +228,7 @@ void opregion_asle_intr(struct drm_device *dev)
 	asle_req = asle->aslc & ASLE_REQ_MSK;
 
 	if (!asle_req) {
-		DRM_DEBUG("non asle set request??\n");
+		DRM_DEBUG_DRIVER("non asle set request??\n");
 		return;
 	}
 
@@ -243,6 +247,73 @@ void opregion_asle_intr(struct drm_device *dev)
 	asle->aslc = asle_stat;
 }
 
+static u32 asle_set_backlight_ironlake(struct drm_device *dev, u32 bclp)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct opregion_asle *asle = dev_priv->opregion.asle;
+	u32 cpu_pwm_ctl, pch_pwm_ctl2;
+	u32 max_backlight, level;
+
+	if (!(bclp & ASLE_BCLP_VALID))
+		return ASLE_BACKLIGHT_FAILED;
+
+	bclp &= ASLE_BCLP_MSK;
+	if (bclp > 255)
+		return ASLE_BACKLIGHT_FAILED;
+
+	cpu_pwm_ctl = I915_READ(BLC_PWM_CPU_CTL);
+	pch_pwm_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
+	/* get the max PWM frequency */
+	max_backlight = (pch_pwm_ctl2 >> 16) & BACKLIGHT_DUTY_CYCLE_MASK;
+	/* calculate the expected PWM frequency */
+	level = (bclp * max_backlight) / 255;
+	/* preserve the high 16 bits */
+	cpu_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK);
+	/* write the updated PWM frequency */
+	I915_WRITE(BLC_PWM_CPU_CTL, cpu_pwm_ctl | level);
+
+	asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
+
+	return 0;
+}
+
+void ironlake_opregion_gse_intr(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct opregion_asle *asle = dev_priv->opregion.asle;
+	u32 asle_stat = 0;
+	u32 asle_req;
+
+	if (!asle)
+		return;
+
+	asle_req = asle->aslc & ASLE_REQ_MSK;
+
+	if (!asle_req) {
+		DRM_DEBUG_DRIVER("non asle set request??\n");
+		return;
+	}
+
+	if (asle_req & ASLE_SET_ALS_ILLUM) {
+		DRM_DEBUG_DRIVER("Illum is not supported\n");
+		asle_stat |= ASLE_ALS_ILLUM_FAILED;
+	}
+
+	if (asle_req & ASLE_SET_BACKLIGHT)
+		asle_stat |= asle_set_backlight_ironlake(dev, asle->bclp);
+
+	if (asle_req & ASLE_SET_PFIT) {
+		DRM_DEBUG_DRIVER("Pfit is not supported\n");
+		asle_stat |= ASLE_PFIT_FAILED;
+	}
+
+	if (asle_req & ASLE_SET_PWM_FREQ) {
+		DRM_DEBUG_DRIVER("PWM freq is not supported\n");
+		asle_stat |= ASLE_PWM_FREQ_FAILED;
+	}
+
+	asle->aslc = asle_stat;
+}
 #define ASLE_ALS_EN    (1<<0)
 #define ASLE_BLC_EN    (1<<1)
 #define ASLE_PFIT_EN   (1<<2)
@@ -258,8 +329,7 @@ void opregion_enable_asle(struct drm_device *dev)
 			unsigned long irqflags;
 
 			spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
-			i915_enable_pipestat(dev_priv, 1,
-					     I915_LEGACY_BLC_EVENT_ENABLE);
+			intel_enable_asle(dev);
 			spin_unlock_irqrestore(&dev_priv->user_irq_lock,
 					       irqflags);
 		}
@@ -361,9 +431,9 @@ int intel_opregion_init(struct drm_device *dev, int resume)
 	int err = 0;
 
 	pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
-	DRM_DEBUG("graphic opregion physical addr: 0x%x\n", asls);
+	DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
 	if (asls == 0) {
-		DRM_DEBUG("ACPI OpRegion not supported!\n");
+		DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
 		return -ENOTSUPP;
 	}
 
@@ -373,30 +443,30 @@ int intel_opregion_init(struct drm_device *dev, int resume)
 
 	opregion->header = base;
 	if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) {
-		DRM_DEBUG("opregion signature mismatch\n");
+		DRM_DEBUG_DRIVER("opregion signature mismatch\n");
 		err = -EINVAL;
 		goto err_out;
 	}
 
 	mboxes = opregion->header->mboxes;
 	if (mboxes & MBOX_ACPI) {
-		DRM_DEBUG("Public ACPI methods supported\n");
+		DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
 		opregion->acpi = base + OPREGION_ACPI_OFFSET;
 		if (drm_core_check_feature(dev, DRIVER_MODESET))
 			intel_didl_outputs(dev);
 	} else {
-		DRM_DEBUG("Public ACPI methods not supported\n");
+		DRM_DEBUG_DRIVER("Public ACPI methods not supported\n");
 		err = -ENOTSUPP;
 		goto err_out;
 	}
 	opregion->enabled = 1;
 
 	if (mboxes & MBOX_SWSCI) {
-		DRM_DEBUG("SWSCI supported\n");
+		DRM_DEBUG_DRIVER("SWSCI supported\n");
 		opregion->swsci = base + OPREGION_SWSCI_OFFSET;
 	}
 	if (mboxes & MBOX_ASLE) {
-		DRM_DEBUG("ASLE supported\n");
+		DRM_DEBUG_DRIVER("ASLE supported\n");
 		opregion->asle = base + OPREGION_ASLE_OFFSET;
 		opregion_enable_asle(dev);
 	}

+ 47 - 24
drivers/gpu/drm/i915/i915_reg.h

@@ -140,6 +140,7 @@
 #define MI_NOOP			MI_INSTR(0, 0)
 #define MI_USER_INTERRUPT	MI_INSTR(0x02, 0)
 #define MI_WAIT_FOR_EVENT       MI_INSTR(0x03, 0)
+#define   MI_WAIT_FOR_OVERLAY_FLIP	(1<<16)
 #define   MI_WAIT_FOR_PLANE_B_FLIP      (1<<6)
 #define   MI_WAIT_FOR_PLANE_A_FLIP      (1<<2)
 #define   MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
@@ -151,7 +152,13 @@
 #define   MI_END_SCENE		(1 << 4) /* flush binner and incr scene count */
 #define MI_BATCH_BUFFER_END	MI_INSTR(0x0a, 0)
 #define MI_REPORT_HEAD		MI_INSTR(0x07, 0)
+#define MI_OVERLAY_FLIP		MI_INSTR(0x11, 0)
+#define   MI_OVERLAY_CONTINUE	(0x0<<21)
+#define   MI_OVERLAY_ON		(0x1<<21)
+#define   MI_OVERLAY_OFF	(0x2<<21)
 #define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
+#define MI_DISPLAY_FLIP		MI_INSTR(0x14, 2)
+#define   MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
 #define MI_STORE_DWORD_IMM	MI_INSTR(0x20, 1)
 #define   MI_MEM_VIRTUAL	(1 << 22) /* 965+ only */
 #define MI_STORE_DWORD_INDEX	MI_INSTR(0x21, 1)
@@ -260,6 +267,8 @@
 #define HWS_PGA		0x02080
 #define HWS_ADDRESS_MASK	0xfffff000
 #define HWS_START_ADDRESS_SHIFT	4
+#define PWRCTXA		0x2088 /* 965GM+ only */
+#define   PWRCTX_EN	(1<<0)
 #define IPEIR		0x02088
 #define IPEHR		0x0208c
 #define INSTDONE	0x02090
@@ -405,6 +414,13 @@
 # define GPIO_DATA_VAL_IN		(1 << 12)
 # define GPIO_DATA_PULLUP_DISABLE	(1 << 13)
 
+#define GMBUS0			0x5100
+#define GMBUS1			0x5104
+#define GMBUS2			0x5108
+#define GMBUS3			0x510c
+#define GMBUS4			0x5110
+#define GMBUS5			0x5120
+
 /*
  * Clock control & power management
  */
@@ -435,7 +451,7 @@
 #define   DPLLB_LVDS_P2_CLOCK_DIV_7	(1 << 24) /* i915 */
 #define   DPLL_P2_CLOCK_DIV_MASK	0x03000000 /* i915 */
 #define   DPLL_FPA01_P1_POST_DIV_MASK	0x00ff0000 /* i915 */
-#define   DPLL_FPA01_P1_POST_DIV_MASK_IGD	0x00ff8000 /* IGD */
+#define   DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW	0x00ff8000 /* Pineview */
 
 #define I915_FIFO_UNDERRUN_STATUS		(1UL<<31)
 #define I915_CRC_ERROR_ENABLE			(1UL<<29)
@@ -512,7 +528,7 @@
  */
 #define   DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS	0x003f0000
 #define   DPLL_FPA01_P1_POST_DIV_SHIFT	16
-#define   DPLL_FPA01_P1_POST_DIV_SHIFT_IGD 15
+#define   DPLL_FPA01_P1_POST_DIV_SHIFT_PINEVIEW 15
 /* i830, required in DVO non-gang */
 #define   PLL_P2_DIVIDE_BY_4		(1 << 23)
 #define   PLL_P1_DIVIDE_BY_TWO		(1 << 21) /* i830 */
@@ -522,7 +538,7 @@
 #define   PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
 #define   PLL_REF_INPUT_MASK		(3 << 13)
 #define   PLL_LOAD_PULSE_PHASE_SHIFT		9
-/* IGDNG */
+/* Ironlake */
 # define PLL_REF_SDVO_HDMI_MULTIPLIER_SHIFT     9
 # define PLL_REF_SDVO_HDMI_MULTIPLIER_MASK      (7 << 9)
 # define PLL_REF_SDVO_HDMI_MULTIPLIER(x)	(((x)-1) << 9)
@@ -586,12 +602,12 @@
 #define FPB0	0x06048
 #define FPB1	0x0604c
 #define   FP_N_DIV_MASK		0x003f0000
-#define   FP_N_IGD_DIV_MASK	0x00ff0000
+#define   FP_N_PINEVIEW_DIV_MASK	0x00ff0000
 #define   FP_N_DIV_SHIFT		16
 #define   FP_M1_DIV_MASK	0x00003f00
 #define   FP_M1_DIV_SHIFT		 8
 #define   FP_M2_DIV_MASK	0x0000003f
-#define   FP_M2_IGD_DIV_MASK	0x000000ff
+#define   FP_M2_PINEVIEW_DIV_MASK	0x000000ff
 #define   FP_M2_DIV_SHIFT		 0
 #define DPLL_TEST	0x606c
 #define   DPLLB_TEST_SDVO_DIV_1		(0 << 22)
@@ -769,7 +785,8 @@
 
 /** GM965 GM45 render standby register */
 #define MCHBAR_RENDER_STANDBY	0x111B8
-
+#define   RCX_SW_EXIT		(1<<23)
+#define   RSX_STATUS_MASK	0x00700000
 #define PEG_BAND_GAP_DATA	0x14d68
 
 /*
@@ -844,7 +861,6 @@
 #define   SDVOB_HOTPLUG_INT_EN			(1 << 26)
 #define   SDVOC_HOTPLUG_INT_EN			(1 << 25)
 #define   TV_HOTPLUG_INT_EN			(1 << 18)
-#define   CRT_EOS_INT_EN			(1 << 10)
 #define   CRT_HOTPLUG_INT_EN			(1 << 9)
 #define   CRT_HOTPLUG_FORCE_DETECT		(1 << 3)
 #define CRT_HOTPLUG_ACTIVATION_PERIOD_32	(0 << 8)
@@ -868,7 +884,6 @@
 			 HDMID_HOTPLUG_INT_EN |	  \
 			 SDVOB_HOTPLUG_INT_EN |	  \
 			 SDVOC_HOTPLUG_INT_EN |	  \
-			 TV_HOTPLUG_INT_EN |	  \
 			 CRT_HOTPLUG_INT_EN)
 
 
@@ -879,7 +894,6 @@
 #define   DPC_HOTPLUG_INT_STATUS		(1 << 28)
 #define   HDMID_HOTPLUG_INT_STATUS		(1 << 27)
 #define   DPD_HOTPLUG_INT_STATUS		(1 << 27)
-#define   CRT_EOS_INT_STATUS			(1 << 12)
 #define   CRT_HOTPLUG_INT_STATUS		(1 << 11)
 #define   TV_HOTPLUG_INT_STATUS			(1 << 10)
 #define   CRT_HOTPLUG_MONITOR_MASK		(3 << 8)
@@ -1620,7 +1634,7 @@
 #define   DP_CLOCK_OUTPUT_ENABLE	(1 << 13)
 
 #define   DP_SCRAMBLING_DISABLE		(1 << 12)
-#define   DP_SCRAMBLING_DISABLE_IGDNG	(1 << 7)
+#define   DP_SCRAMBLING_DISABLE_IRONLAKE	(1 << 7)
 
 /** limit RGB values to avoid confusing TVs */
 #define   DP_COLOR_RANGE_16_235		(1 << 8)
@@ -1808,7 +1822,7 @@
 #define DSPFW3			0x7003c
 #define   DSPFW_HPLL_SR_EN	(1<<31)
 #define   DSPFW_CURSOR_SR_SHIFT	24
-#define   IGD_SELF_REFRESH_EN	(1<<30)
+#define   PINEVIEW_SELF_REFRESH_EN	(1<<30)
 
 /* FIFO watermark sizes etc */
 #define G4X_FIFO_LINE_SIZE	64
@@ -1824,16 +1838,16 @@
 #define G4X_MAX_WM		0x3f
 #define I915_MAX_WM		0x3f
 
-#define IGD_DISPLAY_FIFO	512 /* in 64byte unit */
-#define IGD_FIFO_LINE_SIZE	64
-#define IGD_MAX_WM		0x1ff
-#define IGD_DFT_WM		0x3f
-#define IGD_DFT_HPLLOFF_WM	0
-#define IGD_GUARD_WM		10
-#define IGD_CURSOR_FIFO		64
-#define IGD_CURSOR_MAX_WM	0x3f
-#define IGD_CURSOR_DFT_WM	0
-#define IGD_CURSOR_GUARD_WM	5
+#define PINEVIEW_DISPLAY_FIFO	512 /* in 64byte unit */
+#define PINEVIEW_FIFO_LINE_SIZE	64
+#define PINEVIEW_MAX_WM		0x1ff
+#define PINEVIEW_DFT_WM		0x3f
+#define PINEVIEW_DFT_HPLLOFF_WM	0
+#define PINEVIEW_GUARD_WM		10
+#define PINEVIEW_CURSOR_FIFO		64
+#define PINEVIEW_CURSOR_MAX_WM	0x3f
+#define PINEVIEW_CURSOR_DFT_WM	0
+#define PINEVIEW_CURSOR_GUARD_WM	5
 
 /*
  * The two pipe frame counter registers are not synchronized, so
@@ -1907,6 +1921,7 @@
 #define   DISPPLANE_16BPP			(0x5<<26)
 #define   DISPPLANE_32BPP_NO_ALPHA		(0x6<<26)
 #define   DISPPLANE_32BPP			(0x7<<26)
+#define   DISPPLANE_32BPP_30BIT_NO_ALPHA	(0xa<<26)
 #define   DISPPLANE_STEREO_ENABLE		(1<<25)
 #define   DISPPLANE_STEREO_DISABLE		0
 #define   DISPPLANE_SEL_PIPE_MASK		(1<<24)
@@ -1918,7 +1933,7 @@
 #define   DISPPLANE_NO_LINE_DOUBLE		0
 #define   DISPPLANE_STEREO_POLARITY_FIRST	0
 #define   DISPPLANE_STEREO_POLARITY_SECOND	(1<<18)
-#define   DISPPLANE_TRICKLE_FEED_DISABLE	(1<<14) /* IGDNG */
+#define   DISPPLANE_TRICKLE_FEED_DISABLE	(1<<14) /* Ironlake */
 #define   DISPPLANE_TILED			(1<<10)
 #define DSPAADDR		0x70184
 #define DSPASTRIDE		0x70188
@@ -1971,7 +1986,7 @@
 # define VGA_2X_MODE				(1 << 30)
 # define VGA_PIPE_B_SELECT			(1 << 29)
 
-/* IGDNG */
+/* Ironlake */
 
 #define CPU_VGACNTRL	0x41000
 
@@ -2117,6 +2132,7 @@
 #define SDE_PORTC_HOTPLUG       (1 << 9)
 #define SDE_PORTB_HOTPLUG       (1 << 8)
 #define SDE_SDVOB_HOTPLUG       (1 << 6)
+#define SDE_HOTPLUG_MASK	(0xf << 8)
 
 #define SDEISR  0xc4000
 #define SDEIMR  0xc4004
@@ -2157,6 +2173,13 @@
 #define PCH_GPIOE               0xc5020
 #define PCH_GPIOF               0xc5024
 
+#define PCH_GMBUS0		0xc5100
+#define PCH_GMBUS1		0xc5104
+#define PCH_GMBUS2		0xc5108
+#define PCH_GMBUS3		0xc510c
+#define PCH_GMBUS4		0xc5110
+#define PCH_GMBUS5		0xc5120
+
 #define PCH_DPLL_A              0xc6014
 #define PCH_DPLL_B              0xc6018
 
@@ -2292,7 +2315,7 @@
 #define  FDI_DP_PORT_WIDTH_X3           (2<<19)
 #define  FDI_DP_PORT_WIDTH_X4           (3<<19)
 #define  FDI_TX_ENHANCE_FRAME_ENABLE    (1<<18)
-/* IGDNG: hardwired to 1 */
+/* Ironlake: hardwired to 1 */
 #define  FDI_TX_PLL_ENABLE              (1<<14)
 /* both Tx and Rx */
 #define  FDI_SCRAMBLING_ENABLE          (0<<7)

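The renamed definitions above all follow the register file's mask-and-shift
idiom: a field is described by a mask plus a shift, e.g. the Pineview P1
post divider (DPLL_FPA01_P1_POST_DIV_MASK_PINEVIEW, 0x00ff8000, with shift
15). A self-contained illustration of reading and writing such a field
(the helper names are made up; the mask and shift are the ones above):

    #include <stdio.h>
    #include <stdint.h>

    #define P1_MASK_PINEVIEW   0x00ff8000
    #define P1_SHIFT_PINEVIEW  15

    /* Extract a field located by mask+shift. */
    static uint32_t field_get(uint32_t reg, uint32_t mask, int shift)
    {
        return (reg & mask) >> shift;
    }

    /* Replace a field without disturbing the rest of the register. */
    static uint32_t field_set(uint32_t reg, uint32_t mask, int shift,
                              uint32_t v)
    {
        return (reg & ~mask) | ((v << shift) & mask);
    }

    int main(void)
    {
        uint32_t dpll = field_set(0, P1_MASK_PINEVIEW, P1_SHIFT_PINEVIEW,
                                  1 << 2);
        printf("dpll=0x%08x p1=0x%x\n", dpll,
               field_get(dpll, P1_MASK_PINEVIEW, P1_SHIFT_PINEVIEW));
        return 0;
    }
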
+ 44 - 42
drivers/gpu/drm/i915/i915_suspend.c

@@ -27,14 +27,14 @@
 #include "drmP.h"
 #include "drm.h"
 #include "i915_drm.h"
-#include "i915_drv.h"
+#include "intel_drv.h"
 
 static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32	dpll_reg;
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		dpll_reg = (pipe == PIPE_A) ? PCH_DPLL_A: PCH_DPLL_B;
 	} else {
 		dpll_reg = (pipe == PIPE_A) ? DPLL_A: DPLL_B;
@@ -53,7 +53,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
 	if (!i915_pipe_enabled(dev, pipe))
 		return;
 
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
 
 	if (pipe == PIPE_A)
@@ -75,7 +75,7 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
 	if (!i915_pipe_enabled(dev, pipe))
 		return;
 
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		reg = (pipe == PIPE_A) ? LGC_PALETTE_A : LGC_PALETTE_B;
 
 	if (pipe == PIPE_A)
@@ -239,7 +239,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
 		dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
 	}
@@ -247,7 +247,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
 	/* Pipe & plane A info */
 	dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
 	dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		dev_priv->saveFPA0 = I915_READ(PCH_FPA0);
 		dev_priv->saveFPA1 = I915_READ(PCH_FPA1);
 		dev_priv->saveDPLL_A = I915_READ(PCH_DPLL_A);
@@ -256,7 +256,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
 		dev_priv->saveFPA1 = I915_READ(FPA1);
 		dev_priv->saveDPLL_A = I915_READ(DPLL_A);
 	}
-	if (IS_I965G(dev) && !IS_IGDNG(dev))
+	if (IS_I965G(dev) && !IS_IRONLAKE(dev))
 		dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
 	dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
 	dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
@@ -264,10 +264,10 @@ static void i915_save_modeset_reg(struct drm_device *dev)
 	dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
 	dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
 	dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
-	if (!IS_IGDNG(dev))
+	if (!IS_IRONLAKE(dev))
 		dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1);
 		dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1);
 		dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1);
@@ -304,7 +304,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
 	/* Pipe & plane B info */
 	dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
 	dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		dev_priv->saveFPB0 = I915_READ(PCH_FPB0);
 		dev_priv->saveFPB1 = I915_READ(PCH_FPB1);
 		dev_priv->saveDPLL_B = I915_READ(PCH_DPLL_B);
@@ -313,7 +313,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
 		dev_priv->saveFPB1 = I915_READ(FPB1);
 		dev_priv->saveDPLL_B = I915_READ(DPLL_B);
 	}
-	if (IS_I965G(dev) && !IS_IGDNG(dev))
+	if (IS_I965G(dev) && !IS_IRONLAKE(dev))
 		dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
 	dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
 	dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
@@ -321,10 +321,10 @@ static void i915_save_modeset_reg(struct drm_device *dev)
 	dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
 	dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
 	dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
-	if (!IS_IGDNG(dev))
+	if (!IS_IRONLAKE(dev))
 		dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1);
 		dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1);
 		dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1);
@@ -369,7 +369,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
 	if (drm_core_check_feature(dev, DRIVER_MODESET))
 		return;
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		dpll_a_reg = PCH_DPLL_A;
 		dpll_b_reg = PCH_DPLL_B;
 		fpa0_reg = PCH_FPA0;
@@ -385,7 +385,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
 		fpb1_reg = FPB1;
 	}
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL);
 		I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL);
 	}
@@ -402,7 +402,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
 	/* Actually enable it */
 	I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
 	DRM_UDELAY(150);
-	if (IS_I965G(dev) && !IS_IGDNG(dev))
+	if (IS_I965G(dev) && !IS_IRONLAKE(dev))
 		I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
 	DRM_UDELAY(150);
 
@@ -413,10 +413,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
 	I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
 	I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
 	I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
-	if (!IS_IGDNG(dev))
+	if (!IS_IRONLAKE(dev))
 		I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
 		I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
 		I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
@@ -467,7 +467,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
 	/* Actually enable it */
 	I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
 	DRM_UDELAY(150);
-	if (IS_I965G(dev) && !IS_IGDNG(dev))
+	if (IS_I965G(dev) && !IS_IRONLAKE(dev))
 		I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
 	DRM_UDELAY(150);
 
@@ -478,10 +478,10 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
 	I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
 	I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
 	I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
-	if (!IS_IGDNG(dev))
+	if (!IS_IRONLAKE(dev))
 		I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
 		I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
 		I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
@@ -546,14 +546,14 @@ void i915_save_display(struct drm_device *dev)
 		dev_priv->saveCURSIZE = I915_READ(CURSIZE);
 
 	/* CRT state */
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		dev_priv->saveADPA = I915_READ(PCH_ADPA);
 	} else {
 		dev_priv->saveADPA = I915_READ(ADPA);
 	}
 
 	/* LVDS state */
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		dev_priv->savePP_CONTROL = I915_READ(PCH_PP_CONTROL);
 		dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_PCH_CTL1);
 		dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_PCH_CTL2);
@@ -571,10 +571,10 @@ void i915_save_display(struct drm_device *dev)
 			dev_priv->saveLVDS = I915_READ(LVDS);
 	}
 
-	if (!IS_I830(dev) && !IS_845G(dev) && !IS_IGDNG(dev))
+	if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev))
 		dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		dev_priv->savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
 		dev_priv->savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
 		dev_priv->savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
@@ -614,7 +614,7 @@ void i915_save_display(struct drm_device *dev)
 	dev_priv->saveVGA0 = I915_READ(VGA0);
 	dev_priv->saveVGA1 = I915_READ(VGA1);
 	dev_priv->saveVGA_PD = I915_READ(VGA_PD);
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		dev_priv->saveVGACNTRL = I915_READ(CPU_VGACNTRL);
 	else
 		dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
@@ -656,24 +656,24 @@ void i915_restore_display(struct drm_device *dev)
 		I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
 
 	/* CRT state */
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
 	else
 		I915_WRITE(ADPA, dev_priv->saveADPA);
 
 	/* LVDS state */
-	if (IS_I965G(dev) && !IS_IGDNG(dev))
+	if (IS_I965G(dev) && !IS_IRONLAKE(dev))
 		I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		I915_WRITE(PCH_LVDS, dev_priv->saveLVDS);
 	} else if (IS_MOBILE(dev) && !IS_I830(dev))
 		I915_WRITE(LVDS, dev_priv->saveLVDS);
 
-	if (!IS_I830(dev) && !IS_845G(dev) && !IS_IGDNG(dev))
+	if (!IS_I830(dev) && !IS_845G(dev) && !IS_IRONLAKE(dev))
 		I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		I915_WRITE(BLC_PWM_PCH_CTL1, dev_priv->saveBLC_PWM_CTL);
 		I915_WRITE(BLC_PWM_PCH_CTL2, dev_priv->saveBLC_PWM_CTL2);
 		I915_WRITE(BLC_PWM_CPU_CTL, dev_priv->saveBLC_CPU_PWM_CTL);
@@ -713,7 +713,7 @@ void i915_restore_display(struct drm_device *dev)
 	}
 
 	/* VGA state */
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		I915_WRITE(CPU_VGACNTRL, dev_priv->saveVGACNTRL);
 	else
 		I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
@@ -733,8 +733,10 @@ int i915_save_state(struct drm_device *dev)
 	pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
 
 	/* Render Standby */
-	if (IS_I965G(dev) && IS_MOBILE(dev))
+	if (I915_HAS_RC6(dev)) {
 		dev_priv->saveRENDERSTANDBY = I915_READ(MCHBAR_RENDER_STANDBY);
+		dev_priv->savePWRCTXA = I915_READ(PWRCTXA);
+	}
 
 	/* Hardware status page */
 	dev_priv->saveHWS = I915_READ(HWS_PGA);
@@ -742,7 +744,7 @@ int i915_save_state(struct drm_device *dev)
 	i915_save_display(dev);
 
 	/* Interrupt state */
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		dev_priv->saveDEIER = I915_READ(DEIER);
 		dev_priv->saveDEIMR = I915_READ(DEIMR);
 		dev_priv->saveGTIER = I915_READ(GTIER);
@@ -754,10 +756,6 @@ int i915_save_state(struct drm_device *dev)
 		dev_priv->saveIMR = I915_READ(IMR);
 	}
 
-	/* Clock gating state */
-	dev_priv->saveD_STATE = I915_READ(D_STATE);
-	dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D); /* Not sure about this */
-
 	/* Cache mode state */
 	dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
 
@@ -796,8 +794,10 @@ int i915_restore_state(struct drm_device *dev)
 	pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
 
 	/* Render Standby */
-	if (IS_I965G(dev) && IS_MOBILE(dev))
+	if (I915_HAS_RC6(dev)) {
 		I915_WRITE(MCHBAR_RENDER_STANDBY, dev_priv->saveRENDERSTANDBY);
+		I915_WRITE(PWRCTXA, dev_priv->savePWRCTXA);
+	}
 
 	/* Hardware status page */
 	I915_WRITE(HWS_PGA, dev_priv->saveHWS);
@@ -817,7 +817,7 @@ int i915_restore_state(struct drm_device *dev)
 	i915_restore_display(dev);
 
 	/* Interrupt state */
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		I915_WRITE(DEIER, dev_priv->saveDEIER);
 		I915_WRITE(DEIMR, dev_priv->saveDEIMR);
 		I915_WRITE(GTIER, dev_priv->saveGTIER);
@@ -830,8 +830,7 @@ int i915_restore_state(struct drm_device *dev)
 	}
 
 	/* Clock gating state */
-	I915_WRITE (D_STATE, dev_priv->saveD_STATE);
-	I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
+	intel_init_clock_gating(dev);
 
 	/* Cache mode state */
 	I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
@@ -846,6 +845,9 @@ int i915_restore_state(struct drm_device *dev)
 	for (i = 0; i < 3; i++)
 		I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
 
+	/* I2C state */
+	intel_i2c_reset_gmbus(dev);
+
 	return 0;
 }
 

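The save/restore paths above are essentially one long register list walked
twice, with IS_IRONLAKE() swapping in PCH offsets; the clock-gating restore
is now delegated to intel_init_clock_gating() instead of replaying saved
values. A compact userspace model of the table idiom (the offsets, the fake
MMIO array, and the helper names are all stand-ins):

    #include <stdio.h>
    #include <stdint.h>

    struct reg_save { uint32_t offset; uint32_t saved; };

    static uint32_t mmio[0x10000 / 4]; /* stand-in for the real MMIO BAR */

    static uint32_t rd(uint32_t off) { return mmio[off / 4]; }
    static void wr(uint32_t off, uint32_t v) { mmio[off / 4] = v; }

    static void save_regs(struct reg_save *t, int n)
    {
        for (int i = 0; i < n; i++)
            t[i].saved = rd(t[i].offset);
    }

    static void restore_regs(const struct reg_save *t, int n)
    {
        for (int i = 0; i < n; i++)
            wr(t[i].offset, t[i].saved);
    }

    int main(void)
    {
        struct reg_save pipe_a[] = { { 0x6014, 0 }, { 0x6040, 0 } };
        wr(0x6014, 0x80000000);
        save_regs(pipe_a, 2);
        wr(0x6014, 0);            /* the power transition clobbers it */
        restore_regs(pipe_a, 2);
        printf("reg 0x6014 = 0x%08x\n", rd(0x6014));
        return 0;
    }
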
+ 120 - 17
drivers/gpu/drm/i915/intel_bios.c

@@ -114,6 +114,8 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
 	struct lvds_dvo_timing *dvo_timing;
 	struct drm_display_mode *panel_fixed_mode;
 	int lfp_data_size, dvo_timing_offset;
+	int i, temp_downclock;
+	struct drm_display_mode *temp_mode;
 
 	/* Defaults if we can't find VBT info */
 	dev_priv->lvds_dither = 0;
@@ -159,9 +161,49 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
 
 	dev_priv->lfp_lvds_vbt_mode = panel_fixed_mode;
 
-	DRM_DEBUG("Found panel mode in BIOS VBT tables:\n");
+	DRM_DEBUG_KMS("Found panel mode in BIOS VBT tables:\n");
 	drm_mode_debug_printmodeline(panel_fixed_mode);
 
+	temp_mode = kzalloc(sizeof(*temp_mode), GFP_KERNEL);
+	temp_downclock = panel_fixed_mode->clock;
+	/*
+	 * enumerate the LVDS panel timing info entry in VBT to check whether
+	 * the LVDS downclock is found.
+	 */
+	for (i = 0; i < 16; i++) {
+		entry = (struct bdb_lvds_lfp_data_entry *)
+			((uint8_t *)lvds_lfp_data->data + (lfp_data_size * i));
+		dvo_timing = (struct lvds_dvo_timing *)
+			((unsigned char *)entry + dvo_timing_offset);
+
+		fill_detail_timing_data(temp_mode, dvo_timing);
+
+		if (temp_mode->hdisplay == panel_fixed_mode->hdisplay &&
+		    temp_mode->hsync_start == panel_fixed_mode->hsync_start &&
+		    temp_mode->hsync_end == panel_fixed_mode->hsync_end &&
+		    temp_mode->htotal == panel_fixed_mode->htotal &&
+		    temp_mode->vdisplay == panel_fixed_mode->vdisplay &&
+		    temp_mode->vsync_start == panel_fixed_mode->vsync_start &&
+		    temp_mode->vsync_end == panel_fixed_mode->vsync_end &&
+		    temp_mode->vtotal == panel_fixed_mode->vtotal &&
+		    temp_mode->clock < temp_downclock) {
+			/*
+			 * downclock is already found. But we expect
+			 * to find the lower downclock.
+			 */
+			temp_downclock = temp_mode->clock;
+		}
+		/* clear it to zero */
+		memset(temp_mode, 0, sizeof(*temp_mode));
+	}
+	kfree(temp_mode);
+	if (temp_downclock < panel_fixed_mode->clock) {
+		dev_priv->lvds_downclock_avail = 1;
+		dev_priv->lvds_downclock = temp_downclock;
+		DRM_DEBUG_KMS("LVDS downclock is found in VBT. ",
+				"Normal Clock %dKHz, downclock %dKHz\n",
+				temp_downclock, panel_fixed_mode->clock);
+	}
 	return;
 }
 
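The loop above only treats a VBT entry as a downclock candidate when every
timing field except the pixel clock matches the fixed panel mode, and it
keeps the lowest clock seen. The same comparison as a standalone sketch
(the trimmed mode struct and the sample numbers are illustrative):

    #include <stdio.h>

    /* Only the fields the VBT scan compares. */
    struct mode {
        int hdisplay, hsync_start, hsync_end, htotal;
        int vdisplay, vsync_start, vsync_end, vtotal;
        int clock; /* kHz */
    };

    static int same_timings(const struct mode *a, const struct mode *b)
    {
        return a->hdisplay == b->hdisplay &&
               a->hsync_start == b->hsync_start &&
               a->hsync_end == b->hsync_end && a->htotal == b->htotal &&
               a->vdisplay == b->vdisplay &&
               a->vsync_start == b->vsync_start &&
               a->vsync_end == b->vsync_end && a->vtotal == b->vtotal;
    }

    /* Lowest clock among entries matching `fixed`, or fixed->clock. */
    static int find_downclock(const struct mode *fixed,
                              const struct mode *tbl, int n)
    {
        int best = fixed->clock;
        for (int i = 0; i < n; i++)
            if (same_timings(&tbl[i], fixed) && tbl[i].clock < best)
                best = tbl[i].clock;
        return best;
    }

    int main(void)
    {
        struct mode fixed = { 1280, 1296, 1344, 1408,
                              800, 803, 809, 823, 71000 };
        struct mode tbl[2] = { fixed, fixed };
        tbl[1].clock = 56000; /* same timings, lower dot clock */
        printf("downclock %d kHz\n", find_downclock(&fixed, tbl, 2));
        return 0;
    }
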
@@ -217,7 +259,7 @@ parse_general_features(struct drm_i915_private *dev_priv,
 			if (IS_I85X(dev_priv->dev))
 				dev_priv->lvds_ssc_freq =
 					general->ssc_freq ? 66 : 48;
-			else if (IS_IGDNG(dev_priv->dev))
+			else if (IS_IRONLAKE(dev_priv->dev))
 				dev_priv->lvds_ssc_freq =
 					general->ssc_freq ? 100 : 120;
 			else
@@ -241,22 +283,18 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
 		GPIOF,
 	};
 
-	/* Set sensible defaults in case we can't find the general block
-	   or it is the wrong chipset */
-	dev_priv->crt_ddc_bus = -1;
-
 	general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
 	if (general) {
 		u16 block_size = get_blocksize(general);
 		if (block_size >= sizeof(*general)) {
 			int bus_pin = general->crt_ddc_gmbus_pin;
-			DRM_DEBUG("crt_ddc_bus_pin: %d\n", bus_pin);
+			DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
 			if ((bus_pin >= 1) && (bus_pin <= 6)) {
 				dev_priv->crt_ddc_bus =
 					crt_bus_map_table[bus_pin-1];
 			}
 		} else {
-			DRM_DEBUG("BDB_GD too small (%d). Invalid.\n",
+			DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
 				  block_size);
 		}
 	}
@@ -274,7 +312,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
 
 	p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
 	if (!p_defs) {
-		DRM_DEBUG("No general definition block is found\n");
+		DRM_DEBUG_KMS("No general definition block is found\n");
 		return;
 	}
 	/* judge whether the size of child device meets the requirements.
@@ -284,7 +322,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
 	 */
 	if (p_defs->child_dev_size != sizeof(*p_child)) {
 		/* different child dev size . Ignore it */
-		DRM_DEBUG("different child size is found. Invalid.\n");
+		DRM_DEBUG_KMS("different child size is found. Invalid.\n");
 		return;
 	}
 	/* get the block size of general definitions */
@@ -310,11 +348,11 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
 		if (p_child->dvo_port != DEVICE_PORT_DVOB &&
 			p_child->dvo_port != DEVICE_PORT_DVOC) {
 			/* skip the incorrect SDVO port */
-			DRM_DEBUG("Incorrect SDVO port. Skip it \n");
+			DRM_DEBUG_KMS("Incorrect SDVO port. Skip it \n");
 			continue;
 		}
-		DRM_DEBUG("the SDVO device with slave addr %2x is found on "
-				"%s port\n",
+		DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
+				" %s port\n",
 				p_child->slave_addr,
 				(p_child->dvo_port == DEVICE_PORT_DVOB) ?
 					"SDVOB" : "SDVOC");
@@ -325,21 +363,21 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
 			p_mapping->dvo_wiring = p_child->dvo_wiring;
 			p_mapping->initialized = 1;
 		} else {
-			DRM_DEBUG("Maybe one SDVO port is shared by "
+			DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
 					 "two SDVO device.\n");
 		}
 		if (p_child->slave2_addr) {
 			/* Maybe this is a SDVO device with multiple inputs */
 			/* And the mapping info is not added */
-			DRM_DEBUG("there exists the slave2_addr. Maybe this "
-				"is a SDVO device with multiple inputs.\n");
+			DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
+				" is a SDVO device with multiple inputs.\n");
 		}
 		count++;
 	}
 
 	if (!count) {
 		/* No SDVO device info is found */
-		DRM_DEBUG("No SDVO device info is found in VBT\n");
+		DRM_DEBUG_KMS("No SDVO device info is found in VBT\n");
 	}
 	return;
 }
@@ -366,6 +404,70 @@ parse_driver_features(struct drm_i915_private *dev_priv,
 		dev_priv->render_reclock_avail = true;
 }
 
+static void
+parse_device_mapping(struct drm_i915_private *dev_priv,
+		       struct bdb_header *bdb)
+{
+	struct bdb_general_definitions *p_defs;
+	struct child_device_config *p_child, *child_dev_ptr;
+	int i, child_device_num, count;
+	u16	block_size;
+
+	p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
+	if (!p_defs) {
+		DRM_DEBUG_KMS("No general definition block is found\n");
+		return;
+	}
+	/* judge whether the size of child device meets the requirements.
+	 * If the child device size obtained from general definition block
+	 * is different with sizeof(struct child_device_config), skip the
+	 * parsing of sdvo device info
+	 */
+	if (p_defs->child_dev_size != sizeof(*p_child)) {
+		/* different child dev size . Ignore it */
+		DRM_DEBUG_KMS("different child size is found. Invalid.\n");
+		return;
+	}
+	/* get the block size of general definitions */
+	block_size = get_blocksize(p_defs);
+	/* get the number of child device */
+	child_device_num = (block_size - sizeof(*p_defs)) /
+				sizeof(*p_child);
+	count = 0;
+	/* get the number of child device that is present */
+	for (i = 0; i < child_device_num; i++) {
+		p_child = &(p_defs->devices[i]);
+		if (!p_child->device_type) {
+			/* skip the device block if device type is invalid */
+			continue;
+		}
+		count++;
+	}
+	if (!count) {
+		DRM_DEBUG_KMS("no child dev is parsed from VBT \n");
+		return;
+	}
+	dev_priv->child_dev = kzalloc(sizeof(*p_child) * count, GFP_KERNEL);
+	if (!dev_priv->child_dev) {
+		DRM_DEBUG_KMS("No memory space for child device\n");
+		return;
+	}
+
+	dev_priv->child_dev_num = count;
+	count = 0;
+	for (i = 0; i < child_device_num; i++) {
+		p_child = &(p_defs->devices[i]);
+		if (!p_child->device_type) {
+			/* skip the device block if device type is invalid */
+			continue;
+		}
+		child_dev_ptr = dev_priv->child_dev + count;
+		count++;
+		memcpy((void *)child_dev_ptr, (void *)p_child,
+					sizeof(*p_child));
+	}
+	return;
+}
 /**
  * intel_init_bios - initialize VBIOS settings & find VBT
  * @dev: DRM device
@@ -417,6 +519,7 @@ intel_init_bios(struct drm_device *dev)
 	parse_lfp_panel_data(dev_priv, bdb);
 	parse_sdvo_panel_data(dev_priv, bdb);
 	parse_sdvo_device_mapping(dev_priv, bdb);
+	parse_device_mapping(dev_priv, bdb);
 	parse_driver_features(dev_priv, bdb);
 
 	pci_unmap_rom(pdev, bios);

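parse_device_mapping() above uses the common two-pass pattern: count the
child entries with a valid device type, allocate the array once, then copy
the survivors. A self-contained model of that pattern (the child struct is
a stand-in for the real VBT layout):

    #include <stdio.h>
    #include <stdlib.h>

    struct child { int device_type; int dvo_port; };

    /* First pass counts, second pass copies; NULL if nothing present. */
    static struct child *copy_present(const struct child *src, int n,
                                      int *out_n)
    {
        int count = 0;
        for (int i = 0; i < n; i++)
            if (src[i].device_type)
                count++;
        if (!count)
            return NULL;

        struct child *dst = calloc(count, sizeof(*dst));
        if (!dst)
            return NULL;

        *out_n = count;
        count = 0;
        for (int i = 0; i < n; i++)
            if (src[i].device_type)
                dst[count++] = src[i];
        return dst;
    }

    int main(void)
    {
        struct child vbt[3] = { { 0x1022, 1 }, { 0, 0 }, { 0x60D2, 2 } };
        int n = 0;
        struct child *present = copy_present(vbt, 3, &n);
        printf("%d child devices kept\n", n);
        free(present);
        return 0;
    }
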
+ 17 - 0
drivers/gpu/drm/i915/intel_bios.h

@@ -549,4 +549,21 @@ bool intel_init_bios(struct drm_device *dev);
 #define   SWF14_APM_STANDBY	0x1
 #define   SWF14_APM_RESTORE	0x0
 
+/* Add the device class for LFP, TV, HDMI */
+#define	 DEVICE_TYPE_INT_LFP	0x1022
+#define	 DEVICE_TYPE_INT_TV	0x1009
+#define	 DEVICE_TYPE_HDMI	0x60D2
+#define	 DEVICE_TYPE_DP		0x68C6
+#define	 DEVICE_TYPE_eDP	0x78C6
+
+/* define the DVO port for HDMI output type */
+#define		DVO_B		1
+#define		DVO_C		2
+#define		DVO_D		3
+
+/* define the PORT for DP output type */
+#define		PORT_IDPB	7
+#define		PORT_IDPC	8
+#define		PORT_IDPD	9
+
 #endif /* _I830_BIOS_H_ */

+ 11 - 39
drivers/gpu/drm/i915/intel_crt.c

@@ -39,7 +39,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 temp, reg;
 
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		reg = PCH_ADPA;
 	else
 		reg = ADPA;
@@ -64,34 +64,6 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
 	}
 
 	I915_WRITE(reg, temp);
-
-	if (IS_IGD(dev)) {
-		if (mode == DRM_MODE_DPMS_OFF) {
-			/* turn off DAC */
-			temp = I915_READ(PORT_HOTPLUG_EN);
-			temp &= ~CRT_EOS_INT_EN;
-			I915_WRITE(PORT_HOTPLUG_EN, temp);
-
-			temp = I915_READ(PORT_HOTPLUG_STAT);
-			if (temp & CRT_EOS_INT_STATUS)
-				I915_WRITE(PORT_HOTPLUG_STAT,
-					CRT_EOS_INT_STATUS);
-		} else {
-			/* turn on DAC. EOS interrupt must be enabled after DAC
-			 * is enabled, so it sounds not good to enable it in
-			 * i915_driver_irq_postinstall()
-			 * wait 12.5ms after DAC is enabled
-			 */
-			msleep(13);
-			temp = I915_READ(PORT_HOTPLUG_STAT);
-			if (temp & CRT_EOS_INT_STATUS)
-				I915_WRITE(PORT_HOTPLUG_STAT,
-					CRT_EOS_INT_STATUS);
-			temp = I915_READ(PORT_HOTPLUG_EN);
-			temp |= CRT_EOS_INT_EN;
-			I915_WRITE(PORT_HOTPLUG_EN, temp);
-		}
-	}
 }
 
 static int intel_crt_mode_valid(struct drm_connector *connector,
@@ -141,7 +113,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
 	else
 		dpll_md_reg = DPLL_B_MD;
 
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		adpa_reg = PCH_ADPA;
 	else
 		adpa_reg = ADPA;
@@ -150,7 +122,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
 	 * Disable separate mode multiplier used when cloning SDVO to CRT
 	 * XXX this needs to be adjusted when we really are cloning
 	 */
-	if (IS_I965G(dev) && !IS_IGDNG(dev)) {
+	if (IS_I965G(dev) && !IS_IRONLAKE(dev)) {
 		dpll_md = I915_READ(dpll_md_reg);
 		I915_WRITE(dpll_md_reg,
 			   dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
@@ -164,18 +136,18 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
 
 	if (intel_crtc->pipe == 0) {
 		adpa |= ADPA_PIPE_A_SELECT;
-		if (!IS_IGDNG(dev))
+		if (!IS_IRONLAKE(dev))
 			I915_WRITE(BCLRPAT_A, 0);
 	} else {
 		adpa |= ADPA_PIPE_B_SELECT;
-		if (!IS_IGDNG(dev))
+		if (!IS_IRONLAKE(dev))
 			I915_WRITE(BCLRPAT_B, 0);
 	}
 
 	I915_WRITE(adpa_reg, adpa);
 }
 
-static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector)
+static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -194,7 +166,7 @@ static bool intel_igdng_crt_detect_hotplug(struct drm_connector *connector)
 			ADPA_CRT_HOTPLUG_ENABLE |
 			ADPA_CRT_HOTPLUG_FORCE_TRIGGER);
 
-	DRM_DEBUG("pch crt adpa 0x%x", adpa);
+	DRM_DEBUG_KMS("pch crt adpa 0x%x", adpa);
 	I915_WRITE(PCH_ADPA, adpa);
 
 	while ((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) != 0)
@@ -227,8 +199,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
 	u32 hotplug_en;
 	int i, tries = 0;
 
-	if (IS_IGDNG(dev))
-		return intel_igdng_crt_detect_hotplug(connector);
+	if (IS_IRONLAKE(dev))
+		return intel_ironlake_crt_detect_hotplug(connector);
 
 	/*
 	 * On 4 series desktop, CRT detect sequence need to be done twice
@@ -549,12 +521,12 @@ void intel_crt_init(struct drm_device *dev)
 					  &intel_output->enc);
 
 	/* Set up the DDC bus. */
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		i2c_reg = PCH_GPIOA;
 	else {
 		i2c_reg = GPIOA;
 		/* Use VBT information for CRT DDC if available */
-		if (dev_priv->crt_ddc_bus != -1)
+		if (dev_priv->crt_ddc_bus != 0)
 			i2c_reg = dev_priv->crt_ddc_bus;
 	}
 	intel_output->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");

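intel_ironlake_crt_detect_hotplug() above kicks off detection by setting
ADPA_CRT_HOTPLUG_FORCE_TRIGGER and busy-waiting until the hardware clears
the bit again. The poll-until-self-clearing-bit idiom in miniature (the
register and the bit position are simulated placeholders):

    #include <stdio.h>
    #include <stdint.h>

    #define FORCE_TRIGGER (1u << 16) /* placeholder bit position */

    static uint32_t adpa; /* stand-in for the PCH_ADPA register */

    static uint32_t rd(void)
    {
        /* hardware clears the trigger once detection finishes */
        adpa &= ~FORCE_TRIGGER;
        return adpa;
    }

    int main(void)
    {
        adpa |= FORCE_TRIGGER;        /* start detection */
        while (rd() & FORCE_TRIGGER)  /* poll for self-clear */
            ;
        printf("detect done, adpa=0x%08x\n", adpa);
        return 0;
    }
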
File diff suppressed because it is too large
+ 314 - 236
drivers/gpu/drm/i915/intel_display.c


+ 137 - 25
drivers/gpu/drm/i915/intel_dp.c

@@ -33,7 +33,8 @@
 #include "intel_drv.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
-#include "intel_dp.h"
+#include "drm_dp_helper.h"
+
 
 #define DP_LINK_STATUS_SIZE	6
 #define DP_LINK_CHECK_TIMEOUT	(10 * 1000)
@@ -223,8 +224,8 @@ intel_dp_aux_ch(struct intel_output *intel_output,
 	 */
 	if (IS_eDP(intel_output))
 		aux_clock_divider = 225; /* eDP input clock at 450Mhz */
-	else if (IS_IGDNG(dev))
-		aux_clock_divider = 62; /* IGDNG: input clock fixed at 125Mhz */
+	else if (IS_IRONLAKE(dev))
+		aux_clock_divider = 62; /* Ironlake: input clock fixed at 125MHz */
 	else
 		aux_clock_divider = intel_hrawclk(dev) / 2;
 
@@ -282,7 +283,7 @@ intel_dp_aux_ch(struct intel_output *intel_output,
 	/* Timeouts occur when the device isn't connected, so they're
 	 * "normal" -- don't fill the kernel log with these */
 	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
-		DRM_DEBUG("dp_aux_ch timeout status 0x%08x\n", status);
+		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
 		return -ETIMEDOUT;
 	}
 
@@ -382,17 +383,77 @@ intel_dp_aux_native_read(struct intel_output *intel_output,
 }
 
 static int
-intel_dp_i2c_aux_ch(struct i2c_adapter *adapter,
-		    uint8_t *send, int send_bytes,
-		    uint8_t *recv, int recv_bytes)
+intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+		    uint8_t write_byte, uint8_t *read_byte)
 {
+	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
 	struct intel_dp_priv *dp_priv = container_of(adapter,
 						     struct intel_dp_priv,
 						     adapter);
 	struct intel_output *intel_output = dp_priv->intel_output;
+	uint16_t address = algo_data->address;
+	uint8_t msg[5];
+	uint8_t reply[2];
+	int msg_bytes;
+	int reply_bytes;
+	int ret;
+
+	/* Set up the command byte */
+	if (mode & MODE_I2C_READ)
+		msg[0] = AUX_I2C_READ << 4;
+	else
+		msg[0] = AUX_I2C_WRITE << 4;
+
+	if (!(mode & MODE_I2C_STOP))
+		msg[0] |= AUX_I2C_MOT << 4;
 
-	return intel_dp_aux_ch(intel_output,
-			       send, send_bytes, recv, recv_bytes);
+	msg[1] = address >> 8;
+	msg[2] = address;
+
+	switch (mode) {
+	case MODE_I2C_WRITE:
+		msg[3] = 0;
+		msg[4] = write_byte;
+		msg_bytes = 5;
+		reply_bytes = 1;
+		break;
+	case MODE_I2C_READ:
+		msg[3] = 0;
+		msg_bytes = 4;
+		reply_bytes = 2;
+		break;
+	default:
+		msg_bytes = 3;
+		reply_bytes = 1;
+		break;
+	}
+
+	for (;;) {
+		ret = intel_dp_aux_ch(intel_output,
+				      msg, msg_bytes,
+				      reply, reply_bytes);
+		if (ret < 0) {
+			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
+			return ret;
+		}
+		switch (reply[0] & AUX_I2C_REPLY_MASK) {
+		case AUX_I2C_REPLY_ACK:
+			if (mode == MODE_I2C_READ) {
+				*read_byte = reply[1];
+			}
+			return reply_bytes - 1;
+		case AUX_I2C_REPLY_NACK:
+			DRM_DEBUG_KMS("aux_ch nack\n");
+			return -EREMOTEIO;
+		case AUX_I2C_REPLY_DEFER:
+			DRM_DEBUG_KMS("aux_ch defer\n");
+			udelay(100);
+			break;
+		default:
+			DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]);
+			return -EREMOTEIO;
+		}
+	}
 }
 
 static int
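
The new intel_dp_i2c_aux_ch() packs each byte-sized i2c transfer into a
short AUX message (a command nibble, a 16-bit address, and an optional data
byte), then loops on the sink's reply: retry after DEFER, give up on NACK.
A compressed userspace model of that reply loop (the command encoding, the
reply codes, and the fake channel are illustrative only):

    #include <stdio.h>
    #include <stdint.h>

    enum { REPLY_ACK, REPLY_NACK, REPLY_DEFER };

    /* Fake AUX channel: defers twice, then ACKs with one data byte. */
    static int aux_ch(const uint8_t *msg, int len, uint8_t *reply)
    {
        static int defers = 2;
        (void)msg; (void)len;
        if (defers-- > 0) {
            reply[0] = REPLY_DEFER;
            return 1;
        }
        reply[0] = REPLY_ACK;
        reply[1] = 0x42;
        return 2;
    }

    /* One i2c read byte over AUX, shaped like intel_dp_i2c_aux_ch(). */
    static int i2c_read_byte(uint16_t addr, uint8_t *out)
    {
        uint8_t msg[4] = { 0x10 /* "i2c read" nibble, illustrative */,
                           addr >> 8, addr & 0xff, 0 /* length - 1 */ };
        uint8_t reply[2];

        for (;;) {
            aux_ch(msg, sizeof(msg), reply);
            switch (reply[0]) {
            case REPLY_ACK:
                *out = reply[1];
                return 0;
            case REPLY_NACK:
                return -1;
            case REPLY_DEFER:
                /* the driver udelay()s here before retrying */
                break;
            }
        }
    }

    int main(void)
    {
        uint8_t b;
        if (i2c_read_byte(0x50, &b) == 0)
            printf("EDID byte: 0x%02x\n", b);
        return 0;
    }
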
@@ -435,7 +496,8 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
 				dp_priv->link_bw = bws[clock];
 				dp_priv->lane_count = lane_count;
 				adjusted_mode->clock = intel_dp_link_clock(dp_priv->link_bw);
-				DRM_DEBUG("Display port link bw %02x lane count %d clock %d\n",
+				DRM_DEBUG_KMS("Display port link bw %02x lane "
+						"count %d clock %d\n",
 				       dp_priv->link_bw, dp_priv->lane_count,
 				       adjusted_mode->clock);
 				return true;
@@ -514,7 +576,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
 	intel_dp_compute_m_n(3, lane_count,
 			     mode->clock, adjusted_mode->clock, &m_n);
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		if (intel_crtc->pipe == 0) {
 			I915_WRITE(TRANSA_DATA_M1,
 				   ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) |
@@ -606,23 +668,23 @@ intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	}
 }
 
-static void igdng_edp_backlight_on (struct drm_device *dev)
+static void ironlake_edp_backlight_on (struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 pp;
 
-	DRM_DEBUG("\n");
+	DRM_DEBUG_KMS("\n");
 	pp = I915_READ(PCH_PP_CONTROL);
 	pp |= EDP_BLC_ENABLE;
 	I915_WRITE(PCH_PP_CONTROL, pp);
 }
 
-static void igdng_edp_backlight_off (struct drm_device *dev)
+static void ironlake_edp_backlight_off (struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 pp;
 
-	DRM_DEBUG("\n");
+	DRM_DEBUG_KMS("\n");
 	pp = I915_READ(PCH_PP_CONTROL);
 	pp &= ~EDP_BLC_ENABLE;
 	I915_WRITE(PCH_PP_CONTROL, pp);
@@ -641,13 +703,13 @@ intel_dp_dpms(struct drm_encoder *encoder, int mode)
 		if (dp_reg & DP_PORT_EN) {
 			intel_dp_link_down(intel_output, dp_priv->DP);
 			if (IS_eDP(intel_output))
-				igdng_edp_backlight_off(dev);
+				ironlake_edp_backlight_off(dev);
 		}
 	} else {
 		if (!(dp_reg & DP_PORT_EN)) {
 			intel_dp_link_train(intel_output, dp_priv->DP, dp_priv->link_configuration);
 			if (IS_eDP(intel_output))
-				igdng_edp_backlight_on(dev);
+				ironlake_edp_backlight_on(dev);
 		}
 	}
 	dp_priv->dpms_mode = mode;
@@ -1010,7 +1072,7 @@ intel_dp_link_down(struct intel_output *intel_output, uint32_t DP)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
 
-	DRM_DEBUG("\n");
+	DRM_DEBUG_KMS("\n");
 
 	if (IS_eDP(intel_output)) {
 		DP &= ~DP_PLL_ENABLE;
@@ -1071,7 +1133,7 @@ intel_dp_check_link_status(struct intel_output *intel_output)
 }
 
 static enum drm_connector_status
-igdng_dp_detect(struct drm_connector *connector)
+ironlake_dp_detect(struct drm_connector *connector)
 {
 	struct intel_output *intel_output = to_intel_output(connector);
 	struct intel_dp_priv *dp_priv = intel_output->dev_priv;
@@ -1106,8 +1168,8 @@ intel_dp_detect(struct drm_connector *connector)
 
 	dp_priv->has_audio = false;
 
-	if (IS_IGDNG(dev))
-		return igdng_dp_detect(connector);
+	if (IS_IRONLAKE(dev))
+		return ironlake_dp_detect(connector);
 
 	temp = I915_READ(PORT_HOTPLUG_EN);
 
@@ -1227,7 +1289,53 @@ intel_dp_hot_plug(struct intel_output *intel_output)
 	if (dp_priv->dpms_mode == DRM_MODE_DPMS_ON)
 		intel_dp_check_link_status(intel_output);
 }
-
+/*
+ * Enumerate the child dev array parsed from VBT to check whether
+ * the given DP is present.
+ * If it is present, return 1.
+ * If it is not present, return 0.
+ * If no child dev is parsed from VBT, it is assumed that the given
+ * DP is present.
+ */
+static int dp_is_present_in_vbt(struct drm_device *dev, int dp_reg)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct child_device_config *p_child;
+	int i, dp_port, ret;
+
+	if (!dev_priv->child_dev_num)
+		return 1;
+
+	dp_port = 0;
+	if (dp_reg == DP_B || dp_reg == PCH_DP_B)
+		dp_port = PORT_IDPB;
+	else if (dp_reg == DP_C || dp_reg == PCH_DP_C)
+		dp_port = PORT_IDPC;
+	else if (dp_reg == DP_D || dp_reg == PCH_DP_D)
+		dp_port = PORT_IDPD;
+
+	ret = 0;
+	for (i = 0; i < dev_priv->child_dev_num; i++) {
+		p_child = dev_priv->child_dev + i;
+		/*
+		 * If the device type is not DP, continue.
+		 */
+		if (p_child->device_type != DEVICE_TYPE_DP &&
+			p_child->device_type != DEVICE_TYPE_eDP)
+			continue;
+		/* Find the eDP port */
+		if (dp_reg == DP_A && p_child->device_type == DEVICE_TYPE_eDP) {
+			ret = 1;
+			break;
+		}
+		/* Find the DP port */
+		if (p_child->dvo_port == dp_port) {
+			ret = 1;
+			break;
+		}
+	}
+	return ret;
+}
 void
 intel_dp_init(struct drm_device *dev, int output_reg)
 {
@@ -1237,6 +1345,10 @@ intel_dp_init(struct drm_device *dev, int output_reg)
 	struct intel_dp_priv *dp_priv;
 	const char *name = NULL;
 
+	if (!dp_is_present_in_vbt(dev, output_reg)) {
+		DRM_DEBUG_KMS("DP is not present. Ignore it\n");
+		return;
+	}
 	intel_output = kcalloc(sizeof(struct intel_output) + 
 			       sizeof(struct intel_dp_priv), 1, GFP_KERNEL);
 	if (!intel_output)
@@ -1254,11 +1366,11 @@ intel_dp_init(struct drm_device *dev, int output_reg)
 	else
 		intel_output->type = INTEL_OUTPUT_DISPLAYPORT;
 
-	if (output_reg == DP_B)
+	if (output_reg == DP_B || output_reg == PCH_DP_B)
 		intel_output->clone_mask = (1 << INTEL_DP_B_CLONE_BIT);
-	else if (output_reg == DP_C)
+	else if (output_reg == DP_C || output_reg == PCH_DP_C)
 		intel_output->clone_mask = (1 << INTEL_DP_C_CLONE_BIT);
-	else if (output_reg == DP_D)
+	else if (output_reg == DP_D || output_reg == PCH_DP_D)
 		intel_output->clone_mask = (1 << INTEL_DP_D_CLONE_BIT);
 
 	if (IS_eDP(intel_output)) {

+ 44 - 0
drivers/gpu/drm/i915/intel_drv.h

@@ -110,6 +110,32 @@ struct intel_output {
 	int clone_mask;
 };
 
+struct intel_crtc;
+struct intel_overlay {
+	struct drm_device *dev;
+	struct intel_crtc *crtc;
+	struct drm_i915_gem_object *vid_bo;
+	struct drm_i915_gem_object *old_vid_bo;
+	int active;
+	int pfit_active;
+	u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
+	u32 color_key;
+	u32 brightness, contrast, saturation;
+	u32 old_xscale, old_yscale;
+	/* register access */
+	u32 flip_addr;
+	struct drm_i915_gem_object *reg_bo;
+	void *virt_addr;
+	/* flip handling */
+	uint32_t last_flip_req;
+	int hw_wedged;
+#define HW_WEDGED		1
+#define NEEDS_WAIT_FOR_FLIP	2
+#define RELEASE_OLD_VID		3
+#define SWITCH_OFF_STAGE_1	4
+#define SWITCH_OFF_STAGE_2	5
+};
+
 struct intel_crtc {
 	struct drm_crtc base;
 	enum pipe pipe;
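
pfit_vscale_ratio in the new struct intel_overlay is a fixed-point number
where (1<<12) represents 1.0, so panel-fitter scaling reduces to a multiply
and a shift. A short illustration (the sample sizes are arbitrary):

    #include <stdio.h>
    #include <stdint.h>

    #define FP_ONE (1u << 12) /* 1.0 in the overlay's fixed-point format */

    int main(void)
    {
        uint32_t src_h = 1080, dst_h = 800;
        uint32_t ratio = (src_h * FP_ONE) / dst_h; /* ~1.35 as 0x1599 */
        uint32_t line = (600 * ratio) >> 12;       /* source line for y=600 */
        printf("ratio=0x%x line=%u\n", ratio, line);
        return 0;
    }
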
@@ -121,6 +147,8 @@ struct intel_crtc {
 	bool busy; /* is scanout buffer being updated frequently? */
 	struct timer_list idle_timer;
 	bool lowfreq_avail;
+	struct intel_overlay *overlay;
+	struct intel_unpin_work *unpin_work;
 };
 
 #define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
@@ -134,6 +162,8 @@ void intel_i2c_destroy(struct i2c_adapter *adapter);
 int intel_ddc_get_modes(struct intel_output *intel_output);
 extern bool intel_ddc_probe(struct intel_output *intel_output);
 void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
+void intel_i2c_reset_gmbus(struct drm_device *dev);
+
 extern void intel_crt_init(struct drm_device *dev);
 extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
 extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
@@ -148,6 +178,7 @@ intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
 extern void intel_edp_link_config (struct intel_output *, int *, int *);
 
 
+extern int intel_panel_fitter_pipe (struct drm_device *dev);
 extern void intel_crtc_load_lut(struct drm_crtc *crtc);
 extern void intel_encoder_prepare (struct drm_encoder *encoder);
 extern void intel_encoder_commit (struct drm_encoder *encoder);
@@ -177,10 +208,23 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
 				    u16 blue, int regno);
 extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
 				    u16 *blue, int regno);
+extern void intel_init_clock_gating(struct drm_device *dev);
 
 extern int intel_framebuffer_create(struct drm_device *dev,
 				    struct drm_mode_fb_cmd *mode_cmd,
 				    struct drm_framebuffer **fb,
 				    struct drm_gem_object *obj);
 
+extern void intel_prepare_page_flip(struct drm_device *dev, int plane);
+extern void intel_finish_page_flip(struct drm_device *dev, int pipe);
+
+extern void intel_setup_overlay(struct drm_device *dev);
+extern void intel_cleanup_overlay(struct drm_device *dev);
+extern int intel_overlay_switch_off(struct intel_overlay *overlay);
+extern int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
+						int interruptible);
+extern int intel_overlay_put_image(struct drm_device *dev, void *data,
+				   struct drm_file *file_priv);
+extern int intel_overlay_attrs(struct drm_device *dev, void *data,
+			       struct drm_file *file_priv);
 #endif /* __INTEL_DRV_H__ */

+ 4 - 3
drivers/gpu/drm/i915/intel_fb.c

@@ -230,8 +230,9 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
 	par->intel_fb = intel_fb;
 
 	/* To allow resizing without swapping buffers */
-	DRM_DEBUG("allocated %dx%d fb: 0x%08x, bo %p\n", intel_fb->base.width,
-		  intel_fb->base.height, obj_priv->gtt_offset, fbo);
+	DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
+			intel_fb->base.width, intel_fb->base.height,
+			obj_priv->gtt_offset, fbo);
 
 	mutex_unlock(&dev->struct_mutex);
 	return 0;
@@ -249,7 +250,7 @@ int intelfb_probe(struct drm_device *dev)
 {
 	int ret;
 
-	DRM_DEBUG("\n");
+	DRM_DEBUG_KMS("\n");
 	ret = drm_fb_helper_single_fb_probe(dev, 32, intelfb_create);
 	return ret;
 }

+ 52 - 3
drivers/gpu/drm/i915/intel_hdmi.c

@@ -82,7 +82,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
 	/* HW workaround, need to toggle enable bit off and on for 12bpc, but
 	 * we do this anyway which shows more stable in testing.
 	 */
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE);
 		POSTING_READ(hdmi_priv->sdvox_reg);
 	}
@@ -99,7 +99,7 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
 	/* HW workaround, need to write this twice for issue that may result
 	 * in first write getting masked.
 	 */
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		I915_WRITE(hdmi_priv->sdvox_reg, temp);
 		POSTING_READ(hdmi_priv->sdvox_reg);
 	}
@@ -225,7 +225,52 @@ static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
 	.destroy = intel_hdmi_enc_destroy,
 };
 
-
+/*
+ * Enumerate the child dev array parsed from VBT to check whether
+ * the given HDMI is present.
+ * If it is present, return 1.
+ * If it is not present, return 0.
+ * If no child dev is parsed from VBT, it is assumed that the given
+ * HDMI is present.
+ */
+static int hdmi_is_present_in_vbt(struct drm_device *dev, int hdmi_reg)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct child_device_config *p_child;
+	int i, hdmi_port, ret;
+
+	if (!dev_priv->child_dev_num)
+		return 1;
+
+	if (hdmi_reg == SDVOB)
+		hdmi_port = DVO_B;
+	else if (hdmi_reg == SDVOC)
+		hdmi_port = DVO_C;
+	else if (hdmi_reg == HDMIB)
+		hdmi_port = DVO_B;
+	else if (hdmi_reg == HDMIC)
+		hdmi_port = DVO_C;
+	else if (hdmi_reg == HDMID)
+		hdmi_port = DVO_D;
+	else
+		return 0;
+
+	ret = 0;
+	for (i = 0; i < dev_priv->child_dev_num; i++) {
+		p_child = dev_priv->child_dev + i;
+		/*
+		 * If the device type is not HDMI, continue.
+		 */
+		if (p_child->device_type != DEVICE_TYPE_HDMI)
+			continue;
+		/* Find the HDMI port */
+		if (p_child->dvo_port == hdmi_port) {
+			ret = 1;
+			break;
+		}
+	}
+	return ret;
+}
 void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -233,6 +278,10 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
 	struct intel_output *intel_output;
 	struct intel_hdmi_priv *hdmi_priv;
 
+	if (!hdmi_is_present_in_vbt(dev, sdvox_reg)) {
+		DRM_DEBUG_KMS("HDMI is not present. Ignored it \n");
+		return;
+	}
 	intel_output = kcalloc(sizeof(struct intel_output) +
 			       sizeof(struct intel_hdmi_priv), 1, GFP_KERNEL);
 	if (!intel_output)

+ 20 - 1
drivers/gpu/drm/i915/intel_i2c.c

@@ -39,7 +39,7 @@ void intel_i2c_quirk_set(struct drm_device *dev, bool enable)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	/* When using bit bashing for I2C, this bit needs to be set to 1 */
-	if (!IS_IGD(dev))
+	if (!IS_PINEVIEW(dev))
 		return;
 	if (enable)
 		I915_WRITE(DSPCLK_GATE_D,
@@ -118,6 +118,23 @@ static void set_data(void *data, int state_high)
 	udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
 }
 
+/* Clears the GMBUS setup.  Our driver doesn't make use of the GMBUS I2C
+ * engine, but if the BIOS leaves it enabled, then that can break our use
+ * of the bit-banging I2C interfaces.  This is notably the case with the
+ * Mac Mini in EFI mode.
+ */
+void
+intel_i2c_reset_gmbus(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_IRONLAKE(dev)) {
+		I915_WRITE(PCH_GMBUS0, 0);
+	} else {
+		I915_WRITE(GMBUS0, 0);
+	}
+}
+
 /**
  * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
  * @dev: DRM device
@@ -168,6 +185,8 @@ struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
 	if(i2c_bit_add_bus(&chan->adapter))
 		goto out_free;
 
+	intel_i2c_reset_gmbus(dev);
+
 	/* JJJ:  raise SCL and SDA? */
 	intel_i2c_quirk_set(dev, true);
 	set_data(chan, 1);

+ 118 - 22
drivers/gpu/drm/i915/intel_lvds.c

@@ -56,7 +56,7 @@ static void intel_lvds_set_backlight(struct drm_device *dev, int level)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 blc_pwm_ctl, reg;
 
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		reg = BLC_PWM_CPU_CTL;
 	else
 		reg = BLC_PWM_CTL;
@@ -74,7 +74,7 @@ static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 reg;
 
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		reg = BLC_PWM_PCH_CTL2;
 	else
 		reg = BLC_PWM_CTL;
@@ -91,7 +91,7 @@ static void intel_lvds_set_power(struct drm_device *dev, bool on)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 pp_status, ctl_reg, status_reg;
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		ctl_reg = PCH_PP_CONTROL;
 		status_reg = PCH_PP_STATUS;
 	} else {
@@ -137,7 +137,7 @@ static void intel_lvds_save(struct drm_connector *connector)
 	u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
 	u32 pwm_ctl_reg;
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		pp_on_reg = PCH_PP_ON_DELAYS;
 		pp_off_reg = PCH_PP_OFF_DELAYS;
 		pp_ctl_reg = PCH_PP_CONTROL;
@@ -174,7 +174,7 @@ static void intel_lvds_restore(struct drm_connector *connector)
 	u32 pp_on_reg, pp_off_reg, pp_ctl_reg, pp_div_reg;
 	u32 pwm_ctl_reg;
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		pp_on_reg = PCH_PP_ON_DELAYS;
 		pp_off_reg = PCH_PP_OFF_DELAYS;
 		pp_ctl_reg = PCH_PP_CONTROL;
@@ -297,7 +297,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 	}
 
 	/* full screen scale for now */
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		goto out;
 
 	/* 965+ wants fuzzy fitting */
@@ -327,7 +327,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 	 * to register description and PRM.
 	 * Change the value here to see the borders for debugging
 	 */
-	if (!IS_IGDNG(dev)) {
+	if (!IS_IRONLAKE(dev)) {
 		I915_WRITE(BCLRPAT_A, 0);
 		I915_WRITE(BCLRPAT_B, 0);
 	}
@@ -548,7 +548,7 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 reg;
 
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		reg = BLC_PWM_CPU_CTL;
 	else
 		reg = BLC_PWM_CTL;
@@ -587,7 +587,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
 	 * settings.
 	 */
 
-	if (IS_IGDNG(dev))
+	if (IS_IRONLAKE(dev))
 		return;
 
 	/*
@@ -913,6 +913,101 @@ static int intel_lid_present(void)
 }
 #endif
 
+/**
+ * intel_find_lvds_downclock - find the reduced downclock for LVDS in EDID
+ * @dev: drm device
+ * @connector: LVDS connector
+ *
+ * Find the reduced downclock for LVDS in EDID.
+ */
+static void intel_find_lvds_downclock(struct drm_device *dev,
+				struct drm_connector *connector)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct drm_display_mode *scan, *panel_fixed_mode;
+	int temp_downclock;
+
+	panel_fixed_mode = dev_priv->panel_fixed_mode;
+	temp_downclock = panel_fixed_mode->clock;
+
+	mutex_lock(&dev->mode_config.mutex);
+	list_for_each_entry(scan, &connector->probed_modes, head) {
+		/*
+		 * If one mode has the same resolution with the fixed_panel
+		 * mode while they have the different refresh rate, it means
+		 * that the reduced downclock is found for the LVDS. In such
+		 * case we can set the different FPx0/1 to dynamically select
+		 * between low and high frequency.
+		 */
+		if (scan->hdisplay == panel_fixed_mode->hdisplay &&
+			scan->hsync_start == panel_fixed_mode->hsync_start &&
+			scan->hsync_end == panel_fixed_mode->hsync_end &&
+			scan->htotal == panel_fixed_mode->htotal &&
+			scan->vdisplay == panel_fixed_mode->vdisplay &&
+			scan->vsync_start == panel_fixed_mode->vsync_start &&
+			scan->vsync_end == panel_fixed_mode->vsync_end &&
+			scan->vtotal == panel_fixed_mode->vtotal) {
+			if (scan->clock < temp_downclock) {
+				/*
+				 * The downclock is already found. But we
+				 * expect to find the lower downclock.
+				 */
+				temp_downclock = scan->clock;
+			}
+		}
+	}
+	mutex_unlock(&dev->mode_config.mutex);
+	if (temp_downclock < panel_fixed_mode->clock) {
+		/* We found the downclock for LVDS. */
+		dev_priv->lvds_downclock_avail = 1;
+		dev_priv->lvds_downclock = temp_downclock;
+		DRM_DEBUG_KMS("LVDS downclock is found in EDID. "
+				"Normal clock %dKhz, downclock %dKhz\n",
+				panel_fixed_mode->clock, temp_downclock);
+	}
+	return;
+}
+
+/*
+ * Enumerate the child dev array parsed from VBT to check whether
+ * the LVDS is present.
+ * If it is present, return 1.
+ * If it is not present, return 0.
+ * If no child dev is parsed from VBT, it is assumed that the LVDS is present.
+ * Note: The addin_offset should also be checked for LVDS panel.
+ * Only when it is non-zero, it is assumed that it is present.
+ */
+static int lvds_is_present_in_vbt(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct child_device_config *p_child;
+	int i, ret;
+
+	if (!dev_priv->child_dev_num)
+		return 1;
+
+	ret = 0;
+	for (i = 0; i < dev_priv->child_dev_num; i++) {
+		p_child = dev_priv->child_dev + i;
+		/*
+		 * If the device type is not LFP, continue.
+		 * If the device type is 0x22, it is also regarded as LFP.
+		 */
+		if (p_child->device_type != DEVICE_TYPE_INT_LFP &&
+			p_child->device_type != DEVICE_TYPE_LFP)
+			continue;
+
+		/* The addin_offset should be checked. Only when it is
+		 * non-zero, it is regarded as present.
+		 */
+		if (p_child->addin_offset) {
+			ret = 1;
+			break;
+		}
+	}
+	return ret;
+}
+
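
lvds_is_present_in_vbt() and the HDMI/DP variants all apply the same rule:
an empty child-device list means assume present, otherwise the output must
be matched in the list (for LVDS, additionally with a non-zero
addin_offset). A runnable sketch of that rule (the child struct is a
stand-in for the VBT layout; the device-type value is the one defined in
intel_bios.h):

    #include <stdio.h>

    #define DEVICE_TYPE_INT_LFP 0x1022

    struct child { int device_type; int addin_offset; };

    static int lvds_present(const struct child *devs, int n)
    {
        if (!n)
            return 1; /* no VBT data: assume the panel exists */
        for (int i = 0; i < n; i++)
            if (devs[i].device_type == DEVICE_TYPE_INT_LFP &&
                devs[i].addin_offset)
                return 1;
        return 0;
    }

    int main(void)
    {
        struct child devs[] = { { 0x60D2, 0 },
                                { DEVICE_TYPE_INT_LFP, 0x40 } };
        printf("LVDS present: %d\n", lvds_present(devs, 2));
        printf("LVDS present (empty VBT): %d\n", lvds_present(devs, 0));
        return 0;
    }
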
 /**
  * intel_lvds_init - setup LVDS connectors on this device
  * @dev: drm device
@@ -936,21 +1031,20 @@ void intel_lvds_init(struct drm_device *dev)
 	if (dmi_check_system(intel_no_lvds))
 		return;
 
-	/* Assume that any device without an ACPI LID device also doesn't
-	 * have an integrated LVDS.  We would be better off parsing the BIOS
-	 * to get a reliable indicator, but that code isn't written yet.
-	 *
-	 * In the case of all-in-one desktops using LVDS that we've seen,
-	 * they're using SDVO LVDS.
+	/*
+	 * Assume LVDS is present if there's an ACPI lid device or if the
+	 * device is present in the VBT.
 	 */
-	if (!intel_lid_present())
+	if (!lvds_is_present_in_vbt(dev) && !intel_lid_present()) {
+		DRM_DEBUG_KMS("LVDS is not present in VBT and no lid detected\n");
 		return;
+	}
 
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
 			return;
 		if (dev_priv->edp_support) {
-			DRM_DEBUG("disable LVDS for eDP support\n");
+			DRM_DEBUG_KMS("disable LVDS for eDP support\n");
 			return;
 		}
 		gpio = PCH_GPIOC;
@@ -1023,6 +1117,7 @@ void intel_lvds_init(struct drm_device *dev)
 			dev_priv->panel_fixed_mode =
 				drm_mode_duplicate(dev, scan);
 			mutex_unlock(&dev->mode_config.mutex);
+			intel_find_lvds_downclock(dev, connector);
 			goto out;
 		}
 		mutex_unlock(&dev->mode_config.mutex);
@@ -1047,8 +1142,8 @@ void intel_lvds_init(struct drm_device *dev)
 	 * correct mode.
 	 */
 
-	/* IGDNG: FIXME if still fail, not try pipe mode now */
-	if (IS_IGDNG(dev))
+	/* Ironlake: FIXME if still fail, not try pipe mode now */
+	if (IS_IRONLAKE(dev))
 		goto failed;
 
 	lvds = I915_READ(LVDS);
@@ -1069,7 +1164,7 @@ void intel_lvds_init(struct drm_device *dev)
 		goto failed;
 
 out:
-	if (IS_IGDNG(dev)) {
+	if (IS_IRONLAKE(dev)) {
 		u32 pwm;
 		/* make sure PWM is enabled */
 		pwm = I915_READ(BLC_PWM_CPU_CTL2);
@@ -1082,7 +1177,7 @@ out:
 	}
 	dev_priv->lid_notifier.notifier_call = intel_lid_notify;
 	if (acpi_lid_notifier_register(&dev_priv->lid_notifier)) {
-		DRM_DEBUG("lid notifier registration failed\n");
+		DRM_DEBUG_KMS("lid notifier registration failed\n");
 		dev_priv->lid_notifier.notifier_call = NULL;
 	}
 	drm_sysfs_connector_add(connector);
@@ -1093,5 +1188,6 @@ failed:
 	if (intel_output->ddc_bus)
 		intel_i2c_destroy(intel_output->ddc_bus);
 	drm_connector_cleanup(connector);
+	drm_encoder_cleanup(encoder);
 	kfree(intel_output);
 }
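
The same VBT child-device scan reappears in intel_tv.c further down in this
diff. A condensed sketch of the shared pattern (the combined helper below is
hypothetical, assuming the child_device_config layout from intel_bios.h):

static int child_dev_type_present(struct drm_i915_private *dev_priv,
				  u16 type_a, u16 type_b)
{
	int i;

	/* no child devices parsed from the VBT: assume the output exists */
	if (!dev_priv->child_dev_num)
		return 1;

	for (i = 0; i < dev_priv->child_dev_num; i++) {
		struct child_device_config *p_child = dev_priv->child_dev + i;

		if (p_child->device_type != type_a &&
		    p_child->device_type != type_b)
			continue;
		/* only a non-zero addin_offset marks a populated slot */
		if (p_child->addin_offset)
			return 1;
	}
	return 0;
}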

+ 1416 - 0
drivers/gpu/drm/i915/intel_overlay.c

@@ -0,0 +1,1416 @@
+/*
+ * Copyright © 2009
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors:
+ *    Daniel Vetter <daniel@ffwll.ch>
+ *
+ * Derived from Xorg ddx, xf86-video-intel, src/i830_video.c
+ */
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_drv.h"
+
+/* Limits for overlay size. According to the intel docs, the real limits are:
+ * Y width: 4095, UV width (planar): 2047, Y height: 2047,
+ * UV height (planar): 1023. But xorg thinks 2048 for both width and height.
+ * Use the minimum of both.  */
+#define IMAGE_MAX_WIDTH		2048
+#define IMAGE_MAX_HEIGHT	2046 /* 2 * 1023 */
+/* on 830 and 845 these large limits result in the card hanging */
+#define IMAGE_MAX_WIDTH_LEGACY	1024
+#define IMAGE_MAX_HEIGHT_LEGACY	1088
+
+/* overlay register definitions */
+/* OCMD register */
+#define OCMD_TILED_SURFACE	(0x1<<19)
+#define OCMD_MIRROR_MASK	(0x3<<17)
+#define OCMD_MIRROR_MODE	(0x3<<17)
+#define OCMD_MIRROR_HORIZONTAL	(0x1<<17)
+#define OCMD_MIRROR_VERTICAL	(0x2<<17)
+#define OCMD_MIRROR_BOTH	(0x3<<17)
+#define OCMD_BYTEORDER_MASK	(0x3<<14) /* zero for YUYV or FOURCC YUY2 */
+#define OCMD_UV_SWAP		(0x1<<14) /* YVYU */
+#define OCMD_Y_SWAP		(0x2<<14) /* UYVY or FOURCC UYVY */
+#define OCMD_Y_AND_UV_SWAP	(0x3<<14) /* VYUY */
+#define OCMD_SOURCE_FORMAT_MASK (0xf<<10)
+#define OCMD_RGB_888		(0x1<<10) /* not in i965 Intel docs */
+#define OCMD_RGB_555		(0x2<<10) /* not in i965 Intel docs */
+#define OCMD_RGB_565		(0x3<<10) /* not in i965 Intel docs */
+#define OCMD_YUV_422_PACKED	(0x8<<10)
+#define OCMD_YUV_411_PACKED	(0x9<<10) /* not in i965 Intel docs */
+#define OCMD_YUV_420_PLANAR	(0xc<<10)
+#define OCMD_YUV_422_PLANAR	(0xd<<10)
+#define OCMD_YUV_410_PLANAR	(0xe<<10) /* also 411 */
+#define OCMD_TVSYNCFLIP_PARITY	(0x1<<9)
+#define OCMD_TVSYNCFLIP_ENABLE	(0x1<<7)
+#define OCMD_BUF_TYPE_MASK	(0x1<<5)
+#define OCMD_BUF_TYPE_FRAME	(0x0<<5)
+#define OCMD_BUF_TYPE_FIELD	(0x1<<5)
+#define OCMD_TEST_MODE		(0x1<<4)
+#define OCMD_BUFFER_SELECT	(0x3<<2)
+#define OCMD_BUFFER0		(0x0<<2)
+#define OCMD_BUFFER1		(0x1<<2)
+#define OCMD_FIELD_SELECT	(0x1<<2)
+#define OCMD_FIELD0		(0x0<<1)
+#define OCMD_FIELD1		(0x1<<1)
+#define OCMD_ENABLE		(0x1<<0)
+
+/* OCONFIG register */
+#define OCONF_PIPE_MASK		(0x1<<18)
+#define OCONF_PIPE_A		(0x0<<18)
+#define OCONF_PIPE_B		(0x1<<18)
+#define OCONF_GAMMA2_ENABLE	(0x1<<16)
+#define OCONF_CSC_MODE_BT601	(0x0<<5)
+#define OCONF_CSC_MODE_BT709	(0x1<<5)
+#define OCONF_CSC_BYPASS	(0x1<<4)
+#define OCONF_CC_OUT_8BIT	(0x1<<3)
+#define OCONF_TEST_MODE		(0x1<<2)
+#define OCONF_THREE_LINE_BUFFER	(0x1<<0)
+#define OCONF_TWO_LINE_BUFFER	(0x0<<0)
+
+/* DCLRKM (dst-key) register */
+#define DST_KEY_ENABLE		(0x1<<31)
+#define CLK_RGB24_MASK		0x0
+#define CLK_RGB16_MASK		0x070307
+#define CLK_RGB15_MASK		0x070707
+#define CLK_RGB8I_MASK		0xffffff
+
+#define RGB16_TO_COLORKEY(c) \
+	(((c & 0xF800) << 8) | ((c & 0x07E0) << 5) | ((c & 0x001F) << 3))
+#define RGB15_TO_COLORKEY(c) \
+	(((c & 0x7c00) << 9) | ((c & 0x03E0) << 6) | ((c & 0x001F) << 3))
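+/* Worked example: RGB16_TO_COLORKEY(0xF800) (RGB565 pure red) = 0xF80000,
+ * and RGB15_TO_COLORKEY(0x7c00) (RGB555 pure red) = 0xF80000; each 5/6 bit
+ * channel lands in the top bits of its 8 bit lane, low bits zeroed. */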
+
+/* overlay flip addr flag */
+#define OFC_UPDATE		0x1
+
+/* polyphase filter coefficients */
+#define N_HORIZ_Y_TAPS          5
+#define N_VERT_Y_TAPS           3
+#define N_HORIZ_UV_TAPS         3
+#define N_VERT_UV_TAPS          3
+#define N_PHASES                17
+#define MAX_TAPS                5
+
+/* memory-buffered overlay registers */
+struct overlay_registers {
+    u32 OBUF_0Y;
+    u32 OBUF_1Y;
+    u32 OBUF_0U;
+    u32 OBUF_0V;
+    u32 OBUF_1U;
+    u32 OBUF_1V;
+    u32 OSTRIDE;
+    u32 YRGB_VPH;
+    u32 UV_VPH;
+    u32 HORZ_PH;
+    u32 INIT_PHS;
+    u32 DWINPOS;
+    u32 DWINSZ;
+    u32 SWIDTH;
+    u32 SWIDTHSW;
+    u32 SHEIGHT;
+    u32 YRGBSCALE;
+    u32 UVSCALE;
+    u32 OCLRC0;
+    u32 OCLRC1;
+    u32 DCLRKV;
+    u32 DCLRKM;
+    u32 SCLRKVH;
+    u32 SCLRKVL;
+    u32 SCLRKEN;
+    u32 OCONFIG;
+    u32 OCMD;
+    u32 RESERVED1; /* 0x6C */
+    u32 OSTART_0Y;
+    u32 OSTART_1Y;
+    u32 OSTART_0U;
+    u32 OSTART_0V;
+    u32 OSTART_1U;
+    u32 OSTART_1V;
+    u32 OTILEOFF_0Y;
+    u32 OTILEOFF_1Y;
+    u32 OTILEOFF_0U;
+    u32 OTILEOFF_0V;
+    u32 OTILEOFF_1U;
+    u32 OTILEOFF_1V;
+    u32 FASTHSCALE; /* 0xA0 */
+    u32 UVSCALEV; /* 0xA4 */
+    u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */
+    u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */
+    u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES];
+    u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */
+    u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES];
+    u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */
+    u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES];
+    u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */
+    u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
+};
+
+#define OVERLAY_NONPHYSICAL(dev) (IS_G33(dev) || IS_I965G(dev))
+#define OVERLAY_EXISTS(dev) (!IS_G4X(dev) && !IS_IRONLAKE(dev))
+
+
+static struct overlay_registers *intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
+{
+	drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+	struct overlay_registers *regs;
+
+	/* no recursive mappings */
+	BUG_ON(overlay->virt_addr);
+
+	if (OVERLAY_NONPHYSICAL(overlay->dev)) {
+		regs = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+				overlay->reg_bo->gtt_offset);
+
+		if (!regs) {
+			DRM_ERROR("failed to map overlay regs in GTT\n");
+			return NULL;
+		}
+	} else
+		regs = overlay->reg_bo->phys_obj->handle->vaddr;
+
+	return overlay->virt_addr = regs;
+}
+
+static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay)
+{
+	struct drm_device *dev = overlay->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (OVERLAY_NONPHYSICAL(overlay->dev))
+		io_mapping_unmap_atomic(overlay->virt_addr);
+
+	overlay->virt_addr = NULL;
+
+	I915_READ(OVADD); /* flush wc caches */
+
+	return;
+}
+
+/* overlay needs to be disabled in OCMD reg */
+static int intel_overlay_on(struct intel_overlay *overlay)
+{
+	struct drm_device *dev = overlay->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+	RING_LOCALS;
+
+	BUG_ON(overlay->active);
+
+	overlay->active = 1;
+	overlay->hw_wedged = NEEDS_WAIT_FOR_FLIP;
+
+	BEGIN_LP_RING(6);
+	OUT_RING(MI_FLUSH);
+	OUT_RING(MI_NOOP);
+	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
+	OUT_RING(overlay->flip_addr | OFC_UPDATE);
+	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	OUT_RING(MI_NOOP);
+	ADVANCE_LP_RING();
+
+	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+	if (overlay->last_flip_req == 0)
+		return -ENOMEM;
+
+	ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+	if (ret != 0)
+		return ret;
+
+	overlay->hw_wedged = 0;
+	overlay->last_flip_req = 0;
+	return 0;
+}
+
+/* overlay needs to be enabled in OCMD reg */
+static void intel_overlay_continue(struct intel_overlay *overlay,
+			    bool load_polyphase_filter)
+{
+	struct drm_device *dev = overlay->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 flip_addr = overlay->flip_addr;
+	u32 tmp;
+	RING_LOCALS;
+
+	BUG_ON(!overlay->active);
+
+	if (load_polyphase_filter)
+		flip_addr |= OFC_UPDATE;
+
+	/* check for underruns */
+	tmp = I915_READ(DOVSTA);
+	if (tmp & (1 << 17))
+		DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
+
+	BEGIN_LP_RING(4);
+	OUT_RING(MI_FLUSH);
+	OUT_RING(MI_NOOP);
+	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+	OUT_RING(flip_addr);
+	ADVANCE_LP_RING();
+
+	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+}
+
+static int intel_overlay_wait_flip(struct intel_overlay *overlay)
+{
+	struct drm_device *dev = overlay->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+	u32 tmp;
+	RING_LOCALS;
+
+	if (overlay->last_flip_req != 0) {
+		ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+		if (ret == 0) {
+			overlay->last_flip_req = 0;
+
+			tmp = I915_READ(ISR);
+
+			if (!(tmp & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT))
+				return 0;
+		}
+	}
+
+	/* synchronous slowpath */
+	overlay->hw_wedged = RELEASE_OLD_VID;
+
+	BEGIN_LP_RING(2);
+	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	OUT_RING(MI_NOOP);
+	ADVANCE_LP_RING();
+
+	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+	if (overlay->last_flip_req == 0)
+		return -ENOMEM;
+
+	ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+	if (ret != 0)
+		return ret;
+
+	overlay->hw_wedged = 0;
+	overlay->last_flip_req = 0;
+	return 0;
+}
+
+/* overlay needs to be disabled in OCMD reg */
+static int intel_overlay_off(struct intel_overlay *overlay)
+{
+	u32 flip_addr = overlay->flip_addr;
+	struct drm_device *dev = overlay->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+	RING_LOCALS;
+
+	BUG_ON(!overlay->active);
+
+	/* According to intel docs the overlay hw may hang (when switching
+	 * off) without loading the filter coeffs. It is however unclear whether
+	 * this applies to the disabling of the overlay or to the switching off
+	 * of the hw. Do it in both cases */
+	flip_addr |= OFC_UPDATE;
+
+	/* wait for overlay to go idle */
+	overlay->hw_wedged = SWITCH_OFF_STAGE_1;
+
+	BEGIN_LP_RING(6);
+	OUT_RING(MI_FLUSH);
+	OUT_RING(MI_NOOP);
+	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
+	OUT_RING(flip_addr);
+	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	OUT_RING(MI_NOOP);
+	ADVANCE_LP_RING();
+
+	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+	if (overlay->last_flip_req == 0)
+		return -ENOMEM;
+
+	ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+	if (ret != 0)
+		return ret;
+
+	/* turn overlay off */
+	overlay->hw_wedged = SWITCH_OFF_STAGE_2;
+
+	BEGIN_LP_RING(6);
+	OUT_RING(MI_FLUSH);
+	OUT_RING(MI_NOOP);
+	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+	OUT_RING(flip_addr);
+	OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+	OUT_RING(MI_NOOP);
+	ADVANCE_LP_RING();
+
+	overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+	if (overlay->last_flip_req == 0)
+		return -ENOMEM;
+
+	ret = i915_do_wait_request(dev, overlay->last_flip_req, 1);
+	if (ret != 0)
+		return ret;
+
+	overlay->hw_wedged = 0;
+	overlay->last_flip_req = 0;
+	return 0;
+}
+
+static void intel_overlay_off_tail(struct intel_overlay *overlay)
+{
+	struct drm_gem_object *obj;
+
+	/* never have the overlay hw on without showing a frame */
+	BUG_ON(!overlay->vid_bo);
+	obj = overlay->vid_bo->obj;
+
+	i915_gem_object_unpin(obj);
+	drm_gem_object_unreference(obj);
+	overlay->vid_bo = NULL;
+
+	overlay->crtc->overlay = NULL;
+	overlay->crtc = NULL;
+	overlay->active = 0;
+}
+
+/* recover from an interruption due to a signal
+ * We have to be careful not to repeat work forever and make forward progress. */
+int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
+					 int interruptible)
+{
+	struct drm_device *dev = overlay->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	u32 flip_addr;
+	int ret;
+	RING_LOCALS;
+
+	if (overlay->hw_wedged == HW_WEDGED)
+		return -EIO;
+
+	if (overlay->last_flip_req == 0) {
+		overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+		if (overlay->last_flip_req == 0)
+			return -ENOMEM;
+	}
+
+	ret = i915_do_wait_request(dev, overlay->last_flip_req, interruptible);
+	if (ret != 0)
+		return ret;
+
+	switch (overlay->hw_wedged) {
+		case RELEASE_OLD_VID:
+			obj = overlay->old_vid_bo->obj;
+			i915_gem_object_unpin(obj);
+			drm_gem_object_unreference(obj);
+			overlay->old_vid_bo = NULL;
+			break;
+		case SWITCH_OFF_STAGE_1:
+			flip_addr = overlay->flip_addr;
+			flip_addr |= OFC_UPDATE;
+
+			overlay->hw_wedged = SWITCH_OFF_STAGE_2;
+
+			BEGIN_LP_RING(6);
+			OUT_RING(MI_FLUSH);
+			OUT_RING(MI_NOOP);
+			OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_OFF);
+			OUT_RING(flip_addr);
+			OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
+			OUT_RING(MI_NOOP);
+			ADVANCE_LP_RING();
+
+			overlay->last_flip_req = i915_add_request(dev, NULL, 0);
+			if (overlay->last_flip_req == 0)
+				return -ENOMEM;
+
+			ret = i915_do_wait_request(dev, overlay->last_flip_req,
+					interruptible);
+			if (ret != 0)
+				return ret;
+
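+			/* deliberately fall through to stage 2 */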
+		case SWITCH_OFF_STAGE_2:
+			intel_overlay_off_tail(overlay);
+			break;
+		default:
+			BUG_ON(overlay->hw_wedged != NEEDS_WAIT_FOR_FLIP);
+	}
+
+	overlay->hw_wedged = 0;
+	overlay->last_flip_req = 0;
+	return 0;
+}
+
+/* Wait for pending overlay flip and release old frame.
+ * Needs to be called before the overlay registers are changed
+ * via intel_overlay_(un)map_regs_atomic */
+static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
+{
+	int ret;
+	struct drm_gem_object *obj;
+
+	/* only wait if there is actually an old frame to release to
+	 * guarantee forward progress */
+	if (!overlay->old_vid_bo)
+		return 0;
+
+	ret = intel_overlay_wait_flip(overlay);
+	if (ret != 0)
+		return ret;
+
+	obj = overlay->old_vid_bo->obj;
+	i915_gem_object_unpin(obj);
+	drm_gem_object_unreference(obj);
+	overlay->old_vid_bo = NULL;
+
+	return 0;
+}
+
+struct put_image_params {
+	int format;
+	short dst_x;
+	short dst_y;
+	short dst_w;
+	short dst_h;
+	short src_w;
+	short src_scan_h;
+	short src_scan_w;
+	short src_h;
+	short stride_Y;
+	short stride_UV;
+	int offset_Y;
+	int offset_U;
+	int offset_V;
+};
+
+static int packed_depth_bytes(u32 format)
+{
+	switch (format & I915_OVERLAY_DEPTH_MASK) {
+		case I915_OVERLAY_YUV422:
+			return 4;
+		case I915_OVERLAY_YUV411:
+			/* return 6; not implemented */
+		default:
+			return -EINVAL;
+	}
+}
+
+static int packed_width_bytes(u32 format, short width)
+{
+	switch (format & I915_OVERLAY_DEPTH_MASK) {
+		case I915_OVERLAY_YUV422:
+			return width << 1;
+		default:
+			return -EINVAL;
+	}
+}
+
+static int uv_hsubsampling(u32 format)
+{
+	switch (format & I915_OVERLAY_DEPTH_MASK) {
+		case I915_OVERLAY_YUV422:
+		case I915_OVERLAY_YUV420:
+			return 2;
+		case I915_OVERLAY_YUV411:
+		case I915_OVERLAY_YUV410:
+			return 4;
+		default:
+			return -EINVAL;
+	}
+}
+
+static int uv_vsubsampling(u32 format)
+{
+	switch (format & I915_OVERLAY_DEPTH_MASK) {
+		case I915_OVERLAY_YUV420:
+		case I915_OVERLAY_YUV410:
+			return 2;
+		case I915_OVERLAY_YUV422:
+		case I915_OVERLAY_YUV411:
+			return 1;
+		default:
+			return -EINVAL;
+	}
+}
+
+static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
+{
+	u32 mask, shift, ret;
+	if (IS_I9XX(dev)) {
+		mask = 0x3f;
+		shift = 6;
+	} else {
+		mask = 0x1f;
+		shift = 5;
+	}
+	ret = ((offset + width + mask) >> shift) - (offset >> shift);
+	if (IS_I9XX(dev))
+		ret <<= 1;
+	ret -= 1;
+	return ret << 2;
+}
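+/* Worked example: on i9xx, offset 0 and a width of 1024 bytes span
+ * ((0 + 1024 + 63) >> 6) = 16 64-byte units; doubled to 32 32-byte units,
+ * minus one and shifted left by 2 this yields 0x7c for SWIDTHSW. */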
+
+static const u16 y_static_hcoeffs[N_HORIZ_Y_TAPS * N_PHASES] = {
+	0x3000, 0xb4a0, 0x1930, 0x1920, 0xb4a0,
+	0x3000, 0xb500, 0x19d0, 0x1880, 0xb440,
+	0x3000, 0xb540, 0x1a88, 0x2f80, 0xb3e0,
+	0x3000, 0xb580, 0x1b30, 0x2e20, 0xb380,
+	0x3000, 0xb5c0, 0x1bd8, 0x2cc0, 0xb320,
+	0x3020, 0xb5e0, 0x1c60, 0x2b80, 0xb2c0,
+	0x3020, 0xb5e0, 0x1cf8, 0x2a20, 0xb260,
+	0x3020, 0xb5e0, 0x1d80, 0x28e0, 0xb200,
+	0x3020, 0xb5c0, 0x1e08, 0x3f40, 0xb1c0,
+	0x3020, 0xb580, 0x1e78, 0x3ce0, 0xb160,
+	0x3040, 0xb520, 0x1ed8, 0x3aa0, 0xb120,
+	0x3040, 0xb4a0, 0x1f30, 0x3880, 0xb0e0,
+	0x3040, 0xb400, 0x1f78, 0x3680, 0xb0a0,
+	0x3020, 0xb340, 0x1fb8, 0x34a0, 0xb060,
+	0x3020, 0xb240, 0x1fe0, 0x32e0, 0xb040,
+	0x3020, 0xb140, 0x1ff8, 0x3160, 0xb020,
+	0xb000, 0x3000, 0x0800, 0x3000, 0xb000};
+static const u16 uv_static_hcoeffs[N_HORIZ_UV_TAPS * N_PHASES] = {
+	0x3000, 0x1800, 0x1800, 0xb000, 0x18d0, 0x2e60,
+	0xb000, 0x1990, 0x2ce0, 0xb020, 0x1a68, 0x2b40,
+	0xb040, 0x1b20, 0x29e0, 0xb060, 0x1bd8, 0x2880,
+	0xb080, 0x1c88, 0x3e60, 0xb0a0, 0x1d28, 0x3c00,
+	0xb0c0, 0x1db8, 0x39e0, 0xb0e0, 0x1e40, 0x37e0,
+	0xb100, 0x1eb8, 0x3620, 0xb100, 0x1f18, 0x34a0,
+	0xb100, 0x1f68, 0x3360, 0xb0e0, 0x1fa8, 0x3240,
+	0xb0c0, 0x1fe0, 0x3140, 0xb060, 0x1ff0, 0x30a0,
+	0x3000, 0x0800, 0x3000};
+
+static void update_polyphase_filter(struct overlay_registers *regs)
+{
+	memcpy(regs->Y_HCOEFS, y_static_hcoeffs, sizeof(y_static_hcoeffs));
+	memcpy(regs->UV_HCOEFS, uv_static_hcoeffs, sizeof(uv_static_hcoeffs));
+}
+
+static bool update_scaling_factors(struct intel_overlay *overlay,
+				   struct overlay_registers *regs,
+				   struct put_image_params *params)
+{
+	/* fixed point with a 12 bit shift */
+	u32 xscale, yscale, xscale_UV, yscale_UV;
+#define FP_SHIFT 12
+#define FRACT_MASK 0xfff
+	bool scale_changed = false;
+	int uv_hscale = uv_hsubsampling(params->format);
+	int uv_vscale = uv_vsubsampling(params->format);
+
+	if (params->dst_w > 1)
+		xscale = ((params->src_scan_w - 1) << FP_SHIFT)
+			/ (params->dst_w);
+	else
+		xscale = 1 << FP_SHIFT;
+
+	if (params->dst_h > 1)
+		yscale = ((params->src_scan_h - 1) << FP_SHIFT)
+			/ (params->dst_h);
+	else
+		yscale = 1 << FP_SHIFT;
+
+	/*if (params->format & I915_OVERLAY_YUV_PLANAR) {*/
+		xscale_UV = xscale/uv_hscale;
+		yscale_UV = yscale/uv_vscale;
+		/* make the Y scale to UV scale ratio an exact multiple */
+		xscale = xscale_UV * uv_hscale;
+		yscale = yscale_UV * uv_vscale;
+	/*} else {
+		xscale_UV = 0;
+		yscale_UV = 0;
+	}*/
+
+	if (xscale != overlay->old_xscale || yscale != overlay->old_yscale)
+		scale_changed = true;
+	overlay->old_xscale = xscale;
+	overlay->old_yscale = yscale;
+
+	regs->YRGBSCALE = ((yscale & FRACT_MASK) << 20)
+		| ((xscale >> FP_SHIFT) << 16)
+		| ((xscale & FRACT_MASK) << 3);
+	regs->UVSCALE = ((yscale_UV & FRACT_MASK) << 20)
+		| ((xscale_UV >> FP_SHIFT) << 16)
+		| ((xscale_UV & FRACT_MASK) << 3);
+	regs->UVSCALEV = ((yscale >> FP_SHIFT) << 16)
+		| ((yscale_UV >> FP_SHIFT) << 0);
+
+	if (scale_changed)
+		update_polyphase_filter(regs);
+
+	return scale_changed;
+}
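+/* Worked example: scanning 1280 source pixels out to a 640 pixel wide
+ * destination gives xscale = ((1280 - 1) << 12) / 640 = 8185 = 0x1ff9,
+ * i.e. roughly 2.0 in this fixed point format (integer part 1,
+ * fraction 0xff9/4096). */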
+
+static void update_colorkey(struct intel_overlay *overlay,
+			    struct overlay_registers *regs)
+{
+	u32 key = overlay->color_key;
+	switch (overlay->crtc->base.fb->bits_per_pixel) {
+		case 8:
+			regs->DCLRKV = 0;
+			regs->DCLRKM = CLK_RGB8I_MASK | DST_KEY_ENABLE;
+			break;
+		case 16:
+			if (overlay->crtc->base.fb->depth == 15) {
+				regs->DCLRKV = RGB15_TO_COLORKEY(key);
+				regs->DCLRKM = CLK_RGB15_MASK | DST_KEY_ENABLE;
+			} else {
+				regs->DCLRKV = RGB16_TO_COLORKEY(key);
+				regs->DCLRKM = CLK_RGB16_MASK | DST_KEY_ENABLE;
+			}
+			break;
+		case 24:
+		case 32:
+			regs->DCLRKV = key;
+			regs->DCLRKM = CLK_RGB24_MASK | DST_KEY_ENABLE;
+			break;
+	}
+}
+
+static u32 overlay_cmd_reg(struct put_image_params *params)
+{
+	u32 cmd = OCMD_ENABLE | OCMD_BUF_TYPE_FRAME | OCMD_BUFFER0;
+
+	if (params->format & I915_OVERLAY_YUV_PLANAR) {
+		switch (params->format & I915_OVERLAY_DEPTH_MASK) {
+			case I915_OVERLAY_YUV422:
+				cmd |= OCMD_YUV_422_PLANAR;
+				break;
+			case I915_OVERLAY_YUV420:
+				cmd |= OCMD_YUV_420_PLANAR;
+				break;
+			case I915_OVERLAY_YUV411:
+			case I915_OVERLAY_YUV410:
+				cmd |= OCMD_YUV_410_PLANAR;
+				break;
+		}
+	} else { /* YUV packed */
+		switch (params->format & I915_OVERLAY_DEPTH_MASK) {
+			case I915_OVERLAY_YUV422:
+				cmd |= OCMD_YUV_422_PACKED;
+				break;
+			case I915_OVERLAY_YUV411:
+				cmd |= OCMD_YUV_411_PACKED;
+				break;
+		}
+
+		switch (params->format & I915_OVERLAY_SWAP_MASK) {
+			case I915_OVERLAY_NO_SWAP:
+				break;
+			case I915_OVERLAY_UV_SWAP:
+				cmd |= OCMD_UV_SWAP;
+				break;
+			case I915_OVERLAY_Y_SWAP:
+				cmd |= OCMD_Y_SWAP;
+				break;
+			case I915_OVERLAY_Y_AND_UV_SWAP:
+				cmd |= OCMD_Y_AND_UV_SWAP;
+				break;
+		}
+	}
+
+	return cmd;
+}
+
+int intel_overlay_do_put_image(struct intel_overlay *overlay,
+			       struct drm_gem_object *new_bo,
+			       struct put_image_params *params)
+{
+	int ret, tmp_width;
+	struct overlay_registers *regs;
+	bool scale_changed = false;
+	struct drm_i915_gem_object *bo_priv = new_bo->driver_private;
+	struct drm_device *dev = overlay->dev;
+
+	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+	BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
+	BUG_ON(!overlay);
+
+	ret = intel_overlay_release_old_vid(overlay);
+	if (ret != 0)
+		return ret;
+
+	ret = i915_gem_object_pin(new_bo, PAGE_SIZE);
+	if (ret != 0)
+		return ret;
+
+	ret = i915_gem_object_set_to_gtt_domain(new_bo, 0);
+	if (ret != 0)
+		goto out_unpin;
+
+	if (!overlay->active) {
+		regs = intel_overlay_map_regs_atomic(overlay);
+		if (!regs) {
+			ret = -ENOMEM;
+			goto out_unpin;
+		}
+		regs->OCONFIG = OCONF_CC_OUT_8BIT;
+		if (IS_I965GM(overlay->dev))
+			regs->OCONFIG |= OCONF_CSC_MODE_BT709;
+		regs->OCONFIG |= overlay->crtc->pipe == 0 ?
+			OCONF_PIPE_A : OCONF_PIPE_B;
+		intel_overlay_unmap_regs_atomic(overlay);
+
+		ret = intel_overlay_on(overlay);
+		if (ret != 0)
+			goto out_unpin;
+	}
+
+	regs = intel_overlay_map_regs_atomic(overlay);
+	if (!regs) {
+		ret = -ENOMEM;
+		goto out_unpin;
+	}
+
+	regs->DWINPOS = (params->dst_y << 16) | params->dst_x;
+	regs->DWINSZ = (params->dst_h << 16) | params->dst_w;
+
+	if (params->format & I915_OVERLAY_YUV_PACKED)
+		tmp_width = packed_width_bytes(params->format, params->src_w);
+	else
+		tmp_width = params->src_w;
+
+	regs->SWIDTH = params->src_w;
+	regs->SWIDTHSW = calc_swidthsw(overlay->dev,
+			params->offset_Y, tmp_width);
+	regs->SHEIGHT = params->src_h;
+	regs->OBUF_0Y = bo_priv->gtt_offset + params->offset_Y;
+	regs->OSTRIDE = params->stride_Y;
+
+	if (params->format & I915_OVERLAY_YUV_PLANAR) {
+		int uv_hscale = uv_hsubsampling(params->format);
+		int uv_vscale = uv_vsubsampling(params->format);
+		u32 tmp_U, tmp_V;
+		regs->SWIDTH |= (params->src_w/uv_hscale) << 16;
+		tmp_U = calc_swidthsw(overlay->dev, params->offset_U,
+				params->src_w/uv_hscale);
+		tmp_V = calc_swidthsw(overlay->dev, params->offset_V,
+				params->src_w/uv_hscale);
+		regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
+		regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
+		regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U;
+		regs->OBUF_0V = bo_priv->gtt_offset + params->offset_V;
+		regs->OSTRIDE |= params->stride_UV << 16;
+	}
+
+	scale_changed = update_scaling_factors(overlay, regs, params);
+
+	update_colorkey(overlay, regs);
+
+	regs->OCMD = overlay_cmd_reg(params);
+
+	intel_overlay_unmap_regs_atomic(overlay);
+
+	intel_overlay_continue(overlay, scale_changed);
+
+	overlay->old_vid_bo = overlay->vid_bo;
+	overlay->vid_bo = new_bo->driver_private;
+
+	return 0;
+
+out_unpin:
+	i915_gem_object_unpin(new_bo);
+	return ret;
+}
+
+int intel_overlay_switch_off(struct intel_overlay *overlay)
+{
+	int ret;
+	struct overlay_registers *regs;
+	struct drm_device *dev = overlay->dev;
+
+	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+	BUG_ON(!mutex_is_locked(&dev->mode_config.mutex));
+
+	if (overlay->hw_wedged) {
+		ret = intel_overlay_recover_from_interrupt(overlay, 1);
+		if (ret != 0)
+			return ret;
+	}
+
+	if (!overlay->active)
+		return 0;
+
+	ret = intel_overlay_release_old_vid(overlay);
+	if (ret != 0)
+		return ret;
+
+	regs = intel_overlay_map_regs_atomic(overlay);
+	regs->OCMD = 0;
+	intel_overlay_unmap_regs_atomic(overlay);
+
+	ret = intel_overlay_off(overlay);
+	if (ret != 0)
+		return ret;
+
+	intel_overlay_off_tail(overlay);
+
+	return 0;
+}
+
+static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
+					  struct intel_crtc *crtc)
+{
+	drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+	u32 pipeconf;
+	int pipeconf_reg = (crtc->pipe == 0) ? PIPEACONF : PIPEBCONF;
+
+	if (!crtc->base.enabled || crtc->dpms_mode != DRM_MODE_DPMS_ON)
+		return -EINVAL;
+
+	pipeconf = I915_READ(pipeconf_reg);
+
+	/* can't use the overlay with double wide pipe */
+	if (!IS_I965G(overlay->dev) && pipeconf & PIPEACONF_DOUBLE_WIDE)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
+{
+	struct drm_device *dev = overlay->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	u32 ratio;
+	u32 pfit_control = I915_READ(PFIT_CONTROL);
+
+	/* XXX: This is not the same logic as in the xorg driver, but more in
+	 * line with the intel documentation for the i965 */
+	if (!IS_I965G(dev) && (pfit_control & VERT_AUTO_SCALE)) {
+		ratio = I915_READ(PFIT_AUTO_RATIOS) >> PFIT_VERT_SCALE_SHIFT;
+	} else { /* on i965 use the PGM reg to read out the autoscaler values */
+		ratio = I915_READ(PFIT_PGM_RATIOS);
+		if (IS_I965G(dev))
+			ratio >>= PFIT_VERT_SCALE_SHIFT_965;
+		else
+			ratio >>= PFIT_VERT_SCALE_SHIFT;
+	}
+
+	overlay->pfit_vscale_ratio = ratio;
+}
+
+static int check_overlay_dst(struct intel_overlay *overlay,
+			     struct drm_intel_overlay_put_image *rec)
+{
+	struct drm_display_mode *mode = &overlay->crtc->base.mode;
+
+	if ((rec->dst_x < mode->crtc_hdisplay)
+	    && (rec->dst_x + rec->dst_width
+		    <= mode->crtc_hdisplay)
+	    && (rec->dst_y < mode->crtc_vdisplay)
+	    && (rec->dst_y + rec->dst_height
+		    <= mode->crtc_vdisplay))
+		return 0;
+	else
+		return -EINVAL;
+}
+
+static int check_overlay_scaling(struct put_image_params *rec)
+{
+	u32 tmp;
+
+	/* downscaling limit is 8.0 */
+	tmp = ((rec->src_scan_h << 16) / rec->dst_h) >> 16;
+	if (tmp > 7)
+		return -EINVAL;
+	tmp = ((rec->src_scan_w << 16) / rec->dst_w) >> 16;
+	if (tmp > 7)
+		return -EINVAL;
+
+	return 0;
+}
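+/* Worked example: src_scan_h = 2048 with dst_h = 256 gives tmp = 8 and is
+ * rejected, while dst_h = 257 gives tmp = 7 and passes; the 8.0 limit is
+ * treated as exclusive. */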
+
+static int check_overlay_src(struct drm_device *dev,
+			     struct drm_intel_overlay_put_image *rec,
+			     struct drm_gem_object *new_bo)
+{
+	u32 stride_mask;
+	int depth;
+	int uv_hscale = uv_hsubsampling(rec->flags);
+	int uv_vscale = uv_vsubsampling(rec->flags);
+	size_t tmp;
+
+	/* check src dimensions */
+	if (IS_845G(dev) || IS_I830(dev)) {
+		if (rec->src_height > IMAGE_MAX_HEIGHT_LEGACY
+		    || rec->src_width > IMAGE_MAX_WIDTH_LEGACY)
+			return -EINVAL;
+	} else {
+		if (rec->src_height > IMAGE_MAX_HEIGHT
+		    || rec->src_width > IMAGE_MAX_WIDTH)
+			return -EINVAL;
+	}
+	/* better safe than sorry, use 4 as the maximal subsampling ratio */
+	if (rec->src_height < N_VERT_Y_TAPS*4
+	    || rec->src_width < N_HORIZ_Y_TAPS*4)
+		return -EINVAL;
+
+	/* check alignment constraints */
+	switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
+		case I915_OVERLAY_RGB:
+			/* not implemented */
+			return -EINVAL;
+		case I915_OVERLAY_YUV_PACKED:
+			depth = packed_depth_bytes(rec->flags);
+			if (uv_vscale != 1)
+				return -EINVAL;
+			if (depth < 0)
+				return depth;
+			/* ignore UV planes */
+			rec->stride_UV = 0;
+			rec->offset_U = 0;
+			rec->offset_V = 0;
+			/* check pixel alignment */
+			if (rec->offset_Y % depth)
+				return -EINVAL;
+			break;
+		case I915_OVERLAY_YUV_PLANAR:
+			if (uv_vscale < 0 || uv_hscale < 0)
+				return -EINVAL;
+			/* no offset restrictions for planar formats */
+			break;
+		default:
+			return -EINVAL;
+	}
+
+	if (rec->src_width % uv_hscale)
+		return -EINVAL;
+
+	/* stride checking */
+	stride_mask = 63;
+
+	if (rec->stride_Y & stride_mask || rec->stride_UV & stride_mask)
+		return -EINVAL;
+	if (IS_I965G(dev) && rec->stride_Y < 512)
+		return -EINVAL;
+
+	tmp = (rec->flags & I915_OVERLAY_TYPE_MASK) == I915_OVERLAY_YUV_PLANAR ?
+		4 : 8;
+	if (rec->stride_Y > tmp*1024 || rec->stride_UV > 2*1024)
+		return -EINVAL;
+
+	/* check buffer dimensions */
+	switch (rec->flags & I915_OVERLAY_TYPE_MASK) {
+		case I915_OVERLAY_RGB:
+		case I915_OVERLAY_YUV_PACKED:
+			/* the Y stride must hold a full packed source line */
+			if (packed_width_bytes(rec->flags, rec->src_width)
+					> rec->stride_Y)
+				return -EINVAL;
+
+			tmp = rec->stride_Y*rec->src_height;
+			if (rec->offset_Y + tmp > new_bo->size)
+				return -EINVAL;
+			break;
+		case I915_OVERLAY_YUV_PLANAR:
+			if (rec->src_width > rec->stride_Y)
+				return -EINVAL;
+			if (rec->src_width/uv_hscale > rec->stride_UV)
+				return -EINVAL;
+
+			tmp = rec->stride_Y*rec->src_height;
+			if (rec->offset_Y + tmp > new_bo->size)
+				return -EINVAL;
+			tmp = rec->stride_UV*rec->src_height;
+			tmp /= uv_vscale;
+			if (rec->offset_U + tmp > new_bo->size
+			    || rec->offset_V + tmp > new_bo->size)
+				return -EINVAL;
+			break;
+	}
+
+	return 0;
+}
+
+int intel_overlay_put_image(struct drm_device *dev, void *data,
+                            struct drm_file *file_priv)
+{
+	struct drm_intel_overlay_put_image *put_image_rec = data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_overlay *overlay;
+	struct drm_mode_object *drmmode_obj;
+	struct intel_crtc *crtc;
+	struct drm_gem_object *new_bo;
+	struct put_image_params *params;
+	int ret;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	overlay = dev_priv->overlay;
+	if (!overlay) {
+		DRM_DEBUG("userspace bug: no overlay\n");
+		return -ENODEV;
+	}
+
+	if (!(put_image_rec->flags & I915_OVERLAY_ENABLE)) {
+		mutex_lock(&dev->mode_config.mutex);
+		mutex_lock(&dev->struct_mutex);
+
+		ret = intel_overlay_switch_off(overlay);
+
+		mutex_unlock(&dev->struct_mutex);
+		mutex_unlock(&dev->mode_config.mutex);
+
+		return ret;
+	}
+
+	params = kmalloc(sizeof(struct put_image_params), GFP_KERNEL);
+	if (!params)
+		return -ENOMEM;
+
+	drmmode_obj = drm_mode_object_find(dev, put_image_rec->crtc_id,
+			DRM_MODE_OBJECT_CRTC);
+	if (!drmmode_obj) {
+		kfree(params);
+		return -ENOENT;
+	}
+	crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
+
+	new_bo = drm_gem_object_lookup(dev, file_priv,
+			put_image_rec->bo_handle);
+	if (!new_bo) {
+		kfree(params);
+		return -ENOENT;
+	}
+
+	mutex_lock(&dev->mode_config.mutex);
+	mutex_lock(&dev->struct_mutex);
+
+	if (overlay->hw_wedged) {
+		ret = intel_overlay_recover_from_interrupt(overlay, 1);
+		if (ret != 0)
+			goto out_unlock;
+	}
+
+	if (overlay->crtc != crtc) {
+		struct drm_display_mode *mode = &crtc->base.mode;
+		ret = intel_overlay_switch_off(overlay);
+		if (ret != 0)
+			goto out_unlock;
+
+		ret = check_overlay_possible_on_crtc(overlay, crtc);
+		if (ret != 0)
+			goto out_unlock;
+
+		overlay->crtc = crtc;
+		crtc->overlay = overlay;
+
+		if (intel_panel_fitter_pipe(dev) == crtc->pipe
+		    /* and the line is too wide, i.e. one-line-mode */
+		    && mode->hdisplay > 1024) {
+			overlay->pfit_active = 1;
+			update_pfit_vscale_ratio(overlay);
+		} else
+			overlay->pfit_active = 0;
+	}
+
+	ret = check_overlay_dst(overlay, put_image_rec);
+	if (ret != 0)
+		goto out_unlock;
+
+	if (overlay->pfit_active) {
+		params->dst_y = ((((u32)put_image_rec->dst_y) << 12) /
+			overlay->pfit_vscale_ratio);
+		/* shifting right rounds downwards, so add 1 */
+		params->dst_h = ((((u32)put_image_rec->dst_height) << 12) /
+			overlay->pfit_vscale_ratio) + 1;
+	} else {
+		params->dst_y = put_image_rec->dst_y;
+		params->dst_h = put_image_rec->dst_height;
+	}
+	params->dst_x = put_image_rec->dst_x;
+	params->dst_w = put_image_rec->dst_width;
+
+	params->src_w = put_image_rec->src_width;
+	params->src_h = put_image_rec->src_height;
+	params->src_scan_w = put_image_rec->src_scan_width;
+	params->src_scan_h = put_image_rec->src_scan_height;
+	if (params->src_scan_h > params->src_h
+	    || params->src_scan_w > params->src_w) {
+		ret = -EINVAL;
+		goto out_unlock;
+	}
+
+	ret = check_overlay_src(dev, put_image_rec, new_bo);
+	if (ret != 0)
+		goto out_unlock;
+	params->format = put_image_rec->flags & ~I915_OVERLAY_FLAGS_MASK;
+	params->stride_Y = put_image_rec->stride_Y;
+	params->stride_UV = put_image_rec->stride_UV;
+	params->offset_Y = put_image_rec->offset_Y;
+	params->offset_U = put_image_rec->offset_U;
+	params->offset_V = put_image_rec->offset_V;
+
+	/* Check scaling after src size to prevent a divide-by-zero. */
+	ret = check_overlay_scaling(params);
+	if (ret != 0)
+		goto out_unlock;
+
+	ret = intel_overlay_do_put_image(overlay, new_bo, params);
+	if (ret != 0)
+		goto out_unlock;
+
+	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev->mode_config.mutex);
+
+	kfree(params);
+
+	return 0;
+
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev->mode_config.mutex);
+	drm_gem_object_unreference(new_bo);
+	kfree(params);
+
+	return ret;
+}
+
+static void update_reg_attrs(struct intel_overlay *overlay,
+			     struct overlay_registers *regs)
+{
+	regs->OCLRC0 = (overlay->contrast << 18) | (overlay->brightness & 0xff);
+	regs->OCLRC1 = overlay->saturation;
+}
+
+static bool check_gamma_bounds(u32 gamma1, u32 gamma2)
+{
+	int i;
+
+	if (gamma1 & 0xff000000 || gamma2 & 0xff000000)
+		return false;
+
+	for (i = 0; i < 3; i++) {
+		if (((gamma1 >> i * 8) & 0xff) >= ((gamma2 >> i*8) & 0xff))
+			return false;
+	}
+
+	return true;
+}
+
+static bool check_gamma5_errata(u32 gamma5)
+{
+	int i;
+
+	for (i = 0; i < 3; i++) {
+		if (((gamma5 >> i*8) & 0xff) == 0x80)
+			return false;
+	}
+
+	return true;
+}
+
+static int check_gamma(struct drm_intel_overlay_attrs *attrs)
+{
+	if (!check_gamma_bounds(0, attrs->gamma0)
+	    || !check_gamma_bounds(attrs->gamma0, attrs->gamma1)
+	    || !check_gamma_bounds(attrs->gamma1, attrs->gamma2)
+	    || !check_gamma_bounds(attrs->gamma2, attrs->gamma3)
+	    || !check_gamma_bounds(attrs->gamma3, attrs->gamma4)
+	    || !check_gamma_bounds(attrs->gamma4, attrs->gamma5)
+	    || !check_gamma_bounds(attrs->gamma5, 0x00ffffff))
+		return -EINVAL;
+	if (!check_gamma5_errata(attrs->gamma5))
+		return -EINVAL;
+	return 0;
+}
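+/* Worked example: the ramp 0x101010, 0x202020, ..., 0x606060 passes; every
+ * byte channel strictly increases from gamma0 through gamma5 and stays
+ * within (0, 0x00ffffff). A gamma5 with any channel byte equal to 0x80
+ * would be rejected by the errata check. */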
+
+int intel_overlay_attrs(struct drm_device *dev, void *data,
+                        struct drm_file *file_priv)
+{
+	struct drm_intel_overlay_attrs *attrs = data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_overlay *overlay;
+	struct overlay_registers *regs;
+	int ret;
+
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
+		return -EINVAL;
+	}
+
+	overlay = dev_priv->overlay;
+	if (!overlay) {
+		DRM_DEBUG("userspace bug: no overlay\n");
+		return -ENODEV;
+	}
+
+	mutex_lock(&dev->mode_config.mutex);
+	mutex_lock(&dev->struct_mutex);
+
+	if (!(attrs->flags & I915_OVERLAY_UPDATE_ATTRS)) {
+		attrs->color_key = overlay->color_key;
+		attrs->brightness = overlay->brightness;
+		attrs->contrast = overlay->contrast;
+		attrs->saturation = overlay->saturation;
+
+		if (IS_I9XX(dev)) {
+			attrs->gamma0 = I915_READ(OGAMC0);
+			attrs->gamma1 = I915_READ(OGAMC1);
+			attrs->gamma2 = I915_READ(OGAMC2);
+			attrs->gamma3 = I915_READ(OGAMC3);
+			attrs->gamma4 = I915_READ(OGAMC4);
+			attrs->gamma5 = I915_READ(OGAMC5);
+		}
+		ret = 0;
+	} else {
+		overlay->color_key = attrs->color_key;
+		if (attrs->brightness >= -128 && attrs->brightness <= 127) {
+			overlay->brightness = attrs->brightness;
+		} else {
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+		if (attrs->contrast <= 255) {
+			overlay->contrast = attrs->contrast;
+		} else {
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+		if (attrs->saturation <= 1023) {
+			overlay->saturation = attrs->saturation;
+		} else {
+			ret = -EINVAL;
+			goto out_unlock;
+		}
+
+		regs = intel_overlay_map_regs_atomic(overlay);
+		if (!regs) {
+			ret = -ENOMEM;
+			goto out_unlock;
+		}
+
+		update_reg_attrs(overlay, regs);
+
+		intel_overlay_unmap_regs_atomic(overlay);
+
+		if (attrs->flags & I915_OVERLAY_UPDATE_GAMMA) {
+			if (!IS_I9XX(dev)) {
+				ret = -EINVAL;
+				goto out_unlock;
+			}
+
+			if (overlay->active) {
+				ret = -EBUSY;
+				goto out_unlock;
+			}
+
+			ret = check_gamma(attrs);
+			if (ret != 0)
+				goto out_unlock;
+
+			I915_WRITE(OGAMC0, attrs->gamma0);
+			I915_WRITE(OGAMC1, attrs->gamma1);
+			I915_WRITE(OGAMC2, attrs->gamma2);
+			I915_WRITE(OGAMC3, attrs->gamma3);
+			I915_WRITE(OGAMC4, attrs->gamma4);
+			I915_WRITE(OGAMC5, attrs->gamma5);
+		}
+		ret = 0;
+	}
+
+out_unlock:
+	mutex_unlock(&dev->struct_mutex);
+	mutex_unlock(&dev->mode_config.mutex);
+
+	return ret;
+}
+
+void intel_setup_overlay(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_overlay *overlay;
+	struct drm_gem_object *reg_bo;
+	struct overlay_registers *regs;
+	int ret;
+
+	if (!OVERLAY_EXISTS(dev))
+		return;
+
+	overlay = kzalloc(sizeof(struct intel_overlay), GFP_KERNEL);
+	if (!overlay)
+		return;
+	overlay->dev = dev;
+
+	reg_bo = drm_gem_object_alloc(dev, PAGE_SIZE);
+	if (!reg_bo)
+		goto out_free;
+	overlay->reg_bo = reg_bo->driver_private;
+
+	if (OVERLAY_NONPHYSICAL(dev)) {
+		ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
+		if (ret) {
+                        DRM_ERROR("failed to pin overlay register bo\n");
+                        goto out_free_bo;
+                }
+		overlay->flip_addr = overlay->reg_bo->gtt_offset;
+	} else {
+		ret = i915_gem_attach_phys_object(dev, reg_bo,
+				I915_GEM_PHYS_OVERLAY_REGS);
+		if (ret) {
+			DRM_ERROR("failed to attach phys overlay regs\n");
+			goto out_free_bo;
+		}
+		overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
+	}
+
+	/* init all values */
+	overlay->color_key = 0x0101fe;
+	overlay->brightness = -19;
+	overlay->contrast = 75;
+	overlay->saturation = 146;
+
+	regs = intel_overlay_map_regs_atomic(overlay);
+	if (!regs)
+		goto out_free_bo;
+
+	memset(regs, 0, sizeof(struct overlay_registers));
+	update_polyphase_filter(regs);
+
+	update_reg_attrs(overlay, regs);
+
+	intel_overlay_unmap_regs_atomic(overlay);
+
+	dev_priv->overlay = overlay;
+	DRM_INFO("initialized overlay support\n");
+	return;
+
+out_free_bo:
+	drm_gem_object_unreference(reg_bo);
+out_free:
+	kfree(overlay);
+	return;
+}
+
+void intel_cleanup_overlay(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (dev_priv->overlay) {
+		/* The BOs should already be freed by the generic code.
+		 * Furthermore, modesetting teardown happens beforehand,
+		 * so the hardware should be off already. */
+		BUG_ON(dev_priv->overlay->active);
+
+		kfree(dev_priv->overlay);
+	}
+}
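
To exercise the new ioctl from userspace, a caller fills in a
drm_intel_overlay_put_image record subject to the checks above. A minimal
sketch with hypothetical values, assuming the DRM_IOCTL_I915_OVERLAY_PUT_IMAGE
define from the matching i915_drm.h update and libdrm's drmIoctl():

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <xf86drm.h>	/* drmIoctl() */
#include "i915_drm.h"	/* overlay ioctl and flag definitions */

static int show_frame(int fd, uint32_t crtc_id, uint32_t bo_handle)
{
	struct drm_intel_overlay_put_image rec;

	memset(&rec, 0, sizeof(rec));
	rec.flags = I915_OVERLAY_ENABLE | I915_OVERLAY_YUV_PACKED |
		I915_OVERLAY_YUV422;
	rec.bo_handle = bo_handle;	/* GEM handle of the YUY2 frame */
	rec.crtc_id = crtc_id;
	rec.dst_x = 0;
	rec.dst_y = 0;
	rec.dst_width = 1024;		/* must lie inside the crtc mode */
	rec.dst_height = 768;
	rec.src_width = 720;		/* full buffer dimensions */
	rec.src_height = 576;
	rec.src_scan_width = 720;	/* visible part actually scanned out */
	rec.src_scan_height = 576;
	rec.stride_Y = 1536;		/* 720 * 2 bytes, rounded up to 64 */
	rec.offset_Y = 0;		/* packed: UV strides/offsets stay 0 */

	if (drmIoctl(fd, DRM_IOCTL_I915_OVERLAY_PUT_IMAGE, &rec)) {
		perror("overlay put_image");
		return -1;
	}
	return 0;
}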

+ 4 - 10
drivers/gpu/drm/i915/intel_sdvo.c

@@ -36,8 +36,6 @@
 #include "i915_drv.h"
 #include "intel_sdvo_regs.h"
 
-#undef SDVO_DEBUG
-
 static char *tv_format_names[] = {
 	"NTSC_M"   , "NTSC_J"  , "NTSC_443",
 	"PAL_B"    , "PAL_D"   , "PAL_G"   ,
@@ -356,7 +354,6 @@ static const struct _sdvo_cmd_name {
 #define SDVO_NAME(dev_priv) ((dev_priv)->output_device == SDVOB ? "SDVOB" : "SDVOC")
 #define SDVO_PRIV(output)   ((struct intel_sdvo_priv *) (output)->dev_priv)
 
-#ifdef SDVO_DEBUG
 static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
 				   void *args, int args_len)
 {
@@ -379,9 +376,6 @@ static void intel_sdvo_debug_write(struct intel_output *intel_output, u8 cmd,
 		DRM_LOG_KMS("(%02X)", cmd);
 	DRM_LOG_KMS("\n");
 }
-#else
-#define intel_sdvo_debug_write(o, c, a, l)
-#endif
 
 static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd,
 				 void *args, int args_len)
@@ -398,7 +392,6 @@ static void intel_sdvo_write_cmd(struct intel_output *intel_output, u8 cmd,
 	intel_sdvo_write_byte(intel_output, SDVO_I2C_OPCODE, cmd);
 }
 
-#ifdef SDVO_DEBUG
 static const char *cmd_status_names[] = {
 	"Power on",
 	"Success",
@@ -427,9 +420,6 @@ static void intel_sdvo_debug_response(struct intel_output *intel_output,
 		DRM_LOG_KMS("(??? %d)", status);
 	DRM_LOG_KMS("\n");
 }
-#else
-#define intel_sdvo_debug_response(o, r, l, s)
-#endif
 
 static u8 intel_sdvo_read_response(struct intel_output *intel_output,
 				   void *response, int response_len)
@@ -1627,6 +1617,10 @@ static enum drm_connector_status intel_sdvo_detect(struct drm_connector *connect
 
 	intel_sdvo_write_cmd(intel_output,
 			     SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0);
+	if (sdvo_priv->is_tv) {
+		/* add 30ms delay when the output type is SDVO-TV */
+		mdelay(30);
+	}
 	status = intel_sdvo_read_response(intel_output, &response, 2);
 
 	DRM_DEBUG_KMS("SDVO response %d %d\n", response & 0xff, response >> 8);

+ 47 - 11
drivers/gpu/drm/i915/intel_tv.c

@@ -1213,20 +1213,17 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		tv_ctl |= TV_TRILEVEL_SYNC;
 	if (tv_mode->pal_burst)
 		tv_ctl |= TV_PAL_BURST;
+
 	scctl1 = 0;
-	/* dda1 implies valid video levels */
-	if (tv_mode->dda1_inc) {
+	if (tv_mode->dda1_inc)
 		scctl1 |= TV_SC_DDA1_EN;
-	}
-
 	if (tv_mode->dda2_inc)
 		scctl1 |= TV_SC_DDA2_EN;
-
 	if (tv_mode->dda3_inc)
 		scctl1 |= TV_SC_DDA3_EN;
-
 	scctl1 |= tv_mode->sc_reset;
-	scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
+	if (video_levels)
+		scctl1 |= video_levels->burst << TV_BURST_LEVEL_SHIFT;
 	scctl1 |= tv_mode->dda1_inc << TV_SCDDA1_INC_SHIFT;
 
 	scctl2 = tv_mode->dda2_size << TV_SCDDA2_SIZE_SHIFT |
@@ -1416,16 +1413,16 @@ intel_tv_detect_type (struct drm_crtc *crtc, struct intel_output *intel_output)
 	 *  0 0 0 Component
 	 */
 	if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
-		DRM_DEBUG("Detected Composite TV connection\n");
+		DRM_DEBUG_KMS("Detected Composite TV connection\n");
 		type = DRM_MODE_CONNECTOR_Composite;
 	} else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
-		DRM_DEBUG("Detected S-Video TV connection\n");
+		DRM_DEBUG_KMS("Detected S-Video TV connection\n");
 		type = DRM_MODE_CONNECTOR_SVIDEO;
 	} else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
-		DRM_DEBUG("Detected Component TV connection\n");
+		DRM_DEBUG_KMS("Detected Component TV connection\n");
 		type = DRM_MODE_CONNECTOR_Component;
 	} else {
-		DRM_DEBUG("No TV connection detected\n");
+		DRM_DEBUG_KMS("No TV connection detected\n");
 		type = -1;
 	}
 
@@ -1702,6 +1699,41 @@ static const struct drm_encoder_funcs intel_tv_enc_funcs = {
 	.destroy = intel_tv_enc_destroy,
 };
 
+/*
+ * Enumerate the child dev array parsed from VBT to check whether
+ * the integrated TV is present.
+ * If it is present, return 1.
+ * If it is not present, return 0.
+ * If no child device is parsed from the VBT, assume that the TV is present.
+ */
+static int tv_is_present_in_vbt(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct child_device_config *p_child;
+	int i, ret;
+
+	if (!dev_priv->child_dev_num)
+		return 1;
+
+	ret = 0;
+	for (i = 0; i < dev_priv->child_dev_num; i++) {
+		p_child = dev_priv->child_dev + i;
+		/*
+		 * If the device type is not TV, continue.
+		 */
+		if (p_child->device_type != DEVICE_TYPE_INT_TV &&
+			p_child->device_type != DEVICE_TYPE_TV)
+			continue;
+		/* Only when the addin_offset is non-zero, it is regarded
+		 * as present.
+		 */
+		if (p_child->addin_offset) {
+			ret = 1;
+			break;
+		}
+	}
+	return ret;
+}
 
 void
 intel_tv_init(struct drm_device *dev)
@@ -1717,6 +1749,10 @@ intel_tv_init(struct drm_device *dev)
 	if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
 		return;
 
+	if (!tv_is_present_in_vbt(dev)) {
+		DRM_DEBUG_KMS("Integrated TV is not present.\n");
+		return;
+	}
 	/* Even if we have an encoder we may not have a connector */
 	if (!dev_priv->int_tv_support)
 		return;

+ 1 - 1
drivers/gpu/drm/radeon/Makefile

@@ -49,7 +49,7 @@ radeon-y += radeon_device.o radeon_kms.o \
 	radeon_cs.o radeon_bios.o radeon_benchmark.o r100.o r300.o r420.o \
 	rs400.o rs600.o rs690.o rv515.o r520.o r600.o rv770.o radeon_test.o \
 	r200.o radeon_legacy_tv.o r600_cs.o r600_blit.o r600_blit_shaders.o \
-	r600_blit_kms.o radeon_pm.o
+	r600_blit_kms.o radeon_pm.o atombios_dp.o
 
 radeon-$(CONFIG_COMPAT) += radeon_ioc32.o
 

+ 29 - 4
drivers/gpu/drm/radeon/atom.c

@@ -263,10 +263,10 @@ static uint32_t atom_get_src_int(atom_exec_context *ctx, uint8_t attr,
 	case ATOM_ARG_FB:
 		idx = U8(*ptr);
 		(*ptr)++;
+		val = gctx->scratch[((gctx->fb_base + idx) / 4)];
 		if (print)
 			DEBUG("FB[0x%02X]", idx);
-		printk(KERN_INFO "FB access is not implemented.\n");
-		return 0;
+		break;
 	case ATOM_ARG_IMM:
 		switch (align) {
 		case ATOM_SRC_DWORD:
@@ -488,9 +488,9 @@ static void atom_put_dst(atom_exec_context *ctx, int arg, uint8_t attr,
 	case ATOM_ARG_FB:
 		idx = U8(*ptr);
 		(*ptr)++;
+		gctx->scratch[((gctx->fb_base + idx) / 4)] = val;
 		DEBUG("FB[0x%02X]", idx);
-		printk(KERN_INFO "FB access is not implemented.\n");
-		return;
+		break;
 	case ATOM_ARG_PLL:
 		idx = U8(*ptr);
 		(*ptr)++;
@@ -1214,3 +1214,28 @@ void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
 		*crev = CU8(idx + 3);
 	return;
 }
+
+int atom_allocate_fb_scratch(struct atom_context *ctx)
+{
+	int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
+	uint16_t data_offset;
+	int usage_bytes;
+	struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
+
+	atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset);
+
+	firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
+
+	DRM_DEBUG("atom firmware requested %08x %dkb\n",
+		  firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
+		  firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
+
+	usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
+	if (usage_bytes == 0)
+		usage_bytes = 20 * 1024;
+	/* allocate some scratch memory */
+	ctx->scratch = kzalloc(usage_bytes, GFP_KERNEL);
+	if (!ctx->scratch)
+		return -ENOMEM;
+	return 0;
+}

+ 2 - 0
drivers/gpu/drm/radeon/atom.h

@@ -132,6 +132,7 @@ struct atom_context {
 	uint8_t shift;
 	int cs_equal, cs_above;
 	int io_mode;
+	uint32_t *scratch;
 };
 
 extern int atom_debug;
@@ -142,6 +143,7 @@ int atom_asic_init(struct atom_context *);
 void atom_destroy(struct atom_context *);
 void atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start);
 void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, uint8_t *crev);
+int atom_allocate_fb_scratch(struct atom_context *ctx);
 #include "atom-types.h"
 #include "atombios.h"
 #include "ObjectID.h"

+ 1 - 1
drivers/gpu/drm/radeon/atombios.h

@@ -2680,7 +2680,7 @@ typedef struct _ATOM_I2C_RECORD {
 typedef struct _ATOM_HPD_INT_RECORD {
 	ATOM_COMMON_RECORD_HEADER sheader;
 	UCHAR ucHPDIntGPIOID;	/* Corresponding block in GPIO_PIN_INFO table gives the pin info */
-	UCHAR ucPluggged_PinState;
+	UCHAR ucPlugged_PinState;
 } ATOM_HPD_INT_RECORD;
 
 typedef struct _ATOM_OUTPUT_PROTECTION_RECORD {

+ 41 - 18
drivers/gpu/drm/radeon/atombios_crtc.c

@@ -241,6 +241,7 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
 {
 	struct drm_device *dev = crtc->dev;
 	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 
 	switch (mode) {
 	case DRM_MODE_DPMS_ON:
@@ -248,20 +249,19 @@ void atombios_crtc_dpms(struct drm_crtc *crtc, int mode)
 		if (ASIC_IS_DCE3(rdev))
 			atombios_enable_crtc_memreq(crtc, 1);
 		atombios_blank_crtc(crtc, 0);
+		drm_vblank_post_modeset(dev, radeon_crtc->crtc_id);
+		radeon_crtc_load_lut(crtc);
 		break;
 	case DRM_MODE_DPMS_STANDBY:
 	case DRM_MODE_DPMS_SUSPEND:
 	case DRM_MODE_DPMS_OFF:
+		drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
 		atombios_blank_crtc(crtc, 1);
 		if (ASIC_IS_DCE3(rdev))
 			atombios_enable_crtc_memreq(crtc, 0);
 		atombios_enable_crtc(crtc, 0);
 		break;
 	}
-
-	if (mode != DRM_MODE_DPMS_OFF) {
-		radeon_crtc_load_lut(crtc);
-	}
 }
 
 static void
@@ -457,9 +457,8 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
 				if (encoder->encoder_type !=
 				    DRM_MODE_ENCODER_DAC)
 					pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
-				if (!ASIC_IS_AVIVO(rdev)
-				    && (encoder->encoder_type ==
-					DRM_MODE_ENCODER_LVDS))
+				if (encoder->encoder_type ==
+					DRM_MODE_ENCODER_LVDS)
 					pll_flags |= RADEON_PLL_USE_REF_DIV;
 			}
 			radeon_encoder = to_radeon_encoder(encoder);
@@ -500,8 +499,18 @@ void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
 	else
 		pll = &rdev->clock.p2pll;
 
-	radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
-			   &ref_div, &post_div, pll_flags);
+	if (ASIC_IS_AVIVO(rdev)) {
+		if (radeon_new_pll)
+			radeon_compute_pll_avivo(pll, adjusted_clock, &pll_clock,
+						 &fb_div, &frac_fb_div,
+						 &ref_div, &post_div, pll_flags);
+		else
+			radeon_compute_pll(pll, adjusted_clock, &pll_clock,
+					   &fb_div, &frac_fb_div,
+					   &ref_div, &post_div, pll_flags);
+	} else
+		radeon_compute_pll(pll, adjusted_clock, &pll_clock, &fb_div, &frac_fb_div,
+				   &ref_div, &post_div, pll_flags);
 
 	index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
 	atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
@@ -574,21 +583,32 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_framebuffer *radeon_fb;
 	struct drm_gem_object *obj;
-	struct drm_radeon_gem_object *obj_priv;
+	struct radeon_bo *rbo;
 	uint64_t fb_location;
 	uint32_t fb_format, fb_pitch_pixels, tiling_flags;
+	int r;
 
-	if (!crtc->fb)
-		return -EINVAL;
+	/* no fb bound */
+	if (!crtc->fb) {
+		DRM_DEBUG("No FB bound\n");
+		return 0;
+	}
 
 	radeon_fb = to_radeon_framebuffer(crtc->fb);
 
+	/* Pin framebuffer & get tiling information */
 	obj = radeon_fb->obj;
-	obj_priv = obj->driver_private;
-
-	if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &fb_location)) {
+	rbo = obj->driver_private;
+	r = radeon_bo_reserve(rbo, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_location);
+	if (unlikely(r != 0)) {
+		radeon_bo_unreserve(rbo);
 		return -EINVAL;
 	}
+	radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+	radeon_bo_unreserve(rbo);
 
 	switch (crtc->fb->bits_per_pixel) {
 	case 8:
@@ -618,8 +638,6 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 		return -EINVAL;
 	}
 
-	radeon_object_get_tiling_flags(obj->driver_private,
-				       &tiling_flags, NULL);
 	if (tiling_flags & RADEON_TILING_MACRO)
 		fb_format |= AVIVO_D1GRPH_MACRO_ADDRESS_MODE;
 
@@ -674,7 +692,12 @@ int atombios_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 
 	if (old_fb && old_fb != crtc->fb) {
 		radeon_fb = to_radeon_framebuffer(old_fb);
-		radeon_gem_object_unpin(radeon_fb->obj);
+		rbo = radeon_fb->obj->driver_private;
+		r = radeon_bo_reserve(rbo, false);
+		if (unlikely(r != 0))
+			return r;
+		radeon_bo_unpin(rbo);
+		radeon_bo_unreserve(rbo);
 	}
 
 	/* Bytes per pixel may have changed */

+ 790 - 0
drivers/gpu/drm/radeon/atombios_dp.c

@@ -0,0 +1,790 @@
+/*
+ * Copyright 2007-8 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ *          Alex Deucher
+ */
+#include "drmP.h"
+#include "radeon_drm.h"
+#include "radeon.h"
+
+#include "atom.h"
+#include "atom-bits.h"
+#include "drm_dp_helper.h"
+
+/* move these to drm_dp_helper.c/h */
+#define DP_LINK_CONFIGURATION_SIZE 9
+#define DP_LINK_STATUS_SIZE	   6
+#define DP_DPCD_SIZE	           8
+
+static char *voltage_names[] = {
+        "0.4V", "0.6V", "0.8V", "1.2V"
+};
+static char *pre_emph_names[] = {
+        "0dB", "3.5dB", "6dB", "9.5dB"
+};
+
+static const int dp_clocks[] = {
+	54000,  /* 1 lane, 1.62 GHz */
+	90000,  /* 1 lane, 2.70 GHz */
+	108000, /* 2 lane, 1.62 GHz */
+	180000, /* 2 lane, 2.70 GHz */
+	216000, /* 4 lane, 1.62 GHz */
+	360000, /* 4 lane, 2.70 GHz */
+};
+
+static const int num_dp_clocks = ARRAY_SIZE(dp_clocks);
+
+/* common helper functions */
+static int dp_lanes_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
+{
+	int i;
+	u8 max_link_bw;
+	u8 max_lane_count;
+
+	if (!dpcd)
+		return 0;
+
+	max_link_bw = dpcd[DP_MAX_LINK_RATE];
+	max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+
+	switch (max_link_bw) {
+	case DP_LINK_BW_1_62:
+	default:
+		for (i = 0; i < num_dp_clocks; i++) {
+			if (i % 2)
+				continue;
+			switch (max_lane_count) {
+			case 1:
+				if (i > 1)
+					return 0;
+				break;
+			case 2:
+				if (i > 3)
+					return 0;
+				break;
+			case 4:
+			default:
+				break;
+			}
+			if (dp_clocks[i] > mode_clock) {
+				if (i < 2)
+					return 1;
+				else if (i < 4)
+					return 2;
+				else
+					return 4;
+			}
+		}
+		break;
+	case DP_LINK_BW_2_7:
+		for (i = 0; i < num_dp_clocks; i++) {
+			switch (max_lane_count) {
+			case 1:
+				if (i > 1)
+					return 0;
+				break;
+			case 2:
+				if (i > 3)
+					return 0;
+				break;
+			case 4:
+			default:
+				break;
+			}
+			if (dp_clocks[i] > mode_clock) {
+				if (i < 2)
+					return 1;
+				else if (i < 4)
+					return 2;
+				else
+					return 4;
+			}
+		}
+		break;
+	}
+
+	return 0;
+}
+
+static int dp_link_clock_for_mode_clock(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
+{
+	int i;
+	u8 max_link_bw;
+	u8 max_lane_count;
+
+	if (!dpcd)
+		return 0;
+
+	max_link_bw = dpcd[DP_MAX_LINK_RATE];
+	max_lane_count = dpcd[DP_MAX_LANE_COUNT] & DP_MAX_LANE_COUNT_MASK;
+
+	switch (max_link_bw) {
+	case DP_LINK_BW_1_62:
+	default:
+		for (i = 0; i < num_dp_clocks; i++) {
+			if (i % 2)
+				continue;
+			switch (max_lane_count) {
+			case 1:
+				if (i > 1)
+					return 0;
+				break;
+			case 2:
+				if (i > 3)
+					return 0;
+				break;
+			case 4:
+			default:
+				break;
+			}
+			if (dp_clocks[i] > mode_clock)
+				return 162000;
+		}
+		break;
+	case DP_LINK_BW_2_7:
+		for (i = 0; i < num_dp_clocks; i++) {
+			switch (max_lane_count) {
+			case 1:
+				if (i > 1)
+					return 0;
+				break;
+			case 2:
+				if (i > 3)
+					return 0;
+				break;
+			case 4:
+			default:
+				break;
+			}
+			if (dp_clocks[i] > mode_clock)
+				return (i % 2) ? 270000 : 162000;
+		}
+	}
+
+	return 0;
+}
+
+int dp_mode_valid(u8 dpcd[DP_DPCD_SIZE], int mode_clock)
+{
+	int lanes = dp_lanes_for_mode_clock(dpcd, mode_clock);
+	int bw = dp_link_clock_for_mode_clock(dpcd, mode_clock);
+
+	if ((lanes == 0) || (bw == 0))
+		return MODE_CLOCK_HIGH;
+
+	return MODE_OK;
+}
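+/* Worked example: a 100000 kHz mode against a DPCD reporting a 2.7 Gbps
+ * max link rate and 4 lanes: the first table entry above the mode clock
+ * is 108000 (i = 2), so the helpers pick 2 lanes at a 162000 kHz link
+ * clock. */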
+
+static u8 dp_link_status(u8 link_status[DP_LINK_STATUS_SIZE], int r)
+{
+	return link_status[r - DP_LANE0_1_STATUS];
+}
+
+static u8 dp_get_lane_status(u8 link_status[DP_LINK_STATUS_SIZE],
+			     int lane)
+{
+	int i = DP_LANE0_1_STATUS + (lane >> 1);
+	int s = (lane & 1) * 4;
+	u8 l = dp_link_status(link_status, i);
+	return (l >> s) & 0xf;
+}
+
+static bool dp_clock_recovery_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+				 int lane_count)
+{
+	int lane;
+	u8 lane_status;
+
+	for (lane = 0; lane < lane_count; lane++) {
+		lane_status = dp_get_lane_status(link_status, lane);
+		if ((lane_status & DP_LANE_CR_DONE) == 0)
+			return false;
+	}
+	return true;
+}
+
+static bool dp_channel_eq_ok(u8 link_status[DP_LINK_STATUS_SIZE],
+			     int lane_count)
+{
+	u8 lane_align;
+	u8 lane_status;
+	int lane;
+
+	lane_align = dp_link_status(link_status,
+				    DP_LANE_ALIGN_STATUS_UPDATED);
+	if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
+		return false;
+	for (lane = 0; lane < lane_count; lane++) {
+		lane_status = dp_get_lane_status(link_status, lane);
+		if ((lane_status & DP_CHANNEL_EQ_BITS) != DP_CHANNEL_EQ_BITS)
+			return false;
+	}
+	return true;
+}
+
+static u8 dp_get_adjust_request_voltage(uint8_t link_status[DP_LINK_STATUS_SIZE],
+					int lane)
+{
+	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+	int s = ((lane & 1) ?
+		 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
+		 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
+	u8 l = dp_link_status(link_status, i);
+
+	return ((l >> s) & 0x3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
+}
+
+static u8 dp_get_adjust_request_pre_emphasis(uint8_t link_status[DP_LINK_STATUS_SIZE],
+					     int lane)
+{
+	int i = DP_ADJUST_REQUEST_LANE0_1 + (lane >> 1);
+	int s = ((lane & 1) ?
+		 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
+		 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
+	u8 l = dp_link_status(link_status, i);
+
+	return ((l >> s) & 0x3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
+}
+
+/* XXX fix me -- chip specific */
+#define DP_VOLTAGE_MAX         DP_TRAIN_VOLTAGE_SWING_1200
+static u8 dp_pre_emphasis_max(u8 voltage_swing)
+{
+	switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
+	case DP_TRAIN_VOLTAGE_SWING_400:
+		return DP_TRAIN_PRE_EMPHASIS_6;
+	case DP_TRAIN_VOLTAGE_SWING_600:
+		return DP_TRAIN_PRE_EMPHASIS_6;
+	case DP_TRAIN_VOLTAGE_SWING_800:
+		return DP_TRAIN_PRE_EMPHASIS_3_5;
+	case DP_TRAIN_VOLTAGE_SWING_1200:
+	default:
+		return DP_TRAIN_PRE_EMPHASIS_0;
+	}
+}
+
+static void dp_get_adjust_train(u8 link_status[DP_LINK_STATUS_SIZE],
+				int lane_count,
+				u8 train_set[4])
+{
+	u8 v = 0;
+	u8 p = 0;
+	int lane;
+
+	for (lane = 0; lane < lane_count; lane++) {
+		u8 this_v = dp_get_adjust_request_voltage(link_status, lane);
+		u8 this_p = dp_get_adjust_request_pre_emphasis(link_status, lane);
+
+		DRM_DEBUG("requested signal parameters: lane %d voltage %s pre_emph %s\n",
+			  lane,
+			  voltage_names[this_v >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
+			  pre_emph_names[this_p >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
+
+		if (this_v > v)
+			v = this_v;
+		if (this_p > p)
+			p = this_p;
+	}
+
+	if (v >= DP_VOLTAGE_MAX)
+		v = DP_VOLTAGE_MAX | DP_TRAIN_MAX_SWING_REACHED;
+
+	if (p >= dp_pre_emphasis_max(v))
+		p = dp_pre_emphasis_max(v) | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;
+
+	DRM_DEBUG("using signal parameters: voltage %s pre_emph %s\n",
+		  voltage_names[(v & DP_TRAIN_VOLTAGE_SWING_MASK) >> DP_TRAIN_VOLTAGE_SWING_SHIFT],
+		  pre_emph_names[(p & DP_TRAIN_PRE_EMPHASIS_MASK) >> DP_TRAIN_PRE_EMPHASIS_SHIFT]);
+
+	for (lane = 0; lane < 4; lane++)
+		train_set[lane] = v | p;
+}
+
+
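dp_get_adjust_train() collapses the per-lane requests into one setting: the highest voltage swing and pre-emphasis any lane asked for, clamped and tagged with the MAX_*_REACHED flags so the sink knows the source cannot go further. A standalone run of that reduction; the bit layout (swing in bits 0-1, pre-emphasis from bit 3, "max reached" flags at bits 2 and 5) follows the DP training-lane encoding and should be read as an assumption here:

#include <stdint.h>
#include <stdio.h>

#define SWING_MASK        0x3	/* voltage swing level, bits 0-1 */
#define MAX_SWING_REACHED (1 << 2)
#define PREEMPH_SHIFT     3
#define VOLTAGE_MAX       0x3	/* level 3 = 1200 mV */

int main(void)
{
	/* Per-lane (swing, pre-emph) levels decoded from ADJUST_REQUEST. */
	uint8_t swing[4]   = { 1, 3, 2, 0 };
	uint8_t preemph[4] = { 0, 1, 1, 0 };
	uint8_t v = 0, p = 0;
	int lane;

	for (lane = 0; lane < 4; lane++) {
		if (swing[lane] > v)
			v = swing[lane];
		if (preemph[lane] > p)
			p = preemph[lane];
	}
	if (v >= VOLTAGE_MAX)
		v |= MAX_SWING_REACHED;	/* tell the sink we are capped */

	printf("train_set byte: 0x%02x\n", (uint8_t)(v | (p << PREEMPH_SHIFT)));
	return 0;
}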
+/* radeon aux chan functions */
+bool radeon_process_aux_ch(struct radeon_i2c_chan *chan, u8 *req_bytes,
+			   int num_bytes, u8 *read_byte,
+			   u8 read_buf_len, u8 delay)
+{
+	struct drm_device *dev = chan->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION args;
+	int index = GetIndexIntoMasterTable(COMMAND, ProcessAuxChannelTransaction);
+	unsigned char *base;
+
+	memset(&args, 0, sizeof(args));
+
+	base = (unsigned char *)rdev->mode_info.atom_context->scratch;
+
+	memcpy(base, req_bytes, num_bytes);
+
+	args.lpAuxRequest = 0;
+	args.lpDataOut = 16;
+	args.ucDataOutLen = 0;
+	args.ucChannelID = chan->rec.i2c_id;
+	args.ucDelay = delay / 10;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+	if (args.ucReplyStatus) {
+		DRM_DEBUG("failed to get auxch %02x%02x %02x %02x 0x%02x %02x\n",
+			  req_bytes[1], req_bytes[0], req_bytes[2], req_bytes[3],
+			  chan->rec.i2c_id, args.ucReplyStatus);
+		return false;
+	}
+
+	if (args.ucDataOutLen && read_byte && read_buf_len) {
+		if (read_buf_len < args.ucDataOutLen) {
+			DRM_ERROR("Buffer too small for return answer %d %d\n",
+				  read_buf_len, args.ucDataOutLen);
+			return false;
+		}
+		{
+			int len = min(read_buf_len, args.ucDataOutLen);
+			memcpy(read_byte, base + 16, len);
+		}
+	}
+	return true;
+}
+
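radeon_process_aux_ch() funnels every AUX transaction through the AtomBIOS ProcessAuxChannelTransaction table: the request bytes are staged at offset 0 of the shared scratch page, and any reply shows up at offset 16 (args.lpDataOut). A toy mock of that scratch-buffer convention — the offsets mirror the code above, everything else is illustrative:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define REPLY_OFFSET 16	/* matches args.lpDataOut above */

static uint8_t scratch[32];

/* Stand-in for the ATOM interpreter: leave a one-byte reply. */
static uint8_t fake_atom_execute(void)
{
	scratch[REPLY_OFFSET] = 0xaa;
	return 1;	/* plays the role of args.ucDataOutLen */
}

int main(void)
{
	uint8_t req[4] = { 0x00, 0x02, 0x90, 0x00 };	/* arbitrary AUX header */
	uint8_t reply, out_len;

	memcpy(scratch, req, sizeof(req));	/* request at offset 0 */
	out_len = fake_atom_execute();
	memcpy(&reply, scratch + REPLY_OFFSET, out_len);
	printf("reply byte 0x%02x\n", reply);
	return 0;
}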
+bool radeon_dp_aux_native_write(struct radeon_connector *radeon_connector, uint16_t address,
+				uint8_t send_bytes, uint8_t *send)
+{
+	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+	u8 msg[20];
+	u8 msg_len, dp_msg_len;
+	bool ret;
+
+	dp_msg_len = 4;
+	msg[0] = address;
+	msg[1] = address >> 8;
+	msg[2] = AUX_NATIVE_WRITE << 4;
+	dp_msg_len += send_bytes;
+	msg[3] = (dp_msg_len << 4) | (send_bytes - 1);
+
+	if (send_bytes > 16)
+		return false;
+
+	memcpy(&msg[4], send, send_bytes);
+	msg_len = 4 + send_bytes;
+	ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, NULL, 0, 0);
+	return ret;
+}
+
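A native AUX message is four header bytes — address low, address high, the command in the top nibble of byte 2, and byte 3 packing the total message length (top nibble) with payload-size-minus-one (bottom nibble) — followed by the payload. A standalone check of that packing; AUX_NATIVE_WRITE's value is taken from the DP command encoding and is an assumption here:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define AUX_NATIVE_WRITE 0x8	/* DP AUX command code (assumed) */

int main(void)
{
	uint16_t address = 0x0100;	/* e.g. DP_LINK_BW_SET */
	uint8_t payload[2] = { 0x0a, 0x84 };	/* 2.7 GHz, 4 lanes (example) */
	uint8_t send_bytes = sizeof(payload);
	uint8_t dp_msg_len = 4 + send_bytes;
	uint8_t msg[20];

	msg[0] = address & 0xff;
	msg[1] = address >> 8;
	msg[2] = AUX_NATIVE_WRITE << 4;
	msg[3] = (dp_msg_len << 4) | (send_bytes - 1);
	memcpy(&msg[4], payload, send_bytes);

	printf("header %02x %02x %02x %02x, %d payload byte(s)\n",
	       msg[0], msg[1], msg[2], msg[3], send_bytes);
	return 0;
}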
+bool radeon_dp_aux_native_read(struct radeon_connector *radeon_connector, uint16_t address,
+			       uint8_t delay, uint8_t expected_bytes,
+			       uint8_t *read_p)
+{
+	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+	u8 msg[20];
+	u8 msg_len, dp_msg_len;
+	bool ret = false;
+	msg_len = 4;
+	dp_msg_len = 4;
+	msg[0] = address;
+	msg[1] = address >> 8;
+	msg[2] = AUX_NATIVE_READ << 4;
+	msg[3] = (dp_msg_len) << 4;
+	msg[3] |= expected_bytes - 1;
+
+	ret = radeon_process_aux_ch(dig_connector->dp_i2c_bus, msg, msg_len, read_p, expected_bytes, delay);
+	return ret;
+}
+
+/* radeon dp functions */
+static u8 radeon_dp_encoder_service(struct radeon_device *rdev, int action, int dp_clock,
+				    uint8_t ucconfig, uint8_t lane_num)
+{
+	DP_ENCODER_SERVICE_PARAMETERS args;
+	int index = GetIndexIntoMasterTable(COMMAND, DPEncoderService);
+
+	memset(&args, 0, sizeof(args));
+	args.ucLinkClock = dp_clock / 10;
+	args.ucConfig = ucconfig;
+	args.ucAction = action;
+	args.ucLaneNum = lane_num;
+	args.ucStatus = 0;
+
+	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+	return args.ucStatus;
+}
+
+u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector)
+{
+	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+	struct drm_device *dev = radeon_connector->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+
+	return radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_GET_SINK_TYPE, 0,
+					 dig_connector->dp_i2c_bus->rec.i2c_id, 0);
+}
+
+bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
+{
+	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+	u8 msg[25];
+	int ret;
+
+	ret = radeon_dp_aux_native_read(radeon_connector, DP_DPCD_REV, 0, 8, msg);
+	if (ret) {
+		memcpy(dig_connector->dpcd, msg, 8);
+		{
+			int i;
+			DRM_DEBUG("DPCD: ");
+			for (i = 0; i < 8; i++)
+				DRM_DEBUG("%02x ", msg[i]);
+			DRM_DEBUG("\n");
+		}
+		return true;
+	}
+	dig_connector->dpcd[0] = 0;
+	return false;
+}
+
+void radeon_dp_set_link_config(struct drm_connector *connector,
+			       struct drm_display_mode *mode)
+{
+	struct radeon_connector *radeon_connector;
+	struct radeon_connector_atom_dig *dig_connector;
+
+	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+		return;
+
+	radeon_connector = to_radeon_connector(connector);
+	if (!radeon_connector->con_priv)
+		return;
+	dig_connector = radeon_connector->con_priv;
+
+	dig_connector->dp_clock =
+		dp_link_clock_for_mode_clock(dig_connector->dpcd, mode->clock);
+	dig_connector->dp_lane_count =
+		dp_lanes_for_mode_clock(dig_connector->dpcd, mode->clock);
+}
+
+int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector,
+				struct drm_display_mode *mode)
+{
+	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+
+	return dp_mode_valid(dig_connector->dpcd, mode->clock);
+}
+
+static bool atom_dp_get_link_status(struct radeon_connector *radeon_connector,
+				    u8 link_status[DP_LINK_STATUS_SIZE])
+{
+	int ret;
+	ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS, 100,
+					DP_LINK_STATUS_SIZE, link_status);
+	if (!ret) {
+		DRM_ERROR("displayport link status failed\n");
+		return false;
+	}
+
+	DRM_DEBUG("link status %02x %02x %02x %02x %02x %02x\n",
+		  link_status[0], link_status[1], link_status[2],
+		  link_status[3], link_status[4], link_status[5]);
+	return true;
+}
+
+bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector)
+{
+	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+	u8 link_status[DP_LINK_STATUS_SIZE];
+
+	if (!atom_dp_get_link_status(radeon_connector, link_status))
+		return false;
+	if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count))
+		return false;
+	return true;
+}
+
+static void dp_set_power(struct radeon_connector *radeon_connector, u8 power_state)
+{
+	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+
+	if (dig_connector->dpcd[0] >= 0x11) {
+		radeon_dp_aux_native_write(radeon_connector, DP_SET_POWER, 1,
+					   &power_state);
+	}
+}
+
+static void dp_set_downspread(struct radeon_connector *radeon_connector, u8 downspread)
+{
+	radeon_dp_aux_native_write(radeon_connector, DP_DOWNSPREAD_CTRL, 1,
+				   &downspread);
+}
+
+static void dp_set_link_bw_lanes(struct radeon_connector *radeon_connector,
+				 u8 link_configuration[DP_LINK_CONFIGURATION_SIZE])
+{
+	radeon_dp_aux_native_write(radeon_connector, DP_LINK_BW_SET, 2,
+				   link_configuration);
+}
+
+static void dp_update_dpvs_emph(struct radeon_connector *radeon_connector,
+				struct drm_encoder *encoder,
+				u8 train_set[4])
+{
+	struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+	int i;
+
+	for (i = 0; i < dig_connector->dp_lane_count; i++)
+		atombios_dig_transmitter_setup(encoder,
+					       ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH,
+					       i, train_set[i]);
+
+	radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_LANE0_SET,
+				   dig_connector->dp_lane_count, train_set);
+}
+
+static void dp_set_training(struct radeon_connector *radeon_connector,
+			    u8 training)
+{
+	radeon_dp_aux_native_write(radeon_connector, DP_TRAINING_PATTERN_SET,
+				   1, &training);
+}
+
+void dp_link_train(struct drm_encoder *encoder,
+		   struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_atom_dig *dig;
+	struct radeon_connector *radeon_connector;
+	struct radeon_connector_atom_dig *dig_connector;
+	int enc_id = 0;
+	bool clock_recovery, channel_eq;
+	u8 link_status[DP_LINK_STATUS_SIZE];
+	u8 link_configuration[DP_LINK_CONFIGURATION_SIZE];
+	u8 tries, voltage;
+	u8 train_set[4];
+	int i;
+
+	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+		return;
+
+	if (!radeon_encoder->enc_priv)
+		return;
+	dig = radeon_encoder->enc_priv;
+
+	radeon_connector = to_radeon_connector(connector);
+	if (!radeon_connector->con_priv)
+		return;
+	dig_connector = radeon_connector->con_priv;
+
+	if (ASIC_IS_DCE32(rdev)) {
+		if (dig->dig_block)
+			enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER;
+		else
+			enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER;
+		if (dig_connector->linkb)
+			enc_id |= ATOM_DP_CONFIG_LINK_B;
+		else
+			enc_id |= ATOM_DP_CONFIG_LINK_A;
+	} else {
+		if (dig_connector->linkb)
+			enc_id |= ATOM_DP_CONFIG_DIG2_ENCODER | ATOM_DP_CONFIG_LINK_B;
+		else
+			enc_id |= ATOM_DP_CONFIG_DIG1_ENCODER | ATOM_DP_CONFIG_LINK_A;
+	}
+
+	memset(link_configuration, 0, DP_LINK_CONFIGURATION_SIZE);
+	if (dig_connector->dp_clock == 270000)
+		link_configuration[0] = DP_LINK_BW_2_7;
+	else
+		link_configuration[0] = DP_LINK_BW_1_62;
+	link_configuration[1] = dig_connector->dp_lane_count;
+	if (dig_connector->dpcd[0] >= 0x11)
+		link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
+
+	/* power up the sink */
+	dp_set_power(radeon_connector, DP_SET_POWER_D0);
+	/* disable the training pattern on the sink */
+	dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
+	/* set link bw and lanes on the sink */
+	dp_set_link_bw_lanes(radeon_connector, link_configuration);
+	/* disable downspread on the sink */
+	dp_set_downspread(radeon_connector, 0);
+	/* start training on the source */
+	radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_START,
+				  dig_connector->dp_clock, enc_id, 0);
+	/* set training pattern 1 on the source */
+	radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
+				  dig_connector->dp_clock, enc_id, 0);
+
+	/* set initial vs/emph */
+	memset(train_set, 0, 4);
+	udelay(400);
+	/* set training pattern 1 on the sink */
+	dp_set_training(radeon_connector, DP_TRAINING_PATTERN_1);
+
+	dp_update_dpvs_emph(radeon_connector, encoder, train_set);
+
+	/* clock recovery loop */
+	clock_recovery = false;
+	tries = 0;
+	voltage = 0xff;
+	for (;;) {
+		udelay(100);
+		if (!atom_dp_get_link_status(radeon_connector, link_status))
+			break;
+
+		if (dp_clock_recovery_ok(link_status, dig_connector->dp_lane_count)) {
+			clock_recovery = true;
+			break;
+		}
+
+		for (i = 0; i < dig_connector->dp_lane_count; i++) {
+			if ((train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
+				break;
+		}
+		if (i == dig_connector->dp_lane_count) {
+			DRM_ERROR("clock recovery reached max voltage\n");
+			break;
+		}
+
+		if ((train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+			++tries;
+			if (tries == 5) {
+				DRM_ERROR("clock recovery tried 5 times\n");
+				break;
+			}
+		} else
+			tries = 0;
+
+		voltage = train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+
+		/* Compute new train_set as requested by sink */
+		dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set);
+		dp_update_dpvs_emph(radeon_connector, encoder, train_set);
+	}
+	if (!clock_recovery)
+		DRM_ERROR("clock recovery failed\n");
+	else
+		DRM_DEBUG("clock recovery at voltage %d pre-emphasis %d\n",
+			  train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
+			  (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+			  DP_TRAIN_PRE_EMPHASIS_SHIFT);
+
+
+	/* set training pattern 2 on the sink */
+	dp_set_training(radeon_connector, DP_TRAINING_PATTERN_2);
+	/* set training pattern 2 on the source */
+	radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_PATTERN_SEL,
+				  dig_connector->dp_clock, enc_id, 1);
+
+	/* channel equalization loop */
+	tries = 0;
+	channel_eq = false;
+	for (;;) {
+		udelay(400);
+		if (!atom_dp_get_link_status(radeon_connector, link_status))
+			break;
+
+		if (dp_channel_eq_ok(link_status, dig_connector->dp_lane_count)) {
+			channel_eq = true;
+			break;
+		}
+
+		/* Try 5 times */
+		if (tries > 5) {
+			DRM_ERROR("channel eq failed: 5 tries\n");
+			break;
+		}
+
+		/* Compute new train_set as requested by sink */
+		dp_get_adjust_train(link_status, dig_connector->dp_lane_count, train_set);
+		dp_update_dpvs_emph(radeon_connector, encoder, train_set);
+
+		tries++;
+	}
+
+	if (!channel_eq)
+		DRM_ERROR("channel eq failed\n");
+	else
+		DRM_DEBUG("channel eq at voltage %d pre-emphasis %d\n",
+			  train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK,
+			  (train_set[0] & DP_TRAIN_PRE_EMPHASIS_MASK)
+			  >> DP_TRAIN_PRE_EMPHASIS_SHIFT);
+
+	/* disable the training pattern on the sink */
+	dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
+
+	radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
+				  dig_connector->dp_clock, enc_id, 0);
+}
+
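The clock-recovery loop above bails out in two ways: when every lane already reports DP_TRAIN_MAX_SWING_REACHED, or after five consecutive rounds in which the sink kept requesting the same voltage swing, i.e. training is not converging. A standalone run of the second abort condition, with a simulated sink that never changes its request:

#include <stdint.h>
#include <stdio.h>

#define SWING_MASK 0x3

int main(void)
{
	uint8_t train_set = 0x2;	/* sink keeps asking for swing level 2 */
	uint8_t voltage = 0xff;
	int tries = 0, round;

	for (round = 0; round < 10; round++) {
		if ((train_set & SWING_MASK) == voltage) {
			if (++tries == 5) {
				printf("clock recovery tried 5 times\n");
				break;
			}
		} else
			tries = 0;
		voltage = train_set & SWING_MASK;
	}
	return 0;
}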
+int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+			 uint8_t write_byte, uint8_t *read_byte)
+{
+	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
+	struct radeon_i2c_chan *auxch = (struct radeon_i2c_chan *)adapter;
+	int ret = 0;
+	uint16_t address = algo_data->address;
+	uint8_t msg[5];
+	uint8_t reply[2];
+	int msg_len, dp_msg_len;
+	int reply_bytes;
+
+	/* Set up the command byte */
+	if (mode & MODE_I2C_READ)
+		msg[2] = AUX_I2C_READ << 4;
+	else
+		msg[2] = AUX_I2C_WRITE << 4;
+
+	if (!(mode & MODE_I2C_STOP))
+		msg[2] |= AUX_I2C_MOT << 4;
+
+	msg[0] = address;
+	msg[1] = address >> 8;
+
+	reply_bytes = 1;
+
+	msg_len = 4;
+	dp_msg_len = 3;
+	switch (mode) {
+	case MODE_I2C_WRITE:
+		msg[4] = write_byte;
+		msg_len++;
+		dp_msg_len += 2;
+		break;
+	case MODE_I2C_READ:
+		dp_msg_len += 1;
+		break;
+	default:
+		break;
+	}
+
+	msg[3] = (dp_msg_len) << 4;
+	ret = radeon_process_aux_ch(auxch, msg, msg_len, reply, reply_bytes, 0);
+
+	if (ret) {
+		if (read_byte)
+			*read_byte = reply[0];
+		return reply_bytes;
+	}
+	return -EREMOTEIO;
+}
+
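I2C-over-AUX reuses the same framing with the I2C command variants plus the Middle-Of-Transaction bit: every message except the one carrying the I2C STOP keeps MOT set so the sink holds the I2C transfer open, which is exactly what the msg[2] logic above builds. A standalone look at that command nibble; both the AUX_I2C_* codes and the MODE_* flag values are assumptions for illustration:

#include <stdint.h>
#include <stdio.h>

#define AUX_I2C_WRITE 0x0	/* DP AUX I2C command codes (assumed) */
#define AUX_I2C_READ  0x1
#define AUX_I2C_MOT   0x4

#define MODE_I2C_READ 1	/* illustrative mode flags */
#define MODE_I2C_STOP 4

static uint8_t command_byte(int mode)
{
	uint8_t cmd = (mode & MODE_I2C_READ) ? AUX_I2C_READ : AUX_I2C_WRITE;

	if (!(mode & MODE_I2C_STOP))
		cmd |= AUX_I2C_MOT;	/* more of this I2C transfer follows */
	return cmd << 4;
}

int main(void)
{
	printf("mid-transfer read: 0x%02x\n", command_byte(MODE_I2C_READ));
	printf("final read (STOP): 0x%02x\n",
	       command_byte(MODE_I2C_READ | MODE_I2C_STOP));
	return 0;
}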

+ 197 - 48
drivers/gpu/drm/radeon/r100.c

@@ -65,6 +65,95 @@ MODULE_FIRMWARE(FIRMWARE_R520);
  * r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280
  */
 
+/* hpd for digital panel detect/disconnect */
+bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+	bool connected = false;
+
+	switch (hpd) {
+	case RADEON_HPD_1:
+		if (RREG32(RADEON_FP_GEN_CNTL) & RADEON_FP_DETECT_SENSE)
+			connected = true;
+		break;
+	case RADEON_HPD_2:
+		if (RREG32(RADEON_FP2_GEN_CNTL) & RADEON_FP2_DETECT_SENSE)
+			connected = true;
+		break;
+	default:
+		break;
+	}
+	return connected;
+}
+
+void r100_hpd_set_polarity(struct radeon_device *rdev,
+			   enum radeon_hpd_id hpd)
+{
+	u32 tmp;
+	bool connected = r100_hpd_sense(rdev, hpd);
+
+	switch (hpd) {
+	case RADEON_HPD_1:
+		tmp = RREG32(RADEON_FP_GEN_CNTL);
+		if (connected)
+			tmp &= ~RADEON_FP_DETECT_INT_POL;
+		else
+			tmp |= RADEON_FP_DETECT_INT_POL;
+		WREG32(RADEON_FP_GEN_CNTL, tmp);
+		break;
+	case RADEON_HPD_2:
+		tmp = RREG32(RADEON_FP2_GEN_CNTL);
+		if (connected)
+			tmp &= ~RADEON_FP2_DETECT_INT_POL;
+		else
+			tmp |= RADEON_FP2_DETECT_INT_POL;
+		WREG32(RADEON_FP2_GEN_CNTL, tmp);
+		break;
+	default:
+		break;
+	}
+}
+
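The polarity write-back above is the standard hot-plug re-arm: the pin only interrupts on the programmed edge, so after each sense the driver arms the opposite one — connected means watch for unplug, disconnected means watch for plug. A minimal sketch of the re-arm with a made-up register bit:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FP_DETECT_INT_POL (1 << 0)	/* placeholder bit position */

static uint32_t fp_gen_cntl;	/* stands in for RADEON_FP_GEN_CNTL */

static void rearm_hpd(bool connected)
{
	if (connected)
		fp_gen_cntl &= ~FP_DETECT_INT_POL;	/* next event: unplug */
	else
		fp_gen_cntl |= FP_DETECT_INT_POL;	/* next event: plug */
}

int main(void)
{
	rearm_hpd(true);
	printf("armed for disconnect, reg=0x%x\n", fp_gen_cntl);
	rearm_hpd(false);
	printf("armed for connect, reg=0x%x\n", fp_gen_cntl);
	return 0;
}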
+void r100_hpd_init(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		switch (radeon_connector->hpd.hpd) {
+		case RADEON_HPD_1:
+			rdev->irq.hpd[0] = true;
+			break;
+		case RADEON_HPD_2:
+			rdev->irq.hpd[1] = true;
+			break;
+		default:
+			break;
+		}
+	}
+	r100_irq_set(rdev);
+}
+
+void r100_hpd_fini(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		switch (radeon_connector->hpd.hpd) {
+		case RADEON_HPD_1:
+			rdev->irq.hpd[0] = false;
+			break;
+		case RADEON_HPD_2:
+			rdev->irq.hpd[1] = false;
+			break;
+		default:
+			break;
+		}
+	}
+}
+
 /*
  * PCI GART
  */
@@ -94,6 +183,15 @@ int r100_pci_gart_init(struct radeon_device *rdev)
 	return radeon_gart_table_ram_alloc(rdev);
 }
 
+/* required on r1xx, r2xx, r300, r(v)350, r420/r481, rs400/rs480 */
+void r100_enable_bm(struct radeon_device *rdev)
+{
+	uint32_t tmp;
+	/* Enable bus mastering */
+	tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
+	WREG32(RADEON_BUS_CNTL, tmp);
+}
+
 int r100_pci_gart_enable(struct radeon_device *rdev)
 {
 	uint32_t tmp;
@@ -105,9 +203,6 @@ int r100_pci_gart_enable(struct radeon_device *rdev)
 	WREG32(RADEON_AIC_LO_ADDR, rdev->mc.gtt_location);
 	tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
 	WREG32(RADEON_AIC_HI_ADDR, tmp);
-	/* Enable bus mastering */
-	tmp = RREG32(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
-	WREG32(RADEON_BUS_CNTL, tmp);
 	/* set PCI GART page-table base address */
 	WREG32(RADEON_AIC_PT_BASE, rdev->gart.table_addr);
 	tmp = RREG32(RADEON_AIC_CNTL) | RADEON_PCIGART_TRANSLATE_EN;
@@ -157,6 +252,12 @@ int r100_irq_set(struct radeon_device *rdev)
 	if (rdev->irq.crtc_vblank_int[1]) {
 		tmp |= RADEON_CRTC2_VBLANK_MASK;
 	}
+	if (rdev->irq.hpd[0]) {
+		tmp |= RADEON_FP_DETECT_MASK;
+	}
+	if (rdev->irq.hpd[1]) {
+		tmp |= RADEON_FP2_DETECT_MASK;
+	}
 	WREG32(RADEON_GEN_INT_CNTL, tmp);
 	return 0;
 }
@@ -175,8 +276,9 @@ void r100_irq_disable(struct radeon_device *rdev)
 static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
 {
 	uint32_t irqs = RREG32(RADEON_GEN_INT_STATUS);
-	uint32_t irq_mask = RADEON_SW_INT_TEST | RADEON_CRTC_VBLANK_STAT |
-		RADEON_CRTC2_VBLANK_STAT;
+	uint32_t irq_mask = RADEON_SW_INT_TEST |
+		RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT |
+		RADEON_FP_DETECT_STAT | RADEON_FP2_DETECT_STAT;
 
 	if (irqs) {
 		WREG32(RADEON_GEN_INT_STATUS, irqs);
@@ -187,6 +289,7 @@ static inline uint32_t r100_irq_ack(struct radeon_device *rdev)
 int r100_irq_process(struct radeon_device *rdev)
 {
 	uint32_t status, msi_rearm;
+	bool queue_hotplug = false;
 
 	status = r100_irq_ack(rdev);
 	if (!status) {
@@ -207,8 +310,18 @@ int r100_irq_process(struct radeon_device *rdev)
 		if (status & RADEON_CRTC2_VBLANK_STAT) {
 			drm_handle_vblank(rdev->ddev, 1);
 		}
+		if (status & RADEON_FP_DETECT_STAT) {
+			queue_hotplug = true;
+			DRM_DEBUG("HPD1\n");
+		}
+		if (status & RADEON_FP2_DETECT_STAT) {
+			queue_hotplug = true;
+			DRM_DEBUG("HPD2\n");
+		}
 		status = r100_irq_ack(rdev);
 	}
+	if (queue_hotplug)
+		queue_work(rdev->wq, &rdev->hotplug_work);
 	if (rdev->msi_enabled) {
 		switch (rdev->family) {
 		case CHIP_RS400:
@@ -255,24 +368,27 @@ int r100_wb_init(struct radeon_device *rdev)
 	int r;
 
 	if (rdev->wb.wb_obj == NULL) {
-		r = radeon_object_create(rdev, NULL, RADEON_GPU_PAGE_SIZE,
-					 true,
-					 RADEON_GEM_DOMAIN_GTT,
-					 false, &rdev->wb.wb_obj);
+		r = radeon_bo_create(rdev, NULL, RADEON_GPU_PAGE_SIZE, true,
+					RADEON_GEM_DOMAIN_GTT,
+					&rdev->wb.wb_obj);
 		if (r) {
-			DRM_ERROR("radeon: failed to create WB buffer (%d).\n", r);
+			dev_err(rdev->dev, "(%d) create WB buffer failed\n", r);
 			return r;
 		}
-		r = radeon_object_pin(rdev->wb.wb_obj,
-				      RADEON_GEM_DOMAIN_GTT,
-				      &rdev->wb.gpu_addr);
+		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+		if (unlikely(r != 0))
+			return r;
+		r = radeon_bo_pin(rdev->wb.wb_obj, RADEON_GEM_DOMAIN_GTT,
+					&rdev->wb.gpu_addr);
 		if (r) {
-			DRM_ERROR("radeon: failed to pin WB buffer (%d).\n", r);
+			dev_err(rdev->dev, "(%d) pin WB buffer failed\n", r);
+			radeon_bo_unreserve(rdev->wb.wb_obj);
 			return r;
 		}
-		r = radeon_object_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+		r = radeon_bo_kmap(rdev->wb.wb_obj, (void **)&rdev->wb.wb);
+		radeon_bo_unreserve(rdev->wb.wb_obj);
 		if (r) {
-			DRM_ERROR("radeon: failed to map WB buffer (%d).\n", r);
+			dev_err(rdev->dev, "(%d) map WB buffer failed\n", r);
 			return r;
 		}
 	}
@@ -290,11 +406,19 @@ void r100_wb_disable(struct radeon_device *rdev)
 
 void r100_wb_fini(struct radeon_device *rdev)
 {
+	int r;
+
 	r100_wb_disable(rdev);
 	if (rdev->wb.wb_obj) {
-		radeon_object_kunmap(rdev->wb.wb_obj);
-		radeon_object_unpin(rdev->wb.wb_obj);
-		radeon_object_unref(&rdev->wb.wb_obj);
+		r = radeon_bo_reserve(rdev->wb.wb_obj, false);
+		if (unlikely(r != 0)) {
+			dev_err(rdev->dev, "(%d) can't finish WB\n", r);
+			return;
+		}
+		radeon_bo_kunmap(rdev->wb.wb_obj);
+		radeon_bo_unpin(rdev->wb.wb_obj);
+		radeon_bo_unreserve(rdev->wb.wb_obj);
+		radeon_bo_unref(&rdev->wb.wb_obj);
 		rdev->wb.wb = NULL;
 		rdev->wb.wb_obj = NULL;
 	}
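The writeback rework in this hunk moves the buffer handling onto TTM's locking discipline: a radeon_bo must be reserved before it is pinned or mapped, and unreserved afterwards (the pin itself outlives the reservation). A toy mock of that ordering — the functions below are stand-ins, not the real radeon_bo_* API:

#include <stdio.h>

static int bo_reserve(void)    { puts("reserve");   return 0; }
static int bo_pin(void)        { puts("pin");       return 0; }
static int bo_kmap(void)       { puts("kmap");      return 0; }
static void bo_unreserve(void) { puts("unreserve"); }

int main(void)
{
	if (bo_reserve())
		return 1;
	if (bo_pin()) {		/* pin while holding the reservation */
		bo_unreserve();
		return 1;
	}
	bo_kmap();		/* map while still reserved */
	bo_unreserve();		/* drop the lock; the pin remains */
	return 0;
}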
@@ -1288,17 +1412,17 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
 
 int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
 					 struct radeon_cs_packet *pkt,
-					 struct radeon_object *robj)
+					 struct radeon_bo *robj)
 {
 	unsigned idx;
 	u32 value;
 	idx = pkt->idx + 1;
 	value = radeon_get_ib_value(p, idx + 2);
-	if ((value + 1) > radeon_object_size(robj)) {
+	if ((value + 1) > radeon_bo_size(robj)) {
 		DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
 			  "(need %u have %lu) !\n",
 			  value + 1,
-			  radeon_object_size(robj));
+			  radeon_bo_size(robj));
 		return -EINVAL;
 	}
 	return 0;
@@ -1583,6 +1707,14 @@ void r100_gpu_init(struct radeon_device *rdev)
 	r100_hdp_reset(rdev);
 }
 
+void r100_hdp_flush(struct radeon_device *rdev)
+{
+	u32 tmp;
+	tmp = RREG32(RADEON_HOST_PATH_CNTL);
+	tmp |= RADEON_HDP_READ_BUFFER_INVALIDATE;
+	WREG32(RADEON_HOST_PATH_CNTL, tmp);
+}
+
 void r100_hdp_reset(struct radeon_device *rdev)
 {
 	uint32_t tmp;
@@ -1650,6 +1782,17 @@ int r100_gpu_reset(struct radeon_device *rdev)
 	return 0;
 }
 
+void r100_set_common_regs(struct radeon_device *rdev)
+{
+	/* set these so they don't interfere with anything */
+	WREG32(RADEON_OV0_SCALE_CNTL, 0);
+	WREG32(RADEON_SUBPIC_CNTL, 0);
+	WREG32(RADEON_VIPH_CONTROL, 0);
+	WREG32(RADEON_I2C_CNTL_1, 0);
+	WREG32(RADEON_DVI_I2C_CNTL_1, 0);
+	WREG32(RADEON_CAP0_TRIG_CNTL, 0);
+	WREG32(RADEON_CAP1_TRIG_CNTL, 0);
+}
 
 /*
  * VRAM info
@@ -2594,7 +2737,7 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
 			      struct r100_cs_track *track, unsigned idx)
 {
 	unsigned face, w, h;
-	struct radeon_object *cube_robj;
+	struct radeon_bo *cube_robj;
 	unsigned long size;
 
 	for (face = 0; face < 5; face++) {
@@ -2607,9 +2750,9 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
 
 		size += track->textures[idx].cube_info[face].offset;
 
-		if (size > radeon_object_size(cube_robj)) {
+		if (size > radeon_bo_size(cube_robj)) {
 			DRM_ERROR("Cube texture offset greater than object size %lu %lu\n",
-				  size, radeon_object_size(cube_robj));
+				  size, radeon_bo_size(cube_robj));
 			r100_cs_track_texture_print(&track->textures[idx]);
 			return -1;
 		}
@@ -2620,7 +2763,7 @@ static int r100_cs_track_cube(struct radeon_device *rdev,
 static int r100_cs_track_texture_check(struct radeon_device *rdev,
 				       struct r100_cs_track *track)
 {
-	struct radeon_object *robj;
+	struct radeon_bo *robj;
 	unsigned long size;
 	unsigned u, i, w, h;
 	int ret;
@@ -2676,9 +2819,9 @@ static int r100_cs_track_texture_check(struct radeon_device *rdev,
 				  "%u\n", track->textures[u].tex_coord_type, u);
 			return -EINVAL;
 		}
-		if (size > radeon_object_size(robj)) {
+		if (size > radeon_bo_size(robj)) {
 			DRM_ERROR("Texture of unit %u needs %lu bytes but is "
-				  "%lu\n", u, size, radeon_object_size(robj));
+				  "%lu\n", u, size, radeon_bo_size(robj));
 			r100_cs_track_texture_print(&track->textures[u]);
 			return -EINVAL;
 		}
@@ -2700,10 +2843,10 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
 		}
 		size = track->cb[i].pitch * track->cb[i].cpp * track->maxy;
 		size += track->cb[i].offset;
-		if (size > radeon_object_size(track->cb[i].robj)) {
+		if (size > radeon_bo_size(track->cb[i].robj)) {
 			DRM_ERROR("[drm] Buffer too small for color buffer %d "
 				  "(need %lu have %lu) !\n", i, size,
-				  radeon_object_size(track->cb[i].robj));
+				  radeon_bo_size(track->cb[i].robj));
 			DRM_ERROR("[drm] color buffer %d (%u %u %u %u)\n",
 				  i, track->cb[i].pitch, track->cb[i].cpp,
 				  track->cb[i].offset, track->maxy);
@@ -2717,10 +2860,10 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
 		}
 		size = track->zb.pitch * track->zb.cpp * track->maxy;
 		size += track->zb.offset;
-		if (size > radeon_object_size(track->zb.robj)) {
+		if (size > radeon_bo_size(track->zb.robj)) {
 			DRM_ERROR("[drm] Buffer too small for z buffer "
 				  "(need %lu have %lu) !\n", size,
-				  radeon_object_size(track->zb.robj));
+				  radeon_bo_size(track->zb.robj));
 			DRM_ERROR("[drm] zbuffer (%u %u %u %u)\n",
 				  track->zb.pitch, track->zb.cpp,
 				  track->zb.offset, track->maxy);
@@ -2738,11 +2881,12 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
 					  "bound\n", prim_walk, i);
 				return -EINVAL;
 			}
-			if (size > radeon_object_size(track->arrays[i].robj)) {
-				DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
-					   "have %lu dwords\n", prim_walk, i,
-					   size >> 2,
-					   radeon_object_size(track->arrays[i].robj) >> 2);
+			if (size > radeon_bo_size(track->arrays[i].robj)) {
+				dev_err(rdev->dev, "(PW %u) Vertex array %u "
+					"need %lu dwords have %lu dwords\n",
+					prim_walk, i, size >> 2,
+					radeon_bo_size(track->arrays[i].robj)
+					>> 2);
 				DRM_ERROR("Max indices %u\n", track->max_indx);
 				return -EINVAL;
 			}
@@ -2756,10 +2900,12 @@ int r100_cs_track_check(struct radeon_device *rdev, struct r100_cs_track *track)
 					  "bound\n", prim_walk, i);
 				return -EINVAL;
 			}
-			if (size > radeon_object_size(track->arrays[i].robj)) {
-				DRM_ERROR("(PW %u) Vertex array %u need %lu dwords "
-					   "have %lu dwords\n", prim_walk, i, size >> 2,
-					   radeon_object_size(track->arrays[i].robj) >> 2);
+			if (size > radeon_bo_size(track->arrays[i].robj)) {
+				dev_err(rdev->dev, "(PW %u) Vertex array %u "
+					"need %lu dwords have %lu dwords\n",
+					prim_walk, i, size >> 2,
+					radeon_bo_size(track->arrays[i].robj)
+					>> 2);
 				return -EINVAL;
 			}
 		}
@@ -3101,6 +3247,9 @@ static int r100_startup(struct radeon_device *rdev)
 {
 	int r;
 
+	/* set common regs */
+	r100_set_common_regs(rdev);
+	/* program mc */
 	r100_mc_program(rdev);
 	/* Resume clock */
 	r100_clock_startup(rdev);
@@ -3108,13 +3257,13 @@ static int r100_startup(struct radeon_device *rdev)
 	r100_gpu_init(rdev);
 	/* Initialize GART (initialize after TTM so we can allocate
 	 * memory through TTM but finalize after TTM) */
+	r100_enable_bm(rdev);
 	if (rdev->flags & RADEON_IS_PCI) {
 		r = r100_pci_gart_enable(rdev);
 		if (r)
 			return r;
 	}
 	/* Enable IRQ */
-	rdev->irq.sw_int = true;
 	r100_irq_set(rdev);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
@@ -3150,6 +3299,8 @@ int r100_resume(struct radeon_device *rdev)
 	radeon_combios_asic_init(rdev->ddev);
 	/* Resume clock after posting */
 	r100_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return r100_startup(rdev);
 }
 
@@ -3174,7 +3325,7 @@ void r100_fini(struct radeon_device *rdev)
 		r100_pci_gart_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	radeon_fence_driver_fini(rdev);
-	radeon_object_fini(rdev);
+	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
 	rdev->bios = NULL;
@@ -3242,10 +3393,8 @@ int r100_init(struct radeon_device *rdev)
 			RREG32(R_0007C0_CP_STAT));
 	}
 	/* check if cards are posted or not */
-	if (!radeon_card_posted(rdev) && rdev->bios) {
-		DRM_INFO("GPU not posted. posting now...\n");
-		radeon_combios_asic_init(rdev->ddev);
-	}
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
 	/* Set asic errata */
 	r100_errata(rdev);
 	/* Initialize clocks */
@@ -3264,7 +3413,7 @@ int r100_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 	/* Memory manager */
-	r = radeon_object_init(rdev);
+	r = radeon_bo_init(rdev);
 	if (r)
 		return r;
 	if (rdev->flags & RADEON_IS_PCI) {

+ 5 - 5
drivers/gpu/drm/radeon/r100_track.h

@@ -10,26 +10,26 @@
  * CS functions
  */
 struct r100_cs_track_cb {
-	struct radeon_object	*robj;
+	struct radeon_bo	*robj;
 	unsigned		pitch;
 	unsigned		cpp;
 	unsigned		offset;
 };
 
 struct r100_cs_track_array {
-	struct radeon_object	*robj;
+	struct radeon_bo	*robj;
 	unsigned		esize;
 };
 
 struct r100_cs_cube_info {
-	struct radeon_object	*robj;
-	unsigned                offset;
+	struct radeon_bo	*robj;
+	unsigned		offset;
 	unsigned		width;
 	unsigned		height;
 };
 
 struct r100_cs_track_texture {
-	struct radeon_object	*robj;
+	struct radeon_bo	*robj;
 	struct r100_cs_cube_info cube_info[5]; /* info for 5 non-primary faces */
 	unsigned		pitch;
 	unsigned		width;

+ 23 - 10
drivers/gpu/drm/radeon/r300.c

@@ -137,14 +137,19 @@ int rv370_pcie_gart_enable(struct radeon_device *rdev)
 
 void rv370_pcie_gart_disable(struct radeon_device *rdev)
 {
-	uint32_t tmp;
+	u32 tmp;
+	int r;
 
 	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
 	tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD;
 	WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN);
 	if (rdev->gart.table.vram.robj) {
-		radeon_object_kunmap(rdev->gart.table.vram.robj);
-		radeon_object_unpin(rdev->gart.table.vram.robj);
+		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(rdev->gart.table.vram.robj);
+			radeon_bo_unpin(rdev->gart.table.vram.robj);
+			radeon_bo_unreserve(rdev->gart.table.vram.robj);
+		}
 	}
 }
 
@@ -1181,6 +1186,9 @@ static int r300_startup(struct radeon_device *rdev)
 {
 	int r;
 
+	/* set common regs */
+	r100_set_common_regs(rdev);
+	/* program mc */
 	r300_mc_program(rdev);
 	/* Resume clock */
 	r300_clock_startup(rdev);
@@ -1193,13 +1201,18 @@ static int r300_startup(struct radeon_device *rdev)
 		if (r)
 			return r;
 	}
+
+	if (rdev->family == CHIP_R300 ||
+	    rdev->family == CHIP_R350 ||
+	    rdev->family == CHIP_RV350)
+		r100_enable_bm(rdev);
+
 	if (rdev->flags & RADEON_IS_PCI) {
 		r = r100_pci_gart_enable(rdev);
 		if (r)
 			return r;
 	}
 	/* Enable IRQ */
-	rdev->irq.sw_int = true;
 	r100_irq_set(rdev);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
@@ -1237,6 +1250,8 @@ int r300_resume(struct radeon_device *rdev)
 	radeon_combios_asic_init(rdev->ddev);
 	/* Resume clock after posting */
 	r300_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return r300_startup(rdev);
 }
 
@@ -1265,7 +1280,7 @@ void r300_fini(struct radeon_device *rdev)
 		r100_pci_gart_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	radeon_fence_driver_fini(rdev);
-	radeon_object_fini(rdev);
+	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
 	rdev->bios = NULL;
@@ -1303,10 +1318,8 @@ int r300_init(struct radeon_device *rdev)
 			RREG32(R_0007C0_CP_STAT));
 	}
 	/* check if cards are posted or not */
-	if (!radeon_card_posted(rdev) && rdev->bios) {
-		DRM_INFO("GPU not posted. posting now...\n");
-		radeon_combios_asic_init(rdev->ddev);
-	}
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
 	/* Set asic errata */
 	r300_errata(rdev);
 	/* Initialize clocks */
@@ -1325,7 +1338,7 @@ int r300_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 	/* Memory manager */
-	r = radeon_object_init(rdev);
+	r = radeon_bo_init(rdev);
 	if (r)
 		return r;
 	if (rdev->flags & RADEON_IS_PCIE) {

+ 13 - 12
drivers/gpu/drm/radeon/r420.c

@@ -169,6 +169,9 @@ static int r420_startup(struct radeon_device *rdev)
 {
 	int r;
 
+	/* set common regs */
+	r100_set_common_regs(rdev);
+	/* program mc */
 	r300_mc_program(rdev);
 	/* Resume clock */
 	r420_clock_resume(rdev);
@@ -186,7 +189,6 @@ static int r420_startup(struct radeon_device *rdev)
 	}
 	r420_pipes_init(rdev);
 	/* Enable IRQ */
-	rdev->irq.sw_int = true;
 	r100_irq_set(rdev);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
@@ -229,7 +231,8 @@ int r420_resume(struct radeon_device *rdev)
 	}
 	/* Resume clock after posting */
 	r420_clock_resume(rdev);
-
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return r420_startup(rdev);
 }
 
@@ -258,7 +261,7 @@ void r420_fini(struct radeon_device *rdev)
 	radeon_agp_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	radeon_fence_driver_fini(rdev);
-	radeon_object_fini(rdev);
+	radeon_bo_fini(rdev);
 	if (rdev->is_atom_bios) {
 		radeon_atombios_fini(rdev);
 	} else {
@@ -301,14 +304,9 @@ int r420_init(struct radeon_device *rdev)
 			RREG32(R_0007C0_CP_STAT));
 	}
 	/* check if cards are posted or not */
-	if (!radeon_card_posted(rdev) && rdev->bios) {
-		DRM_INFO("GPU not posted. posting now...\n");
-		if (rdev->is_atom_bios) {
-			atom_asic_init(rdev->mode_info.atom_context);
-		} else {
-			radeon_combios_asic_init(rdev->ddev);
-		}
-	}
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
+
 	/* Initialize clocks */
 	radeon_get_clock_info(rdev->ddev);
 	/* Initialize power management */
@@ -331,10 +329,13 @@ int r420_init(struct radeon_device *rdev)
 		return r;
 	}
 	/* Memory manager */
-	r = radeon_object_init(rdev);
+	r = radeon_bo_init(rdev);
 	if (r) {
 		return r;
 	}
+	if (rdev->family == CHIP_R420)
+		r100_enable_bm(rdev);
+
 	if (rdev->flags & RADEON_IS_PCIE) {
 		r = rv370_pcie_gart_init(rdev);
 		if (r)

+ 2 - 0
drivers/gpu/drm/radeon/r500_reg.h

@@ -716,6 +716,8 @@
 
 #define AVIVO_DVOA_BIT_DEPTH_CONTROL			0x7988
 
+#define AVIVO_DC_GPIO_HPD_A                 0x7e94
+
 #define AVIVO_GPIO_0                        0x7e30
 #define AVIVO_GPIO_1                        0x7e40
 #define AVIVO_GPIO_2                        0x7e50

+ 6 - 2
drivers/gpu/drm/radeon/r520.c

@@ -185,7 +185,6 @@ static int r520_startup(struct radeon_device *rdev)
 			return r;
 	}
 	/* Enable IRQ */
-	rdev->irq.sw_int = true;
 	rs600_irq_set(rdev);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
@@ -221,6 +220,8 @@ int r520_resume(struct radeon_device *rdev)
 	atom_asic_init(rdev->mode_info.atom_context);
 	/* Resume clock after posting */
 	rv515_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return r520_startup(rdev);
 }
 
@@ -254,6 +255,9 @@ int r520_init(struct radeon_device *rdev)
 			RREG32(R_0007C0_CP_STAT));
 	}
 	/* check if cards are posted or not */
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
+
 	if (!radeon_card_posted(rdev) && rdev->bios) {
 		DRM_INFO("GPU not posted. posting now...\n");
 		atom_asic_init(rdev->mode_info.atom_context);
@@ -277,7 +281,7 @@ int r520_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 	/* Memory manager */
-	r = radeon_object_init(rdev);
+	r = radeon_bo_init(rdev);
 	if (r)
 		return r;
 	r = rv370_pcie_gart_init(rdev);

File diff suppressed because it is too large
+ 1077 - 56
drivers/gpu/drm/radeon/r600.c


+ 21 - 13
drivers/gpu/drm/radeon/r600_blit_kms.c

@@ -473,9 +473,8 @@ int r600_blit_init(struct radeon_device *rdev)
 	obj_size += r6xx_ps_size * 4;
 	obj_size = ALIGN(obj_size, 256);
 
-	r = radeon_object_create(rdev, NULL, obj_size,
-				 true, RADEON_GEM_DOMAIN_VRAM,
-				 false, &rdev->r600_blit.shader_obj);
+	r = radeon_bo_create(rdev, NULL, obj_size, true, RADEON_GEM_DOMAIN_VRAM,
+				&rdev->r600_blit.shader_obj);
 	if (r) {
 		DRM_ERROR("r600 failed to allocate shader\n");
 		return r;
@@ -485,12 +484,14 @@ int r600_blit_init(struct radeon_device *rdev)
 		  obj_size,
 		  rdev->r600_blit.vs_offset, rdev->r600_blit.ps_offset);
 
-	r = radeon_object_kmap(rdev->r600_blit.shader_obj, &ptr);
+	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_kmap(rdev->r600_blit.shader_obj, &ptr);
 	if (r) {
 		DRM_ERROR("failed to map blit object %d\n", r);
 		return r;
 	}
-
 	if (rdev->family >= CHIP_RV770)
 		memcpy_toio(ptr + rdev->r600_blit.state_offset,
 			    r7xx_default_state, rdev->r600_blit.state_len * 4);
@@ -500,19 +501,26 @@ int r600_blit_init(struct radeon_device *rdev)
 	if (num_packet2s)
 		memcpy_toio(ptr + rdev->r600_blit.state_offset + (rdev->r600_blit.state_len * 4),
 			    packet2s, num_packet2s * 4);
-
-
 	memcpy(ptr + rdev->r600_blit.vs_offset, r6xx_vs, r6xx_vs_size * 4);
 	memcpy(ptr + rdev->r600_blit.ps_offset, r6xx_ps, r6xx_ps_size * 4);
-
-	radeon_object_kunmap(rdev->r600_blit.shader_obj);
+	radeon_bo_kunmap(rdev->r600_blit.shader_obj);
+	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 	return 0;
 }
 
 void r600_blit_fini(struct radeon_device *rdev)
 {
-	radeon_object_unpin(rdev->r600_blit.shader_obj);
-	radeon_object_unref(&rdev->r600_blit.shader_obj);
+	int r;
+
+	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+	if (unlikely(r != 0)) {
+		dev_err(rdev->dev, "(%d) can't finish r600 blit\n", r);
+		goto out_unref;
+	}
+	radeon_bo_unpin(rdev->r600_blit.shader_obj);
+	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+out_unref:
+	radeon_bo_unref(&rdev->r600_blit.shader_obj);
 }
 
 int r600_vb_ib_get(struct radeon_device *rdev)
@@ -569,9 +577,9 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
 	ring_size = num_loops * dwords_per_loop;
 	/* set default  + shaders */
 	ring_size += 40; /* shaders + def state */
-	ring_size += 3; /* fence emit for VB IB */
+	ring_size += 5; /* fence emit for VB IB */
 	ring_size += 5; /* done copy */
-	ring_size += 3; /* fence emit for done copy */
+	ring_size += 5; /* fence emit for done copy */
 	r = radeon_ring_lock(rdev, ring_size);
 	WARN_ON(r);
 

+ 210 - 2
drivers/gpu/drm/radeon/r600d.h

@@ -456,7 +456,215 @@
 #define         WAIT_2D_IDLECLEAN_bit                           (1 << 16)
 #define         WAIT_3D_IDLECLEAN_bit                           (1 << 17)
 
-
+#define IH_RB_CNTL                                        0x3e00
+#       define IH_RB_ENABLE                               (1 << 0)
+#       define IH_IB_SIZE(x)                              ((x) << 1) /* log2 */
+#       define IH_RB_FULL_DRAIN_ENABLE                    (1 << 6)
+#       define IH_WPTR_WRITEBACK_ENABLE                   (1 << 8)
+#       define IH_WPTR_WRITEBACK_TIMER(x)                 ((x) << 9) /* log2 */
+#       define IH_WPTR_OVERFLOW_ENABLE                    (1 << 16)
+#       define IH_WPTR_OVERFLOW_CLEAR                     (1 << 31)
+#define IH_RB_BASE                                        0x3e04
+#define IH_RB_RPTR                                        0x3e08
+#define IH_RB_WPTR                                        0x3e0c
+#       define RB_OVERFLOW                                (1 << 0)
+#       define WPTR_OFFSET_MASK                           0x3fffc
+#define IH_RB_WPTR_ADDR_HI                                0x3e10
+#define IH_RB_WPTR_ADDR_LO                                0x3e14
+#define IH_CNTL                                           0x3e18
+#       define ENABLE_INTR                                (1 << 0)
+#       define IH_MC_SWAP(x)                              ((x) << 2)
+#       define IH_MC_SWAP_NONE                            0
+#       define IH_MC_SWAP_16BIT                           1
+#       define IH_MC_SWAP_32BIT                           2
+#       define IH_MC_SWAP_64BIT                           3
+#       define RPTR_REARM                                 (1 << 4)
+#       define MC_WRREQ_CREDIT(x)                         ((x) << 15)
+#       define MC_WR_CLEAN_CNT(x)                         ((x) << 20)
+
+#define RLC_CNTL                                          0x3f00
+#       define RLC_ENABLE                                 (1 << 0)
+#define RLC_HB_BASE                                       0x3f10
+#define RLC_HB_CNTL                                       0x3f0c
+#define RLC_HB_RPTR                                       0x3f20
+#define RLC_HB_WPTR                                       0x3f1c
+#define RLC_HB_WPTR_LSB_ADDR                              0x3f14
+#define RLC_HB_WPTR_MSB_ADDR                              0x3f18
+#define RLC_MC_CNTL                                       0x3f44
+#define RLC_UCODE_CNTL                                    0x3f48
+#define RLC_UCODE_ADDR                                    0x3f2c
+#define RLC_UCODE_DATA                                    0x3f30
+
+#define SRBM_SOFT_RESET                                   0xe60
+#       define SOFT_RESET_RLC                             (1 << 13)
+
+#define CP_INT_CNTL                                       0xc124
+#       define CNTX_BUSY_INT_ENABLE                       (1 << 19)
+#       define CNTX_EMPTY_INT_ENABLE                      (1 << 20)
+#       define SCRATCH_INT_ENABLE                         (1 << 25)
+#       define TIME_STAMP_INT_ENABLE                      (1 << 26)
+#       define IB2_INT_ENABLE                             (1 << 29)
+#       define IB1_INT_ENABLE                             (1 << 30)
+#       define RB_INT_ENABLE                              (1 << 31)
+#define CP_INT_STATUS                                     0xc128
+#       define SCRATCH_INT_STAT                           (1 << 25)
+#       define TIME_STAMP_INT_STAT                        (1 << 26)
+#       define IB2_INT_STAT                               (1 << 29)
+#       define IB1_INT_STAT                               (1 << 30)
+#       define RB_INT_STAT                                (1 << 31)
+
+#define GRBM_INT_CNTL                                     0x8060
+#       define RDERR_INT_ENABLE                           (1 << 0)
+#       define WAIT_COUNT_TIMEOUT_INT_ENABLE              (1 << 1)
+#       define GUI_IDLE_INT_ENABLE                        (1 << 19)
+
+#define INTERRUPT_CNTL                                    0x5468
+#       define IH_DUMMY_RD_OVERRIDE                       (1 << 0)
+#       define IH_DUMMY_RD_EN                             (1 << 1)
+#       define IH_REQ_NONSNOOP_EN                         (1 << 3)
+#       define GEN_IH_INT_EN                              (1 << 8)
+#define INTERRUPT_CNTL2                                   0x546c
+
+#define D1MODE_VBLANK_STATUS                              0x6534
+#define D2MODE_VBLANK_STATUS                              0x6d34
+#       define DxMODE_VBLANK_OCCURRED                     (1 << 0)
+#       define DxMODE_VBLANK_ACK                          (1 << 4)
+#       define DxMODE_VBLANK_STAT                         (1 << 12)
+#       define DxMODE_VBLANK_INTERRUPT                    (1 << 16)
+#       define DxMODE_VBLANK_INTERRUPT_TYPE               (1 << 17)
+#define D1MODE_VLINE_STATUS                               0x653c
+#define D2MODE_VLINE_STATUS                               0x6d3c
+#       define DxMODE_VLINE_OCCURRED                      (1 << 0)
+#       define DxMODE_VLINE_ACK                           (1 << 4)
+#       define DxMODE_VLINE_STAT                          (1 << 12)
+#       define DxMODE_VLINE_INTERRUPT                     (1 << 16)
+#       define DxMODE_VLINE_INTERRUPT_TYPE                (1 << 17)
+#define DxMODE_INT_MASK                                   0x6540
+#       define D1MODE_VBLANK_INT_MASK                     (1 << 0)
+#       define D1MODE_VLINE_INT_MASK                      (1 << 4)
+#       define D2MODE_VBLANK_INT_MASK                     (1 << 8)
+#       define D2MODE_VLINE_INT_MASK                      (1 << 12)
+#define DCE3_DISP_INTERRUPT_STATUS                        0x7ddc
+#       define DC_HPD1_INTERRUPT                          (1 << 18)
+#       define DC_HPD2_INTERRUPT                          (1 << 19)
+#define DISP_INTERRUPT_STATUS                             0x7edc
+#       define LB_D1_VLINE_INTERRUPT                      (1 << 2)
+#       define LB_D2_VLINE_INTERRUPT                      (1 << 3)
+#       define LB_D1_VBLANK_INTERRUPT                     (1 << 4)
+#       define LB_D2_VBLANK_INTERRUPT                     (1 << 5)
+#       define DACA_AUTODETECT_INTERRUPT                  (1 << 16)
+#       define DACB_AUTODETECT_INTERRUPT                  (1 << 17)
+#       define DC_HOT_PLUG_DETECT1_INTERRUPT              (1 << 18)
+#       define DC_HOT_PLUG_DETECT2_INTERRUPT              (1 << 19)
+#       define DC_I2C_SW_DONE_INTERRUPT                   (1 << 20)
+#       define DC_I2C_HW_DONE_INTERRUPT                   (1 << 21)
+#define DISP_INTERRUPT_STATUS_CONTINUE                    0x7ee8
+#define DCE3_DISP_INTERRUPT_STATUS_CONTINUE               0x7de8
+#       define DC_HPD4_INTERRUPT                          (1 << 14)
+#       define DC_HPD4_RX_INTERRUPT                       (1 << 15)
+#       define DC_HPD3_INTERRUPT                          (1 << 28)
+#       define DC_HPD1_RX_INTERRUPT                       (1 << 29)
+#       define DC_HPD2_RX_INTERRUPT                       (1 << 30)
+#define DCE3_DISP_INTERRUPT_STATUS_CONTINUE2              0x7dec
+#       define DC_HPD3_RX_INTERRUPT                       (1 << 0)
+#       define DIGA_DP_VID_STREAM_DISABLE_INTERRUPT       (1 << 1)
+#       define DIGA_DP_STEER_FIFO_OVERFLOW_INTERRUPT      (1 << 2)
+#       define DIGB_DP_VID_STREAM_DISABLE_INTERRUPT       (1 << 3)
+#       define DIGB_DP_STEER_FIFO_OVERFLOW_INTERRUPT      (1 << 4)
+#       define AUX1_SW_DONE_INTERRUPT                     (1 << 5)
+#       define AUX1_LS_DONE_INTERRUPT                     (1 << 6)
+#       define AUX2_SW_DONE_INTERRUPT                     (1 << 7)
+#       define AUX2_LS_DONE_INTERRUPT                     (1 << 8)
+#       define AUX3_SW_DONE_INTERRUPT                     (1 << 9)
+#       define AUX3_LS_DONE_INTERRUPT                     (1 << 10)
+#       define AUX4_SW_DONE_INTERRUPT                     (1 << 11)
+#       define AUX4_LS_DONE_INTERRUPT                     (1 << 12)
+#       define DIGA_DP_FAST_TRAINING_COMPLETE_INTERRUPT   (1 << 13)
+#       define DIGB_DP_FAST_TRAINING_COMPLETE_INTERRUPT   (1 << 14)
+/* DCE 3.2 */
+#       define AUX5_SW_DONE_INTERRUPT                     (1 << 15)
+#       define AUX5_LS_DONE_INTERRUPT                     (1 << 16)
+#       define AUX6_SW_DONE_INTERRUPT                     (1 << 17)
+#       define AUX6_LS_DONE_INTERRUPT                     (1 << 18)
+#       define DC_HPD5_INTERRUPT                          (1 << 19)
+#       define DC_HPD5_RX_INTERRUPT                       (1 << 20)
+#       define DC_HPD6_INTERRUPT                          (1 << 21)
+#       define DC_HPD6_RX_INTERRUPT                       (1 << 22)
+
+#define DACA_AUTO_DETECT_CONTROL                          0x7828
+#define DACB_AUTO_DETECT_CONTROL                          0x7a28
+#define DCE3_DACA_AUTO_DETECT_CONTROL                     0x7028
+#define DCE3_DACB_AUTO_DETECT_CONTROL                     0x7128
+#       define DACx_AUTODETECT_MODE(x)                    ((x) << 0)
+#       define DACx_AUTODETECT_MODE_NONE                  0
+#       define DACx_AUTODETECT_MODE_CONNECT               1
+#       define DACx_AUTODETECT_MODE_DISCONNECT            2
+#       define DACx_AUTODETECT_FRAME_TIME_COUNTER(x)      ((x) << 8)
+/* bit 18 = R/C, 17 = G/Y, 16 = B/Comp */
+#       define DACx_AUTODETECT_CHECK_MASK(x)              ((x) << 16)
+
+#define DCE3_DACA_AUTODETECT_INT_CONTROL                  0x7038
+#define DCE3_DACB_AUTODETECT_INT_CONTROL                  0x7138
+#define DACA_AUTODETECT_INT_CONTROL                       0x7838
+#define DACB_AUTODETECT_INT_CONTROL                       0x7a38
+#       define DACx_AUTODETECT_ACK                        (1 << 0)
+#       define DACx_AUTODETECT_INT_ENABLE                 (1 << 16)
+
+#define DC_HOT_PLUG_DETECT1_CONTROL                       0x7d00
+#define DC_HOT_PLUG_DETECT2_CONTROL                       0x7d10
+#define DC_HOT_PLUG_DETECT3_CONTROL                       0x7d24
+#       define DC_HOT_PLUG_DETECTx_EN                     (1 << 0)
+
+#define DC_HOT_PLUG_DETECT1_INT_STATUS                    0x7d04
+#define DC_HOT_PLUG_DETECT2_INT_STATUS                    0x7d14
+#define DC_HOT_PLUG_DETECT3_INT_STATUS                    0x7d28
+#       define DC_HOT_PLUG_DETECTx_INT_STATUS             (1 << 0)
+#       define DC_HOT_PLUG_DETECTx_SENSE                  (1 << 1)
+
+/* DCE 3.0 */
+#define DC_HPD1_INT_STATUS                                0x7d00
+#define DC_HPD2_INT_STATUS                                0x7d0c
+#define DC_HPD3_INT_STATUS                                0x7d18
+#define DC_HPD4_INT_STATUS                                0x7d24
+/* DCE 3.2 */
+#define DC_HPD5_INT_STATUS                                0x7dc0
+#define DC_HPD6_INT_STATUS                                0x7df4
+#       define DC_HPDx_INT_STATUS                         (1 << 0)
+#       define DC_HPDx_SENSE                              (1 << 1)
+#       define DC_HPDx_RX_INT_STATUS                      (1 << 8)
+
+#define DC_HOT_PLUG_DETECT1_INT_CONTROL                   0x7d08
+#define DC_HOT_PLUG_DETECT2_INT_CONTROL                   0x7d18
+#define DC_HOT_PLUG_DETECT3_INT_CONTROL                   0x7d2c
+#       define DC_HOT_PLUG_DETECTx_INT_ACK                (1 << 0)
+#       define DC_HOT_PLUG_DETECTx_INT_POLARITY           (1 << 8)
+#       define DC_HOT_PLUG_DETECTx_INT_EN                 (1 << 16)
+/* DCE 3.0 */
+#define DC_HPD1_INT_CONTROL                               0x7d04
+#define DC_HPD2_INT_CONTROL                               0x7d10
+#define DC_HPD3_INT_CONTROL                               0x7d1c
+#define DC_HPD4_INT_CONTROL                               0x7d28
+/* DCE 3.2 */
+#define DC_HPD5_INT_CONTROL                               0x7dc4
+#define DC_HPD6_INT_CONTROL                               0x7df8
+#       define DC_HPDx_INT_ACK                            (1 << 0)
+#       define DC_HPDx_INT_POLARITY                       (1 << 8)
+#       define DC_HPDx_INT_EN                             (1 << 16)
+#       define DC_HPDx_RX_INT_ACK                         (1 << 20)
+#       define DC_HPDx_RX_INT_EN                          (1 << 24)
+
+/* DCE 3.0 */
+#define DC_HPD1_CONTROL                                   0x7d08
+#define DC_HPD2_CONTROL                                   0x7d14
+#define DC_HPD3_CONTROL                                   0x7d20
+#define DC_HPD4_CONTROL                                   0x7d2c
+/* DCE 3.2 */
+#define DC_HPD5_CONTROL                                   0x7dc8
+#define DC_HPD6_CONTROL                                   0x7dfc
+#       define DC_HPDx_CONNECTION_TIMER(x)                ((x) << 0)
+#       define DC_HPDx_RX_INT_TIMER(x)                    ((x) << 16)
+/* DCE 3.2 */
+#       define DC_HPDx_EN                                 (1 << 28)
 
 /*
  * PM4
@@ -500,7 +708,6 @@
 #define	PACKET3_WAIT_REG_MEM				0x3C
 #define	PACKET3_MEM_WRITE				0x3D
 #define	PACKET3_INDIRECT_BUFFER				0x32
-#define	PACKET3_CP_INTERRUPT				0x40
 #define	PACKET3_SURFACE_SYNC				0x43
 #              define PACKET3_CB0_DEST_BASE_ENA    (1 << 6)
 #              define PACKET3_TC_ACTION_ENA        (1 << 23)
@@ -674,4 +881,5 @@
 #define		S_000E60_SOFT_RESET_TSC(x)		(((x) & 1) << 16)
 #define		S_000E60_SOFT_RESET_VMC(x)		(((x) & 1) << 17)
 
+#define R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL		0x5480
 #endif
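The new IH (interrupt handler) ring registers above follow the shifted-field convention used throughout r600d.h, so a control word is just the OR of its fields. A sketch of composing IH_RB_CNTL with the macros defined in this hunk; the chosen ring size is an arbitrary example, not necessarily what the driver programs:

#include <stdint.h>
#include <stdio.h>

#define IH_RB_ENABLE             (1 << 0)
#define IH_IB_SIZE(x)            ((x) << 1)	/* log2 of ring size in dwords */
#define IH_WPTR_WRITEBACK_ENABLE (1 << 8)
#define IH_WPTR_OVERFLOW_ENABLE  (1 << 16)

int main(void)
{
	/* A 64KB ring holds 16384 dwords, i.e. 2^14. */
	uint32_t cntl = IH_WPTR_OVERFLOW_ENABLE |
			IH_WPTR_WRITEBACK_ENABLE |
			IH_IB_SIZE(14) |
			IH_RB_ENABLE;

	printf("IH_RB_CNTL = 0x%08x\n", cntl);
	return 0;
}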

+ 103 - 62
drivers/gpu/drm/radeon/radeon.h

@@ -28,8 +28,6 @@
 #ifndef __RADEON_H__
 #define __RADEON_H__
 
-#include "radeon_object.h"
-
 /* TODO: Here are things that need to be done:
  *	- surface allocator & initializer : (bit like scratch reg) should
  *	  initialize HDP_ stuff on RS600, R600, R700 hw, well anythings
@@ -67,6 +65,11 @@
 #include <linux/list.h>
 #include <linux/kref.h>
 
+#include <ttm/ttm_bo_api.h>
+#include <ttm/ttm_bo_driver.h>
+#include <ttm/ttm_placement.h>
+#include <ttm/ttm_module.h>
+
 #include "radeon_family.h"
 #include "radeon_mode.h"
 #include "radeon_reg.h"
@@ -85,6 +88,7 @@ extern int radeon_benchmarking;
 extern int radeon_testing;
 extern int radeon_connector_table;
 extern int radeon_tv;
+extern int radeon_new_pll;
 
 /*
  * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -186,76 +190,62 @@ void radeon_fence_unref(struct radeon_fence **fence);
  * Tiling registers
  */
 struct radeon_surface_reg {
-	struct radeon_object *robj;
+	struct radeon_bo *bo;
 };
 
 #define RADEON_GEM_MAX_SURFACES 8
 
 /*
- * Radeon buffer.
+ * TTM.
  */
-struct radeon_object;
+struct radeon_mman {
+	struct ttm_bo_global_ref        bo_global_ref;
+	struct ttm_global_reference	mem_global_ref;
+	bool				mem_global_referenced;
+	struct ttm_bo_device		bdev;
+};
+
+struct radeon_bo {
+	/* Protected by gem.mutex */
+	struct list_head		list;
+	/* Protected by tbo.reserved */
+	u32				placements[3];
+	struct ttm_placement		placement;
+	struct ttm_buffer_object	tbo;
+	struct ttm_bo_kmap_obj		kmap;
+	unsigned			pin_count;
+	void				*kptr;
+	u32				tiling_flags;
+	u32				pitch;
+	int				surface_reg;
+	/* Constant after initialization */
+	struct radeon_device		*rdev;
+	struct drm_gem_object		*gobj;
+};
 
-struct radeon_object_list {
+struct radeon_bo_list {
 	struct list_head	list;
-	struct radeon_object	*robj;
+	struct radeon_bo	*bo;
 	uint64_t		gpu_offset;
 	unsigned		rdomain;
 	unsigned		wdomain;
-	uint32_t                tiling_flags;
+	u32			tiling_flags;
 };
 
-int radeon_object_init(struct radeon_device *rdev);
-void radeon_object_fini(struct radeon_device *rdev);
-int radeon_object_create(struct radeon_device *rdev,
-			 struct drm_gem_object *gobj,
-			 unsigned long size,
-			 bool kernel,
-			 uint32_t domain,
-			 bool interruptible,
-			 struct radeon_object **robj_ptr);
-int radeon_object_kmap(struct radeon_object *robj, void **ptr);
-void radeon_object_kunmap(struct radeon_object *robj);
-void radeon_object_unref(struct radeon_object **robj);
-int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
-		      uint64_t *gpu_addr);
-void radeon_object_unpin(struct radeon_object *robj);
-int radeon_object_wait(struct radeon_object *robj);
-int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement);
-int radeon_object_evict_vram(struct radeon_device *rdev);
-int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset);
-void radeon_object_force_delete(struct radeon_device *rdev);
-void radeon_object_list_add_object(struct radeon_object_list *lobj,
-				   struct list_head *head);
-int radeon_object_list_validate(struct list_head *head, void *fence);
-void radeon_object_list_unvalidate(struct list_head *head);
-void radeon_object_list_clean(struct list_head *head);
-int radeon_object_fbdev_mmap(struct radeon_object *robj,
-			     struct vm_area_struct *vma);
-unsigned long radeon_object_size(struct radeon_object *robj);
-void radeon_object_clear_surface_reg(struct radeon_object *robj);
-int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved,
-			       bool force_drop);
-void radeon_object_set_tiling_flags(struct radeon_object *robj,
-				    uint32_t tiling_flags, uint32_t pitch);
-void radeon_object_get_tiling_flags(struct radeon_object *robj, uint32_t *tiling_flags, uint32_t *pitch);
-void radeon_bo_move_notify(struct ttm_buffer_object *bo,
-			   struct ttm_mem_reg *mem);
-void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
 /*
  * GEM objects.
  */
 struct radeon_gem {
+	struct mutex		mutex;
 	struct list_head	objects;
 };
 
 int radeon_gem_init(struct radeon_device *rdev);
 void radeon_gem_fini(struct radeon_device *rdev);
 int radeon_gem_object_create(struct radeon_device *rdev, int size,
-			     int alignment, int initial_domain,
-			     bool discardable, bool kernel,
-			     bool interruptible,
-			     struct drm_gem_object **obj);
+				int alignment, int initial_domain,
+				bool discardable, bool kernel,
+				struct drm_gem_object **obj);
 int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
 			  uint64_t *gpu_addr);
 void radeon_gem_object_unpin(struct drm_gem_object *obj);
@@ -271,7 +261,7 @@ struct radeon_gart_table_ram {
 };
 
 struct radeon_gart_table_vram {
-	struct radeon_object		*robj;
+	struct radeon_bo		*robj;
 	volatile uint32_t		*ptr;
 };
 
@@ -352,11 +342,16 @@ struct radeon_irq {
 	bool		sw_int;
 	/* FIXME: use a define max crtc rather than hardcode it */
 	bool		crtc_vblank_int[2];
+	/* FIXME: use defines for max hpd/dacs */
+	bool            hpd[6];
+	spinlock_t sw_lock;
+	int sw_refcount;
 };
 
 int radeon_irq_kms_init(struct radeon_device *rdev);
 void radeon_irq_kms_fini(struct radeon_device *rdev);
-
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev);
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev);
 
 /*
  * CP & ring.
@@ -376,7 +371,7 @@ struct radeon_ib {
  */
 struct radeon_ib_pool {
 	struct mutex		mutex;
-	struct radeon_object	*robj;
+	struct radeon_bo	*robj;
 	struct list_head	scheduled_ibs;
 	struct radeon_ib	ibs[RADEON_IB_POOL_SIZE];
 	bool			ready;
@@ -384,7 +379,7 @@ struct radeon_ib_pool {
 };
 
 struct radeon_cp {
-	struct radeon_object	*ring_obj;
+	struct radeon_bo	*ring_obj;
 	volatile uint32_t	*ring;
 	unsigned		rptr;
 	unsigned		wptr;
@@ -399,8 +394,25 @@ struct radeon_cp {
 	bool			ready;
 };
 
+/*
+ * R6xx+ IH ring
+ */
+struct r600_ih {
+	struct radeon_bo	*ring_obj;
+	volatile uint32_t	*ring;
+	unsigned		rptr;
+	unsigned		wptr;
+	unsigned		wptr_old;
+	unsigned		ring_size;
+	uint64_t		gpu_addr;
+	uint32_t		align_mask;
+	uint32_t		ptr_mask;
+	spinlock_t              lock;
+	bool                    enabled;
+};
+
 struct r600_blit {
-	struct radeon_object	*shader_obj;
+	struct radeon_bo	*shader_obj;
 	u64 shader_gpu_addr;
 	u32 vs_offset, ps_offset;
 	u32 state_offset;
@@ -430,8 +442,8 @@ void radeon_ring_fini(struct radeon_device *rdev);
  */
 struct radeon_cs_reloc {
 	struct drm_gem_object		*gobj;
-	struct radeon_object		*robj;
-	struct radeon_object_list	lobj;
+	struct radeon_bo		*robj;
+	struct radeon_bo_list		lobj;
 	uint32_t			handle;
 	uint32_t			flags;
 };
@@ -527,7 +539,7 @@ void radeon_agp_fini(struct radeon_device *rdev);
  * Writeback
  */
 struct radeon_wb {
-	struct radeon_object	*wb_obj;
+	struct radeon_bo	*wb_obj;
 	volatile uint32_t	*wb;
 	uint64_t		gpu_addr;
 };
@@ -639,6 +651,11 @@ struct radeon_asic {
 			       uint32_t offset, uint32_t obj_size);
 	int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
 	void (*bandwidth_update)(struct radeon_device *rdev);
+	void (*hdp_flush)(struct radeon_device *rdev);
+	void (*hpd_init)(struct radeon_device *rdev);
+	void (*hpd_fini)(struct radeon_device *rdev);
+	bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+	void (*hpd_set_polarity)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
 };
 
 /*
@@ -751,9 +768,9 @@ struct radeon_device {
 	uint8_t				*bios;
 	bool				is_atom_bios;
 	uint16_t			bios_header_start;
-	struct radeon_object		*stollen_vga_memory;
+	struct radeon_bo		*stollen_vga_memory;
 	struct fb_info			*fbdev_info;
-	struct radeon_object		*fbdev_robj;
+	struct radeon_bo		*fbdev_rbo;
 	struct radeon_framebuffer	*fbdev_rfb;
 	/* Register mmio */
 	resource_size_t			rmmio_base;
@@ -791,8 +808,12 @@ struct radeon_device {
 	struct radeon_surface_reg surface_regs[RADEON_GEM_MAX_SURFACES];
 	const struct firmware *me_fw;	/* all family ME firmware */
 	const struct firmware *pfp_fw;	/* r6/700 PFP firmware */
+	const struct firmware *rlc_fw;	/* r6/700 RLC firmware */
 	struct r600_blit r600_blit;
 	int msi_enabled; /* msi enabled */
+	struct r600_ih ih; /* r6/700 interrupt ring */
+	struct workqueue_struct *wq;
+	struct work_struct hotplug_work;
 };
 
 int radeon_device_init(struct radeon_device *rdev,
@@ -829,6 +850,10 @@ static inline void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32
 	}
 }
 
+/*
+ * Cast helper
+ */
+#define to_radeon_fence(p) ((struct radeon_fence *)(p))
 
 /*
  * Registers read & write functions.
@@ -965,18 +990,24 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
 #define radeon_get_engine_clock(rdev) (rdev)->asic->get_engine_clock((rdev))
 #define radeon_set_engine_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
 #define radeon_get_memory_clock(rdev) (rdev)->asic->get_memory_clock((rdev))
-#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_engine_clock((rdev), (e))
+#define radeon_set_memory_clock(rdev, e) (rdev)->asic->set_memory_clock((rdev), (e))
 #define radeon_set_pcie_lanes(rdev, l) (rdev)->asic->set_pcie_lanes((rdev), (l))
 #define radeon_set_clock_gating(rdev, e) (rdev)->asic->set_clock_gating((rdev), (e))
 #define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
 #define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r)))
 #define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev))
+#define radeon_hdp_flush(rdev) (rdev)->asic->hdp_flush((rdev))
+#define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev))
+#define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev))
+#define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd))
+#define radeon_hpd_set_polarity(rdev, hpd) (rdev)->asic->hpd_set_polarity((rdev), (hpd))
 
 /* Common functions */
 extern int radeon_gart_table_vram_pin(struct radeon_device *rdev);
 extern int radeon_modeset_init(struct radeon_device *rdev);
 extern void radeon_modeset_fini(struct radeon_device *rdev);
 extern bool radeon_card_posted(struct radeon_device *rdev);
+extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
 extern int radeon_clocks_init(struct radeon_device *rdev);
 extern void radeon_clocks_fini(struct radeon_device *rdev);
 extern void radeon_scratch_init(struct radeon_device *rdev);
@@ -984,6 +1015,7 @@ extern void radeon_surface_init(struct radeon_device *rdev);
 extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
 extern void radeon_legacy_set_clock_gating(struct radeon_device *rdev, int enable);
 extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
+extern void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain);
 
 /* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
 struct r100_mc_save {
@@ -1021,7 +1053,7 @@ extern int r100_cp_reset(struct radeon_device *rdev);
 extern void r100_vga_render_disable(struct radeon_device *rdev);
 extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
 						struct radeon_cs_packet *pkt,
-						struct radeon_object *robj);
+						struct radeon_bo *robj);
 extern int r100_cs_parse_packet0(struct radeon_cs_parser *p,
 				struct radeon_cs_packet *pkt,
 				const unsigned *auth, unsigned n,
@@ -1029,6 +1061,8 @@ extern int r100_cs_parse_packet0(struct radeon_cs_parser *p,
 extern int r100_cs_packet_parse(struct radeon_cs_parser *p,
 				struct radeon_cs_packet *pkt,
 				unsigned idx);
+extern void r100_enable_bm(struct radeon_device *rdev);
+extern void r100_set_common_regs(struct radeon_device *rdev);
 
 /* rv200,rv250,rv280 */
 extern void r200_set_safe_registers(struct radeon_device *rdev);
@@ -1104,7 +1138,14 @@ extern void r600_wb_disable(struct radeon_device *rdev);
 extern void r600_scratch_init(struct radeon_device *rdev);
 extern int r600_blit_init(struct radeon_device *rdev);
 extern void r600_blit_fini(struct radeon_device *rdev);
-extern int r600_cp_init_microcode(struct radeon_device *rdev);
+extern int r600_init_microcode(struct radeon_device *rdev);
 extern int r600_gpu_reset(struct radeon_device *rdev);
+/* r600 irq */
+extern int r600_irq_init(struct radeon_device *rdev);
+extern void r600_irq_fini(struct radeon_device *rdev);
+extern void r600_ih_ring_init(struct radeon_device *rdev, unsigned ring_size);
+extern int r600_irq_set(struct radeon_device *rdev);
+
+#include "radeon_object.h"
 
 #endif
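
radeon.h is the hub of this series: struct radeon_object gives way to a TTM-backed struct radeon_bo, the old radeon_object_* prototypes are dropped in favour of the radeon_object.h include now at the bottom of the header, and new asic hooks (hdp_flush and the hpd_* family) plus the r600_ih interrupt ring appear. The struct comments spell out the locking contract: bo->list is guarded by gem.mutex, while the placement fields are only stable while the ttm object is reserved. Note also the quiet one-line fix in the macro block: radeon_set_memory_clock() used to dispatch to set_engine_clock(). A minimal sketch of that locking contract, using the helpers declared above (the function name is hypothetical):

/* Illustrative only: which lock covers which radeon_bo field. */
static void example_touch_bo(struct radeon_device *rdev, struct radeon_bo *bo)
{
	mutex_lock(&rdev->gem.mutex);		/* bo->list lives under gem.mutex */
	list_del_init(&bo->list);
	mutex_unlock(&rdev->gem.mutex);

	if (radeon_bo_reserve(bo, false) == 0) {
		/* placement/placements are only valid while reserved */
		radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_VRAM);
		radeon_bo_unreserve(bo);
	}
}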

+ 70 - 0
drivers/gpu/drm/radeon/radeon_asic.h

@@ -76,6 +76,12 @@ int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
 void r100_bandwidth_update(struct radeon_device *rdev);
 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int r100_ring_test(struct radeon_device *rdev);
+void r100_hdp_flush(struct radeon_device *rdev);
+void r100_hpd_init(struct radeon_device *rdev);
+void r100_hpd_fini(struct radeon_device *rdev);
+bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void r100_hpd_set_polarity(struct radeon_device *rdev,
+			   enum radeon_hpd_id hpd);
 
 static struct radeon_asic r100_asic = {
 	.init = &r100_init,
@@ -107,6 +113,11 @@ static struct radeon_asic r100_asic = {
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &r100_bandwidth_update,
+	.hdp_flush = &r100_hdp_flush,
+	.hpd_init = &r100_hpd_init,
+	.hpd_fini = &r100_hpd_fini,
+	.hpd_sense = &r100_hpd_sense,
+	.hpd_set_polarity = &r100_hpd_set_polarity,
 };
 
 
@@ -162,6 +173,11 @@ static struct radeon_asic r300_asic = {
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &r100_bandwidth_update,
+	.hdp_flush = &r100_hdp_flush,
+	.hpd_init = &r100_hpd_init,
+	.hpd_fini = &r100_hpd_fini,
+	.hpd_sense = &r100_hpd_sense,
+	.hpd_set_polarity = &r100_hpd_set_polarity,
 };
 
 /*
@@ -201,6 +217,11 @@ static struct radeon_asic r420_asic = {
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &r100_bandwidth_update,
+	.hdp_flush = &r100_hdp_flush,
+	.hpd_init = &r100_hpd_init,
+	.hpd_fini = &r100_hpd_fini,
+	.hpd_sense = &r100_hpd_sense,
+	.hpd_set_polarity = &r100_hpd_set_polarity,
 };
 
 
@@ -245,6 +266,11 @@ static struct radeon_asic rs400_asic = {
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &r100_bandwidth_update,
+	.hdp_flush = &r100_hdp_flush,
+	.hpd_init = &r100_hpd_init,
+	.hpd_fini = &r100_hpd_fini,
+	.hpd_sense = &r100_hpd_sense,
+	.hpd_set_polarity = &r100_hpd_set_polarity,
 };
 
 
@@ -263,6 +289,12 @@ int rs600_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
 uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg);
 void rs600_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
 void rs600_bandwidth_update(struct radeon_device *rdev);
+void rs600_hpd_init(struct radeon_device *rdev);
+void rs600_hpd_fini(struct radeon_device *rdev);
+bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void rs600_hpd_set_polarity(struct radeon_device *rdev,
+			    enum radeon_hpd_id hpd);
+
 static struct radeon_asic rs600_asic = {
 	.init = &rs600_init,
 	.fini = &rs600_fini,
@@ -291,6 +323,11 @@ static struct radeon_asic rs600_asic = {
 	.set_pcie_lanes = NULL,
 	.set_clock_gating = &radeon_atom_set_clock_gating,
 	.bandwidth_update = &rs600_bandwidth_update,
+	.hdp_flush = &r100_hdp_flush,
+	.hpd_init = &rs600_hpd_init,
+	.hpd_fini = &rs600_hpd_fini,
+	.hpd_sense = &rs600_hpd_sense,
+	.hpd_set_polarity = &rs600_hpd_set_polarity,
 };
 
 
@@ -334,6 +371,11 @@ static struct radeon_asic rs690_asic = {
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &rs690_bandwidth_update,
+	.hdp_flush = &r100_hdp_flush,
+	.hpd_init = &rs600_hpd_init,
+	.hpd_fini = &rs600_hpd_fini,
+	.hpd_sense = &rs600_hpd_sense,
+	.hpd_set_polarity = &rs600_hpd_set_polarity,
 };
 
 
@@ -381,6 +423,11 @@ static struct radeon_asic rv515_asic = {
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &rv515_bandwidth_update,
+	.hdp_flush = &r100_hdp_flush,
+	.hpd_init = &rs600_hpd_init,
+	.hpd_fini = &rs600_hpd_fini,
+	.hpd_sense = &rs600_hpd_sense,
+	.hpd_set_polarity = &rs600_hpd_set_polarity,
 };
 
 
@@ -419,6 +466,11 @@ static struct radeon_asic r520_asic = {
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &rv515_bandwidth_update,
+	.hdp_flush = &r100_hdp_flush,
+	.hpd_init = &rs600_hpd_init,
+	.hpd_fini = &rs600_hpd_fini,
+	.hpd_sense = &rs600_hpd_sense,
+	.hpd_set_polarity = &rs600_hpd_set_polarity,
 };
 
 /*
@@ -455,6 +507,12 @@ int r600_ring_test(struct radeon_device *rdev);
 int r600_copy_blit(struct radeon_device *rdev,
 		   uint64_t src_offset, uint64_t dst_offset,
 		   unsigned num_pages, struct radeon_fence *fence);
+void r600_hdp_flush(struct radeon_device *rdev);
+void r600_hpd_init(struct radeon_device *rdev);
+void r600_hpd_fini(struct radeon_device *rdev);
+bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
+void r600_hpd_set_polarity(struct radeon_device *rdev,
+			   enum radeon_hpd_id hpd);
 
 static struct radeon_asic r600_asic = {
 	.init = &r600_init,
@@ -470,6 +528,7 @@ static struct radeon_asic r600_asic = {
 	.ring_ib_execute = &r600_ring_ib_execute,
 	.irq_set = &r600_irq_set,
 	.irq_process = &r600_irq_process,
+	.get_vblank_counter = &rs600_get_vblank_counter,
 	.fence_ring_emit = &r600_fence_ring_emit,
 	.cs_parse = &r600_cs_parse,
 	.copy_blit = &r600_copy_blit,
@@ -484,6 +543,11 @@ static struct radeon_asic r600_asic = {
 	.set_surface_reg = r600_set_surface_reg,
 	.clear_surface_reg = r600_clear_surface_reg,
 	.bandwidth_update = &rv515_bandwidth_update,
+	.hdp_flush = &r600_hdp_flush,
+	.hpd_init = &r600_hpd_init,
+	.hpd_fini = &r600_hpd_fini,
+	.hpd_sense = &r600_hpd_sense,
+	.hpd_set_polarity = &r600_hpd_set_polarity,
 };
 
 /*
@@ -509,6 +573,7 @@ static struct radeon_asic rv770_asic = {
 	.ring_ib_execute = &r600_ring_ib_execute,
 	.irq_set = &r600_irq_set,
 	.irq_process = &r600_irq_process,
+	.get_vblank_counter = &rs600_get_vblank_counter,
 	.fence_ring_emit = &r600_fence_ring_emit,
 	.cs_parse = &r600_cs_parse,
 	.copy_blit = &r600_copy_blit,
@@ -523,6 +588,11 @@ static struct radeon_asic rv770_asic = {
 	.set_surface_reg = r600_set_surface_reg,
 	.clear_surface_reg = r600_clear_surface_reg,
 	.bandwidth_update = &rv515_bandwidth_update,
+	.hdp_flush = &r600_hdp_flush,
+	.hpd_init = &r600_hpd_init,
+	.hpd_fini = &r600_hpd_fini,
+	.hpd_sense = &r600_hpd_sense,
+	.hpd_set_polarity = &r600_hpd_set_polarity,
 };
 
 #endif
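
The asic tables above follow a strict sharing scheme: every pre-AVIVO family reuses the r100 hpd implementation, the AVIVO families (rs600 through r520) reuse rs600's, and r600/rv770 get dedicated r600 versions; r600 and rv770 also route get_vblank_counter to the rs600 implementation. Callers stay family-agnostic by always going through the dispatch macros from radeon.h; a hedged sketch (helper name hypothetical):

/* Core code never branches on chip family for hotplug sensing. */
static bool example_connector_plugged(struct radeon_device *rdev,
				      enum radeon_hpd_id hpd)
{
	if (hpd == RADEON_HPD_NONE)
		return false;
	/* expands to rdev->asic->hpd_sense(rdev, hpd) */
	return radeon_hpd_sense(rdev, hpd);
}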

+ 246 - 86
drivers/gpu/drm/radeon/radeon_atombios.c

@@ -47,7 +47,8 @@ radeon_add_atom_connector(struct drm_device *dev,
 			  int connector_type,
 			  struct radeon_i2c_bus_rec *i2c_bus,
 			  bool linkb, uint32_t igp_lane_info,
-			  uint16_t connector_object_id);
+			  uint16_t connector_object_id,
+			  struct radeon_hpd *hpd);
 
 /* from radeon_legacy_encoder.c */
 extern void
@@ -60,16 +61,16 @@ union atom_supported_devices {
 	struct _ATOM_SUPPORTED_DEVICES_INFO_2d1 info_2d1;
 };
 
-static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device
-							   *dev, uint8_t id)
+static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_device *rdev,
+							       uint8_t id)
 {
-	struct radeon_device *rdev = dev->dev_private;
 	struct atom_context *ctx = rdev->mode_info.atom_context;
-	ATOM_GPIO_I2C_ASSIGMENT gpio;
+	ATOM_GPIO_I2C_ASSIGMENT *gpio;
 	struct radeon_i2c_bus_rec i2c;
 	int index = GetIndexIntoMasterTable(DATA, GPIO_I2C_Info);
 	struct _ATOM_GPIO_I2C_INFO *i2c_info;
 	uint16_t data_offset;
+	int i;
 
 	memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
 	i2c.valid = false;
@@ -78,34 +79,121 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_gpio(struct drm_device
 
 	i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
 
-	gpio = i2c_info->asGPIO_Info[id];
-
-	i2c.mask_clk_reg = le16_to_cpu(gpio.usClkMaskRegisterIndex) * 4;
-	i2c.mask_data_reg = le16_to_cpu(gpio.usDataMaskRegisterIndex) * 4;
-	i2c.put_clk_reg = le16_to_cpu(gpio.usClkEnRegisterIndex) * 4;
-	i2c.put_data_reg = le16_to_cpu(gpio.usDataEnRegisterIndex) * 4;
-	i2c.get_clk_reg = le16_to_cpu(gpio.usClkY_RegisterIndex) * 4;
-	i2c.get_data_reg = le16_to_cpu(gpio.usDataY_RegisterIndex) * 4;
-	i2c.a_clk_reg = le16_to_cpu(gpio.usClkA_RegisterIndex) * 4;
-	i2c.a_data_reg = le16_to_cpu(gpio.usDataA_RegisterIndex) * 4;
-	i2c.mask_clk_mask = (1 << gpio.ucClkMaskShift);
-	i2c.mask_data_mask = (1 << gpio.ucDataMaskShift);
-	i2c.put_clk_mask = (1 << gpio.ucClkEnShift);
-	i2c.put_data_mask = (1 << gpio.ucDataEnShift);
-	i2c.get_clk_mask = (1 << gpio.ucClkY_Shift);
-	i2c.get_data_mask = (1 << gpio.ucDataY_Shift);
-	i2c.a_clk_mask = (1 << gpio.ucClkA_Shift);
-	i2c.a_data_mask = (1 << gpio.ucDataA_Shift);
-	i2c.valid = true;
+
+	for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+		gpio = &i2c_info->asGPIO_Info[i];
+
+		if (gpio->sucI2cId.ucAccess == id) {
+			i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
+			i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
+			i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
+			i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
+			i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
+			i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
+			i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
+			i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
+			i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
+			i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
+			i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
+			i2c.en_data_mask = (1 << gpio->ucDataEnShift);
+			i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
+			i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
+			i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
+			i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
+
+			if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
+				i2c.hw_capable = true;
+			else
+				i2c.hw_capable = false;
+
+			if (gpio->sucI2cId.ucAccess == 0xa0)
+				i2c.mm_i2c = true;
+			else
+				i2c.mm_i2c = false;
+
+			i2c.i2c_id = gpio->sucI2cId.ucAccess;
+
+			i2c.valid = true;
+		}
+	}
 
 	return i2c;
 }
 
+static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rdev,
+							u8 id)
+{
+	struct atom_context *ctx = rdev->mode_info.atom_context;
+	struct radeon_gpio_rec gpio;
+	int index = GetIndexIntoMasterTable(DATA, GPIO_Pin_LUT);
+	struct _ATOM_GPIO_PIN_LUT *gpio_info;
+	ATOM_GPIO_PIN_ASSIGNMENT *pin;
+	u16 data_offset, size;
+	int i, num_indices;
+
+	memset(&gpio, 0, sizeof(struct radeon_gpio_rec));
+	gpio.valid = false;
+
+	atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset);
+
+	gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset);
+
+	num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
+
+	for (i = 0; i < num_indices; i++) {
+		pin = &gpio_info->asGPIO_Pin[i];
+		if (id == pin->ucGPIO_ID) {
+			gpio.id = pin->ucGPIO_ID;
+			gpio.reg = pin->usGpioPin_AIndex * 4;
+			gpio.mask = (1 << pin->ucGpioPinBitShift);
+			gpio.valid = true;
+			break;
+		}
+	}
+
+	return gpio;
+}
+
+static struct radeon_hpd radeon_atom_get_hpd_info_from_gpio(struct radeon_device *rdev,
+							    struct radeon_gpio_rec *gpio)
+{
+	struct radeon_hpd hpd;
+	hpd.gpio = *gpio;
+	if (gpio->reg == AVIVO_DC_GPIO_HPD_A) {
+		switch(gpio->mask) {
+		case (1 << 0):
+			hpd.hpd = RADEON_HPD_1;
+			break;
+		case (1 << 8):
+			hpd.hpd = RADEON_HPD_2;
+			break;
+		case (1 << 16):
+			hpd.hpd = RADEON_HPD_3;
+			break;
+		case (1 << 24):
+			hpd.hpd = RADEON_HPD_4;
+			break;
+		case (1 << 26):
+			hpd.hpd = RADEON_HPD_5;
+			break;
+		case (1 << 28):
+			hpd.hpd = RADEON_HPD_6;
+			break;
+		default:
+			hpd.hpd = RADEON_HPD_NONE;
+			break;
+		}
+	} else
+		hpd.hpd = RADEON_HPD_NONE;
+	return hpd;
+}
+
 static bool radeon_atom_apply_quirks(struct drm_device *dev,
 				     uint32_t supported_device,
 				     int *connector_type,
 				     struct radeon_i2c_bus_rec *i2c_bus,
-				     uint16_t *line_mux)
+				     uint16_t *line_mux,
+				     struct radeon_hpd *hpd)
 {
 
 	/* Asus M2A-VM HDMI board lists the DVI port as HDMI */
@@ -135,6 +223,23 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
 		}
 	}
 
+	/* HIS X1300 is DVI+VGA, not DVI+DVI */
+	if ((dev->pdev->device == 0x7146) &&
+	    (dev->pdev->subsystem_vendor == 0x17af) &&
+	    (dev->pdev->subsystem_device == 0x2058)) {
+		if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
+			return false;
+	}
+
+	/* Gigabyte X1300 is DVI+VGA, not DVI+DVI */
+	if ((dev->pdev->device == 0x7142) &&
+	    (dev->pdev->subsystem_vendor == 0x1458) &&
+	    (dev->pdev->subsystem_device == 0x2134)) {
+		if (supported_device == ATOM_DEVICE_DFP1_SUPPORT)
+			return false;
+	}
+
+
 	/* Funky macbooks */
 	if ((dev->pdev->device == 0x71C5) &&
 	    (dev->pdev->subsystem_vendor == 0x106b) &&
@@ -172,6 +277,15 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
 		}
 	}
 
+	/* Acer laptop reports DVI-D as DVI-I */
+	if ((dev->pdev->device == 0x95c4) &&
+	    (dev->pdev->subsystem_vendor == 0x1025) &&
+	    (dev->pdev->subsystem_device == 0x013c)) {
+		if ((*connector_type == DRM_MODE_CONNECTOR_DVII) &&
+		    (supported_device == ATOM_DEVICE_DFP1_SUPPORT))
+			*connector_type = DRM_MODE_CONNECTOR_DVID;
+	}
+
 	return true;
 }
 
@@ -240,16 +354,18 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 	struct radeon_mode_info *mode_info = &rdev->mode_info;
 	struct atom_context *ctx = mode_info->atom_context;
 	int index = GetIndexIntoMasterTable(DATA, Object_Header);
-	uint16_t size, data_offset;
-	uint8_t frev, crev, line_mux = 0;
+	u16 size, data_offset;
+	u8 frev, crev;
 	ATOM_CONNECTOR_OBJECT_TABLE *con_obj;
 	ATOM_DISPLAY_OBJECT_PATH_TABLE *path_obj;
 	ATOM_OBJECT_HEADER *obj_header;
 	int i, j, path_size, device_support;
 	int connector_type;
-	uint16_t igp_lane_info, conn_id, connector_object_id;
+	u16 igp_lane_info, conn_id, connector_object_id;
 	bool linkb;
 	struct radeon_i2c_bus_rec ddc_bus;
+	struct radeon_gpio_rec gpio;
+	struct radeon_hpd hpd;
 
 	atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
 
@@ -276,7 +392,6 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 		path = (ATOM_DISPLAY_OBJECT_PATH *) addr;
 		path_size += le16_to_cpu(path->usSize);
 		linkb = false;
-
 		if (device_support & le16_to_cpu(path->usDeviceTag)) {
 			uint8_t con_obj_id, con_obj_num, con_obj_type;
 
@@ -377,10 +492,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 				}
 			}
 
-			/* look up gpio for ddc */
+			/* look up gpio for ddc, hpd */
 			if ((le16_to_cpu(path->usDeviceTag) &
-			     (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT))
-			    == 0) {
+			     (ATOM_DEVICE_TV_SUPPORT | ATOM_DEVICE_CV_SUPPORT)) == 0) {
 				for (j = 0; j < con_obj->ucNumberOfObjects; j++) {
 					if (le16_to_cpu(path->usConnObjectId) ==
 					    le16_to_cpu(con_obj->asObjects[j].
@@ -394,21 +508,34 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 								 asObjects[j].
 								 usRecordOffset));
 						ATOM_I2C_RECORD *i2c_record;
+						ATOM_HPD_INT_RECORD *hpd_record;
+						ATOM_I2C_ID_CONFIG_ACCESS *i2c_config;
+						hpd.hpd = RADEON_HPD_NONE;
 
 						while (record->ucRecordType > 0
 						       && record->
 						       ucRecordType <=
 						       ATOM_MAX_OBJECT_RECORD_NUMBER) {
-							switch (record->
-								ucRecordType) {
+							switch (record->ucRecordType) {
 							case ATOM_I2C_RECORD_TYPE:
 								i2c_record =
-								    (ATOM_I2C_RECORD
-								     *) record;
-								line_mux =
-								    i2c_record->
-								    sucI2cId.
-								    bfI2C_LineMux;
+								    (ATOM_I2C_RECORD *)
+									record;
+								i2c_config =
+									(ATOM_I2C_ID_CONFIG_ACCESS *)
+									&i2c_record->sucI2cId;
+								ddc_bus = radeon_lookup_i2c_gpio(rdev,
+												 i2c_config->
+												 ucAccess);
+								break;
+							case ATOM_HPD_INT_RECORD_TYPE:
+								hpd_record =
+									(ATOM_HPD_INT_RECORD *)
+									record;
+								gpio = radeon_lookup_gpio(rdev,
+											  hpd_record->ucHPDIntGPIOID);
+								hpd = radeon_atom_get_hpd_info_from_gpio(rdev, &gpio);
+								hpd.plugged_state = hpd_record->ucPlugged_PinState;
 								break;
 							}
 							record =
@@ -421,24 +548,16 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 						break;
 					}
 				}
-			} else
-				line_mux = 0;
-
-			if ((le16_to_cpu(path->usDeviceTag) ==
-			     ATOM_DEVICE_TV1_SUPPORT)
-			    || (le16_to_cpu(path->usDeviceTag) ==
-				ATOM_DEVICE_TV2_SUPPORT)
-			    || (le16_to_cpu(path->usDeviceTag) ==
-				ATOM_DEVICE_CV_SUPPORT))
+			} else {
+				hpd.hpd = RADEON_HPD_NONE;
 				ddc_bus.valid = false;
-			else
-				ddc_bus = radeon_lookup_gpio(dev, line_mux);
+			}
 
 			conn_id = le16_to_cpu(path->usConnObjectId);
 
 			if (!radeon_atom_apply_quirks
 			    (dev, le16_to_cpu(path->usDeviceTag), &connector_type,
-			     &ddc_bus, &conn_id))
+			     &ddc_bus, &conn_id, &hpd))
 				continue;
 
 			radeon_add_atom_connector(dev,
@@ -447,7 +566,8 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
 							      usDeviceTag),
 						  connector_type, &ddc_bus,
 						  linkb, igp_lane_info,
-						  connector_object_id);
+						  connector_object_id,
+						  &hpd);
 
 		}
 	}
@@ -502,6 +622,7 @@ struct bios_connector {
 	uint16_t devices;
 	int connector_type;
 	struct radeon_i2c_bus_rec ddc_bus;
+	struct radeon_hpd hpd;
 };
 
 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
@@ -517,7 +638,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 	uint16_t device_support;
 	uint8_t dac;
 	union atom_supported_devices *supported_devices;
-	int i, j;
+	int i, j, max_device;
 	struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
 
 	atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
@@ -527,7 +648,12 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 
 	device_support = le16_to_cpu(supported_devices->info.usDeviceSupport);
 
-	for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+	if (frev > 1)
+		max_device = ATOM_MAX_SUPPORTED_DEVICE;
+	else
+		max_device = ATOM_MAX_SUPPORTED_DEVICE_INFO;
+
+	for (i = 0; i < max_device; i++) {
 		ATOM_CONNECTOR_INFO_I2C ci =
 		    supported_devices->info.asConnInfo[i];
 
@@ -553,22 +679,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 
 		dac = ci.sucConnectorInfo.sbfAccess.bfAssociatedDAC;
 
-		if ((rdev->family == CHIP_RS690) ||
-		    (rdev->family == CHIP_RS740)) {
-			if ((i == ATOM_DEVICE_DFP2_INDEX)
-			    && (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 2))
-				bios_connectors[i].line_mux =
-				    ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1;
-			else if ((i == ATOM_DEVICE_DFP3_INDEX)
-				 && (ci.sucI2cId.sbfAccess.bfI2C_LineMux == 1))
-				bios_connectors[i].line_mux =
-				    ci.sucI2cId.sbfAccess.bfI2C_LineMux + 1;
-			else
-				bios_connectors[i].line_mux =
-				    ci.sucI2cId.sbfAccess.bfI2C_LineMux;
-		} else
-			bios_connectors[i].line_mux =
-			    ci.sucI2cId.sbfAccess.bfI2C_LineMux;
+		bios_connectors[i].line_mux =
+			ci.sucI2cId.ucAccess;
 
 		/* give tv unique connector ids */
 		if (i == ATOM_DEVICE_TV1_INDEX) {
@@ -582,8 +694,30 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 			bios_connectors[i].line_mux = 52;
 		} else
 			bios_connectors[i].ddc_bus =
-			    radeon_lookup_gpio(dev,
-					       bios_connectors[i].line_mux);
+			    radeon_lookup_i2c_gpio(rdev,
+						   bios_connectors[i].line_mux);
+
+		if ((crev > 1) && (frev > 1)) {
+			u8 isb = supported_devices->info_2d1.asIntSrcInfo[i].ucIntSrcBitmap;
+			switch (isb) {
+			case 0x4:
+				bios_connectors[i].hpd.hpd = RADEON_HPD_1;
+				break;
+			case 0xa:
+				bios_connectors[i].hpd.hpd = RADEON_HPD_2;
+				break;
+			default:
+				bios_connectors[i].hpd.hpd = RADEON_HPD_NONE;
+				break;
+			}
+		} else {
+			if (i == ATOM_DEVICE_DFP1_INDEX)
+				bios_connectors[i].hpd.hpd = RADEON_HPD_1;
+			else if (i == ATOM_DEVICE_DFP2_INDEX)
+				bios_connectors[i].hpd.hpd = RADEON_HPD_2;
+			else
+				bios_connectors[i].hpd.hpd = RADEON_HPD_NONE;
+		}
 
 		/* Always set the connector type to VGA for CRT1/CRT2. if they are
 		 * shared with a DVI port, we'll pick up the DVI connector when we
@@ -595,7 +729,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 
 		if (!radeon_atom_apply_quirks
 		    (dev, (1 << i), &bios_connectors[i].connector_type,
-		     &bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux))
+		     &bios_connectors[i].ddc_bus, &bios_connectors[i].line_mux,
+		     &bios_connectors[i].hpd))
 			continue;
 
 		bios_connectors[i].valid = true;
@@ -617,9 +752,9 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 	}
 
 	/* combine shared connectors */
-	for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+	for (i = 0; i < max_device; i++) {
 		if (bios_connectors[i].valid) {
-			for (j = 0; j < ATOM_MAX_SUPPORTED_DEVICE; j++) {
+			for (j = 0; j < max_device; j++) {
 				if (bios_connectors[j].valid && (i != j)) {
 					if (bios_connectors[i].line_mux ==
 					    bios_connectors[j].line_mux) {
@@ -643,6 +778,10 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 							bios_connectors[i].
 							    connector_type =
 							    DRM_MODE_CONNECTOR_DVII;
+							if (bios_connectors[j].devices &
+							    (ATOM_DEVICE_DFP_SUPPORT))
+								bios_connectors[i].hpd =
+									bios_connectors[j].hpd;
 							bios_connectors[j].
 							    valid = false;
 						}
@@ -653,7 +792,7 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 	}
 
 	/* add the connectors */
-	for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+	for (i = 0; i < max_device; i++) {
 		if (bios_connectors[i].valid) {
 			uint16_t connector_object_id =
 				atombios_get_connector_object_id(dev,
@@ -666,7 +805,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
 						  connector_type,
 						  &bios_connectors[i].ddc_bus,
 						  false, 0,
-						  connector_object_id);
+						  connector_object_id,
+						  &bios_connectors[i].hpd);
 		}
 	}
 
@@ -731,7 +871,8 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
 			 * pre-DCE 3.0 r6xx hardware.  This might need to be adjusted per
 			 * family.
 			 */
-			p1pll->pll_out_min = 64800;
+			if (!radeon_new_pll)
+				p1pll->pll_out_min = 64800;
 		}
 
 		p1pll->pll_in_min =
@@ -861,6 +1002,7 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
 	struct _ATOM_SPREAD_SPECTRUM_INFO *ss_info;
 	uint8_t frev, crev;
 	struct radeon_atom_ss *ss = NULL;
+	int i;
 
 	if (id > ATOM_MAX_SS_ENTRY)
 		return NULL;
@@ -878,12 +1020,17 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
 		if (!ss)
 			return NULL;
 
-		ss->percentage = le16_to_cpu(ss_info->asSS_Info[id].usSpreadSpectrumPercentage);
-		ss->type = ss_info->asSS_Info[id].ucSpreadSpectrumType;
-		ss->step = ss_info->asSS_Info[id].ucSS_Step;
-		ss->delay = ss_info->asSS_Info[id].ucSS_Delay;
-		ss->range = ss_info->asSS_Info[id].ucSS_Range;
-		ss->refdiv = ss_info->asSS_Info[id].ucRecommendedRef_Div;
+		for (i = 0; i < ATOM_MAX_SS_ENTRY; i++) {
+			if (ss_info->asSS_Info[i].ucSS_Id == id) {
+				ss->percentage =
+					le16_to_cpu(ss_info->asSS_Info[i].usSpreadSpectrumPercentage);
+				ss->type = ss_info->asSS_Info[i].ucSpreadSpectrumType;
+				ss->step = ss_info->asSS_Info[i].ucSS_Step;
+				ss->delay = ss_info->asSS_Info[i].ucSS_Delay;
+				ss->range = ss_info->asSS_Info[i].ucSS_Range;
+				ss->refdiv = ss_info->asSS_Info[i].ucRecommendedRef_Div;
+			}
+		}
 	}
 	return ss;
 }
@@ -901,7 +1048,7 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
 	struct radeon_device *rdev = dev->dev_private;
 	struct radeon_mode_info *mode_info = &rdev->mode_info;
 	int index = GetIndexIntoMasterTable(DATA, LVDS_Info);
-	uint16_t data_offset;
+	uint16_t data_offset, misc;
 	union lvds_info *lvds_info;
 	uint8_t frev, crev;
 	struct radeon_encoder_atom_dig *lvds = NULL;
@@ -940,6 +1087,19 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
 		lvds->panel_pwr_delay =
 		    le16_to_cpu(lvds_info->info.usOffDelayInMs);
 		lvds->lvds_misc = lvds_info->info.ucLVDS_Misc;
+
+		misc = le16_to_cpu(lvds_info->info.sLCDTiming.susModeMiscInfo.usAccess);
+		if (misc & ATOM_VSYNC_POLARITY)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_NVSYNC;
+		if (misc & ATOM_HSYNC_POLARITY)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_NHSYNC;
+		if (misc & ATOM_COMPOSITESYNC)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_CSYNC;
+		if (misc & ATOM_INTERLACE)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_INTERLACE;
+		if (misc & ATOM_DOUBLE_CLOCK_MODE)
+			lvds->native_mode.flags |= DRM_MODE_FLAG_DBLSCAN;
+
 		/* set crtc values */
 		drm_mode_set_crtcinfo(&lvds->native_mode, CRTC_INTERLACE_HALVE_V);
 

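Two parsing strategies change in radeon_atombios.c: i2c GPIO entries are now found by scanning the table for a matching sucI2cId.ucAccess instead of indexing asGPIO_Info[id] directly, and spread-spectrum entries are likewise matched on ucSS_Id. The series also adds the GPIO-to-HPD mapping, decoded from the AVIVO_DC_GPIO_HPD_A bit position by the switch above. That switch could equally be table-driven; a hypothetical equivalent, for illustration:

/* Table-driven version of the mask -> hpd id switch (illustrative). */
static enum radeon_hpd_id example_hpd_from_mask(u32 mask)
{
	static const struct {
		u32 mask;
		enum radeon_hpd_id id;
	} map[] = {
		{ 1 << 0,  RADEON_HPD_1 }, { 1 << 8,  RADEON_HPD_2 },
		{ 1 << 16, RADEON_HPD_3 }, { 1 << 24, RADEON_HPD_4 },
		{ 1 << 26, RADEON_HPD_5 }, { 1 << 28, RADEON_HPD_6 },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(map); i++)
		if (map[i].mask == mask)
			return map[i].id;
	return RADEON_HPD_NONE;
}
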
+ 26 - 10
drivers/gpu/drm/radeon/radeon_benchmark.c

@@ -29,8 +29,8 @@
 void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
 			   unsigned sdomain, unsigned ddomain)
 {
-	struct radeon_object *dobj = NULL;
-	struct radeon_object *sobj = NULL;
+	struct radeon_bo *dobj = NULL;
+	struct radeon_bo *sobj = NULL;
 	struct radeon_fence *fence = NULL;
 	uint64_t saddr, daddr;
 	unsigned long start_jiffies;
@@ -41,19 +41,27 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
 
 	size = bsize;
 	n = 1024;
-	r = radeon_object_create(rdev, NULL, size, true, sdomain, false, &sobj);
+	r = radeon_bo_create(rdev, NULL, size, true, sdomain, &sobj);
 	if (r) {
 		goto out_cleanup;
 	}
-	r = radeon_object_pin(sobj, sdomain, &saddr);
+	r = radeon_bo_reserve(sobj, false);
+	if (unlikely(r != 0))
+		goto out_cleanup;
+	r = radeon_bo_pin(sobj, sdomain, &saddr);
+	radeon_bo_unreserve(sobj);
 	if (r) {
 		goto out_cleanup;
 	}
-	r = radeon_object_create(rdev, NULL, size, true, ddomain, false, &dobj);
+	r = radeon_bo_create(rdev, NULL, size, true, ddomain, &dobj);
 	if (r) {
 		goto out_cleanup;
 	}
-	r = radeon_object_pin(dobj, ddomain, &daddr);
+	r = radeon_bo_reserve(dobj, false);
+	if (unlikely(r != 0))
+		goto out_cleanup;
+	r = radeon_bo_pin(dobj, ddomain, &daddr);
+	radeon_bo_unreserve(dobj);
 	if (r) {
 		goto out_cleanup;
 	}
@@ -109,12 +117,20 @@ void radeon_benchmark_move(struct radeon_device *rdev, unsigned bsize,
 	}
 out_cleanup:
 	if (sobj) {
-		radeon_object_unpin(sobj);
-		radeon_object_unref(&sobj);
+		r = radeon_bo_reserve(sobj, false);
+		if (likely(r == 0)) {
+			radeon_bo_unpin(sobj);
+			radeon_bo_unreserve(sobj);
+		}
+		radeon_bo_unref(&sobj);
 	}
 	if (dobj) {
-		radeon_object_unpin(dobj);
-		radeon_object_unref(&dobj);
+		r = radeon_bo_reserve(dobj, false);
+		if (likely(r == 0)) {
+			radeon_bo_unpin(dobj);
+			radeon_bo_unreserve(dobj);
+		}
+		radeon_bo_unref(&dobj);
 	}
 	if (fence) {
 		radeon_fence_unref(&fence);
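
The benchmark conversion shows the caller-visible cost of the new object API: radeon_bo_pin() and radeon_bo_unpin() are only valid on a reserved buffer, so every pin and unpin is now bracketed by reserve/unreserve, including the error-cleanup path. The recurring pattern, factored into a sketch (helper name hypothetical, calls as used above):

static int example_pin_bo(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
{
	int r;

	r = radeon_bo_reserve(bo, false);	/* take tbo.reserved */
	if (unlikely(r != 0))
		return r;
	r = radeon_bo_pin(bo, domain, gpu_addr);
	radeon_bo_unreserve(bo);
	return r;
}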

+ 21 - 2
drivers/gpu/drm/radeon/radeon_clocks.c

@@ -44,6 +44,10 @@ uint32_t radeon_legacy_get_engine_clock(struct radeon_device *rdev)
 
 	ref_div =
 	    RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
+
+	if (ref_div == 0)
+		return 0;
+
 	sclk = fb_div / ref_div;
 
 	post_div = RREG32_PLL(RADEON_SCLK_CNTL) & RADEON_SCLK_SRC_SEL_MASK;
@@ -70,6 +74,10 @@ static uint32_t radeon_legacy_get_memory_clock(struct radeon_device *rdev)
 
 	ref_div =
 	    RREG32_PLL(RADEON_M_SPLL_REF_FB_DIV) & RADEON_M_SPLL_REF_DIV_MASK;
+
+	if (ref_div == 0)
+		return 0;
+
 	mclk = fb_div / ref_div;
 
 	post_div = RREG32_PLL(RADEON_MCLK_CNTL) & 0x7;
@@ -98,8 +106,19 @@ void radeon_get_clock_info(struct drm_device *dev)
 		ret = radeon_combios_get_clock_info(dev);
 
 	if (ret) {
-		if (p1pll->reference_div < 2)
-			p1pll->reference_div = 12;
+		if (p1pll->reference_div < 2) {
+			if (!ASIC_IS_AVIVO(rdev)) {
+				u32 tmp = RREG32_PLL(RADEON_PPLL_REF_DIV);
+				if (ASIC_IS_R300(rdev))
+					p1pll->reference_div =
+						(tmp & R300_PPLL_REF_DIV_ACC_MASK) >> R300_PPLL_REF_DIV_ACC_SHIFT;
+				else
+					p1pll->reference_div = tmp & RADEON_PPLL_REF_DIV_MASK;
+				if (p1pll->reference_div < 2)
+					p1pll->reference_div = 12;
+			} else
+				p1pll->reference_div = 12;
+		}
 		if (p2pll->reference_div < 2)
 			p2pll->reference_div = 12;
 		if (rdev->family < CHIP_RS600) {
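
Both legacy clock readbacks now bail out when the reference divider reads back as zero, which would otherwise divide by zero on an unprogrammed PLL, and the reference_div fallback consults the PPLL_REF_DIV register before assuming 12. The guard in isolation (illustrative helper):

static u32 example_safe_clock(u32 fb_div, u32 ref_div)
{
	if (ref_div == 0)	/* unprogrammed PLL: report 0 rather than oops */
		return 0;
	return fb_div / ref_div;
}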

File diff suppressed because it is too large
+ 353 - 140
drivers/gpu/drm/radeon/radeon_combios.c


+ 175 - 19
drivers/gpu/drm/radeon/radeon_connectors.c

@@ -40,6 +40,26 @@ radeon_atombios_connected_scratch_regs(struct drm_connector *connector,
 				       struct drm_encoder *encoder,
 				       bool connected);
 
+void radeon_connector_hotplug(struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+
+	if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
+
+	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+		if (radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+			if (radeon_dp_needs_link_train(radeon_connector)) {
+				if (connector->encoder)
+					dp_link_train(connector->encoder, connector);
+			}
+		}
+	}
+
+}
+
 static void radeon_property_change_mode(struct drm_encoder *encoder)
 {
 	struct drm_crtc *crtc = encoder->crtc;
@@ -445,10 +465,10 @@ static enum drm_connector_status radeon_lvds_detect(struct drm_connector *connec
 		ret = connector_status_connected;
 	else {
 		if (radeon_connector->ddc_bus) {
-			radeon_i2c_do_lock(radeon_connector, 1);
+			radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
 			radeon_connector->edid = drm_get_edid(&radeon_connector->base,
 							      &radeon_connector->ddc_bus->adapter);
-			radeon_i2c_do_lock(radeon_connector, 0);
+			radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
 			if (radeon_connector->edid)
 				ret = connector_status_connected;
 		}
@@ -553,17 +573,17 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
 	if (!encoder)
 		ret = connector_status_disconnected;
 
-	radeon_i2c_do_lock(radeon_connector, 1);
+	radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
 	dret = radeon_ddc_probe(radeon_connector);
-	radeon_i2c_do_lock(radeon_connector, 0);
+	radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
 	if (dret) {
 		if (radeon_connector->edid) {
 			kfree(radeon_connector->edid);
 			radeon_connector->edid = NULL;
 		}
-		radeon_i2c_do_lock(radeon_connector, 1);
+		radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
 		radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
-		radeon_i2c_do_lock(radeon_connector, 0);
+		radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
 
 		if (!radeon_connector->edid) {
 			DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
@@ -708,17 +728,17 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
 	enum drm_connector_status ret = connector_status_disconnected;
 	bool dret;
 
-	radeon_i2c_do_lock(radeon_connector, 1);
+	radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
 	dret = radeon_ddc_probe(radeon_connector);
-	radeon_i2c_do_lock(radeon_connector, 0);
+	radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
 	if (dret) {
 		if (radeon_connector->edid) {
 			kfree(radeon_connector->edid);
 			radeon_connector->edid = NULL;
 		}
-		radeon_i2c_do_lock(radeon_connector, 1);
+		radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
 		radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
-		radeon_i2c_do_lock(radeon_connector, 0);
+		radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
 
 		if (!radeon_connector->edid) {
 			DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
@@ -735,6 +755,39 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
 				ret = connector_status_disconnected;
 			} else
 				ret = connector_status_connected;
+
+			/* multiple connectors on the same encoder with the same ddc line
+			 * This tends to be HDMI and DVI on the same encoder with the
+			 * same ddc line.  If the edid says HDMI, consider the HDMI port
+			 * connected and the DVI port disconnected.  If the edid doesn't
+			 * say HDMI, vice versa.
+			 */
+			if (radeon_connector->shared_ddc && connector_status_connected) {
+				struct drm_device *dev = connector->dev;
+				struct drm_connector *list_connector;
+				struct radeon_connector *list_radeon_connector;
+				list_for_each_entry(list_connector, &dev->mode_config.connector_list, head) {
+					if (connector == list_connector)
+						continue;
+					list_radeon_connector = to_radeon_connector(list_connector);
+					if (radeon_connector->devices == list_radeon_connector->devices) {
+						if (drm_detect_hdmi_monitor(radeon_connector->edid)) {
+							if (connector->connector_type == DRM_MODE_CONNECTOR_DVID) {
+								kfree(radeon_connector->edid);
+								radeon_connector->edid = NULL;
+								ret = connector_status_disconnected;
+							}
+						} else {
+							if ((connector->connector_type == DRM_MODE_CONNECTOR_HDMIA) ||
+							    (connector->connector_type == DRM_MODE_CONNECTOR_HDMIB)) {
+								kfree(radeon_connector->edid);
+								radeon_connector->edid = NULL;
+								ret = connector_status_disconnected;
+							}
+						}
+					}
+				}
+			}
 		}
 	}
 
@@ -863,6 +916,91 @@ struct drm_connector_funcs radeon_dvi_connector_funcs = {
 	.force = radeon_dvi_force,
 };
 
+static void radeon_dp_connector_destroy(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+
+	if (radeon_connector->ddc_bus)
+		radeon_i2c_destroy(radeon_connector->ddc_bus);
+	if (radeon_connector->edid)
+		kfree(radeon_connector->edid);
+	if (radeon_dig_connector->dp_i2c_bus)
+		radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus);
+	kfree(radeon_connector->con_priv);
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(connector);
+}
+
+static int radeon_dp_get_modes(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	int ret;
+
+	ret = radeon_ddc_get_modes(radeon_connector);
+	return ret;
+}
+
+static enum drm_connector_status radeon_dp_detect(struct drm_connector *connector)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	enum drm_connector_status ret = connector_status_disconnected;
+	struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+	u8 sink_type;
+
+	if (radeon_connector->edid) {
+		kfree(radeon_connector->edid);
+		radeon_connector->edid = NULL;
+	}
+
+	sink_type = radeon_dp_getsinktype(radeon_connector);
+	if (sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+		if (radeon_dp_getdpcd(radeon_connector)) {
+			radeon_dig_connector->dp_sink_type = sink_type;
+			ret = connector_status_connected;
+		}
+	} else {
+		radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
+		if (radeon_ddc_probe(radeon_connector)) {
+			radeon_dig_connector->dp_sink_type = sink_type;
+			ret = connector_status_connected;
+		}
+		radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
+	}
+
+	return ret;
+}
+
+static int radeon_dp_mode_valid(struct drm_connector *connector,
+				  struct drm_display_mode *mode)
+{
+	struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+	struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
+
+	/* XXX check mode bandwidth */
+
+	if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
+		return radeon_dp_mode_valid_helper(radeon_connector, mode);
+	else
+		return MODE_OK;
+}
+
+struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
+	.get_modes = radeon_dp_get_modes,
+	.mode_valid = radeon_dp_mode_valid,
+	.best_encoder = radeon_dvi_encoder,
+};
+
+struct drm_connector_funcs radeon_dp_connector_funcs = {
+	.dpms = drm_helper_connector_dpms,
+	.detect = radeon_dp_detect,
+	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = radeon_connector_set_property,
+	.destroy = radeon_dp_connector_destroy,
+	.force = radeon_dvi_force,
+};
+
 void
 radeon_add_atom_connector(struct drm_device *dev,
 			  uint32_t connector_id,
@@ -871,7 +1009,8 @@ radeon_add_atom_connector(struct drm_device *dev,
 			  struct radeon_i2c_bus_rec *i2c_bus,
 			  bool linkb,
 			  uint32_t igp_lane_info,
-			  uint16_t connector_object_id)
+			  uint16_t connector_object_id,
+			  struct radeon_hpd *hpd)
 {
 	struct radeon_device *rdev = dev->dev_private;
 	struct drm_connector *connector;
@@ -911,6 +1050,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 	radeon_connector->devices = supported_device;
 	radeon_connector->shared_ddc = shared_ddc;
 	radeon_connector->connector_object_id = connector_object_id;
+	radeon_connector->hpd = *hpd;
 	switch (connector_type) {
 	case DRM_MODE_CONNECTOR_VGA:
 		drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
@@ -963,10 +1103,12 @@ radeon_add_atom_connector(struct drm_device *dev,
 		drm_connector_attach_property(&radeon_connector->base,
 					      rdev->mode_info.coherent_mode_property,
 					      1);
-		radeon_connector->dac_load_detect = true;
-		drm_connector_attach_property(&radeon_connector->base,
-					      rdev->mode_info.load_detect_property,
-					      1);
+		if (connector_type == DRM_MODE_CONNECTOR_DVII) {
+			radeon_connector->dac_load_detect = true;
+			drm_connector_attach_property(&radeon_connector->base,
+						      rdev->mode_info.load_detect_property,
+						      1);
+		}
 		break;
 	case DRM_MODE_CONNECTOR_HDMIA:
 	case DRM_MODE_CONNECTOR_HDMIB:
@@ -997,16 +1139,23 @@ radeon_add_atom_connector(struct drm_device *dev,
 		radeon_dig_connector->linkb = linkb;
 		radeon_dig_connector->igp_lane_info = igp_lane_info;
 		radeon_connector->con_priv = radeon_dig_connector;
-		drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
-		ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
+		drm_connector_init(dev, &radeon_connector->base, &radeon_dp_connector_funcs, connector_type);
+		ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dp_connector_helper_funcs);
 		if (ret)
 			goto failed;
 		if (i2c_bus->valid) {
+			/* add DP i2c bus */
+			radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
+			if (!radeon_dig_connector->dp_i2c_bus)
+				goto failed;
 			radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
 			if (!radeon_connector->ddc_bus)
 				goto failed;
 		}
 		subpixel_order = SubPixelHorizontalRGB;
+		drm_connector_attach_property(&radeon_connector->base,
+					      rdev->mode_info.coherent_mode_property,
+					      1);
 		break;
 	case DRM_MODE_CONNECTOR_SVIDEO:
 	case DRM_MODE_CONNECTOR_Composite:
@@ -1020,6 +1169,9 @@ radeon_add_atom_connector(struct drm_device *dev,
 			drm_connector_attach_property(&radeon_connector->base,
 						      rdev->mode_info.load_detect_property,
 						      1);
+			drm_connector_attach_property(&radeon_connector->base,
+						      rdev->mode_info.tv_std_property,
+						      1);
 		}
 		break;
 	case DRM_MODE_CONNECTOR_LVDS:
@@ -1038,7 +1190,6 @@ radeon_add_atom_connector(struct drm_device *dev,
 			if (!radeon_connector->ddc_bus)
 				goto failed;
 		}
-		drm_mode_create_scaling_mode_property(dev);
 		drm_connector_attach_property(&radeon_connector->base,
 					      dev->mode_config.scaling_mode_property,
 					      DRM_MODE_SCALE_FULLSCREEN);
@@ -1063,7 +1214,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
 			    uint32_t supported_device,
 			    int connector_type,
 			    struct radeon_i2c_bus_rec *i2c_bus,
-			    uint16_t connector_object_id)
+			    uint16_t connector_object_id,
+			    struct radeon_hpd *hpd)
 {
 	struct radeon_device *rdev = dev->dev_private;
 	struct drm_connector *connector;
@@ -1093,6 +1245,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
 	radeon_connector->connector_id = connector_id;
 	radeon_connector->devices = supported_device;
 	radeon_connector->connector_object_id = connector_object_id;
+	radeon_connector->hpd = *hpd;
 	switch (connector_type) {
 	case DRM_MODE_CONNECTOR_VGA:
 		drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
@@ -1160,6 +1313,9 @@ radeon_add_legacy_connector(struct drm_device *dev,
 			drm_connector_attach_property(&radeon_connector->base,
 						      rdev->mode_info.load_detect_property,
 						      1);
+			drm_connector_attach_property(&radeon_connector->base,
+						      rdev->mode_info.tv_std_property,
+						      1);
 		}
 		break;
 	case DRM_MODE_CONNECTOR_LVDS:
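
Besides the hotplug handler and the DisplayPort connector functions, note that radeon_i2c_do_lock() now takes the i2c bus itself rather than the connector. The subtlest addition is the shared-DDC disambiguation described in the comment inside radeon_dvi_detect(): when a DVI and an HDMI connector share one DDC line, the EDID decides which of the two reports connected. Reduced to a predicate, under the assumption that both connectors saw the same EDID (helper name hypothetical):

static bool example_keep_connected(int connector_type, struct edid *edid)
{
	bool hdmi_sink = drm_detect_hdmi_monitor(edid);

	if (connector_type == DRM_MODE_CONNECTOR_DVID)
		return !hdmi_sink;	/* HDMI sink: the HDMI port wins */
	if (connector_type == DRM_MODE_CONNECTOR_HDMIA ||
	    connector_type == DRM_MODE_CONNECTOR_HDMIB)
		return hdmi_sink;	/* non-HDMI sink: the DVI port wins */
	return true;
}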

+ 4 - 41
drivers/gpu/drm/radeon/radeon_cp.c

@@ -1941,8 +1941,8 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev)
 	for (t = 0; t < dev_priv->usec_timeout; t++) {
 		u32 done_age = GET_SCRATCH(dev_priv, 1);
 		DRM_DEBUG("done_age = %d\n", done_age);
-		for (i = start; i < dma->buf_count; i++) {
-			buf = dma->buflist[i];
+		for (i = 0; i < dma->buf_count; i++) {
+			buf = dma->buflist[start];
 			buf_priv = buf->dev_private;
 			if (buf->file_priv == NULL || (buf->pending &&
 						       buf_priv->age <=
@@ -1951,7 +1951,8 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev)
 				buf->pending = 0;
 				return buf;
 			}
-			start = 0;
+			if (++start >= dma->buf_count)
+				start = 0;
 		}
 
 		if (t) {
@@ -1960,47 +1961,9 @@ struct drm_buf *radeon_freelist_get(struct drm_device * dev)
 		}
 	}
 
-	DRM_DEBUG("returning NULL!\n");
 	return NULL;
 }
 
-#if 0
-struct drm_buf *radeon_freelist_get(struct drm_device * dev)
-{
-	struct drm_device_dma *dma = dev->dma;
-	drm_radeon_private_t *dev_priv = dev->dev_private;
-	drm_radeon_buf_priv_t *buf_priv;
-	struct drm_buf *buf;
-	int i, t;
-	int start;
-	u32 done_age;
-
-	done_age = radeon_read_ring_rptr(dev_priv, RADEON_SCRATCHOFF(1));
-	if (++dev_priv->last_buf >= dma->buf_count)
-		dev_priv->last_buf = 0;
-
-	start = dev_priv->last_buf;
-	dev_priv->stats.freelist_loops++;
-
-	for (t = 0; t < 2; t++) {
-		for (i = start; i < dma->buf_count; i++) {
-			buf = dma->buflist[i];
-			buf_priv = buf->dev_private;
-			if (buf->file_priv == 0 || (buf->pending &&
-						    buf_priv->age <=
-						    done_age)) {
-				dev_priv->stats.requested_bufs++;
-				buf->pending = 0;
-				return buf;
-			}
-		}
-		start = 0;
-	}
-
-	return NULL;
-}
-#endif
-
 void radeon_freelist_reset(struct drm_device * dev)
 {
 	struct drm_device_dma *dma = dev->dma;
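
The radeon_freelist_get() change is easy to misread: the old loop indexed from 'start' to the end of the buffer list and only reset 'start' for the next timeout pass, so buffers before the starting point were never examined within a pass. The new loop visits each of the buf_count entries exactly once per pass, wrapping past the end; the dead #if 0 duplicate and the "returning NULL" debug message go away too. The bare scan pattern looks like this (names hypothetical; the real code also checks file_priv and buffer age):

static struct drm_buf *example_scan_from(struct drm_buf **list, int n,
					 int start)
{
	int i;

	for (i = 0; i < n; i++) {
		if (!list[start]->pending)	/* simplified idle test */
			return list[start];
		if (++start >= n)
			start = 0;	/* wrap to the head of the list */
	}
	return NULL;
}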

+ 7 - 6
drivers/gpu/drm/radeon/radeon_cs.c

@@ -76,17 +76,17 @@ int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
 			}
 			p->relocs_ptr[i] = &p->relocs[i];
 			p->relocs[i].robj = p->relocs[i].gobj->driver_private;
-			p->relocs[i].lobj.robj = p->relocs[i].robj;
+			p->relocs[i].lobj.bo = p->relocs[i].robj;
 			p->relocs[i].lobj.rdomain = r->read_domains;
 			p->relocs[i].lobj.wdomain = r->write_domain;
 			p->relocs[i].handle = r->handle;
 			p->relocs[i].flags = r->flags;
 			INIT_LIST_HEAD(&p->relocs[i].lobj.list);
-			radeon_object_list_add_object(&p->relocs[i].lobj,
-						      &p->validated);
+			radeon_bo_list_add_object(&p->relocs[i].lobj,
+						&p->validated);
 		}
 	}
-	return radeon_object_list_validate(&p->validated, p->ib->fence);
+	return radeon_bo_list_validate(&p->validated, p->ib->fence);
 }
 
 int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
@@ -190,9 +190,10 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
 	unsigned i;
 
 	if (error) {
-		radeon_object_list_unvalidate(&parser->validated);
+		radeon_bo_list_unvalidate(&parser->validated,
+						parser->ib->fence);
 	} else {
-		radeon_object_list_clean(&parser->validated);
+		radeon_bo_list_unreserve(&parser->validated);
 	}
 	for (i = 0; i < parser->nrelocs; i++) {
 		if (parser->relocs[i].gobj) {

+ 51 - 11
drivers/gpu/drm/radeon/radeon_device.c

@@ -44,10 +44,11 @@ void radeon_surface_init(struct radeon_device *rdev)
 	if (rdev->family < CHIP_R600) {
 		int i;
 
-		for (i = 0; i < 8; i++) {
-			WREG32(RADEON_SURFACE0_INFO +
-			       i * (RADEON_SURFACE1_INFO - RADEON_SURFACE0_INFO),
-			       0);
+		for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
+			if (rdev->surface_regs[i].bo)
+				radeon_bo_get_surface_reg(rdev->surface_regs[i].bo);
+			else
+				radeon_clear_surface_reg(rdev, i);
 		}
 		/* enable surfaces */
 		WREG32(RADEON_SURFACE_CNTL, 0);
@@ -208,6 +209,24 @@ bool radeon_card_posted(struct radeon_device *rdev)
 
 }
 
+bool radeon_boot_test_post_card(struct radeon_device *rdev)
+{
+	if (radeon_card_posted(rdev))
+		return true;
+
+	if (rdev->bios) {
+		DRM_INFO("GPU not posted. posting now...\n");
+		if (rdev->is_atom_bios)
+			atom_asic_init(rdev->mode_info.atom_context);
+		else
+			radeon_combios_asic_init(rdev->ddev);
+		return true;
+	} else {
+		dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+		return false;
+	}
+}
+
 int radeon_dummy_page_init(struct radeon_device *rdev)
 {
 	rdev->dummy_page.page = alloc_page(GFP_DMA32 | GFP_KERNEL | __GFP_ZERO);
@@ -463,12 +482,16 @@ int radeon_atombios_init(struct radeon_device *rdev)
 
 	rdev->mode_info.atom_context = atom_parse(atom_card_info, rdev->bios);
 	radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
+	atom_allocate_fb_scratch(rdev->mode_info.atom_context);
 	return 0;
 }
 
 void radeon_atombios_fini(struct radeon_device *rdev)
 {
-	kfree(rdev->mode_info.atom_context);
+	if (rdev->mode_info.atom_context) {
+		kfree(rdev->mode_info.atom_context->scratch);
+		kfree(rdev->mode_info.atom_context);
+	}
 	kfree(rdev->mode_info.atom_card_info);
 }
 
@@ -544,16 +567,24 @@ int radeon_device_init(struct radeon_device *rdev,
 	mutex_init(&rdev->cs_mutex);
 	mutex_init(&rdev->ib_pool.mutex);
 	mutex_init(&rdev->cp.mutex);
+	if (rdev->family >= CHIP_R600)
+		spin_lock_init(&rdev->ih.lock);
+	mutex_init(&rdev->gem.mutex);
 	rwlock_init(&rdev->fence_drv.lock);
 	INIT_LIST_HEAD(&rdev->gem.objects);
 
+	/* setup workqueue */
+	rdev->wq = create_workqueue("radeon");
+	if (rdev->wq == NULL)
+		return -ENOMEM;
+
 	/* Set asic functions */
 	r = radeon_asic_init(rdev);
 	if (r) {
 		return r;
 	}
 
-	if (radeon_agpmode == -1) {
+	if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
 		radeon_agp_disable(rdev);
 	}
 
@@ -620,6 +651,7 @@ void radeon_device_fini(struct radeon_device *rdev)
 	DRM_INFO("radeon: finishing device.\n");
 	rdev->shutdown = true;
 	radeon_fini(rdev);
+	destroy_workqueue(rdev->wq);
 	vga_client_register(rdev->pdev, NULL, NULL, NULL);
 	iounmap(rdev->rmmio);
 	rdev->rmmio = NULL;
@@ -633,6 +665,7 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
 {
 	struct radeon_device *rdev = dev->dev_private;
 	struct drm_crtc *crtc;
+	int r;
 
 	if (dev == NULL || rdev == NULL) {
 		return -ENODEV;
@@ -643,26 +676,31 @@ int radeon_suspend_kms(struct drm_device *dev, pm_message_t state)
 	/* unpin the front buffers */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct radeon_framebuffer *rfb = to_radeon_framebuffer(crtc->fb);
-		struct radeon_object *robj;
+		struct radeon_bo *robj;
 
 		if (rfb == NULL || rfb->obj == NULL) {
 			continue;
 		}
 		robj = rfb->obj->driver_private;
-		if (robj != rdev->fbdev_robj) {
-			radeon_object_unpin(robj);
+		if (robj != rdev->fbdev_rbo) {
+			r = radeon_bo_reserve(robj, false);
+			if (unlikely(r == 0)) {
+				radeon_bo_unpin(robj);
+				radeon_bo_unreserve(robj);
+			}
 		}
 	}
 	/* evict vram memory */
-	radeon_object_evict_vram(rdev);
+	radeon_bo_evict_vram(rdev);
 	/* wait for gpu to finish processing current batch */
 	radeon_fence_wait_last(rdev);
 
 	radeon_save_bios_scratch_regs(rdev);
 
 	radeon_suspend(rdev);
+	radeon_hpd_fini(rdev);
 	/* evict remaining vram memory */
-	radeon_object_evict_vram(rdev);
+	radeon_bo_evict_vram(rdev);
 
 	pci_save_state(dev->pdev);
 	if (state.event == PM_EVENT_SUSPEND) {
@@ -695,6 +733,8 @@ int radeon_resume_kms(struct drm_device *dev)
 	fb_set_suspend(rdev->fbdev_info, 0);
 	release_console_sem();
 
+	/* reset hpd state */
+	radeon_hpd_init(rdev);
 	/* blat the mode back in */
 	drm_helper_resume_force_mode(dev);
 	return 0;
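
The conversion running through this commit replaces the radeon_object_* API with radeon_bo_* calls that only operate on a reserved buffer, as the suspend path above shows. A minimal sketch of the pattern, using only functions that appear in these hunks:

	/* Sketch: every pin/unpin/kmap on a radeon_bo is bracketed by
	 * reserve/unreserve; nothing is touched if the reserve fails. */
	static int example_unpin(struct radeon_bo *robj)
	{
		int r;

		r = radeon_bo_reserve(robj, false);
		if (unlikely(r != 0))
			return r;
		radeon_bo_unpin(robj);
		radeon_bo_unreserve(robj);
		return 0;
	}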

+ 133 - 12
drivers/gpu/drm/radeon/radeon_display.c

@@ -250,6 +250,16 @@ static const char *connector_names[13] = {
 	"HDMI-B",
 };
 
+static const char *hpd_names[7] = {
+	"NONE",
+	"HPD1",
+	"HPD2",
+	"HPD3",
+	"HPD4",
+	"HPD5",
+	"HPD6",
+};
+
 static void radeon_print_display_setup(struct drm_device *dev)
 {
 	struct drm_connector *connector;
@@ -264,16 +274,18 @@ static void radeon_print_display_setup(struct drm_device *dev)
 		radeon_connector = to_radeon_connector(connector);
 		DRM_INFO("Connector %d:\n", i);
 		DRM_INFO("  %s\n", connector_names[connector->connector_type]);
+		if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
+			DRM_INFO("  %s\n", hpd_names[radeon_connector->hpd.hpd]);
 		if (radeon_connector->ddc_bus)
 			DRM_INFO("  DDC: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
 				 radeon_connector->ddc_bus->rec.mask_clk_reg,
 				 radeon_connector->ddc_bus->rec.mask_data_reg,
 				 radeon_connector->ddc_bus->rec.a_clk_reg,
 				 radeon_connector->ddc_bus->rec.a_data_reg,
-				 radeon_connector->ddc_bus->rec.put_clk_reg,
-				 radeon_connector->ddc_bus->rec.put_data_reg,
-				 radeon_connector->ddc_bus->rec.get_clk_reg,
-				 radeon_connector->ddc_bus->rec.get_data_reg);
+				 radeon_connector->ddc_bus->rec.en_clk_reg,
+				 radeon_connector->ddc_bus->rec.en_data_reg,
+				 radeon_connector->ddc_bus->rec.y_clk_reg,
+				 radeon_connector->ddc_bus->rec.y_data_reg);
 		DRM_INFO("  Encoders:\n");
 		list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
 			radeon_encoder = to_radeon_encoder(encoder);
@@ -324,6 +336,7 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
 			ret = radeon_get_legacy_connector_info_from_table(dev);
 	}
 	if (ret) {
+		radeon_setup_encoder_clones(dev);
 		radeon_print_display_setup(dev);
 		list_for_each_entry(drm_connector, &dev->mode_config.connector_list, head)
 			radeon_ddc_dump(drm_connector);
@@ -336,12 +349,17 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
 {
 	int ret = 0;
 
+	if (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+		struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
+		if (dig->dp_i2c_bus)
+			radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter);
+	}
 	if (!radeon_connector->ddc_bus)
 		return -1;
 	if (!radeon_connector->edid) {
-		radeon_i2c_do_lock(radeon_connector, 1);
+		radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
 		radeon_connector->edid = drm_get_edid(&radeon_connector->base, &radeon_connector->ddc_bus->adapter);
-		radeon_i2c_do_lock(radeon_connector, 0);
+		radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
 	}
 
 	if (radeon_connector->edid) {
@@ -361,9 +379,9 @@ static int radeon_ddc_dump(struct drm_connector *connector)
 
 	if (!radeon_connector->ddc_bus)
 		return -1;
-	radeon_i2c_do_lock(radeon_connector, 1);
+	radeon_i2c_do_lock(radeon_connector->ddc_bus, 1);
 	edid = drm_get_edid(connector, &radeon_connector->ddc_bus->adapter);
-	radeon_i2c_do_lock(radeon_connector, 0);
+	radeon_i2c_do_lock(radeon_connector->ddc_bus, 0);
 	if (edid) {
 		kfree(edid);
 	}
@@ -542,6 +560,98 @@ void radeon_compute_pll(struct radeon_pll *pll,
 	*post_div_p = best_post_div;
 }
 
+void radeon_compute_pll_avivo(struct radeon_pll *pll,
+			      uint64_t freq,
+			      uint32_t *dot_clock_p,
+			      uint32_t *fb_div_p,
+			      uint32_t *frac_fb_div_p,
+			      uint32_t *ref_div_p,
+			      uint32_t *post_div_p,
+			      int flags)
+{
+	fixed20_12 m, n, frac_n, p, f_vco, f_pclk, best_freq;
+	fixed20_12 pll_out_max, pll_out_min;
+	fixed20_12 pll_in_max, pll_in_min;
+	fixed20_12 reference_freq;
+	fixed20_12 error, ffreq, a, b;
+
+	pll_out_max.full = rfixed_const(pll->pll_out_max);
+	pll_out_min.full = rfixed_const(pll->pll_out_min);
+	pll_in_max.full = rfixed_const(pll->pll_in_max);
+	pll_in_min.full = rfixed_const(pll->pll_in_min);
+	reference_freq.full = rfixed_const(pll->reference_freq);
+	do_div(freq, 10);
+	ffreq.full = rfixed_const(freq);
+	error.full = rfixed_const(100 * 100);
+
+	/* max p */
+	p.full = rfixed_div(pll_out_max, ffreq);
+	p.full = rfixed_floor(p);
+
+	/* min m */
+	m.full = rfixed_div(reference_freq, pll_in_max);
+	m.full = rfixed_ceil(m);
+
+	while (1) {
+		n.full = rfixed_div(ffreq, reference_freq);
+		n.full = rfixed_mul(n, m);
+		n.full = rfixed_mul(n, p);
+
+		f_vco.full = rfixed_div(n, m);
+		f_vco.full = rfixed_mul(f_vco, reference_freq);
+
+		f_pclk.full = rfixed_div(f_vco, p);
+
+		if (f_pclk.full > ffreq.full)
+			error.full = f_pclk.full - ffreq.full;
+		else
+			error.full = ffreq.full - f_pclk.full;
+		error.full = rfixed_div(error, f_pclk);
+		a.full = rfixed_const(100 * 100);
+		error.full = rfixed_mul(error, a);
+
+		a.full = rfixed_mul(m, p);
+		a.full = rfixed_div(n, a);
+		best_freq.full = rfixed_mul(reference_freq, a);
+
+		if (rfixed_trunc(error) < 25)
+			break;
+
+		a.full = rfixed_const(1);
+		m.full = m.full + a.full;
+		a.full = rfixed_div(reference_freq, m);
+		if (a.full >= pll_in_min.full)
+			continue;
+
+		m.full = rfixed_div(reference_freq, pll_in_max);
+		m.full = rfixed_ceil(m);
+		a.full = rfixed_const(1);
+		p.full = p.full - a.full;
+		a.full = rfixed_mul(p, ffreq);
+		if (a.full >= pll_out_min.full)
+			continue;
+		else {
+			DRM_ERROR("Unable to find pll dividers\n");
+			break;
+		}
+	}
+
+	a.full = rfixed_const(10);
+	b.full = rfixed_mul(n, a);
+
+	frac_n.full = rfixed_floor(n);
+	frac_n.full = rfixed_mul(frac_n, a);
+	frac_n.full = b.full - frac_n.full;
+
+	*dot_clock_p = rfixed_trunc(best_freq);
+	*fb_div_p = rfixed_trunc(n);
+	*frac_fb_div_p = rfixed_trunc(frac_n);
+	*ref_div_p = rfixed_trunc(m);
+	*post_div_p = rfixed_trunc(p);
+
+	DRM_DEBUG("%u %d.%d, %d, %d\n", *dot_clock_p * 10, *fb_div_p, *frac_fb_div_p, *ref_div_p, *post_div_p);
+}
+
 static void radeon_user_framebuffer_destroy(struct drm_framebuffer *fb)
 {
 	struct radeon_framebuffer *radeon_fb = to_radeon_framebuffer(fb);
@@ -642,7 +752,7 @@ int radeon_modeset_create_props(struct radeon_device *rdev)
 			return -ENOMEM;
 
 		rdev->mode_info.coherent_mode_property->values[0] = 0;
-		rdev->mode_info.coherent_mode_property->values[0] = 1;
+		rdev->mode_info.coherent_mode_property->values[1] = 1;
 	}
 
 	if (!ASIC_IS_AVIVO(rdev)) {
@@ -666,7 +776,7 @@ int radeon_modeset_create_props(struct radeon_device *rdev)
 	if (!rdev->mode_info.load_detect_property)
 		return -ENOMEM;
 	rdev->mode_info.load_detect_property->values[0] = 0;
-	rdev->mode_info.load_detect_property->values[0] = 1;
+	rdev->mode_info.load_detect_property->values[1] = 1;
 
 	drm_mode_create_scaling_mode_property(rdev->ddev);
 
@@ -723,6 +833,8 @@ int radeon_modeset_init(struct radeon_device *rdev)
 	if (!ret) {
 		return ret;
 	}
+	/* initialize hpd */
+	radeon_hpd_init(rdev);
 	drm_helper_initial_config(rdev->ddev);
 	return 0;
 }
@@ -730,6 +842,7 @@ int radeon_modeset_init(struct radeon_device *rdev)
 void radeon_modeset_fini(struct radeon_device *rdev)
 {
 	if (rdev->mode_info.mode_config_initialized) {
+		radeon_hpd_fini(rdev);
 		drm_mode_config_cleanup(rdev->ddev);
 		rdev->mode_info.mode_config_initialized = false;
 	}
@@ -750,9 +863,17 @@ bool radeon_crtc_scaling_mode_fixup(struct drm_crtc *crtc,
 		if (encoder->crtc != crtc)
 			continue;
 		if (first) {
-			radeon_crtc->rmx_type = radeon_encoder->rmx_type;
+			/* set scaling */
+			if (radeon_encoder->rmx_type == RMX_OFF)
+				radeon_crtc->rmx_type = RMX_OFF;
+			else if (mode->hdisplay < radeon_encoder->native_mode.hdisplay ||
+				 mode->vdisplay < radeon_encoder->native_mode.vdisplay)
+				radeon_crtc->rmx_type = radeon_encoder->rmx_type;
+			else
+				radeon_crtc->rmx_type = RMX_OFF;
+			/* copy native mode */
 			memcpy(&radeon_crtc->native_mode,
-				&radeon_encoder->native_mode,
+			       &radeon_encoder->native_mode,
 				sizeof(struct drm_display_mode));
 			first = false;
 		} else {
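
The new radeon_compute_pll_avivo() searches for dividers satisfying f_pclk = f_ref * N / (M * P) in 20.12 fixed point, accepting a candidate once the relative error drops below 0.25% (rfixed_trunc(error) < 25, with error scaled by 100 * 100). A standalone integer sketch of that acceptance test, with made-up divider values:

	#include <stdio.h>

	int main(void)
	{
		unsigned ref = 2700;		/* reference clock in 10 kHz units */
		unsigned n = 120, m = 2, p = 6;	/* hypothetical divider set */
		unsigned target = 27000;	/* requested dot clock, 10 kHz units */
		unsigned pclk = ref * n / (m * p);
		unsigned err = (pclk > target ? pclk - target : target - pclk)
				* 10000 / pclk;

		printf("pclk=%u err=%u/10000 -> %s\n",
		       pclk, err, err < 25 ? "accept" : "iterate");
		return 0;
	}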

+ 4 - 0
drivers/gpu/drm/radeon/radeon_drv.c

@@ -86,6 +86,7 @@ int radeon_benchmarking = 0;
 int radeon_testing = 0;
 int radeon_connector_table = 0;
 int radeon_tv = 1;
+int radeon_new_pll = 1;
 
 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
 module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -120,6 +121,9 @@ module_param_named(connector_table, radeon_connector_table, int, 0444);
 MODULE_PARM_DESC(tv, "TV enable (0 = disable)");
 module_param_named(tv, radeon_tv, int, 0444);
 
+MODULE_PARM_DESC(new_pll, "Select new PLL code for AVIVO chips");
+module_param_named(new_pll, radeon_new_pll, int, 0444);
+
 static int radeon_suspend(struct drm_device *dev, pm_message_t state)
 {
 	drm_radeon_private_t *dev_priv = dev->dev_private;
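
The new knob defaults to the AVIVO PLL code; since it is registered via module_param_named(new_pll, ...), it should be possible to fall back to the old divider search at load time with something like "modprobe radeon new_pll=0".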

+ 0 - 1
drivers/gpu/drm/radeon/radeon_drv.h

@@ -1104,7 +1104,6 @@ extern u32 radeon_get_scratch(drm_radeon_private_t *dev_priv, int index);
 #       define R600_IT_WAIT_REG_MEM             0x00003C00
 #       define R600_IT_MEM_WRITE                0x00003D00
 #       define R600_IT_INDIRECT_BUFFER          0x00003200
-#       define R600_IT_CP_INTERRUPT             0x00004000
 #       define R600_IT_SURFACE_SYNC             0x00004300
 #              define R600_CB0_DEST_BASE_ENA    (1 << 6)
 #              define R600_TC_ACTION_ENA        (1 << 23)

+ 190 - 86
drivers/gpu/drm/radeon/radeon_encoders.c

@@ -35,6 +35,51 @@ extern int atom_debug;
 bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
 				struct drm_display_mode *mode);
 
+static uint32_t radeon_encoder_clones(struct drm_encoder *encoder)
+{
+	struct drm_device *dev = encoder->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct drm_encoder *clone_encoder;
+	uint32_t index_mask = 0;
+	int count;
+
+	/* DIG routing gets problematic */
+	if (rdev->family >= CHIP_R600)
+		return index_mask;
+	/* LVDS/TV are too wacky */
+	if (radeon_encoder->devices & ATOM_DEVICE_LCD_SUPPORT)
+		return index_mask;
+	/* DVO requires 2x ppll clocks depending on tmds chip */
+	if (radeon_encoder->devices & ATOM_DEVICE_DFP2_SUPPORT)
+		return index_mask;
+
+	count = -1;
+	list_for_each_entry(clone_encoder, &dev->mode_config.encoder_list, head) {
+		struct radeon_encoder *radeon_clone = to_radeon_encoder(clone_encoder);
+		count++;
+
+		if (clone_encoder == encoder)
+			continue;
+		if (radeon_clone->devices & (ATOM_DEVICE_LCD_SUPPORT))
+			continue;
+		if (radeon_clone->devices & ATOM_DEVICE_DFP2_SUPPORT)
+			continue;
+		else
+			index_mask |= (1 << count);
+	}
+	return index_mask;
+}
+
+void radeon_setup_encoder_clones(struct drm_device *dev)
+{
+	struct drm_encoder *encoder;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		encoder->possible_clones = radeon_encoder_clones(encoder);
+	}
+}
+
 uint32_t
 radeon_get_encoder_id(struct drm_device *dev, uint32_t supported_device, uint8_t dac)
 {
@@ -163,29 +208,6 @@ radeon_get_connector_for_encoder(struct drm_encoder *encoder)
 	return NULL;
 }
 
-/* used for both atom and legacy */
-void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
-			   struct drm_display_mode *mode,
-			   struct drm_display_mode *adjusted_mode)
-{
-	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-	struct drm_device *dev = encoder->dev;
-	struct radeon_device *rdev = dev->dev_private;
-	struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
-
-	if (mode->hdisplay < native_mode->hdisplay ||
-	    mode->vdisplay < native_mode->vdisplay) {
-		int mode_id = adjusted_mode->base.id;
-		*adjusted_mode = *native_mode;
-		if (!ASIC_IS_AVIVO(rdev)) {
-			adjusted_mode->hdisplay = mode->hdisplay;
-			adjusted_mode->vdisplay = mode->vdisplay;
-		}
-		adjusted_mode->base.id = mode_id;
-	}
-}
-
-
 static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
 				   struct drm_display_mode *mode,
 				   struct drm_display_mode *adjusted_mode)
@@ -198,14 +220,24 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
 	radeon_encoder_set_active_device(encoder);
 	drm_mode_set_crtcinfo(adjusted_mode, 0);
 
-	if (radeon_encoder->rmx_type != RMX_OFF)
-		radeon_rmx_mode_fixup(encoder, mode, adjusted_mode);
-
 	/* hw bug */
 	if ((mode->flags & DRM_MODE_FLAG_INTERLACE)
 	    && (mode->crtc_vsync_start < (mode->crtc_vdisplay + 2)))
 		adjusted_mode->crtc_vsync_start = adjusted_mode->crtc_vdisplay + 2;
 
+	/* get the native mode for LVDS */
+	if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
+		struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+		int mode_id = adjusted_mode->base.id;
+		*adjusted_mode = *native_mode;
+		if (!ASIC_IS_AVIVO(rdev)) {
+			adjusted_mode->hdisplay = mode->hdisplay;
+			adjusted_mode->vdisplay = mode->vdisplay;
+		}
+		adjusted_mode->base.id = mode_id;
+	}
+
+	/* get the native mode for TV */
 	if (radeon_encoder->active_device & (ATOM_DEVICE_TV_SUPPORT)) {
 		struct radeon_encoder_atom_dac *tv_dac = radeon_encoder->enc_priv;
 		if (tv_dac) {
@@ -218,6 +250,12 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
 		}
 	}
 
+	if (ASIC_IS_DCE3(rdev) &&
+	    (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT))) {
+		struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+		radeon_dp_set_link_config(connector, mode);
+	}
+
 	return true;
 }
 
@@ -392,7 +430,7 @@ union lvds_encoder_control {
 	LVDS_ENCODER_CONTROL_PS_ALLOCATION_V2 v2;
 };
 
-static void
+void
 atombios_digital_setup(struct drm_encoder *encoder, int action)
 {
 	struct drm_device *dev = encoder->dev;
@@ -522,6 +560,7 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
 {
 	struct drm_connector *connector;
 	struct radeon_connector *radeon_connector;
+	struct radeon_connector_atom_dig *radeon_dig_connector;
 
 	connector = radeon_get_connector_for_encoder(encoder);
 	if (!connector)
@@ -551,10 +590,10 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
 		return ATOM_ENCODER_MODE_LVDS;
 		break;
 	case DRM_MODE_CONNECTOR_DisplayPort:
-		/*if (radeon_output->MonType == MT_DP)
-		  return ATOM_ENCODER_MODE_DP;
-		  else*/
-		if (drm_detect_hdmi_monitor(radeon_connector->edid))
+		radeon_dig_connector = radeon_connector->con_priv;
+		if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
+			return ATOM_ENCODER_MODE_DP;
+		else if (drm_detect_hdmi_monitor(radeon_connector->edid))
 			return ATOM_ENCODER_MODE_HDMI;
 		else
 			return ATOM_ENCODER_MODE_DVI;
@@ -573,6 +612,30 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
 	}
 }
 
+/*
+ * DIG Encoder/Transmitter Setup
+ *
+ * DCE 3.0/3.1
+ * - 2 DIG transmitter blocks. UNIPHY (links A and B) and LVTMA.
+ * Supports up to 3 digital outputs
+ * - 2 DIG encoder blocks.
+ * DIG1 can drive UNIPHY link A or link B
+ * DIG2 can drive UNIPHY link B or LVTMA
+ *
+ * DCE 3.2
+ * - 3 DIG transmitter blocks. UNIPHY0/1/2 (links A and B).
+ * Supports up to 5 digital outputs
+ * - 2 DIG encoder blocks.
+ * DIG1/2 can drive UNIPHY0/1/2 link A or link B
+ *
+ * Routing
+ * crtc -> dig encoder -> UNIPHY/LVTMA (1 or 2 links)
+ * Examples:
+ * crtc0 -> dig2 -> LVTMA   links A+B -> TMDS/HDMI
+ * crtc1 -> dig1 -> UNIPHY0 link  B   -> DP
+ * crtc0 -> dig1 -> UNIPHY2 link  A   -> LVDS
+ * crtc1 -> dig2 -> UNIPHY1 link  B+A -> TMDS/HDMI
+ */
 static void
 atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
 {
@@ -614,10 +677,17 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
 	} else {
 		switch (radeon_encoder->encoder_id) {
 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
-			index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
+			/* XXX doesn't really matter which dig encoder we pick as long as it's
+			 * not already in use
+			 */
+			if (dig_connector->linkb)
+				index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
+			else
+				index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
 			num = 1;
 			break;
 		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+			/* Only dig2 encoder can drive LVTMA */
 			index = GetIndexIntoMasterTable(COMMAND, DIG2EncoderControl);
 			num = 2;
 			break;
@@ -652,18 +722,21 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
 		}
 	}
 
-	if (radeon_encoder->pixel_clock > 165000) {
-		args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA_B;
+	args.ucEncoderMode = atombios_get_encoder_mode(encoder);
+
+	if (args.ucEncoderMode == ATOM_ENCODER_MODE_DP) {
+		if (dig_connector->dp_clock == 270000)
+			args.ucConfig |= ATOM_ENCODER_CONFIG_DPLINKRATE_2_70GHZ;
+		args.ucLaneNum = dig_connector->dp_lane_count;
+	} else if (radeon_encoder->pixel_clock > 165000)
 		args.ucLaneNum = 8;
-	} else {
-		if (dig_connector->linkb)
-			args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
-		else
-			args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
+	else
 		args.ucLaneNum = 4;
-	}
 
-	args.ucEncoderMode = atombios_get_encoder_mode(encoder);
+	if (dig_connector->linkb)
+		args.ucConfig |= ATOM_ENCODER_CONFIG_LINKB;
+	else
+		args.ucConfig |= ATOM_ENCODER_CONFIG_LINKA;
 
 	atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
 
@@ -674,8 +747,8 @@ union dig_transmitter_control {
 	DIG_TRANSMITTER_CONTROL_PARAMETERS_V2 v2;
 };
 
-static void
-atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
+void
+atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t lane_num, uint8_t lane_set)
 {
 	struct drm_device *dev = encoder->dev;
 	struct radeon_device *rdev = dev->dev_private;
@@ -687,6 +760,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
 	struct drm_connector *connector;
 	struct radeon_connector *radeon_connector;
 	struct radeon_connector_atom_dig *dig_connector;
+	bool is_dp = false;
 
 	connector = radeon_get_connector_for_encoder(encoder);
 	if (!connector)
@@ -704,6 +778,9 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
 
 	dig_connector = radeon_connector->con_priv;
 
+	if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP)
+		is_dp = true;
+
 	memset(&args, 0, sizeof(args));
 
 	if (ASIC_IS_DCE32(rdev))
@@ -724,17 +801,23 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
 	args.v1.ucAction = action;
 	if (action == ATOM_TRANSMITTER_ACTION_INIT) {
 		args.v1.usInitInfo = radeon_connector->connector_object_id;
+	} else if (action == ATOM_TRANSMITTER_ACTION_SETUP_VSEMPH) {
+		args.v1.asMode.ucLaneSel = lane_num;
+		args.v1.asMode.ucLaneSet = lane_set;
 	} else {
-		if (radeon_encoder->pixel_clock > 165000)
+		if (is_dp)
+			args.v1.usPixelClock =
+				cpu_to_le16(dig_connector->dp_clock / 10);
+		else if (radeon_encoder->pixel_clock > 165000)
 			args.v1.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
 		else
 			args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
 	}
 	if (ASIC_IS_DCE32(rdev)) {
-		if (radeon_encoder->pixel_clock > 165000)
-			args.v2.usPixelClock = cpu_to_le16((radeon_encoder->pixel_clock / 2) / 10);
 		if (dig->dig_block)
 			args.v2.acConfig.ucEncoderSel = 1;
+		if (dig_connector->linkb)
+			args.v2.acConfig.ucLinkSel = 1;
 
 		switch (radeon_encoder->encoder_id) {
 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
@@ -751,7 +834,9 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
 			break;
 		}
 
-		if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+		if (is_dp)
+			args.v2.acConfig.fCoherentMode = 1;
+		else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
 			if (dig->coherent_mode)
 				args.v2.acConfig.fCoherentMode = 1;
 		}
@@ -760,17 +845,20 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
 
 		switch (radeon_encoder->encoder_id) {
 		case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
-			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
+			/* XXX doesn't really matter which dig encoder we pick as long as it's
+			 * not already in use
+			 */
+			if (dig_connector->linkb)
+				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
+			else
+				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
 			if (rdev->flags & RADEON_IS_IGP) {
 				if (radeon_encoder->pixel_clock > 165000) {
-					args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
-							     ATOM_TRANSMITTER_CONFIG_LINKA_B);
 					if (dig_connector->igp_lane_info & 0x3)
 						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
 					else if (dig_connector->igp_lane_info & 0xc)
 						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
 				} else {
-					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
 					if (dig_connector->igp_lane_info & 0x1)
 						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
 					else if (dig_connector->igp_lane_info & 0x2)
@@ -780,35 +868,25 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action)
 					else if (dig_connector->igp_lane_info & 0x8)
 						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
 				}
-			} else {
-				if (radeon_encoder->pixel_clock > 165000)
-					args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
-							     ATOM_TRANSMITTER_CONFIG_LINKA_B |
-							     ATOM_TRANSMITTER_CONFIG_LANE_0_7);
-				else {
-					if (dig_connector->linkb)
-						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
-					else
-						args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
-				}
 			}
 			break;
 		case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+			/* Only dig2 encoder can drive LVTMA */
 			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG2_ENCODER;
-			if (radeon_encoder->pixel_clock > 165000)
-				args.v1.ucConfig |= (ATOM_TRANSMITTER_CONFIG_8LANE_LINK |
-						     ATOM_TRANSMITTER_CONFIG_LINKA_B |
-						     ATOM_TRANSMITTER_CONFIG_LANE_0_7);
-			else {
-				if (dig_connector->linkb)
-					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
-				else
-					args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA | ATOM_TRANSMITTER_CONFIG_LANE_0_3;
-			}
 			break;
 		}
 
-		if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
+		if (radeon_encoder->pixel_clock > 165000)
+			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
+
+		if (dig_connector->linkb)
+			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
+		else
+			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKA;
+
+		if (is_dp)
+			args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
+		else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
 			if (dig->coherent_mode)
 				args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
 		}
@@ -918,12 +996,16 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
 	if (is_dig) {
 		switch (mode) {
 		case DRM_MODE_DPMS_ON:
-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE);
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
+			{
+				struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+				dp_link_train(encoder, connector);
+			}
 			break;
 		case DRM_MODE_DPMS_STANDBY:
 		case DRM_MODE_DPMS_SUSPEND:
 		case DRM_MODE_DPMS_OFF:
-			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE);
+			atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
 			break;
 		}
 	} else {
@@ -1025,13 +1107,33 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
 						args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
 					else
 						args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
-				} else
-					args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
+				} else {
+					struct drm_connector *connector;
+					struct radeon_connector *radeon_connector;
+					struct radeon_connector_atom_dig *dig_connector;
+
+					connector = radeon_get_connector_for_encoder(encoder);
+					if (!connector)
+						return;
+					radeon_connector = to_radeon_connector(connector);
+					if (!radeon_connector->con_priv)
+						return;
+					dig_connector = radeon_connector->con_priv;
+
+					/* XXX doesn't really matter which dig encoder we pick as long as it's
+					 * not already in use
+					 */
+					if (dig_connector->linkb)
+						args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
+					else
+						args.v2.ucEncoderID = ASIC_INT_DIG1_ENCODER_ID;
+				}
 				break;
 			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1:
 				args.v2.ucEncoderID = ASIC_INT_DVO_ENCODER_ID;
 				break;
 			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
+				/* Only dig2 encoder can drive LVTMA */
 				args.v2.ucEncoderID = ASIC_INT_DIG2_ENCODER_ID;
 				break;
 			case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1:
@@ -1104,11 +1206,14 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
 
-	if (radeon_encoder->enc_priv) {
-		struct radeon_encoder_atom_dig *dig;
+	if (radeon_encoder->active_device &
+	    (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
+		if (radeon_encoder->enc_priv) {
+			struct radeon_encoder_atom_dig *dig;
 
-		dig = radeon_encoder->enc_priv;
-		dig->dig_block = radeon_crtc->crtc_id;
+			dig = radeon_encoder->enc_priv;
+			dig->dig_block = radeon_crtc->crtc_id;
+		}
 	}
 	radeon_encoder->pixel_clock = adjusted_mode->clock;
 
@@ -1134,14 +1239,14 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
 	case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
 	case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
 		/* disable the encoder and transmitter */
-		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE);
+		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE, 0, 0);
 		atombios_dig_encoder_setup(encoder, ATOM_DISABLE);
 
 		/* setup and enable the encoder and transmitter */
 		atombios_dig_encoder_setup(encoder, ATOM_ENABLE);
-		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT);
-		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP);
-		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE);
+		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_INIT, 0, 0);
+		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0);
+		atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
 		break;
 	case ENCODER_OBJECT_ID_INTERNAL_DDI:
 		atombios_ddia_setup(encoder, ATOM_ENABLE);
@@ -1354,7 +1459,6 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
 		encoder->possible_crtcs = 0x1;
 	else
 		encoder->possible_crtcs = 0x3;
-	encoder->possible_clones = 0;
 
 	radeon_encoder->enc_priv = NULL;
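
The linkb test repeated through these hunks is the entire DIG-encoder selection policy on DCE 3.x: link B connectors get DIG2, everything else gets DIG1, and LVTMA is hardwired to DIG2. Condensed into one hypothetical helper (not a function this commit adds):

	static int pick_dig_encoder(struct radeon_connector_atom_dig *dig_connector,
				    uint32_t encoder_id)
	{
		/* only the DIG2 encoder can drive LVTMA */
		if (encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA)
			return 2;
		return dig_connector->linkb ? 2 : 1;
	}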
 

+ 47 - 25
drivers/gpu/drm/radeon/radeon_fb.c

@@ -140,7 +140,7 @@ int radeonfb_create(struct drm_device *dev,
 	struct radeon_framebuffer *rfb;
 	struct drm_mode_fb_cmd mode_cmd;
 	struct drm_gem_object *gobj = NULL;
-	struct radeon_object *robj = NULL;
+	struct radeon_bo *rbo = NULL;
 	struct device *device = &rdev->pdev->dev;
 	int size, aligned_size, ret;
 	u64 fb_gpuaddr;
@@ -168,14 +168,14 @@ int radeonfb_create(struct drm_device *dev,
 	ret = radeon_gem_object_create(rdev, aligned_size, 0,
 			RADEON_GEM_DOMAIN_VRAM,
 			false, ttm_bo_type_kernel,
-			false, &gobj);
+			&gobj);
 	if (ret) {
 		printk(KERN_ERR "failed to allocate framebuffer (%d %d)\n",
 		       surface_width, surface_height);
 		ret = -ENOMEM;
 		goto out;
 	}
-	robj = gobj->driver_private;
+	rbo = gobj->driver_private;
 
 	if (fb_tiled)
 		tiling_flags = RADEON_TILING_MACRO;
@@ -192,8 +192,13 @@ int radeonfb_create(struct drm_device *dev,
 	}
 #endif
 
-	if (tiling_flags)
-		radeon_object_set_tiling_flags(robj, tiling_flags | RADEON_TILING_SURFACE, mode_cmd.pitch);
+	if (tiling_flags) {
+		ret = radeon_bo_set_tiling_flags(rbo,
+					tiling_flags | RADEON_TILING_SURFACE,
+					mode_cmd.pitch);
+		if (ret)
+			dev_err(rdev->dev, "FB failed to set tiling flags\n");
+	}
 	mutex_lock(&rdev->ddev->struct_mutex);
 	fb = radeon_framebuffer_create(rdev->ddev, &mode_cmd, gobj);
 	if (fb == NULL) {
@@ -201,10 +206,19 @@ int radeonfb_create(struct drm_device *dev,
 		ret = -ENOMEM;
 		goto out_unref;
 	}
-	ret = radeon_object_pin(robj, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
+	ret = radeon_bo_reserve(rbo, false);
+	if (unlikely(ret != 0))
+		goto out_unref;
+	ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &fb_gpuaddr);
+	if (ret) {
+		radeon_bo_unreserve(rbo);
+		goto out_unref;
+	}
+	if (fb_tiled)
+		radeon_bo_check_tiling(rbo, 0, 0);
+	ret = radeon_bo_kmap(rbo, &fbptr);
+	radeon_bo_unreserve(rbo);
 	if (ret) {
-		printk(KERN_ERR "failed to pin framebuffer\n");
-		ret = -ENOMEM;
 		goto out_unref;
 	}
 
@@ -213,7 +227,7 @@ int radeonfb_create(struct drm_device *dev,
 	*fb_p = fb;
 	rfb = to_radeon_framebuffer(fb);
 	rdev->fbdev_rfb = rfb;
-	rdev->fbdev_robj = robj;
+	rdev->fbdev_rbo = rbo;
 
 	info = framebuffer_alloc(sizeof(struct radeon_fb_device), device);
 	if (info == NULL) {
@@ -234,15 +248,7 @@ int radeonfb_create(struct drm_device *dev,
 	if (ret)
 		goto out_unref;
 
-	if (fb_tiled)
-		radeon_object_check_tiling(robj, 0, 0);
-
-	ret = radeon_object_kmap(robj, &fbptr);
-	if (ret) {
-		goto out_unref;
-	}
-
-	memset_io(fbptr, 0, aligned_size);
+	memset_io(fbptr, 0xff, aligned_size);
 
 	strcpy(info->fix.id, "radeondrmfb");
 
@@ -288,8 +294,12 @@ int radeonfb_create(struct drm_device *dev,
 	return 0;
 
 out_unref:
-	if (robj) {
-		radeon_object_kunmap(robj);
+	if (rbo) {
+		ret = radeon_bo_reserve(rbo, false);
+		if (likely(ret == 0)) {
+			radeon_bo_kunmap(rbo);
+			radeon_bo_unreserve(rbo);
+		}
 	}
 	if (fb && ret) {
 		list_del(&fb->filp_head);
@@ -321,14 +331,22 @@ int radeon_parse_options(char *options)
 
 int radeonfb_probe(struct drm_device *dev)
 {
-	return drm_fb_helper_single_fb_probe(dev, 32, &radeonfb_create);
+	struct radeon_device *rdev = dev->dev_private;
+	int bpp_sel = 32;
+
+	/* select 8 bpp console on RN50 or cards with 32MB or less of VRAM */
+	if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024))
+		bpp_sel = 8;
+
+	return drm_fb_helper_single_fb_probe(dev, bpp_sel, &radeonfb_create);
 }
 
 int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
 {
 	struct fb_info *info;
 	struct radeon_framebuffer *rfb = to_radeon_framebuffer(fb);
-	struct radeon_object *robj;
+	struct radeon_bo *rbo;
+	int r;
 
 	if (!fb) {
 		return -EINVAL;
@@ -336,10 +354,14 @@ int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
 	info = fb->fbdev;
 	if (info) {
 		struct radeon_fb_device *rfbdev = info->par;
-		robj = rfb->obj->driver_private;
+		rbo = rfb->obj->driver_private;
 		unregister_framebuffer(info);
-		radeon_object_kunmap(robj);
-		radeon_object_unpin(robj);
+		r = radeon_bo_reserve(rbo, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(rbo);
+			radeon_bo_unpin(rbo);
+			radeon_bo_unreserve(rbo);
+		}
 		drm_fb_helper_free(&rfbdev->helper);
 		framebuffer_release(info);
 	}

+ 6 - 41
drivers/gpu/drm/radeon/radeon_fence.c

@@ -168,37 +168,6 @@ bool radeon_fence_signaled(struct radeon_fence *fence)
 	return signaled;
 }
 
-int r600_fence_wait(struct radeon_fence *fence,  bool intr, bool lazy)
-{
-	struct radeon_device *rdev;
-	int ret = 0;
-
-	rdev = fence->rdev;
-
-	__set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
-
-	while (1) {
-		if (radeon_fence_signaled(fence))
-			break;
-
-		if (time_after_eq(jiffies, fence->timeout)) {
-			ret = -EBUSY;
-			break;
-		}
-
-		if (lazy)
-			schedule_timeout(1);
-
-		if (intr && signal_pending(current)) {
-			ret = -ERESTARTSYS;
-			break;
-		}
-	}
-	__set_current_state(TASK_RUNNING);
-	return ret;
-}
-
-
 int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 {
 	struct radeon_device *rdev;
@@ -216,13 +185,6 @@ int radeon_fence_wait(struct radeon_fence *fence, bool intr)
 		return 0;
 	}
 
-	if (rdev->family >= CHIP_R600) {
-		r = r600_fence_wait(fence, intr, 0);
-		if (r == -ERESTARTSYS)
-			return -EBUSY;
-		return r;
-	}
-
 retry:
 	cur_jiffies = jiffies;
 	timeout = HZ / 100;
@@ -231,14 +193,17 @@ retry:
 	}
 
 	if (intr) {
+		radeon_irq_kms_sw_irq_get(rdev);
 		r = wait_event_interruptible_timeout(rdev->fence_drv.queue,
 				radeon_fence_signaled(fence), timeout);
-		if (unlikely(r == -ERESTARTSYS)) {
-			return -EBUSY;
-		}
+		radeon_irq_kms_sw_irq_put(rdev);
+		if (unlikely(r < 0))
+			return r;
 	} else {
+		radeon_irq_kms_sw_irq_get(rdev);
 		r = wait_event_timeout(rdev->fence_drv.queue,
 			 radeon_fence_signaled(fence), timeout);
+		radeon_irq_kms_sw_irq_put(rdev);
 	}
 	if (unlikely(!radeon_fence_signaled(fence))) {
 		if (unlikely(r == 0)) {
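
With the r600-specific polling loop gone, every family shares the wait_event path, and -ERESTARTSYS now propagates to the caller instead of being rewritten to -EBUSY. A caller sketch under that assumption:

	r = radeon_fence_wait(fence, true);
	if (r == -ERESTARTSYS)
		return r;	/* restart the syscall rather than report busy */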

+ 17 - 0
drivers/gpu/drm/radeon/radeon_fixed.h

@@ -38,6 +38,23 @@ typedef union rfixed {
 #define fixed_init_half(A) { .full = rfixed_const_half((A)) }
 #define rfixed_trunc(A) ((A).full >> 12)
 
+static inline u32 rfixed_floor(fixed20_12 A)
+{
+	u32 non_frac = rfixed_trunc(A);
+
+	return rfixed_const(non_frac);
+}
+
+static inline u32 rfixed_ceil(fixed20_12 A)
+{
+	u32 non_frac = rfixed_trunc(A);
+
+	if (A.full > rfixed_const(non_frac))
+		return rfixed_const(non_frac + 1);
+	else
+		return rfixed_const(non_frac);
+}
+
 static inline u32 rfixed_div(fixed20_12 A, fixed20_12 B)
 {
 	u64 tmp = ((u64)A.full << 13);
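
fixed20_12 is a 20.12 fixed-point type, so the new floor/ceil helpers just mask off the 12 fractional bits. A userspace sketch, assuming rfixed_const(A) is (A) << 12 (the inverse of the rfixed_trunc shown above):

	#include <stdio.h>
	#include <stdint.h>

	#define rfixed_const(A)	((uint32_t)((A) << 12))
	#define rfixed_trunc(A)	((A) >> 12)

	int main(void)
	{
		uint32_t x = rfixed_const(3) + 1;	/* 3 + 1/4096 */
		uint32_t fl = rfixed_const(rfixed_trunc(x));
		uint32_t ce = (x > fl) ? fl + rfixed_const(1) : fl;

		printf("floor=%u ceil=%u\n", rfixed_trunc(fl), rfixed_trunc(ce));
		return 0;	/* prints floor=3 ceil=4 */
	}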

+ 23 - 19
drivers/gpu/drm/radeon/radeon_gart.c

@@ -78,11 +78,9 @@ int radeon_gart_table_vram_alloc(struct radeon_device *rdev)
 	int r;
 
 	if (rdev->gart.table.vram.robj == NULL) {
-		r = radeon_object_create(rdev, NULL,
-					 rdev->gart.table_size,
-					 true,
-					 RADEON_GEM_DOMAIN_VRAM,
-					 false, &rdev->gart.table.vram.robj);
+		r = radeon_bo_create(rdev, NULL, rdev->gart.table_size,
+					true, RADEON_GEM_DOMAIN_VRAM,
+					&rdev->gart.table.vram.robj);
 		if (r) {
 			return r;
 		}
@@ -95,32 +93,38 @@ int radeon_gart_table_vram_pin(struct radeon_device *rdev)
 	uint64_t gpu_addr;
 	int r;
 
-	r = radeon_object_pin(rdev->gart.table.vram.robj,
-			      RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
-	if (r) {
-		radeon_object_unref(&rdev->gart.table.vram.robj);
+	r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+	if (unlikely(r != 0))
 		return r;
-	}
-	r = radeon_object_kmap(rdev->gart.table.vram.robj,
-			       (void **)&rdev->gart.table.vram.ptr);
+	r = radeon_bo_pin(rdev->gart.table.vram.robj,
+				RADEON_GEM_DOMAIN_VRAM, &gpu_addr);
 	if (r) {
-		radeon_object_unpin(rdev->gart.table.vram.robj);
-		radeon_object_unref(&rdev->gart.table.vram.robj);
-		DRM_ERROR("radeon: failed to map gart vram table.\n");
+		radeon_bo_unreserve(rdev->gart.table.vram.robj);
 		return r;
 	}
+	r = radeon_bo_kmap(rdev->gart.table.vram.robj,
+				(void **)&rdev->gart.table.vram.ptr);
+	if (r)
+		radeon_bo_unpin(rdev->gart.table.vram.robj);
+	radeon_bo_unreserve(rdev->gart.table.vram.robj);
 	rdev->gart.table_addr = gpu_addr;
-	return 0;
+	return r;
 }
 
 void radeon_gart_table_vram_free(struct radeon_device *rdev)
 {
+	int r;
+
 	if (rdev->gart.table.vram.robj == NULL) {
 		return;
 	}
-	radeon_object_kunmap(rdev->gart.table.vram.robj);
-	radeon_object_unpin(rdev->gart.table.vram.robj);
-	radeon_object_unref(&rdev->gart.table.vram.robj);
+	r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+	if (likely(r == 0)) {
+		radeon_bo_kunmap(rdev->gart.table.vram.robj);
+		radeon_bo_unpin(rdev->gart.table.vram.robj);
+		radeon_bo_unreserve(rdev->gart.table.vram.robj);
+	}
+	radeon_bo_unref(&rdev->gart.table.vram.robj);
 }
 
 

+ 55 - 49
drivers/gpu/drm/radeon/radeon_gem.c

@@ -38,22 +38,21 @@ int radeon_gem_object_init(struct drm_gem_object *obj)
 
 void radeon_gem_object_free(struct drm_gem_object *gobj)
 {
-	struct radeon_object *robj = gobj->driver_private;
+	struct radeon_bo *robj = gobj->driver_private;
 
 	gobj->driver_private = NULL;
 	if (robj) {
-		radeon_object_unref(&robj);
+		radeon_bo_unref(&robj);
 	}
 }
 
 int radeon_gem_object_create(struct radeon_device *rdev, int size,
-			     int alignment, int initial_domain,
-			     bool discardable, bool kernel,
-			     bool interruptible,
-			     struct drm_gem_object **obj)
+				int alignment, int initial_domain,
+				bool discardable, bool kernel,
+				struct drm_gem_object **obj)
 {
 	struct drm_gem_object *gobj;
-	struct radeon_object *robj;
+	struct radeon_bo *robj;
 	int r;
 
 	*obj = NULL;
@@ -65,8 +64,7 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
 	if (alignment < PAGE_SIZE) {
 		alignment = PAGE_SIZE;
 	}
-	r = radeon_object_create(rdev, gobj, size, kernel, initial_domain,
-				 interruptible, &robj);
+	r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj);
 	if (r) {
 		DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n",
 			  size, initial_domain, alignment);
@@ -83,33 +81,33 @@ int radeon_gem_object_create(struct radeon_device *rdev, int size,
 int radeon_gem_object_pin(struct drm_gem_object *obj, uint32_t pin_domain,
 			  uint64_t *gpu_addr)
 {
-	struct radeon_object *robj = obj->driver_private;
-	uint32_t flags;
+	struct radeon_bo *robj = obj->driver_private;
+	int r;
 
-	switch (pin_domain) {
-	case RADEON_GEM_DOMAIN_VRAM:
-		flags = TTM_PL_FLAG_VRAM;
-		break;
-	case RADEON_GEM_DOMAIN_GTT:
-		flags = TTM_PL_FLAG_TT;
-		break;
-	default:
-		flags = TTM_PL_FLAG_SYSTEM;
-		break;
-	}
-	return radeon_object_pin(robj, flags, gpu_addr);
+	r = radeon_bo_reserve(robj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_pin(robj, pin_domain, gpu_addr);
+	radeon_bo_unreserve(robj);
+	return r;
 }
 
 void radeon_gem_object_unpin(struct drm_gem_object *obj)
 {
-	struct radeon_object *robj = obj->driver_private;
-	radeon_object_unpin(robj);
+	struct radeon_bo *robj = obj->driver_private;
+	int r;
+
+	r = radeon_bo_reserve(robj, false);
+	if (likely(r == 0)) {
+		radeon_bo_unpin(robj);
+		radeon_bo_unreserve(robj);
+	}
 }
 
 int radeon_gem_set_domain(struct drm_gem_object *gobj,
 			  uint32_t rdomain, uint32_t wdomain)
 {
-	struct radeon_object *robj;
+	struct radeon_bo *robj;
 	uint32_t domain;
 	int r;
 
@@ -127,11 +125,12 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj,
 	}
 	if (domain == RADEON_GEM_DOMAIN_CPU) {
 		/* Asking for cpu access wait for object idle */
-		r = radeon_object_wait(robj);
+		r = radeon_bo_wait(robj, NULL, false);
 		if (r) {
 			printk(KERN_ERR "Failed to wait for object !\n");
 			return r;
 		}
+		radeon_hdp_flush(robj->rdev);
 	}
 	return 0;
 }
@@ -144,7 +143,7 @@ int radeon_gem_init(struct radeon_device *rdev)
 
 void radeon_gem_fini(struct radeon_device *rdev)
 {
-	radeon_object_force_delete(rdev);
+	radeon_bo_force_delete(rdev);
 }
 
 
@@ -158,9 +157,13 @@ int radeon_gem_info_ioctl(struct drm_device *dev, void *data,
 	struct drm_radeon_gem_info *args = data;
 
 	args->vram_size = rdev->mc.real_vram_size;
-	/* FIXME: report somethings that makes sense */
-	args->vram_visible = rdev->mc.real_vram_size - (4 * 1024 * 1024);
-	args->gart_size = rdev->mc.gtt_size;
+	args->vram_visible = rdev->mc.real_vram_size;
+	if (rdev->stollen_vga_memory)
+		args->vram_visible -= radeon_bo_size(rdev->stollen_vga_memory);
+	if (rdev->fbdev_rbo)
+		args->vram_visible -= radeon_bo_size(rdev->fbdev_rbo);
+	args->gart_size = rdev->mc.gtt_size - rdev->cp.ring_size - 4096 -
+		RADEON_IB_POOL_SIZE*64*1024;
 	return 0;
 }
 
@@ -192,8 +195,8 @@ int radeon_gem_create_ioctl(struct drm_device *dev, void *data,
 	/* create a gem object to contain this object in */
 	args->size = roundup(args->size, PAGE_SIZE);
 	r = radeon_gem_object_create(rdev, args->size, args->alignment,
-				     args->initial_domain, false,
-				     false, true, &gobj);
+					args->initial_domain, false,
+					false, &gobj);
 	if (r) {
 		return r;
 	}
@@ -218,7 +221,7 @@ int radeon_gem_set_domain_ioctl(struct drm_device *dev, void *data,
 	 * just validate the BO into a certain domain */
 	struct drm_radeon_gem_set_domain *args = data;
 	struct drm_gem_object *gobj;
-	struct radeon_object *robj;
+	struct radeon_bo *robj;
 	int r;
 
 	/* for now if someone requests domain CPU -
@@ -244,19 +247,18 @@ int radeon_gem_mmap_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_radeon_gem_mmap *args = data;
 	struct drm_gem_object *gobj;
-	struct radeon_object *robj;
-	int r;
+	struct radeon_bo *robj;
 
 	gobj = drm_gem_object_lookup(dev, filp, args->handle);
 	if (gobj == NULL) {
 		return -EINVAL;
 	}
 	robj = gobj->driver_private;
-	r = radeon_object_mmap(robj, &args->addr_ptr);
+	args->addr_ptr = radeon_bo_mmap_offset(robj);
 	mutex_lock(&dev->struct_mutex);
 	drm_gem_object_unreference(gobj);
 	mutex_unlock(&dev->struct_mutex);
-	return r;
+	return 0;
 }
 
 int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
@@ -264,16 +266,16 @@ int radeon_gem_busy_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_radeon_gem_busy *args = data;
 	struct drm_gem_object *gobj;
-	struct radeon_object *robj;
+	struct radeon_bo *robj;
 	int r;
-	uint32_t cur_placement;
+	uint32_t cur_placement = 0;
 
 	gobj = drm_gem_object_lookup(dev, filp, args->handle);
 	if (gobj == NULL) {
 		return -EINVAL;
 	}
 	robj = gobj->driver_private;
-	r = radeon_object_busy_domain(robj, &cur_placement);
+	r = radeon_bo_wait(robj, &cur_placement, true);
 	switch (cur_placement) {
 	case TTM_PL_VRAM:
 		args->domain = RADEON_GEM_DOMAIN_VRAM;
@@ -297,7 +299,7 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_radeon_gem_wait_idle *args = data;
 	struct drm_gem_object *gobj;
-	struct radeon_object *robj;
+	struct radeon_bo *robj;
 	int r;
 
 	gobj = drm_gem_object_lookup(dev, filp, args->handle);
@@ -305,10 +307,11 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 	robj = gobj->driver_private;
-	r = radeon_object_wait(robj);
+	r = radeon_bo_wait(robj, NULL, false);
 	mutex_lock(&dev->struct_mutex);
 	drm_gem_object_unreference(gobj);
 	mutex_unlock(&dev->struct_mutex);
+	radeon_hdp_flush(robj->rdev);
 	return r;
 }
 
@@ -317,7 +320,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_radeon_gem_set_tiling *args = data;
 	struct drm_gem_object *gobj;
-	struct radeon_object *robj;
+	struct radeon_bo *robj;
 	int r = 0;
 
 	DRM_DEBUG("%d \n", args->handle);
@@ -325,7 +328,7 @@ int radeon_gem_set_tiling_ioctl(struct drm_device *dev, void *data,
 	if (gobj == NULL)
 		return -EINVAL;
 	robj = gobj->driver_private;
-	radeon_object_set_tiling_flags(robj, args->tiling_flags, args->pitch);
+	r = radeon_bo_set_tiling_flags(robj, args->tiling_flags, args->pitch);
 	mutex_lock(&dev->struct_mutex);
 	drm_gem_object_unreference(gobj);
 	mutex_unlock(&dev->struct_mutex);
@@ -337,16 +340,19 @@ int radeon_gem_get_tiling_ioctl(struct drm_device *dev, void *data,
 {
 	struct drm_radeon_gem_get_tiling *args = data;
 	struct drm_gem_object *gobj;
-	struct radeon_object *robj;
+	struct radeon_bo *rbo;
 	int r = 0;
 
 	DRM_DEBUG("\n");
 	gobj = drm_gem_object_lookup(dev, filp, args->handle);
 	if (gobj == NULL)
 		return -EINVAL;
-	robj = gobj->driver_private;
-	radeon_object_get_tiling_flags(robj, &args->tiling_flags,
-				       &args->pitch);
+	rbo = gobj->driver_private;
+	r = radeon_bo_reserve(rbo, false);
+	if (unlikely(r != 0))
+		return r;
+	radeon_bo_get_tiling_flags(rbo, &args->tiling_flags, &args->pitch);
+	radeon_bo_unreserve(rbo);
 	mutex_lock(&dev->struct_mutex);
 	drm_gem_object_unreference(gobj);
 	mutex_unlock(&dev->struct_mutex);
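
Every GEM ioctl in this file follows the same shape: look up the handle, operate on gobj->driver_private (a struct radeon_bo after this commit), then drop the reference under struct_mutex. Schematically:

	gobj = drm_gem_object_lookup(dev, filp, args->handle);
	if (gobj == NULL)
		return -EINVAL;
	robj = gobj->driver_private;
	/* ... ioctl-specific work on robj ... */
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(gobj);
	mutex_unlock(&dev->struct_mutex);
	return r;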

+ 141 - 41
drivers/gpu/drm/radeon/radeon_i2c.c

@@ -59,35 +59,43 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
 }
 
 
-void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state)
+void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
 {
-	struct radeon_device *rdev = radeon_connector->base.dev->dev_private;
+	struct radeon_device *rdev = i2c->dev->dev_private;
+	struct radeon_i2c_bus_rec *rec = &i2c->rec;
 	uint32_t temp;
-	struct radeon_i2c_bus_rec *rec = &radeon_connector->ddc_bus->rec;
 
 	/* RV410 appears to have a bug where the hw i2c in reset
 	 * holds the i2c port in a bad state - switch hw i2c away before
 	 * doing DDC - do this for all r200s/r300s/r400s for safety sake
 	 */
-	if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) {
-		if (rec->a_clk_reg == RADEON_GPIO_MONID) {
-			WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
-						R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1)));
-		} else {
-			WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
-						R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3)));
+	if (rec->hw_capable) {
+		if ((rdev->family >= CHIP_R200) && !ASIC_IS_AVIVO(rdev)) {
+			if (rec->a_clk_reg == RADEON_GPIO_MONID) {
+				WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
+							       R200_DVI_I2C_PIN_SEL(R200_SEL_DDC1)));
+			} else {
+				WREG32(RADEON_DVI_I2C_CNTL_0, (RADEON_I2C_SOFT_RST |
+							       R200_DVI_I2C_PIN_SEL(R200_SEL_DDC3)));
+			}
 		}
 	}
-	if (lock_state) {
-		temp = RREG32(rec->a_clk_reg);
-		temp &= ~(rec->a_clk_mask);
-		WREG32(rec->a_clk_reg, temp);
-
-		temp = RREG32(rec->a_data_reg);
-		temp &= ~(rec->a_data_mask);
-		WREG32(rec->a_data_reg, temp);
-	}
 
+	/* clear the output pin values */
+	temp = RREG32(rec->a_clk_reg) & ~rec->a_clk_mask;
+	WREG32(rec->a_clk_reg, temp);
+
+	temp = RREG32(rec->a_data_reg) & ~rec->a_data_mask;
+	WREG32(rec->a_data_reg, temp);
+
+	/* set the pins to input */
+	temp = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
+	WREG32(rec->en_clk_reg, temp);
+
+	temp = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
+	WREG32(rec->en_data_reg, temp);
+
+	/* mask the gpio pins for software use */
 	temp = RREG32(rec->mask_clk_reg);
 	if (lock_state)
 		temp |= rec->mask_clk_mask;
@@ -112,8 +120,9 @@ static int get_clock(void *i2c_priv)
 	struct radeon_i2c_bus_rec *rec = &i2c->rec;
 	uint32_t val;
 
-	val = RREG32(rec->get_clk_reg);
-	val &= rec->get_clk_mask;
+	/* read the value off the pin */
+	val = RREG32(rec->y_clk_reg);
+	val &= rec->y_clk_mask;
 
 	return (val != 0);
 }
@@ -126,8 +135,10 @@ static int get_data(void *i2c_priv)
 	struct radeon_i2c_bus_rec *rec = &i2c->rec;
 	uint32_t val;
 
-	val = RREG32(rec->get_data_reg);
-	val &= rec->get_data_mask;
+	/* read the value off the pin */
+	val = RREG32(rec->y_data_reg);
+	val &= rec->y_data_mask;
+
 	return (val != 0);
 }
 
@@ -138,9 +149,10 @@ static void set_clock(void *i2c_priv, int clock)
 	struct radeon_i2c_bus_rec *rec = &i2c->rec;
 	uint32_t val;
 
-	val = RREG32(rec->put_clk_reg) & (uint32_t)~(rec->put_clk_mask);
-	val |= clock ? 0 : rec->put_clk_mask;
-	WREG32(rec->put_clk_reg, val);
+	/* set pin direction */
+	val = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
+	val |= clock ? 0 : rec->en_clk_mask;
+	WREG32(rec->en_clk_reg, val);
 }
 
 static void set_data(void *i2c_priv, int data)
@@ -150,14 +162,15 @@ static void set_data(void *i2c_priv, int data)
 	struct radeon_i2c_bus_rec *rec = &i2c->rec;
 	uint32_t val;
 
-	val = RREG32(rec->put_data_reg) & (uint32_t)~(rec->put_data_mask);
-	val |= data ? 0 : rec->put_data_mask;
-	WREG32(rec->put_data_reg, val);
+	/* set pin direction */
+	val = RREG32(rec->en_data_reg) & ~rec->en_data_mask;
+	val |= data ? 0 : rec->en_data_mask;
+	WREG32(rec->en_data_reg, val);
 }
 
 struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
-		struct radeon_i2c_bus_rec *rec,
-		const char *name)
+					  struct radeon_i2c_bus_rec *rec,
+					  const char *name)
 {
 	struct radeon_i2c_chan *i2c;
 	int ret;
@@ -167,20 +180,19 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
 		return NULL;
 
 	i2c->adapter.owner = THIS_MODULE;
-	i2c->adapter.algo_data = &i2c->algo;
 	i2c->dev = dev;
-	i2c->algo.setsda = set_data;
-	i2c->algo.setscl = set_clock;
-	i2c->algo.getsda = get_data;
-	i2c->algo.getscl = get_clock;
-	i2c->algo.udelay = 20;
+	i2c_set_adapdata(&i2c->adapter, i2c);
+	i2c->adapter.algo_data = &i2c->algo.bit;
+	i2c->algo.bit.setsda = set_data;
+	i2c->algo.bit.setscl = set_clock;
+	i2c->algo.bit.getsda = get_data;
+	i2c->algo.bit.getscl = get_clock;
+	i2c->algo.bit.udelay = 20;
 	/* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always
 	 * make this, 2 jiffies is a lot more reliable */
-	i2c->algo.timeout = 2;
-	i2c->algo.data = i2c;
+	i2c->algo.bit.timeout = 2;
+	i2c->algo.bit.data = i2c;
 	i2c->rec = *rec;
-	i2c_set_adapdata(&i2c->adapter, i2c);
-
 	ret = i2c_bit_add_bus(&i2c->adapter);
 	if (ret) {
 		DRM_INFO("Failed to register i2c %s\n", name);
@@ -194,6 +206,38 @@ out_free:
 
 }
 
+struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
+					     struct radeon_i2c_bus_rec *rec,
+					     const char *name)
+{
+	struct radeon_i2c_chan *i2c;
+	int ret;
+
+	i2c = kzalloc(sizeof(struct radeon_i2c_chan), GFP_KERNEL);
+	if (i2c == NULL)
+		return NULL;
+
+	i2c->rec = *rec;
+	i2c->adapter.owner = THIS_MODULE;
+	i2c->dev = dev;
+	i2c_set_adapdata(&i2c->adapter, i2c);
+	i2c->adapter.algo_data = &i2c->algo.dp;
+	i2c->algo.dp.aux_ch = radeon_dp_i2c_aux_ch;
+	i2c->algo.dp.address = 0;
+	ret = i2c_dp_aux_add_bus(&i2c->adapter);
+	if (ret) {
+		DRM_INFO("Failed to register i2c %s\n", name);
+		goto out_free;
+	}
+
+	return i2c;
+out_free:
+	kfree(i2c);
+	return NULL;
+
+}
+
+
 void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
 {
 	if (!i2c)
@@ -207,3 +251,59 @@ struct drm_encoder *radeon_best_encoder(struct drm_connector *connector)
 {
 	return NULL;
 }
+
+void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
+			    u8 slave_addr,
+			    u8 addr,
+			    u8 *val)
+{
+	u8 out_buf[2];
+	u8 in_buf[2];
+	struct i2c_msg msgs[] = {
+		{
+			.addr = slave_addr,
+			.flags = 0,
+			.len = 1,
+			.buf = out_buf,
+		},
+		{
+			.addr = slave_addr,
+			.flags = I2C_M_RD,
+			.len = 1,
+			.buf = in_buf,
+		}
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = 0;
+
+	if (i2c_transfer(&i2c_bus->adapter, msgs, 2) == 2) {
+		*val = in_buf[0];
+		DRM_DEBUG("val = 0x%02x\n", *val);
+	} else {
+		DRM_ERROR("i2c 0x%02x 0x%02x read failed\n",
+			  slave_addr, addr);
+	}
+}
+
+void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c_bus,
+			    u8 slave_addr,
+			    u8 addr,
+			    u8 val)
+{
+	uint8_t out_buf[2];
+	struct i2c_msg msg = {
+		.addr = slave_addr,
+		.flags = 0,
+		.len = 2,
+		.buf = out_buf,
+	};
+
+	out_buf[0] = addr;
+	out_buf[1] = val;
+
+	if (i2c_transfer(&i2c_bus->adapter, &msg, 1) != 1)
+		DRM_ERROR("i2c 0x%02x 0x%02x write failed\n",
+			  addr, val);
+}
+
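
The two new byte helpers wrap i2c_transfer() for register-style slaves. A hypothetical caller (the slave address 0x70, register 0x04 and value 0x1f are invented for illustration):

	static void example_i2c_roundtrip(struct radeon_i2c_chan *i2c_bus)
	{
		u8 val = 0;

		radeon_i2c_sw_put_byte(i2c_bus, 0x70, 0x04, 0x1f);
		radeon_i2c_sw_get_byte(i2c_bus, 0x70, 0x04, &val);
		/* a well-behaved slave reads back 0x1f here */
	}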

+ 58 - 3
drivers/gpu/drm/radeon/radeon_irq_kms.c

@@ -39,11 +39,32 @@ irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS)
 	return radeon_irq_process(rdev);
 }
 
+/*
+ * Handle hotplug events outside the interrupt handler proper.
+ */
+static void radeon_hotplug_work_func(struct work_struct *work)
+{
+	struct radeon_device *rdev = container_of(work, struct radeon_device,
+						  hotplug_work);
+	struct drm_device *dev = rdev->ddev;
+	struct drm_mode_config *mode_config = &dev->mode_config;
+	struct drm_connector *connector;
+
+	if (mode_config->num_connector) {
+		list_for_each_entry(connector, &mode_config->connector_list, head)
+			radeon_connector_hotplug(connector);
+	}
+	/* Just fire off a uevent and let userspace tell us what to do */
+	drm_sysfs_hotplug_event(dev);
+}
+
 void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
 {
 	struct radeon_device *rdev = dev->dev_private;
 	unsigned i;
 
+	INIT_WORK(&rdev->hotplug_work, radeon_hotplug_work_func);
+
 	/* Disable *all* interrupts */
 	rdev->irq.sw_int = false;
 	for (i = 0; i < 2; i++) {
@@ -87,17 +108,25 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
 
 	if (rdev->flags & RADEON_SINGLE_CRTC)
 		num_crtc = 1;
-
+	spin_lock_init(&rdev->irq.sw_lock);
 	r = drm_vblank_init(rdev->ddev, num_crtc);
 	if (r) {
 		return r;
 	}
 	/* enable msi */
 	rdev->msi_enabled = 0;
-	if (rdev->family >= CHIP_RV380) {
+	/* MSIs don't seem to work on my rs780;
+	 * not sure about rs880 or other rs780s.
+	 * Needs more investigation.
+	 */
+	if ((rdev->family >= CHIP_RV380) &&
+	    (rdev->family != CHIP_RS780) &&
+	    (rdev->family != CHIP_RS880)) {
 		int ret = pci_enable_msi(rdev->pdev);
-		if (!ret)
+		if (!ret) {
 			rdev->msi_enabled = 1;
+			DRM_INFO("radeon: using MSI.\n");
+		}
 	}
 	drm_irq_install(rdev->ddev);
 	rdev->irq.installed = true;
@@ -114,3 +143,29 @@ void radeon_irq_kms_fini(struct radeon_device *rdev)
 			pci_disable_msi(rdev->pdev);
 	}
 }
+
+void radeon_irq_kms_sw_irq_get(struct radeon_device *rdev)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
+	if (rdev->ddev->irq_enabled && (++rdev->irq.sw_refcount == 1)) {
+		rdev->irq.sw_int = true;
+		radeon_irq_set(rdev);
+	}
+	spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
+}
+
+void radeon_irq_kms_sw_irq_put(struct radeon_device *rdev)
+{
+	unsigned long irqflags;
+
+	spin_lock_irqsave(&rdev->irq.sw_lock, irqflags);
+	BUG_ON(rdev->ddev->irq_enabled && rdev->irq.sw_refcount <= 0);
+	if (rdev->ddev->irq_enabled && (--rdev->irq.sw_refcount == 0)) {
+		rdev->irq.sw_int = false;
+		radeon_irq_set(rdev);
+	}
+	spin_unlock_irqrestore(&rdev->irq.sw_lock, irqflags);
+}
+
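
The get/put pair refcounts the software interrupt under irq.sw_lock, so concurrent fence waiters cannot toggle sw_int underneath each other; only the first get enables it and only the last put disables it. The radeon_fence.c hunk earlier uses them exactly as a bracket:

	radeon_irq_kms_sw_irq_get(rdev);	/* first waiter enables sw_int */
	r = wait_event_timeout(rdev->fence_drv.queue,
			       radeon_fence_signaled(fence), timeout);
	radeon_irq_kms_sw_irq_put(rdev);	/* last waiter disables it */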

+ 20 - 22
drivers/gpu/drm/radeon/radeon_kms.c

@@ -30,10 +30,19 @@
 #include "radeon.h"
 #include "radeon_drm.h"
 
+int radeon_driver_unload_kms(struct drm_device *dev)
+{
+	struct radeon_device *rdev = dev->dev_private;
+
+	if (rdev == NULL)
+		return 0;
+	radeon_modeset_fini(rdev);
+	radeon_device_fini(rdev);
+	kfree(rdev);
+	dev->dev_private = NULL;
+	return 0;
+}
 
-/*
- * Driver load/unload
- */
 int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
 {
 	struct radeon_device *rdev;
@@ -62,31 +71,20 @@ int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags)
 	 */
 	r = radeon_device_init(rdev, dev, dev->pdev, flags);
 	if (r) {
-		DRM_ERROR("Fatal error while trying to initialize radeon.\n");
-		return r;
+		dev_err(&dev->pdev->dev, "Fatal error during GPU init\n");
+		goto out;
 	}
 	/* Again modeset_init should fail only on fatal error
 	 * otherwise it should provide enough functionalities
 	 * for shadowfb to run
 	 */
 	r = radeon_modeset_init(rdev);
-	if (r) {
-		return r;
-	}
-	return 0;
-}
-
-int radeon_driver_unload_kms(struct drm_device *dev)
-{
-	struct radeon_device *rdev = dev->dev_private;
-
-	if (rdev == NULL)
-		return 0;
-	radeon_modeset_fini(rdev);
-	radeon_device_fini(rdev);
-	kfree(rdev);
-	dev->dev_private = NULL;
-	return 0;
+	if (r)
+		dev_err(&dev->pdev->dev, "Fatal error during modeset init\n");
+out:
+	if (r)
+		radeon_driver_unload_kms(dev);
+	return r;
 }
 
 

+ 76 - 28
drivers/gpu/drm/radeon/radeon_legacy_crtc.c

@@ -30,6 +30,18 @@
 #include "radeon.h"
 #include "atom.h"
 
+static void radeon_overscan_setup(struct drm_crtc *crtc,
+				  struct drm_display_mode *mode)
+{
+	struct drm_device *dev = crtc->dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+	WREG32(RADEON_OVR_CLR + radeon_crtc->crtc_offset, 0);
+	WREG32(RADEON_OVR_WID_LEFT_RIGHT + radeon_crtc->crtc_offset, 0);
+	WREG32(RADEON_OVR_WID_TOP_BOTTOM + radeon_crtc->crtc_offset, 0);
+}
+
 static void radeon_legacy_rmx_mode_set(struct drm_crtc *crtc,
 				       struct drm_display_mode *mode,
 				       struct drm_display_mode *adjusted_mode)
@@ -292,8 +304,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
 	uint32_t mask;
 
 	if (radeon_crtc->crtc_id)
-		mask = (RADEON_CRTC2_EN |
-			RADEON_CRTC2_DISP_DIS |
+		mask = (RADEON_CRTC2_DISP_DIS |
 			RADEON_CRTC2_VSYNC_DIS |
 			RADEON_CRTC2_HSYNC_DIS |
 			RADEON_CRTC2_DISP_REQ_EN_B);
@@ -305,7 +316,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
 	switch (mode) {
 	case DRM_MODE_DPMS_ON:
 		if (radeon_crtc->crtc_id)
-			WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~mask);
+			WREG32_P(RADEON_CRTC2_GEN_CNTL, RADEON_CRTC2_EN, ~(RADEON_CRTC2_EN | mask));
 		else {
 			WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_EN, ~(RADEON_CRTC_EN |
 									 RADEON_CRTC_DISP_REQ_EN_B));
@@ -319,7 +330,7 @@ void radeon_crtc_dpms(struct drm_crtc *crtc, int mode)
 	case DRM_MODE_DPMS_OFF:
 		drm_vblank_pre_modeset(dev, radeon_crtc->crtc_id);
 		if (radeon_crtc->crtc_id)
-			WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~mask);
+			WREG32_P(RADEON_CRTC2_GEN_CNTL, mask, ~(RADEON_CRTC2_EN | mask));
 		else {
 			WREG32_P(RADEON_CRTC_GEN_CNTL, RADEON_CRTC_DISP_REQ_EN_B, ~(RADEON_CRTC_EN |
 										    RADEON_CRTC_DISP_REQ_EN_B));
@@ -400,14 +411,21 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 	struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
 	struct radeon_framebuffer *radeon_fb;
 	struct drm_gem_object *obj;
+	struct radeon_bo *rbo;
 	uint64_t base;
 	uint32_t crtc_offset, crtc_offset_cntl, crtc_tile_x0_y0 = 0;
 	uint32_t crtc_pitch, pitch_pixels;
 	uint32_t tiling_flags;
 	int format;
 	uint32_t gen_cntl_reg, gen_cntl_val;
+	int r;
 
 	DRM_DEBUG("\n");
+	/* no fb bound */
+	if (!crtc->fb) {
+		DRM_DEBUG("No FB bound\n");
+		return 0;
+	}
 
 	radeon_fb = to_radeon_framebuffer(crtc->fb);
 
@@ -431,10 +449,22 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 		return false;
 	}
 
+	/* Pin framebuffer & get tiling information */
 	obj = radeon_fb->obj;
-	if (radeon_gem_object_pin(obj, RADEON_GEM_DOMAIN_VRAM, &base)) {
+	rbo = obj->driver_private;
+	r = radeon_bo_reserve(rbo, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, &base);
+	if (unlikely(r != 0)) {
+		radeon_bo_unreserve(rbo);
 		return -EINVAL;
 	}
+	radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
+	radeon_bo_unreserve(rbo);
+	if (tiling_flags & RADEON_TILING_MICRO)
+		DRM_ERROR("trying to scanout microtiled buffer\n");
+
 	/* if scanout was in GTT this really wouldn't work */
 	/* crtc offset is from display base addr not FB location */
 	radeon_crtc->legacy_display_base_addr = rdev->mc.vram_location;
@@ -449,10 +479,6 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 		       (crtc->fb->bits_per_pixel * 8));
 	crtc_pitch |= crtc_pitch << 16;
 
-	radeon_object_get_tiling_flags(obj->driver_private,
-				       &tiling_flags, NULL);
-	if (tiling_flags & RADEON_TILING_MICRO)
-		DRM_ERROR("trying to scanout microtiled buffer\n");
 
 	if (tiling_flags & RADEON_TILING_MACRO) {
 		if (ASIC_IS_R300(rdev))
@@ -530,7 +556,12 @@ int radeon_crtc_set_base(struct drm_crtc *crtc, int x, int y,
 
 	if (old_fb && old_fb != crtc->fb) {
 		radeon_fb = to_radeon_framebuffer(old_fb);
-		radeon_gem_object_unpin(radeon_fb->obj);
+		rbo = radeon_fb->obj->driver_private;
+		r = radeon_bo_reserve(rbo, false);
+		if (unlikely(r != 0))
+			return r;
+		radeon_bo_unpin(rbo);
+		radeon_bo_unreserve(rbo);
 	}
 
 	/* Bytes per pixel may have changed */
@@ -642,12 +673,8 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
 		uint32_t crtc2_gen_cntl;
 		uint32_t disp2_merge_cntl;
 
-		/* check to see if TV DAC is enabled for another crtc and keep it enabled */
-		if (RREG32(RADEON_CRTC2_GEN_CNTL) & RADEON_CRTC2_CRT2_ON)
-			crtc2_gen_cntl = RADEON_CRTC2_CRT2_ON;
-		else
-			crtc2_gen_cntl = 0;
-
+		/* if TV DAC is enabled for another crtc, keep it enabled */
+		crtc2_gen_cntl = RREG32(RADEON_CRTC2_GEN_CNTL) & 0x00718080;
 		crtc2_gen_cntl |= ((format << 8)
 				   | RADEON_CRTC2_VSYNC_DIS
 				   | RADEON_CRTC2_HSYNC_DIS
@@ -676,7 +703,8 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
 		uint32_t crtc_ext_cntl;
 		uint32_t disp_merge_cntl;
 
-		crtc_gen_cntl = (RADEON_CRTC_EXT_DISP_EN
+		crtc_gen_cntl = RREG32(RADEON_CRTC_GEN_CNTL) & 0x00718000;
+		crtc_gen_cntl |= (RADEON_CRTC_EXT_DISP_EN
 				 | (format << 8)
 				 | RADEON_CRTC_DISP_REQ_EN_B
 				 | ((mode->flags & DRM_MODE_FLAG_DBLSCAN)
@@ -779,15 +807,17 @@ static void radeon_set_pll(struct drm_crtc *crtc, struct drm_display_mode *mode)
 			if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
 				pll_flags |= RADEON_PLL_NO_ODD_POST_DIV;
 			if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
-				struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
-				struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
-				if (lvds) {
-					if (lvds->use_bios_dividers) {
-						pll_ref_div = lvds->panel_ref_divider;
-						pll_fb_post_div   = (lvds->panel_fb_divider |
-								     (lvds->panel_post_divider << 16));
-						htotal_cntl  = 0;
-						use_bios_divs = true;
+				if (!rdev->is_atom_bios) {
+					struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+					struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
+					if (lvds) {
+						if (lvds->use_bios_dividers) {
+							pll_ref_div = lvds->panel_ref_divider;
+							pll_fb_post_div   = (lvds->panel_fb_divider |
+									     (lvds->panel_post_divider << 16));
+							htotal_cntl  = 0;
+							use_bios_divs = true;
+						}
 					}
 				}
 				pll_flags |= RADEON_PLL_USE_REF_DIV;
@@ -1027,6 +1057,7 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
 	radeon_crtc_set_base(crtc, x, y, old_fb);
 	radeon_set_crtc_timing(crtc, adjusted_mode);
 	radeon_set_pll(crtc, adjusted_mode);
+	radeon_overscan_setup(crtc, adjusted_mode);
 	if (radeon_crtc->crtc_id == 0) {
 		radeon_legacy_rmx_mode_set(crtc, mode, adjusted_mode);
 	} else {
@@ -1042,12 +1073,29 @@ static int radeon_crtc_mode_set(struct drm_crtc *crtc,
 
 static void radeon_crtc_prepare(struct drm_crtc *crtc)
 {
-	radeon_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+	struct drm_device *dev = crtc->dev;
+	struct drm_crtc *crtci;
+
+	/*
+	 * The hardware wedges sometimes if you reconfigure one CRTC
+	 * whilst another is running (see fdo bug #24611).
+	 */
+	list_for_each_entry(crtci, &dev->mode_config.crtc_list, head)
+		radeon_crtc_dpms(crtci, DRM_MODE_DPMS_OFF);
 }
 
 static void radeon_crtc_commit(struct drm_crtc *crtc)
 {
-	radeon_crtc_dpms(crtc, DRM_MODE_DPMS_ON);
+	struct drm_device *dev = crtc->dev;
+	struct drm_crtc *crtci;
+
+	/*
+	 * Re-enable the CRTCs that should be running.
+	 */
+	list_for_each_entry(crtci, &dev->mode_config.crtc_list, head) {
+		if (crtci->enabled)
+			radeon_crtc_dpms(crtci, DRM_MODE_DPMS_ON);
+	}
 }
 
 static const struct drm_crtc_helper_funcs legacy_helper_funcs = {

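For reference, the DPMS hunks above depend on the semantics of the radeon read-modify-write helper: the third argument of WREG32_P() selects the bits to preserve, and the second supplies new values for the rest. Its definition in the radeon tree is roughly:

#define WREG32_P(reg, val, mask)				\
	do {							\
		uint32_t tmp_ = RREG32(reg);			\
		tmp_ &= (mask);					\
		tmp_ |= ((val) & ~(mask));			\
		WREG32(reg, tmp_);				\
	} while (0)

With RADEON_CRTC2_EN dropped from mask, passing ~(RADEON_CRTC2_EN | mask) makes the enable bit writable again, so DPMS off actually clears it instead of preserving whatever was there.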
+ 66 - 59
drivers/gpu/drm/radeon/radeon_legacy_encoders.c

@@ -136,7 +136,14 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
 	lvds_pll_cntl &= ~RADEON_LVDS_PLL_EN;
 
 	lvds_ss_gen_cntl = RREG32(RADEON_LVDS_SS_GEN_CNTL);
-	if ((!rdev->is_atom_bios)) {
+	if (rdev->is_atom_bios) {
+		/* LVDS_GEN_CNTL parameters are computed in LVDSEncoderControl,
+		 * so we need to call it on resume to set up the reg properly.
+		 */
+		radeon_encoder->pixel_clock = adjusted_mode->clock;
+		atombios_digital_setup(encoder, PANEL_ENCODER_ACTION_ENABLE);
+		lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+	} else {
 		struct radeon_encoder_lvds *lvds = (struct radeon_encoder_lvds *)radeon_encoder->enc_priv;
 		if (lvds) {
 			DRM_DEBUG("bios LVDS_GEN_CNTL: 0x%x\n", lvds->lvds_gen_cntl);
@@ -147,8 +154,7 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
 					     (lvds->panel_blon_delay << RADEON_LVDS_PWRSEQ_DELAY2_SHIFT));
 		} else
 			lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
-	} else
-		lvds_gen_cntl = RREG32(RADEON_LVDS_GEN_CNTL);
+	}
 	lvds_gen_cntl |= RADEON_LVDS_DISPLAY_DIS;
 	lvds_gen_cntl &= ~(RADEON_LVDS_ON |
 			   RADEON_LVDS_BLON |
@@ -184,9 +190,9 @@ static void radeon_legacy_lvds_mode_set(struct drm_encoder *encoder,
 		radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
 }
 
-static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder,
-					  struct drm_display_mode *mode,
-					  struct drm_display_mode *adjusted_mode)
+static bool radeon_legacy_mode_fixup(struct drm_encoder *encoder,
+				     struct drm_display_mode *mode,
+				     struct drm_display_mode *adjusted_mode)
 {
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 
@@ -194,15 +200,22 @@ static bool radeon_legacy_lvds_mode_fixup(struct drm_encoder *encoder,
 	radeon_encoder_set_active_device(encoder);
 	drm_mode_set_crtcinfo(adjusted_mode, 0);
 
-	if (radeon_encoder->rmx_type != RMX_OFF)
-		radeon_rmx_mode_fixup(encoder, mode, adjusted_mode);
+	/* get the native mode for LVDS */
+	if (radeon_encoder->active_device & (ATOM_DEVICE_LCD_SUPPORT)) {
+		struct drm_display_mode *native_mode = &radeon_encoder->native_mode;
+		int mode_id = adjusted_mode->base.id;
+		*adjusted_mode = *native_mode;
+		adjusted_mode->hdisplay = mode->hdisplay;
+		adjusted_mode->vdisplay = mode->vdisplay;
+		adjusted_mode->base.id = mode_id;
+	}
 
 	return true;
 }
 
 static const struct drm_encoder_helper_funcs radeon_legacy_lvds_helper_funcs = {
 	.dpms = radeon_legacy_lvds_dpms,
-	.mode_fixup = radeon_legacy_lvds_mode_fixup,
+	.mode_fixup = radeon_legacy_mode_fixup,
 	.prepare = radeon_legacy_lvds_prepare,
 	.mode_set = radeon_legacy_lvds_mode_set,
 	.commit = radeon_legacy_lvds_commit,
@@ -214,17 +227,6 @@ static const struct drm_encoder_funcs radeon_legacy_lvds_enc_funcs = {
 	.destroy = radeon_enc_destroy,
 };
 
-static bool radeon_legacy_primary_dac_mode_fixup(struct drm_encoder *encoder,
-						 struct drm_display_mode *mode,
-						 struct drm_display_mode *adjusted_mode)
-{
-	/* set the active encoder to connector routing */
-	radeon_encoder_set_active_device(encoder);
-	drm_mode_set_crtcinfo(adjusted_mode, 0);
-
-	return true;
-}
-
 static void radeon_legacy_primary_dac_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct drm_device *dev = encoder->dev;
@@ -410,7 +412,7 @@ static enum drm_connector_status radeon_legacy_primary_dac_detect(struct drm_enc
 
 static const struct drm_encoder_helper_funcs radeon_legacy_primary_dac_helper_funcs = {
 	.dpms = radeon_legacy_primary_dac_dpms,
-	.mode_fixup = radeon_legacy_primary_dac_mode_fixup,
+	.mode_fixup = radeon_legacy_mode_fixup,
 	.prepare = radeon_legacy_primary_dac_prepare,
 	.mode_set = radeon_legacy_primary_dac_mode_set,
 	.commit = radeon_legacy_primary_dac_commit,
@@ -423,16 +425,6 @@ static const struct drm_encoder_funcs radeon_legacy_primary_dac_enc_funcs = {
 	.destroy = radeon_enc_destroy,
 };
 
-static bool radeon_legacy_tmds_int_mode_fixup(struct drm_encoder *encoder,
-					      struct drm_display_mode *mode,
-					      struct drm_display_mode *adjusted_mode)
-{
-
-	drm_mode_set_crtcinfo(adjusted_mode, 0);
-
-	return true;
-}
-
 static void radeon_legacy_tmds_int_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct drm_device *dev = encoder->dev;
@@ -584,7 +576,7 @@ static void radeon_legacy_tmds_int_mode_set(struct drm_encoder *encoder,
 
 static const struct drm_encoder_helper_funcs radeon_legacy_tmds_int_helper_funcs = {
 	.dpms = radeon_legacy_tmds_int_dpms,
-	.mode_fixup = radeon_legacy_tmds_int_mode_fixup,
+	.mode_fixup = radeon_legacy_mode_fixup,
 	.prepare = radeon_legacy_tmds_int_prepare,
 	.mode_set = radeon_legacy_tmds_int_mode_set,
 	.commit = radeon_legacy_tmds_int_commit,
@@ -596,17 +588,6 @@ static const struct drm_encoder_funcs radeon_legacy_tmds_int_enc_funcs = {
 	.destroy = radeon_enc_destroy,
 };
 
-static bool radeon_legacy_tmds_ext_mode_fixup(struct drm_encoder *encoder,
-					      struct drm_display_mode *mode,
-					      struct drm_display_mode *adjusted_mode)
-{
-	/* set the active encoder to connector routing */
-	radeon_encoder_set_active_device(encoder);
-	drm_mode_set_crtcinfo(adjusted_mode, 0);
-
-	return true;
-}
-
 static void radeon_legacy_tmds_ext_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct drm_device *dev = encoder->dev;
@@ -697,6 +678,8 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
 			/*if (mode->clock > 165000)
 			  fp2_gen_cntl |= R300_FP2_DVO_DUAL_CHANNEL_EN;*/
 		}
+		if (!radeon_combios_external_tmds_setup(encoder))
+			radeon_external_tmds_setup(encoder);
 	}
 
 	if (radeon_crtc->crtc_id == 0) {
@@ -724,9 +707,22 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder,
 		radeon_combios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
 }
 
+static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder)
+{
+	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+	struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv;
+	if (tmds) {
+		if (tmds->i2c_bus)
+			radeon_i2c_destroy(tmds->i2c_bus);
+	}
+	kfree(radeon_encoder->enc_priv);
+	drm_encoder_cleanup(encoder);
+	kfree(radeon_encoder);
+}
+
 static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs = {
 	.dpms = radeon_legacy_tmds_ext_dpms,
-	.mode_fixup = radeon_legacy_tmds_ext_mode_fixup,
+	.mode_fixup = radeon_legacy_mode_fixup,
 	.prepare = radeon_legacy_tmds_ext_prepare,
 	.mode_set = radeon_legacy_tmds_ext_mode_set,
 	.commit = radeon_legacy_tmds_ext_commit,
@@ -735,20 +731,9 @@ static const struct drm_encoder_helper_funcs radeon_legacy_tmds_ext_helper_funcs
 
 
 static const struct drm_encoder_funcs radeon_legacy_tmds_ext_enc_funcs = {
-	.destroy = radeon_enc_destroy,
+	.destroy = radeon_ext_tmds_enc_destroy,
 };
 
-static bool radeon_legacy_tv_dac_mode_fixup(struct drm_encoder *encoder,
-					    struct drm_display_mode *mode,
-					    struct drm_display_mode *adjusted_mode)
-{
-	/* set the active encoder to connector routing */
-	radeon_encoder_set_active_device(encoder);
-	drm_mode_set_crtcinfo(adjusted_mode, 0);
-
-	return true;
-}
-
 static void radeon_legacy_tv_dac_dpms(struct drm_encoder *encoder, int mode)
 {
 	struct drm_device *dev = encoder->dev;
@@ -1265,7 +1250,7 @@ static enum drm_connector_status radeon_legacy_tv_dac_detect(struct drm_encoder
 
 static const struct drm_encoder_helper_funcs radeon_legacy_tv_dac_helper_funcs = {
 	.dpms = radeon_legacy_tv_dac_dpms,
-	.mode_fixup = radeon_legacy_tv_dac_mode_fixup,
+	.mode_fixup = radeon_legacy_mode_fixup,
 	.prepare = radeon_legacy_tv_dac_prepare,
 	.mode_set = radeon_legacy_tv_dac_mode_set,
 	.commit = radeon_legacy_tv_dac_commit,
@@ -1302,6 +1287,29 @@ static struct radeon_encoder_int_tmds *radeon_legacy_get_tmds_info(struct radeon
 	return tmds;
 }
 
+static struct radeon_encoder_ext_tmds *radeon_legacy_get_ext_tmds_info(struct radeon_encoder *encoder)
+{
+	struct drm_device *dev = encoder->base.dev;
+	struct radeon_device *rdev = dev->dev_private;
+	struct radeon_encoder_ext_tmds *tmds = NULL;
+	bool ret;
+
+	if (rdev->is_atom_bios)
+		return NULL;
+
+	tmds = kzalloc(sizeof(struct radeon_encoder_ext_tmds), GFP_KERNEL);
+
+	if (!tmds)
+		return NULL;
+
+	ret = radeon_legacy_get_ext_tmds_info_from_combios(encoder, tmds);
+
+	if (ret == false)
+		radeon_legacy_get_ext_tmds_info_from_table(encoder, tmds);
+
+	return tmds;
+}
+
 void
 radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t supported_device)
 {
@@ -1329,7 +1337,6 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
 		encoder->possible_crtcs = 0x1;
 	else
 		encoder->possible_crtcs = 0x3;
-	encoder->possible_clones = 0;
 
 	radeon_encoder->enc_priv = NULL;
 
@@ -1373,7 +1380,7 @@ radeon_add_legacy_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t
 		drm_encoder_init(dev, encoder, &radeon_legacy_tmds_ext_enc_funcs, DRM_MODE_ENCODER_TMDS);
 		drm_encoder_helper_add(encoder, &radeon_legacy_tmds_ext_helper_funcs);
 		if (!rdev->is_atom_bios)
-			radeon_combios_get_ext_tmds_info(radeon_encoder);
+			radeon_encoder->enc_priv = radeon_legacy_get_ext_tmds_info(radeon_encoder);
 		break;
 	}
 }

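One behavioral note on the consolidated radeon_legacy_mode_fixup(): for active LCD outputs it overwrites the requested timings with the panel's native mode while keeping the requested hdisplay/vdisplay, which is what lets the RMX scaler stretch a smaller mode onto the fixed panel timing. An illustrative walk-through, assuming a 1400x1050 panel and a 1024x768 request:

/* adjusted_mode starts as a copy of the 1024x768 request */
*adjusted_mode = *native_mode;		  /* take 1400x1050 panel timings */
adjusted_mode->hdisplay = mode->hdisplay; /* 1024: RMX source width  */
adjusted_mode->vdisplay = mode->vdisplay; /* 768:  RMX source height */
adjusted_mode->base.id = mode_id;	  /* keep the original drm id */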
+ 129 - 20
drivers/gpu/drm/radeon/radeon_mode.h

@@ -33,6 +33,7 @@
 #include <drm_crtc.h>
 #include <drm_mode.h>
 #include <drm_edid.h>
+#include <drm_dp_helper.h>
 #include <linux/i2c.h>
 #include <linux/i2c-id.h>
 #include <linux/i2c-algo-bit.h>
@@ -89,24 +90,45 @@ enum radeon_tv_std {
 	TV_STD_PAL_CN,
 };
 
+/* radeon gpio-based i2c
+ * 1. "mask" reg and bits
+ *    grabs the gpio pins for software use
+ *    0=not held  1=held
+ * 2. "a" reg and bits
+ *    output pin value
+ *    0=low 1=high
+ * 3. "en" reg and bits
+ *    sets the pin direction
+ *    0=input 1=output
+ * 4. "y" reg and bits
+ *    input pin value
+ *    0=low 1=high
+ */
 struct radeon_i2c_bus_rec {
 	bool valid;
+	/* id used by atom */
+	uint8_t i2c_id;
+	/* can be used with hw i2c engine */
+	bool hw_capable;
+	/* uses multi-media i2c engine */
+	bool mm_i2c;
+	/* regs and bits */
 	uint32_t mask_clk_reg;
 	uint32_t mask_data_reg;
 	uint32_t a_clk_reg;
 	uint32_t a_data_reg;
-	uint32_t put_clk_reg;
-	uint32_t put_data_reg;
-	uint32_t get_clk_reg;
-	uint32_t get_data_reg;
+	uint32_t en_clk_reg;
+	uint32_t en_data_reg;
+	uint32_t y_clk_reg;
+	uint32_t y_data_reg;
 	uint32_t mask_clk_mask;
 	uint32_t mask_data_mask;
-	uint32_t put_clk_mask;
-	uint32_t put_data_mask;
-	uint32_t get_clk_mask;
-	uint32_t get_data_mask;
 	uint32_t a_clk_mask;
 	uint32_t a_data_mask;
+	uint32_t en_clk_mask;
+	uint32_t en_data_mask;
+	uint32_t y_clk_mask;
+	uint32_t y_data_mask;
 };
 
 struct radeon_tmds_pll {
@@ -150,9 +172,12 @@ struct radeon_pll {
 };
 
 struct radeon_i2c_chan {
-	struct drm_device *dev;
 	struct i2c_adapter adapter;
-	struct i2c_algo_bit_data algo;
+	struct drm_device *dev;
+	union {
+		struct i2c_algo_dp_aux_data dp;
+		struct i2c_algo_bit_data bit;
+	} algo;
 	struct radeon_i2c_bus_rec rec;
 };
 
@@ -170,6 +195,11 @@ enum radeon_connector_table {
 	CT_EMAC,
 };
 
+enum radeon_dvo_chip {
+	DVO_SIL164,
+	DVO_SIL1178,
+};
+
 struct radeon_mode_info {
 	struct atom_context *atom_context;
 	struct card_info *atom_card_info;
@@ -261,6 +291,13 @@ struct radeon_encoder_int_tmds {
 	struct radeon_tmds_pll tmds_pll[4];
 };
 
+struct radeon_encoder_ext_tmds {
+	/* tmds over dvo */
+	struct radeon_i2c_chan *i2c_bus;
+	uint8_t slave_addr;
+	enum radeon_dvo_chip dvo_chip;
+};
+
 /* spread spectrum */
 struct radeon_atom_ss {
 	uint16_t percentage;
@@ -302,6 +339,35 @@ struct radeon_encoder {
 struct radeon_connector_atom_dig {
 	uint32_t igp_lane_info;
 	bool linkb;
+	/* displayport */
+	struct radeon_i2c_chan *dp_i2c_bus;
+	u8 dpcd[8];
+	u8 dp_sink_type;
+	int dp_clock;
+	int dp_lane_count;
+};
+
+struct radeon_gpio_rec {
+	bool valid;
+	u8 id;
+	u32 reg;
+	u32 mask;
+};
+
+enum radeon_hpd_id {
+	RADEON_HPD_NONE = 0,
+	RADEON_HPD_1,
+	RADEON_HPD_2,
+	RADEON_HPD_3,
+	RADEON_HPD_4,
+	RADEON_HPD_5,
+	RADEON_HPD_6,
+};
+
+struct radeon_hpd {
+	enum radeon_hpd_id hpd;
+	u8 plugged_state;
+	struct radeon_gpio_rec gpio;
 };
 
 struct radeon_connector {
@@ -318,6 +384,7 @@ struct radeon_connector {
 	void *con_priv;
 	bool dac_load_detect;
 	uint16_t connector_object_id;
+	struct radeon_hpd hpd;
 };
 
 struct radeon_framebuffer {
@@ -325,10 +392,37 @@ struct radeon_framebuffer {
 	struct drm_gem_object *obj;
 };
 
+extern void radeon_connector_hotplug(struct drm_connector *connector);
+extern bool radeon_dp_needs_link_train(struct radeon_connector *radeon_connector);
+extern int radeon_dp_mode_valid_helper(struct radeon_connector *radeon_connector,
+				       struct drm_display_mode *mode);
+extern void radeon_dp_set_link_config(struct drm_connector *connector,
+				      struct drm_display_mode *mode);
+extern void dp_link_train(struct drm_encoder *encoder,
+			  struct drm_connector *connector);
+extern u8 radeon_dp_getsinktype(struct radeon_connector *radeon_connector);
+extern bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector);
+extern void atombios_dig_transmitter_setup(struct drm_encoder *encoder,
+					   int action, uint8_t lane_num,
+					   uint8_t lane_set);
+extern int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
+				uint8_t write_byte, uint8_t *read_byte);
+
+extern struct radeon_i2c_chan *radeon_i2c_create_dp(struct drm_device *dev,
+						    struct radeon_i2c_bus_rec *rec,
+						    const char *name);
 extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
 						 struct radeon_i2c_bus_rec *rec,
 						 const char *name);
 extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c);
+extern void radeon_i2c_sw_get_byte(struct radeon_i2c_chan *i2c_bus,
+				   u8 slave_addr,
+				   u8 addr,
+				   u8 *val);
+extern void radeon_i2c_sw_put_byte(struct radeon_i2c_chan *i2c,
+				   u8 slave_addr,
+				   u8 addr,
+				   u8 val);
 extern bool radeon_ddc_probe(struct radeon_connector *radeon_connector);
 extern int radeon_ddc_get_modes(struct radeon_connector *radeon_connector);
 
@@ -343,12 +437,24 @@ extern void radeon_compute_pll(struct radeon_pll *pll,
 			       uint32_t *post_div_p,
 			       int flags);
 
+extern void radeon_compute_pll_avivo(struct radeon_pll *pll,
+				     uint64_t freq,
+				     uint32_t *dot_clock_p,
+				     uint32_t *fb_div_p,
+				     uint32_t *frac_fb_div_p,
+				     uint32_t *ref_div_p,
+				     uint32_t *post_div_p,
+				     int flags);
+
+extern void radeon_setup_encoder_clones(struct drm_device *dev);
+
 struct drm_encoder *radeon_encoder_legacy_lvds_add(struct drm_device *dev, int bios_index);
 struct drm_encoder *radeon_encoder_legacy_primary_dac_add(struct drm_device *dev, int bios_index, int with_tv);
 struct drm_encoder *radeon_encoder_legacy_tv_dac_add(struct drm_device *dev, int bios_index, int with_tv);
 struct drm_encoder *radeon_encoder_legacy_tmds_int_add(struct drm_device *dev, int bios_index);
 struct drm_encoder *radeon_encoder_legacy_tmds_ext_add(struct drm_device *dev, int bios_index);
 extern void atombios_external_tmds_setup(struct drm_encoder *encoder, int action);
+extern void atombios_digital_setup(struct drm_encoder *encoder, int action);
 extern int atombios_get_encoder_mode(struct drm_encoder *encoder);
 extern void radeon_encoder_set_active_device(struct drm_encoder *encoder);
 
@@ -378,12 +484,16 @@ extern bool radeon_atom_get_clock_info(struct drm_device *dev);
 extern bool radeon_combios_get_clock_info(struct drm_device *dev);
 extern struct radeon_encoder_atom_dig *
 radeon_atombios_get_lvds_info(struct radeon_encoder *encoder);
-bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
-				   struct radeon_encoder_int_tmds *tmds);
-bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
-					   struct radeon_encoder_int_tmds *tmds);
-bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder,
-					    struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
+					  struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_legacy_get_tmds_info_from_combios(struct radeon_encoder *encoder,
+						     struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_legacy_get_tmds_info_from_table(struct radeon_encoder *encoder,
+						   struct radeon_encoder_int_tmds *tmds);
+extern bool radeon_legacy_get_ext_tmds_info_from_combios(struct radeon_encoder *encoder,
+							 struct radeon_encoder_ext_tmds *tmds);
+extern bool radeon_legacy_get_ext_tmds_info_from_table(struct radeon_encoder *encoder,
+						       struct radeon_encoder_ext_tmds *tmds);
 extern struct radeon_encoder_primary_dac *
 radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder);
 extern struct radeon_encoder_tv_dac *
@@ -395,6 +505,8 @@ extern struct radeon_encoder_tv_dac *
 radeon_combios_get_tv_dac_info(struct radeon_encoder *encoder);
 extern struct radeon_encoder_primary_dac *
 radeon_combios_get_primary_dac_info(struct radeon_encoder *encoder);
+extern bool radeon_combios_external_tmds_setup(struct drm_encoder *encoder);
+extern void radeon_external_tmds_setup(struct drm_encoder *encoder);
 extern void radeon_combios_output_lock(struct drm_encoder *encoder, bool lock);
 extern void radeon_combios_initialize_bios_scratch_regs(struct drm_device *dev);
 extern void radeon_atom_output_lock(struct drm_encoder *encoder, bool lock);
@@ -426,16 +538,13 @@ void radeon_atombios_init_crtc(struct drm_device *dev,
 			       struct radeon_crtc *radeon_crtc);
 void radeon_legacy_init_crtc(struct drm_device *dev,
 			     struct radeon_crtc *radeon_crtc);
-void radeon_i2c_do_lock(struct radeon_connector *radeon_connector, int lock_state);
+extern void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state);
 
 void radeon_get_clock_info(struct drm_device *dev);
 
 extern bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev);
 extern bool radeon_get_atom_connector_info_from_supported_devices_table(struct drm_device *dev);
 
-void radeon_rmx_mode_fixup(struct drm_encoder *encoder,
-			   struct drm_display_mode *mode,
-			   struct drm_display_mode *adjusted_mode);
 void radeon_enc_destroy(struct drm_encoder *encoder);
 void radeon_copy_fb(struct drm_device *dev, struct drm_gem_object *dst_obj);
 void radeon_combios_asic_init(struct drm_device *dev);

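The gpio-based i2c comment block at the top of this header describes an open-drain bit-bang scheme: a line is pulled low by programming the pin as an output driving 0, and released by reverting it to an input so the bus pull-up raises it. A hedged sketch of a set-clock callback built on the new en_* fields (hypothetical helper, not part of this diff):

static void example_set_clock(void *i2c_priv, int clock)
{
	struct radeon_i2c_chan *i2c = i2c_priv;
	struct radeon_device *rdev = i2c->dev->dev_private;
	struct radeon_i2c_bus_rec *rec = &i2c->rec;
	uint32_t val;

	/* en=1 drives the pin low (output), en=0 releases it (input) */
	val = RREG32(rec->en_clk_reg) & ~rec->en_clk_mask;
	val |= clock ? 0 : rec->en_clk_mask;
	WREG32(rec->en_clk_reg, val);
}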
+ 224 - 336
drivers/gpu/drm/radeon/radeon_object.c

@@ -34,74 +34,32 @@
 #include "radeon_drm.h"
 #include "radeon.h"
 
-struct radeon_object {
-	struct ttm_buffer_object	tobj;
-	struct list_head		list;
-	struct radeon_device		*rdev;
-	struct drm_gem_object		*gobj;
-	struct ttm_bo_kmap_obj		kmap;
-	unsigned			pin_count;
-	uint64_t			gpu_addr;
-	void				*kptr;
-	bool				is_iomem;
-	uint32_t			tiling_flags;
-	uint32_t			pitch;
-	int				surface_reg;
-};
 
 int radeon_ttm_init(struct radeon_device *rdev);
 void radeon_ttm_fini(struct radeon_device *rdev);
+static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
 
 /*
  * To exclude mutual BO access we rely on bo_reserve exclusion, as all
  * functions call it.
  */
 
-static int radeon_object_reserve(struct radeon_object *robj, bool interruptible)
+static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
 {
-	return ttm_bo_reserve(&robj->tobj, interruptible, false, false, 0);
-}
-
-static void radeon_object_unreserve(struct radeon_object *robj)
-{
-	ttm_bo_unreserve(&robj->tobj);
-}
-
-static void radeon_ttm_object_object_destroy(struct ttm_buffer_object *tobj)
-{
-	struct radeon_object *robj;
+	struct radeon_bo *bo;
 
-	robj = container_of(tobj, struct radeon_object, tobj);
-	list_del_init(&robj->list);
-	radeon_object_clear_surface_reg(robj);
-	kfree(robj);
+	bo = container_of(tbo, struct radeon_bo, tbo);
+	mutex_lock(&bo->rdev->gem.mutex);
+	list_del_init(&bo->list);
+	mutex_unlock(&bo->rdev->gem.mutex);
+	radeon_bo_clear_surface_reg(bo);
+	kfree(bo);
 }
 
-static inline void radeon_object_gpu_addr(struct radeon_object *robj)
+static inline u32 radeon_ttm_flags_from_domain(u32 domain)
 {
-	/* Default gpu address */
-	robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
-	if (robj->tobj.mem.mm_node == NULL) {
-		return;
-	}
-	robj->gpu_addr = ((u64)robj->tobj.mem.mm_node->start) << PAGE_SHIFT;
-	switch (robj->tobj.mem.mem_type) {
-	case TTM_PL_VRAM:
-		robj->gpu_addr += (u64)robj->rdev->mc.vram_location;
-		break;
-	case TTM_PL_TT:
-		robj->gpu_addr += (u64)robj->rdev->mc.gtt_location;
-		break;
-	default:
-		DRM_ERROR("Unknown placement %d\n", robj->tobj.mem.mem_type);
-		robj->gpu_addr = 0xFFFFFFFFFFFFFFFFULL;
-		return;
-	}
-}
+	u32 flags = 0;
 
-static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
-{
-	uint32_t flags = 0;
 	if (domain & RADEON_GEM_DOMAIN_VRAM) {
 		flags |= TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;
 	}
@@ -117,17 +75,32 @@ static inline uint32_t radeon_object_flags_from_domain(uint32_t domain)
 	return flags;
 }
 
-int radeon_object_create(struct radeon_device *rdev,
-			 struct drm_gem_object *gobj,
-			 unsigned long size,
-			 bool kernel,
-			 uint32_t domain,
-			 bool interruptible,
-			 struct radeon_object **robj_ptr)
+void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
+{
+	u32 c = 0;
+
+	rbo->placement.fpfn = 0;
+	rbo->placement.lpfn = 0;
+	rbo->placement.placement = rbo->placements;
+	rbo->placement.busy_placement = rbo->placements;
+	if (domain & RADEON_GEM_DOMAIN_VRAM)
+		rbo->placements[c++] = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED |
+					TTM_PL_FLAG_VRAM;
+	if (domain & RADEON_GEM_DOMAIN_GTT)
+		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+	if (domain & RADEON_GEM_DOMAIN_CPU)
+		rbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
+	rbo->placement.num_placement = c;
+	rbo->placement.num_busy_placement = c;
+}
+
+int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
+			unsigned long size, bool kernel, u32 domain,
+			struct radeon_bo **bo_ptr)
 {
-	struct radeon_object *robj;
+	struct radeon_bo *bo;
 	enum ttm_bo_type type;
-	uint32_t flags;
+	u32 flags;
 	int r;
 
 	if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
@@ -138,206 +111,125 @@ int radeon_object_create(struct radeon_device *rdev,
 	} else {
 		type = ttm_bo_type_device;
 	}
-	*robj_ptr = NULL;
-	robj = kzalloc(sizeof(struct radeon_object), GFP_KERNEL);
-	if (robj == NULL) {
+	*bo_ptr = NULL;
+	bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
+	if (bo == NULL)
 		return -ENOMEM;
-	}
-	robj->rdev = rdev;
-	robj->gobj = gobj;
-	robj->surface_reg = -1;
-	INIT_LIST_HEAD(&robj->list);
-
-	flags = radeon_object_flags_from_domain(domain);
-	r = ttm_buffer_object_init(&rdev->mman.bdev, &robj->tobj, size, type, flags,
-				   0, 0, false, NULL, size,
-				   &radeon_ttm_object_object_destroy);
+	bo->rdev = rdev;
+	bo->gobj = gobj;
+	bo->surface_reg = -1;
+	INIT_LIST_HEAD(&bo->list);
+
+	flags = radeon_ttm_flags_from_domain(domain);
+	/* Kernel allocations are uninterruptible */
+	r = ttm_buffer_object_init(&rdev->mman.bdev, &bo->tbo, size, type,
+					flags, 0, 0, !kernel, NULL, size,
+					&radeon_ttm_bo_destroy);
 	if (unlikely(r != 0)) {
-		/* ttm call radeon_ttm_object_object_destroy if error happen */
-		DRM_ERROR("Failed to allocate TTM object (%ld, 0x%08X, %u)\n",
-			  size, flags, 0);
+		if (r != -ERESTARTSYS)
+			dev_err(rdev->dev,
+				"object_init failed for (%ld, 0x%08X)\n",
+				size, flags);
 		return r;
 	}
-	*robj_ptr = robj;
+	*bo_ptr = bo;
 	if (gobj) {
-		list_add_tail(&robj->list, &rdev->gem.objects);
+		mutex_lock(&bo->rdev->gem.mutex);
+		list_add_tail(&bo->list, &rdev->gem.objects);
+		mutex_unlock(&bo->rdev->gem.mutex);
 	}
 	return 0;
 }
 
-int radeon_object_kmap(struct radeon_object *robj, void **ptr)
+int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
 {
+	bool is_iomem;
 	int r;
 
-	spin_lock(&robj->tobj.lock);
-	if (robj->kptr) {
+	if (bo->kptr) {
 		if (ptr) {
-			*ptr = robj->kptr;
+			*ptr = bo->kptr;
 		}
-		spin_unlock(&robj->tobj.lock);
 		return 0;
 	}
-	spin_unlock(&robj->tobj.lock);
-	r = ttm_bo_kmap(&robj->tobj, 0, robj->tobj.num_pages, &robj->kmap);
+	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
 	if (r) {
 		return r;
 	}
-	spin_lock(&robj->tobj.lock);
-	robj->kptr = ttm_kmap_obj_virtual(&robj->kmap, &robj->is_iomem);
-	spin_unlock(&robj->tobj.lock);
+	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
 	if (ptr) {
-		*ptr = robj->kptr;
+		*ptr = bo->kptr;
 	}
-	radeon_object_check_tiling(robj, 0, 0);
+	radeon_bo_check_tiling(bo, 0, 0);
 	return 0;
 }
 
-void radeon_object_kunmap(struct radeon_object *robj)
+void radeon_bo_kunmap(struct radeon_bo *bo)
 {
-	spin_lock(&robj->tobj.lock);
-	if (robj->kptr == NULL) {
-		spin_unlock(&robj->tobj.lock);
+	if (bo->kptr == NULL)
 		return;
-	}
-	robj->kptr = NULL;
-	spin_unlock(&robj->tobj.lock);
-	radeon_object_check_tiling(robj, 0, 0);
-	ttm_bo_kunmap(&robj->kmap);
+	bo->kptr = NULL;
+	radeon_bo_check_tiling(bo, 0, 0);
+	ttm_bo_kunmap(&bo->kmap);
 }
 
-void radeon_object_unref(struct radeon_object **robj)
+void radeon_bo_unref(struct radeon_bo **bo)
 {
-	struct ttm_buffer_object *tobj;
+	struct ttm_buffer_object *tbo;
 
-	if ((*robj) == NULL) {
+	if ((*bo) == NULL)
 		return;
-	}
-	tobj = &((*robj)->tobj);
-	ttm_bo_unref(&tobj);
-	if (tobj == NULL) {
-		*robj = NULL;
-	}
-}
-
-int radeon_object_mmap(struct radeon_object *robj, uint64_t *offset)
-{
-	*offset = robj->tobj.addr_space_offset;
-	return 0;
+	tbo = &((*bo)->tbo);
+	ttm_bo_unref(&tbo);
+	if (tbo == NULL)
+		*bo = NULL;
 }
 
-int radeon_object_pin(struct radeon_object *robj, uint32_t domain,
-		      uint64_t *gpu_addr)
+int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
 {
-	uint32_t flags;
-	uint32_t tmp;
-	int r;
+	int r, i;
 
-	flags = radeon_object_flags_from_domain(domain);
-	spin_lock(&robj->tobj.lock);
-	if (robj->pin_count) {
-		robj->pin_count++;
-		if (gpu_addr != NULL) {
-			*gpu_addr = robj->gpu_addr;
-		}
-		spin_unlock(&robj->tobj.lock);
+	radeon_ttm_placement_from_domain(bo, domain);
+	if (bo->pin_count) {
+		bo->pin_count++;
+		if (gpu_addr)
+			*gpu_addr = radeon_bo_gpu_offset(bo);
 		return 0;
 	}
-	spin_unlock(&robj->tobj.lock);
-	r = radeon_object_reserve(robj, false);
-	if (unlikely(r != 0)) {
-		DRM_ERROR("radeon: failed to reserve object for pinning it.\n");
-		return r;
-	}
-	tmp = robj->tobj.mem.placement;
-	ttm_flag_masked(&tmp, flags, TTM_PL_MASK_MEM);
-	robj->tobj.proposed_placement = tmp | TTM_PL_FLAG_NO_EVICT | TTM_PL_MASK_CACHING;
-	r = ttm_buffer_object_validate(&robj->tobj,
-				       robj->tobj.proposed_placement,
-				       false, false);
-	radeon_object_gpu_addr(robj);
-	if (gpu_addr != NULL) {
-		*gpu_addr = robj->gpu_addr;
-	}
-	robj->pin_count = 1;
-	if (unlikely(r != 0)) {
-		DRM_ERROR("radeon: failed to pin object.\n");
-	}
-	radeon_object_unreserve(robj);
+	radeon_ttm_placement_from_domain(bo, domain);
+	for (i = 0; i < bo->placement.num_placement; i++)
+		bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
+	r = ttm_buffer_object_validate(&bo->tbo, &bo->placement, false, false);
+	if (likely(r == 0)) {
+		bo->pin_count = 1;
+		if (gpu_addr != NULL)
+			*gpu_addr = radeon_bo_gpu_offset(bo);
+	}
+	if (unlikely(r != 0))
+		dev_err(bo->rdev->dev, "%p pin failed\n", bo);
 	return r;
 }
 
-void radeon_object_unpin(struct radeon_object *robj)
+int radeon_bo_unpin(struct radeon_bo *bo)
 {
-	uint32_t flags;
-	int r;
+	int r, i;
 
-	spin_lock(&robj->tobj.lock);
-	if (!robj->pin_count) {
-		spin_unlock(&robj->tobj.lock);
-		printk(KERN_WARNING "Unpin not necessary for %p !\n", robj);
-		return;
-	}
-	robj->pin_count--;
-	if (robj->pin_count) {
-		spin_unlock(&robj->tobj.lock);
-		return;
-	}
-	spin_unlock(&robj->tobj.lock);
-	r = radeon_object_reserve(robj, false);
-	if (unlikely(r != 0)) {
-		DRM_ERROR("radeon: failed to reserve object for unpinning it.\n");
-		return;
-	}
-	flags = robj->tobj.mem.placement;
-	robj->tobj.proposed_placement = flags & ~TTM_PL_FLAG_NO_EVICT;
-	r = ttm_buffer_object_validate(&robj->tobj,
-				       robj->tobj.proposed_placement,
-				       false, false);
-	if (unlikely(r != 0)) {
-		DRM_ERROR("radeon: failed to unpin buffer.\n");
-	}
-	radeon_object_unreserve(robj);
-}
-
-int radeon_object_wait(struct radeon_object *robj)
-{
-	int r = 0;
-
-	/* FIXME: should use block reservation instead */
-	r = radeon_object_reserve(robj, true);
-	if (unlikely(r != 0)) {
-		DRM_ERROR("radeon: failed to reserve object for waiting.\n");
-		return r;
-	}
-	spin_lock(&robj->tobj.lock);
-	if (robj->tobj.sync_obj) {
-		r = ttm_bo_wait(&robj->tobj, true, true, false);
-	}
-	spin_unlock(&robj->tobj.lock);
-	radeon_object_unreserve(robj);
-	return r;
-}
-
-int radeon_object_busy_domain(struct radeon_object *robj, uint32_t *cur_placement)
-{
-	int r = 0;
-
-	r = radeon_object_reserve(robj, true);
-	if (unlikely(r != 0)) {
-		DRM_ERROR("radeon: failed to reserve object for waiting.\n");
-		return r;
-	}
-	spin_lock(&robj->tobj.lock);
-	*cur_placement = robj->tobj.mem.mem_type;
-	if (robj->tobj.sync_obj) {
-		r = ttm_bo_wait(&robj->tobj, true, true, true);
+	if (!bo->pin_count) {
+		dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo);
+		return 0;
 	}
-	spin_unlock(&robj->tobj.lock);
-	radeon_object_unreserve(robj);
+	bo->pin_count--;
+	if (bo->pin_count)
+		return 0;
+	for (i = 0; i < bo->placement.num_placement; i++)
+		bo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;
+	r = ttm_buffer_object_validate(&bo->tbo, &bo->placement, false, false);
+	if (unlikely(r != 0))
+		dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo);
 	return r;
 }
 
-int radeon_object_evict_vram(struct radeon_device *rdev)
+int radeon_bo_evict_vram(struct radeon_device *rdev)
 {
 	if (rdev->flags & RADEON_IS_IGP) {
 		/* Useless to evict on IGP chips */
@@ -346,30 +238,32 @@ int radeon_object_evict_vram(struct radeon_device *rdev)
 	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
 }
 
-void radeon_object_force_delete(struct radeon_device *rdev)
+void radeon_bo_force_delete(struct radeon_device *rdev)
 {
-	struct radeon_object *robj, *n;
+	struct radeon_bo *bo, *n;
 	struct drm_gem_object *gobj;
 
 	if (list_empty(&rdev->gem.objects)) {
 		return;
 	}
-	DRM_ERROR("Userspace still has active objects !\n");
-	list_for_each_entry_safe(robj, n, &rdev->gem.objects, list) {
+	dev_err(rdev->dev, "Userspace still has active objects !\n");
+	list_for_each_entry_safe(bo, n, &rdev->gem.objects, list) {
 		mutex_lock(&rdev->ddev->struct_mutex);
-		gobj = robj->gobj;
-		DRM_ERROR("Force free for (%p,%p,%lu,%lu)\n",
-			  gobj, robj, (unsigned long)gobj->size,
-			  *((unsigned long *)&gobj->refcount));
-		list_del_init(&robj->list);
-		radeon_object_unref(&robj);
+		gobj = bo->gobj;
+		dev_err(rdev->dev, "%p %p %lu %lu force free\n",
+			gobj, bo, (unsigned long)gobj->size,
+			*((unsigned long *)&gobj->refcount));
+		mutex_lock(&bo->rdev->gem.mutex);
+		list_del_init(&bo->list);
+		mutex_unlock(&bo->rdev->gem.mutex);
+		radeon_bo_unref(&bo);
 		gobj->driver_private = NULL;
 		drm_gem_object_unreference(gobj);
 		mutex_unlock(&rdev->ddev->struct_mutex);
 	}
 }
 
-int radeon_object_init(struct radeon_device *rdev)
+int radeon_bo_init(struct radeon_device *rdev)
 {
 	/* Add an MTRR for the VRAM */
 	rdev->mc.vram_mtrr = mtrr_add(rdev->mc.aper_base, rdev->mc.aper_size,
@@ -382,13 +276,13 @@ int radeon_object_init(struct radeon_device *rdev)
 	return radeon_ttm_init(rdev);
 }
 
-void radeon_object_fini(struct radeon_device *rdev)
+void radeon_bo_fini(struct radeon_device *rdev)
 {
 	radeon_ttm_fini(rdev);
 }
 
-void radeon_object_list_add_object(struct radeon_object_list *lobj,
-				   struct list_head *head)
+void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
+				struct list_head *head)
 {
 	if (lobj->wdomain) {
 		list_add(&lobj->list, head);
@@ -397,72 +291,63 @@ void radeon_object_list_add_object(struct radeon_object_list *lobj,
 	}
 }
 
-int radeon_object_list_reserve(struct list_head *head)
+int radeon_bo_list_reserve(struct list_head *head)
 {
-	struct radeon_object_list *lobj;
+	struct radeon_bo_list *lobj;
 	int r;
 
 	list_for_each_entry(lobj, head, list){
-		if (!lobj->robj->pin_count) {
-			r = radeon_object_reserve(lobj->robj, true);
-			if (unlikely(r != 0)) {
-				DRM_ERROR("radeon: failed to reserve object.\n");
-				return r;
-			}
-		} else {
-		}
+		r = radeon_bo_reserve(lobj->bo, false);
+		if (unlikely(r != 0))
+			return r;
 	}
 	return 0;
 }
 
-void radeon_object_list_unreserve(struct list_head *head)
+void radeon_bo_list_unreserve(struct list_head *head)
 {
-	struct radeon_object_list *lobj;
+	struct radeon_bo_list *lobj;
 
 	list_for_each_entry(lobj, head, list) {
-		if (!lobj->robj->pin_count) {
-			radeon_object_unreserve(lobj->robj);
-		}
+		/* only unreserve objects we successfully reserved */
+		if (radeon_bo_is_reserved(lobj->bo))
+			radeon_bo_unreserve(lobj->bo);
 	}
 }
 
-int radeon_object_list_validate(struct list_head *head, void *fence)
+int radeon_bo_list_validate(struct list_head *head, void *fence)
 {
-	struct radeon_object_list *lobj;
-	struct radeon_object *robj;
+	struct radeon_bo_list *lobj;
+	struct radeon_bo *bo;
 	struct radeon_fence *old_fence = NULL;
 	int r;
 
-	r = radeon_object_list_reserve(head);
+	r = radeon_bo_list_reserve(head);
 	if (unlikely(r != 0)) {
-		radeon_object_list_unreserve(head);
 		return r;
 	}
 	list_for_each_entry(lobj, head, list) {
-		robj = lobj->robj;
-		if (!robj->pin_count) {
+		bo = lobj->bo;
+		if (!bo->pin_count) {
 			if (lobj->wdomain) {
-				robj->tobj.proposed_placement =
-					radeon_object_flags_from_domain(lobj->wdomain);
+				radeon_ttm_placement_from_domain(bo,
+								lobj->wdomain);
 			} else {
-				robj->tobj.proposed_placement =
-					radeon_object_flags_from_domain(lobj->rdomain);
+				radeon_ttm_placement_from_domain(bo,
+								lobj->rdomain);
 			}
-			r = ttm_buffer_object_validate(&robj->tobj,
-						       robj->tobj.proposed_placement,
-						       true, false);
-			if (unlikely(r)) {
-				DRM_ERROR("radeon: failed to validate.\n");
+			r = ttm_buffer_object_validate(&bo->tbo,
+						&bo->placement,
+						true, false);
+			if (unlikely(r))
 				return r;
-			}
-			radeon_object_gpu_addr(robj);
 		}
-		lobj->gpu_offset = robj->gpu_addr;
-		lobj->tiling_flags = robj->tiling_flags;
+		lobj->gpu_offset = radeon_bo_gpu_offset(bo);
+		lobj->tiling_flags = bo->tiling_flags;
 		if (fence) {
-			old_fence = (struct radeon_fence *)robj->tobj.sync_obj;
-			robj->tobj.sync_obj = radeon_fence_ref(fence);
-			robj->tobj.sync_obj_arg = NULL;
+			old_fence = (struct radeon_fence *)bo->tbo.sync_obj;
+			bo->tbo.sync_obj = radeon_fence_ref(fence);
+			bo->tbo.sync_obj_arg = NULL;
 		}
 		if (old_fence) {
 			radeon_fence_unref(&old_fence);
@@ -471,51 +356,44 @@ int radeon_object_list_validate(struct list_head *head, void *fence)
 	return 0;
 }
 
-void radeon_object_list_unvalidate(struct list_head *head)
+void radeon_bo_list_unvalidate(struct list_head *head, void *fence)
 {
-	struct radeon_object_list *lobj;
-	struct radeon_fence *old_fence = NULL;
+	struct radeon_bo_list *lobj;
+	struct radeon_fence *old_fence;
 
-	list_for_each_entry(lobj, head, list) {
-		old_fence = (struct radeon_fence *)lobj->robj->tobj.sync_obj;
-		lobj->robj->tobj.sync_obj = NULL;
-		if (old_fence) {
-			radeon_fence_unref(&old_fence);
+	if (fence)
+		list_for_each_entry(lobj, head, list) {
+			old_fence = to_radeon_fence(lobj->bo->tbo.sync_obj);
+			if (old_fence == fence) {
+				lobj->bo->tbo.sync_obj = NULL;
+				radeon_fence_unref(&old_fence);
+			}
 		}
-	}
-	radeon_object_list_unreserve(head);
-}
-
-void radeon_object_list_clean(struct list_head *head)
-{
-	radeon_object_list_unreserve(head);
+	radeon_bo_list_unreserve(head);
 }
 
-int radeon_object_fbdev_mmap(struct radeon_object *robj,
+int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
 			     struct vm_area_struct *vma)
 {
-	return ttm_fbdev_mmap(vma, &robj->tobj);
+	return ttm_fbdev_mmap(vma, &bo->tbo);
 }
 
-unsigned long radeon_object_size(struct radeon_object *robj)
+int radeon_bo_get_surface_reg(struct radeon_bo *bo)
 {
-	return robj->tobj.num_pages << PAGE_SHIFT;
-}
-
-int radeon_object_get_surface_reg(struct radeon_object *robj)
-{
-	struct radeon_device *rdev = robj->rdev;
+	struct radeon_device *rdev = bo->rdev;
 	struct radeon_surface_reg *reg;
-	struct radeon_object *old_object;
+	struct radeon_bo *old_object;
 	int steal;
 	int i;
 
-	if (!robj->tiling_flags)
+	BUG_ON(!atomic_read(&bo->tbo.reserved));
+
+	if (!bo->tiling_flags)
 		return 0;
 
-	if (robj->surface_reg >= 0) {
-		reg = &rdev->surface_regs[robj->surface_reg];
-		i = robj->surface_reg;
+	if (bo->surface_reg >= 0) {
+		reg = &rdev->surface_regs[bo->surface_reg];
+		i = bo->surface_reg;
 		goto out;
 	}
 
@@ -523,10 +401,10 @@ int radeon_object_get_surface_reg(struct radeon_object *robj)
 	for (i = 0; i < RADEON_GEM_MAX_SURFACES; i++) {
 
 		reg = &rdev->surface_regs[i];
-		if (!reg->robj)
+		if (!reg->bo)
 			break;
 
-		old_object = reg->robj;
+		old_object = reg->bo;
 		if (old_object->pin_count == 0)
 			steal = i;
 	}
@@ -537,91 +415,101 @@ int radeon_object_get_surface_reg(struct radeon_object *robj)
 			return -ENOMEM;
 		/* find someone with a surface reg and nuke their BO */
 		reg = &rdev->surface_regs[steal];
-		old_object = reg->robj;
+		old_object = reg->bo;
 		/* blow away the mapping */
 		DRM_DEBUG("stealing surface reg %d from %p\n", steal, old_object);
-		ttm_bo_unmap_virtual(&old_object->tobj);
+		ttm_bo_unmap_virtual(&old_object->tbo);
 		old_object->surface_reg = -1;
 		i = steal;
 	}
 
-	robj->surface_reg = i;
-	reg->robj = robj;
+	bo->surface_reg = i;
+	reg->bo = bo;
 
 out:
-	radeon_set_surface_reg(rdev, i, robj->tiling_flags, robj->pitch,
-			       robj->tobj.mem.mm_node->start << PAGE_SHIFT,
-			       robj->tobj.num_pages << PAGE_SHIFT);
+	radeon_set_surface_reg(rdev, i, bo->tiling_flags, bo->pitch,
+			       bo->tbo.mem.mm_node->start << PAGE_SHIFT,
+			       bo->tbo.num_pages << PAGE_SHIFT);
 	return 0;
 }
 
-void radeon_object_clear_surface_reg(struct radeon_object *robj)
+static void radeon_bo_clear_surface_reg(struct radeon_bo *bo)
 {
-	struct radeon_device *rdev = robj->rdev;
+	struct radeon_device *rdev = bo->rdev;
 	struct radeon_surface_reg *reg;
 
-	if (robj->surface_reg == -1)
+	if (bo->surface_reg == -1)
 		return;
 
-	reg = &rdev->surface_regs[robj->surface_reg];
-	radeon_clear_surface_reg(rdev, robj->surface_reg);
+	reg = &rdev->surface_regs[bo->surface_reg];
+	radeon_clear_surface_reg(rdev, bo->surface_reg);
 
-	reg->robj = NULL;
-	robj->surface_reg = -1;
+	reg->bo = NULL;
+	bo->surface_reg = -1;
 }
 
-void radeon_object_set_tiling_flags(struct radeon_object *robj,
-				    uint32_t tiling_flags, uint32_t pitch)
+int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
+				uint32_t tiling_flags, uint32_t pitch)
 {
-	robj->tiling_flags = tiling_flags;
-	robj->pitch = pitch;
+	int r;
+
+	r = radeon_bo_reserve(bo, false);
+	if (unlikely(r != 0))
+		return r;
+	bo->tiling_flags = tiling_flags;
+	bo->pitch = pitch;
+	radeon_bo_unreserve(bo);
+	return 0;
 }
 
-void radeon_object_get_tiling_flags(struct radeon_object *robj,
-				    uint32_t *tiling_flags,
-				    uint32_t *pitch)
+void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
+				uint32_t *tiling_flags,
+				uint32_t *pitch)
 {
+	BUG_ON(!atomic_read(&bo->tbo.reserved));
 	if (tiling_flags)
-		*tiling_flags = robj->tiling_flags;
+		*tiling_flags = bo->tiling_flags;
 	if (pitch)
-		*pitch = robj->pitch;
+		*pitch = bo->pitch;
 }
 
-int radeon_object_check_tiling(struct radeon_object *robj, bool has_moved,
-			       bool force_drop)
+int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
+				bool force_drop)
 {
-	if (!(robj->tiling_flags & RADEON_TILING_SURFACE))
+	BUG_ON(!atomic_read(&bo->tbo.reserved));
+
+	if (!(bo->tiling_flags & RADEON_TILING_SURFACE))
 		return 0;
 
 	if (force_drop) {
-		radeon_object_clear_surface_reg(robj);
+		radeon_bo_clear_surface_reg(bo);
 		return 0;
 	}
 
-	if (robj->tobj.mem.mem_type != TTM_PL_VRAM) {
+	if (bo->tbo.mem.mem_type != TTM_PL_VRAM) {
 		if (!has_moved)
 			return 0;
 
-		if (robj->surface_reg >= 0)
-			radeon_object_clear_surface_reg(robj);
+		if (bo->surface_reg >= 0)
+			radeon_bo_clear_surface_reg(bo);
 		return 0;
 	}
 
-	if ((robj->surface_reg >= 0) && !has_moved)
+	if ((bo->surface_reg >= 0) && !has_moved)
 		return 0;
 
-	return radeon_object_get_surface_reg(robj);
+	return radeon_bo_get_surface_reg(bo);
 }
 
 void radeon_bo_move_notify(struct ttm_buffer_object *bo,
-			  struct ttm_mem_reg *mem)
+				struct ttm_mem_reg *mem)
 {
-	struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);
-	radeon_object_check_tiling(robj, 0, 1);
+	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+	radeon_bo_check_tiling(rbo, 0, 1);
 }
 
 void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
-	struct radeon_object *robj = container_of(bo, struct radeon_object, tobj);
-	radeon_object_check_tiling(robj, 0, 0);
+	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
+	radeon_bo_check_tiling(rbo, 0, 0);
 }

+ 145 - 12
drivers/gpu/drm/radeon/radeon_object.h

@@ -28,19 +28,152 @@
 #ifndef __RADEON_OBJECT_H__
 #define __RADEON_OBJECT_H__
 
-#include <ttm/ttm_bo_api.h>
-#include <ttm/ttm_bo_driver.h>
-#include <ttm/ttm_placement.h>
-#include <ttm/ttm_module.h>
+#include <drm/radeon_drm.h>
+#include "radeon.h"
 
-/*
- * TTM.
+/**
+ * radeon_mem_type_to_domain - return domain corresponding to mem_type
+ * @mem_type:	ttm memory type
+ *
+ * Returns corresponding domain of the ttm mem_type
+ */
+static inline unsigned radeon_mem_type_to_domain(u32 mem_type)
+{
+	switch (mem_type) {
+	case TTM_PL_VRAM:
+		return RADEON_GEM_DOMAIN_VRAM;
+	case TTM_PL_TT:
+		return RADEON_GEM_DOMAIN_GTT;
+	case TTM_PL_SYSTEM:
+		return RADEON_GEM_DOMAIN_CPU;
+	default:
+		break;
+	}
+	return 0;
+}
+
+/**
+ * radeon_bo_reserve - reserve bo
+ * @bo:		bo structure
+ * @no_wait:		don't sleep while trying to reserve (return -EBUSY)
+ *
+ * Returns:
+ * -EBUSY: buffer is busy and @no_wait is true
+ * -ERESTART: A wait for the buffer to become unreserved was interrupted by
+ * a signal. Release all buffer reservations and return to user-space.
  */
-struct radeon_mman {
-	struct ttm_bo_global_ref        bo_global_ref;
-	struct ttm_global_reference	mem_global_ref;
-	bool				mem_global_referenced;
-	struct ttm_bo_device		bdev;
-};
+static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_wait)
+{
+	int r;
+
+retry:
+	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+	if (unlikely(r != 0)) {
+		if (r == -ERESTART)
+			goto retry;
+		dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
+		return r;
+	}
+	return 0;
+}
+
+static inline void radeon_bo_unreserve(struct radeon_bo *bo)
+{
+	ttm_bo_unreserve(&bo->tbo);
+}
+
+/**
+ * radeon_bo_gpu_offset - return GPU offset of bo
+ * @bo:	radeon object for which we query the offset
+ *
+ * Returns current GPU offset of the object.
+ *
+ * Note: the object should be either pinned or reserved when calling this
+ * function; it might be useful to add a check for this when debugging.
+ */
+static inline u64 radeon_bo_gpu_offset(struct radeon_bo *bo)
+{
+	return bo->tbo.offset;
+}
+
+static inline unsigned long radeon_bo_size(struct radeon_bo *bo)
+{
+	return bo->tbo.num_pages << PAGE_SHIFT;
+}
+
+static inline bool radeon_bo_is_reserved(struct radeon_bo *bo)
+{
+	return !!atomic_read(&bo->tbo.reserved);
+}
+
+/**
+ * radeon_bo_mmap_offset - return mmap offset of bo
+ * @bo:	radeon object for which we query the offset
+ *
+ * Returns mmap offset of the object.
+ *
+ * Note: addr_space_offset is constant after ttm bo init thus isn't protected
+ * by any lock.
+ */
+static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo)
+{
+	return bo->tbo.addr_space_offset;
+}
+
+static inline int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type,
+					bool no_wait)
+{
+	int r;
+
+retry:
+	r = ttm_bo_reserve(&bo->tbo, true, no_wait, false, 0);
+	if (unlikely(r != 0)) {
+		if (r == -ERESTART)
+			goto retry;
+		dev_err(bo->rdev->dev, "%p reserve failed for wait\n", bo);
+		return r;
+	}
+	spin_lock(&bo->tbo.lock);
+	if (mem_type)
+		*mem_type = bo->tbo.mem.mem_type;
+	if (bo->tbo.sync_obj)
+		r = ttm_bo_wait(&bo->tbo, true, true, no_wait);
+	spin_unlock(&bo->tbo.lock);
+	ttm_bo_unreserve(&bo->tbo);
+	if (unlikely(r == -ERESTART))
+		goto retry;
+	return r;
+}
 
+extern int radeon_bo_create(struct radeon_device *rdev,
+				struct drm_gem_object *gobj, unsigned long size,
+				bool kernel, u32 domain,
+				struct radeon_bo **bo_ptr);
+extern int radeon_bo_kmap(struct radeon_bo *bo, void **ptr);
+extern void radeon_bo_kunmap(struct radeon_bo *bo);
+extern void radeon_bo_unref(struct radeon_bo **bo);
+extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr);
+extern int radeon_bo_unpin(struct radeon_bo *bo);
+extern int radeon_bo_evict_vram(struct radeon_device *rdev);
+extern void radeon_bo_force_delete(struct radeon_device *rdev);
+extern int radeon_bo_init(struct radeon_device *rdev);
+extern void radeon_bo_fini(struct radeon_device *rdev);
+extern void radeon_bo_list_add_object(struct radeon_bo_list *lobj,
+				struct list_head *head);
+extern int radeon_bo_list_reserve(struct list_head *head);
+extern void radeon_bo_list_unreserve(struct list_head *head);
+extern int radeon_bo_list_validate(struct list_head *head, void *fence);
+extern void radeon_bo_list_unvalidate(struct list_head *head, void *fence);
+extern int radeon_bo_fbdev_mmap(struct radeon_bo *bo,
+				struct vm_area_struct *vma);
+extern int radeon_bo_set_tiling_flags(struct radeon_bo *bo,
+				u32 tiling_flags, u32 pitch);
+extern void radeon_bo_get_tiling_flags(struct radeon_bo *bo,
+				u32 *tiling_flags, u32 *pitch);
+extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved,
+				bool force_drop);
+extern void radeon_bo_move_notify(struct ttm_buffer_object *bo,
+					struct ttm_mem_reg *mem);
+extern void radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
+extern int radeon_bo_get_surface_reg(struct radeon_bo *bo);
 #endif

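Because radeon_ttm_placement_from_domain() fills the placements array in VRAM, GTT, CPU order and TTM tries the entries in order, a multi-domain request is effectively a preference list. A hedged usage sketch:

struct radeon_bo *bo;
int r;

/* prefers VRAM, falls back to GTT under memory pressure */
r = radeon_bo_create(rdev, NULL, 1024 * 1024, true,
		     RADEON_GEM_DOMAIN_VRAM | RADEON_GEM_DOMAIN_GTT, &bo);
if (r)
	return r;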
+ 3 - 3
drivers/gpu/drm/radeon/radeon_pm.c

@@ -27,7 +27,7 @@ int radeon_debugfs_pm_init(struct radeon_device *rdev);
 int radeon_pm_init(struct radeon_device *rdev)
 {
 	if (radeon_debugfs_pm_init(rdev)) {
-		DRM_ERROR("Failed to register debugfs file for CP !\n");
+		DRM_ERROR("Failed to register debugfs file for PM!\n");
 	}
 
 	return 0;
@@ -44,8 +44,8 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data)
 	struct drm_device *dev = node->minor->dev;
 	struct radeon_device *rdev = dev->dev_private;
 
-	seq_printf(m, "engine clock: %u0 Hz\n", radeon_get_engine_clock(rdev));
-	seq_printf(m, "memory clock: %u0 Hz\n", radeon_get_memory_clock(rdev));
+	seq_printf(m, "engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));
+	seq_printf(m, "memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev));
 
 	return 0;
 }

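The unit fix above leans on a formatting trick worth spelling out: the clock getters report in units of 10 kHz (an assumption consistent with the rest of the driver), so printing the raw value with a literal trailing zero renders kHz without any arithmetic:

/* radeon_get_engine_clock() returning 40000 (10 kHz units) prints as
 * "engine clock: 400000 kHz", i.e. 400 MHz */
seq_printf(m, "engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev));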
+ 39 - 21
drivers/gpu/drm/radeon/radeon_reg.h

@@ -887,6 +887,7 @@
 #       define RADEON_FP_PANEL_FORMAT          (1 <<  3)
 #       define RADEON_FP_EN_TMDS               (1 <<  7)
 #       define RADEON_FP_DETECT_SENSE          (1 <<  8)
+#       define RADEON_FP_DETECT_INT_POL        (1 <<  9)
 #       define R200_FP_SOURCE_SEL_MASK         (3 <<  10)
 #       define R200_FP_SOURCE_SEL_CRTC1        (0 <<  10)
 #       define R200_FP_SOURCE_SEL_CRTC2        (1 <<  10)
@@ -894,6 +895,7 @@
 #       define R200_FP_SOURCE_SEL_TRANS        (3 <<  10)
 #       define RADEON_FP_SEL_CRTC1             (0 << 13)
 #       define RADEON_FP_SEL_CRTC2             (1 << 13)
+#       define R300_HPD_SEL(x)                 ((x) << 13)
 #       define RADEON_FP_CRTC_DONT_SHADOW_HPAR (1 << 15)
 #       define RADEON_FP_CRTC_DONT_SHADOW_VPAR (1 << 16)
 #       define RADEON_FP_CRTC_DONT_SHADOW_HEND (1 << 17)
@@ -909,6 +911,7 @@
 #       define RADEON_FP2_ON                   (1 <<  2)
 #       define RADEON_FP2_PANEL_FORMAT         (1 <<  3)
 #       define RADEON_FP2_DETECT_SENSE         (1 <<  8)
+#       define RADEON_FP2_DETECT_INT_POL       (1 <<  9)
 #       define R200_FP2_SOURCE_SEL_MASK        (3 << 10)
 #       define R200_FP2_SOURCE_SEL_CRTC1       (0 << 10)
 #       define R200_FP2_SOURCE_SEL_CRTC2       (1 << 10)
@@ -988,14 +991,20 @@
 
 #define RADEON_GEN_INT_CNTL                 0x0040
 #	define RADEON_CRTC_VBLANK_MASK		(1 << 0)
+#	define RADEON_FP_DETECT_MASK		(1 << 4)
 #	define RADEON_CRTC2_VBLANK_MASK		(1 << 9)
+#	define RADEON_FP2_DETECT_MASK		(1 << 10)
 #	define RADEON_SW_INT_ENABLE		(1 << 25)
 #define RADEON_GEN_INT_STATUS               0x0044
 #	define AVIVO_DISPLAY_INT_STATUS		(1 << 0)
 #	define RADEON_CRTC_VBLANK_STAT		(1 << 0)
 #	define RADEON_CRTC_VBLANK_STAT_ACK	(1 << 0)
+#	define RADEON_FP_DETECT_STAT		(1 << 4)
+#	define RADEON_FP_DETECT_STAT_ACK	(1 << 4)
 #	define RADEON_CRTC2_VBLANK_STAT		(1 << 9)
 #	define RADEON_CRTC2_VBLANK_STAT_ACK	(1 << 9)
+#	define RADEON_FP2_DETECT_STAT		(1 << 10)
+#	define RADEON_FP2_DETECT_STAT_ACK	(1 << 10)
 #	define RADEON_SW_INT_FIRE		(1 << 26)
 #	define RADEON_SW_INT_TEST		(1 << 25)
 #	define RADEON_SW_INT_TEST_ACK		(1 << 25)
@@ -1051,20 +1060,25 @@
 
        /* Multimedia I2C bus */
 #define RADEON_I2C_CNTL_0		    0x0090
-#define RADEON_I2C_DONE (1<<0)
-#define RADEON_I2C_NACK (1<<1)
-#define RADEON_I2C_HALT (1<<2)
-#define RADEON_I2C_SOFT_RST (1<<5)
-#define RADEON_I2C_DRIVE_EN (1<<6)
-#define RADEON_I2C_DRIVE_SEL (1<<7)
-#define RADEON_I2C_START (1<<8)
-#define RADEON_I2C_STOP (1<<9)
-#define RADEON_I2C_RECEIVE (1<<10)
-#define RADEON_I2C_ABORT (1<<11)
-#define RADEON_I2C_GO (1<<12)
+#define RADEON_I2C_DONE                     (1 << 0)
+#define RADEON_I2C_NACK                     (1 << 1)
+#define RADEON_I2C_HALT                     (1 << 2)
+#define RADEON_I2C_SOFT_RST                 (1 << 5)
+#define RADEON_I2C_DRIVE_EN                 (1 << 6)
+#define RADEON_I2C_DRIVE_SEL                (1 << 7)
+#define RADEON_I2C_START                    (1 << 8)
+#define RADEON_I2C_STOP                     (1 << 9)
+#define RADEON_I2C_RECEIVE                  (1 << 10)
+#define RADEON_I2C_ABORT                    (1 << 11)
+#define RADEON_I2C_GO                       (1 << 12)
+#define RADEON_I2C_PRESCALE_SHIFT           16
 #define RADEON_I2C_CNTL_1                   0x0094
-#define RADEON_I2C_SEL         (1<<16)
-#define RADEON_I2C_EN          (1<<17)
+#define RADEON_I2C_DATA_COUNT_SHIFT         0
+#define RADEON_I2C_ADDR_COUNT_SHIFT         4
+#define RADEON_I2C_INTRA_BYTE_DELAY_SHIFT   8
+#define RADEON_I2C_SEL                      (1 << 16)
+#define RADEON_I2C_EN                       (1 << 17)
+#define RADEON_I2C_TIME_LIMIT_SHIFT         24
 #define RADEON_I2C_DATA			    0x0098
 
 #define RADEON_DVI_I2C_CNTL_0		    0x02e0
@@ -1072,7 +1086,7 @@
 #       define R200_SEL_DDC1                0 /* 0x60 - VGA_DDC */
 #       define R200_SEL_DDC2                1 /* 0x64 - DVI_DDC */
 #       define R200_SEL_DDC3                2 /* 0x68 - MONID_DDC */
-#define RADEON_DVI_I2C_CNTL_1               0x02e4 /* ? */
+#define RADEON_DVI_I2C_CNTL_1               0x02e4
 #define RADEON_DVI_I2C_DATA		    0x02e8
 
 #define RADEON_INTERRUPT_LINE               0x0f3c /* PCI */
@@ -1143,15 +1157,16 @@
 #       define RADEON_IO_MCLK_MAX_DYN_STOP_LAT (1 << 13)
 #       define RADEON_MC_MCLK_DYN_ENABLE    (1 << 14)
 #       define RADEON_IO_MCLK_DYN_ENABLE    (1 << 15)
-#define RADEON_LCD_GPIO_MASK                0x01a0
-#define RADEON_GPIOPAD_EN                   0x01a0
-#define RADEON_LCD_GPIO_Y_REG               0x01a4
-#define RADEON_MDGPIO_A_REG                 0x01ac
-#define RADEON_MDGPIO_EN_REG                0x01b0
-#define RADEON_MDGPIO_MASK                  0x0198
+
 #define RADEON_GPIOPAD_MASK                 0x0198
 #define RADEON_GPIOPAD_A		    0x019c
-#define RADEON_MDGPIO_Y_REG                 0x01b4
+#define RADEON_GPIOPAD_EN                   0x01a0
+#define RADEON_GPIOPAD_Y                    0x01a4
+#define RADEON_MDGPIO_MASK                  0x01a8
+#define RADEON_MDGPIO_A                     0x01ac
+#define RADEON_MDGPIO_EN                    0x01b0
+#define RADEON_MDGPIO_Y                     0x01b4
+
 #define RADEON_MEM_ADDR_CONFIG              0x0148
 #define RADEON_MEM_BASE                     0x0f10 /* PCI */
 #define RADEON_MEM_CNTL                     0x0140
@@ -1360,6 +1375,9 @@
 #define RADEON_OVR_CLR                      0x0230
 #define RADEON_OVR_WID_LEFT_RIGHT           0x0234
 #define RADEON_OVR_WID_TOP_BOTTOM           0x0238
+#define RADEON_OVR2_CLR                     0x0330
+#define RADEON_OVR2_WID_LEFT_RIGHT          0x0334
+#define RADEON_OVR2_WID_TOP_BOTTOM          0x0338
 
 /* first capture unit */
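
The FP/FP2 detect additions above follow the same enable/status/ack pairing as the existing CRTC vblank bits: a mask bit in GEN_INT_CNTL gates the interrupt, and writing the *_ACK alias back to GEN_INT_STATUS clears the latched status. A minimal sketch of how a handler might use them; the function names are invented for illustration and this is not the actual r100 IRQ code:

    /* Hypothetical sketch: enable FP hot-plug interrupts, then ack them. */
    static void example_enable_fp_irqs(struct radeon_device *rdev)
    {
            u32 cntl = RREG32(RADEON_GEN_INT_CNTL);

            cntl |= RADEON_FP_DETECT_MASK | RADEON_FP2_DETECT_MASK;
            WREG32(RADEON_GEN_INT_CNTL, cntl);
    }

    static void example_ack_fp_irqs(struct radeon_device *rdev)
    {
            u32 status = RREG32(RADEON_GEN_INT_STATUS);

            /* writing the ACK bit back clears the latched status */
            if (status & RADEON_FP_DETECT_STAT)
                    WREG32(RADEON_GEN_INT_STATUS, RADEON_FP_DETECT_STAT_ACK);
            if (status & RADEON_FP2_DETECT_STAT)
                    WREG32(RADEON_GEN_INT_STATUS, RADEON_FP2_DETECT_STAT_ACK);
    }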
 

+ 42 - 25
drivers/gpu/drm/radeon/radeon_ring.c

@@ -165,19 +165,24 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
 		return 0;
 	/* Allocate 1M object buffer */
 	INIT_LIST_HEAD(&rdev->ib_pool.scheduled_ibs);
-	r = radeon_object_create(rdev, NULL,  RADEON_IB_POOL_SIZE*64*1024,
-				 true, RADEON_GEM_DOMAIN_GTT,
-				 false, &rdev->ib_pool.robj);
+	r = radeon_bo_create(rdev, NULL,  RADEON_IB_POOL_SIZE*64*1024,
+				true, RADEON_GEM_DOMAIN_GTT,
+				&rdev->ib_pool.robj);
 	if (r) {
 		DRM_ERROR("radeon: failed to ib pool (%d).\n", r);
 		return r;
 	}
-	r = radeon_object_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
+	r = radeon_bo_reserve(rdev->ib_pool.robj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_pin(rdev->ib_pool.robj, RADEON_GEM_DOMAIN_GTT, &gpu_addr);
 	if (r) {
+		radeon_bo_unreserve(rdev->ib_pool.robj);
 		DRM_ERROR("radeon: failed to pin ib pool (%d).\n", r);
 		return r;
 	}
-	r = radeon_object_kmap(rdev->ib_pool.robj, &ptr);
+	r = radeon_bo_kmap(rdev->ib_pool.robj, &ptr);
+	radeon_bo_unreserve(rdev->ib_pool.robj);
 	if (r) {
 		DRM_ERROR("radeon: failed to map ib poll (%d).\n", r);
 		return r;
@@ -203,14 +208,21 @@ int radeon_ib_pool_init(struct radeon_device *rdev)
 
 void radeon_ib_pool_fini(struct radeon_device *rdev)
 {
+	int r;
+
 	if (!rdev->ib_pool.ready) {
 		return;
 	}
 	mutex_lock(&rdev->ib_pool.mutex);
 	bitmap_zero(rdev->ib_pool.alloc_bm, RADEON_IB_POOL_SIZE);
 	if (rdev->ib_pool.robj) {
-		radeon_object_kunmap(rdev->ib_pool.robj);
-		radeon_object_unref(&rdev->ib_pool.robj);
+		r = radeon_bo_reserve(rdev->ib_pool.robj, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(rdev->ib_pool.robj);
+			radeon_bo_unpin(rdev->ib_pool.robj);
+			radeon_bo_unreserve(rdev->ib_pool.robj);
+		}
+		radeon_bo_unref(&rdev->ib_pool.robj);
 		rdev->ib_pool.robj = NULL;
 	}
 	mutex_unlock(&rdev->ib_pool.mutex);
@@ -288,29 +300,28 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
 	rdev->cp.ring_size = ring_size;
 	/* Allocate ring buffer */
 	if (rdev->cp.ring_obj == NULL) {
-		r = radeon_object_create(rdev, NULL, rdev->cp.ring_size,
-					 true,
-					 RADEON_GEM_DOMAIN_GTT,
-					 false,
-					 &rdev->cp.ring_obj);
+		r = radeon_bo_create(rdev, NULL, rdev->cp.ring_size, true,
+					RADEON_GEM_DOMAIN_GTT,
+					&rdev->cp.ring_obj);
 		if (r) {
-			DRM_ERROR("radeon: failed to create ring buffer (%d).\n", r);
-			mutex_unlock(&rdev->cp.mutex);
+			dev_err(rdev->dev, "(%d) ring create failed\n", r);
 			return r;
 		}
-		r = radeon_object_pin(rdev->cp.ring_obj,
-				      RADEON_GEM_DOMAIN_GTT,
-				      &rdev->cp.gpu_addr);
+		r = radeon_bo_reserve(rdev->cp.ring_obj, false);
+		if (unlikely(r != 0))
+			return r;
+		r = radeon_bo_pin(rdev->cp.ring_obj, RADEON_GEM_DOMAIN_GTT,
+					&rdev->cp.gpu_addr);
 		if (r) {
-			DRM_ERROR("radeon: failed to pin ring buffer (%d).\n", r);
-			mutex_unlock(&rdev->cp.mutex);
+			radeon_bo_unreserve(rdev->cp.ring_obj);
+			dev_err(rdev->dev, "(%d) ring pin failed\n", r);
 			return r;
 		}
-		r = radeon_object_kmap(rdev->cp.ring_obj,
+		r = radeon_bo_kmap(rdev->cp.ring_obj,
 				       (void **)&rdev->cp.ring);
+		radeon_bo_unreserve(rdev->cp.ring_obj);
 		if (r) {
-			DRM_ERROR("radeon: failed to map ring buffer (%d).\n", r);
-			mutex_unlock(&rdev->cp.mutex);
+			dev_err(rdev->dev, "(%d) ring map failed\n", r);
 			return r;
 		}
 	}
@@ -321,11 +332,17 @@ int radeon_ring_init(struct radeon_device *rdev, unsigned ring_size)
 
 void radeon_ring_fini(struct radeon_device *rdev)
 {
+	int r;
+
 	mutex_lock(&rdev->cp.mutex);
 	if (rdev->cp.ring_obj) {
-		radeon_object_kunmap(rdev->cp.ring_obj);
-		radeon_object_unpin(rdev->cp.ring_obj);
-		radeon_object_unref(&rdev->cp.ring_obj);
+		r = radeon_bo_reserve(rdev->cp.ring_obj, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(rdev->cp.ring_obj);
+			radeon_bo_unpin(rdev->cp.ring_obj);
+			radeon_bo_unreserve(rdev->cp.ring_obj);
+		}
+		radeon_bo_unref(&rdev->cp.ring_obj);
 		rdev->cp.ring = NULL;
 		rdev->cp.ring_obj = NULL;
 	}
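
Every conversion in this file repeats one shape: the buffer must be reserved before it can be pinned or mapped, and unreserved afterwards, while the pin itself outlives the reservation. A condensed sketch of that discipline, assuming only the radeon_bo_* helpers visible in the hunks above (the function name is illustrative):

    /* Sketch of the reserve/pin/kmap pattern used throughout this series. */
    static int example_pin_and_map(struct radeon_bo *bo,
                                   u64 *gpu_addr, void **cpu_ptr)
    {
            int r;

            r = radeon_bo_reserve(bo, false);
            if (unlikely(r != 0))
                    return r;
            r = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, gpu_addr);
            if (r) {
                    radeon_bo_unreserve(bo);
                    return r;
            }
            r = radeon_bo_kmap(bo, cpu_ptr);
            radeon_bo_unreserve(bo);  /* the pin persists; the reservation does not */
            return r;
    }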

+ 32 - 23
drivers/gpu/drm/radeon/radeon_test.c

@@ -30,8 +30,8 @@
 /* Test BO GTT->VRAM and VRAM->GTT GPU copies across the whole GTT aperture */
 void radeon_test_moves(struct radeon_device *rdev)
 {
-	struct radeon_object *vram_obj = NULL;
-	struct radeon_object **gtt_obj = NULL;
+	struct radeon_bo *vram_obj = NULL;
+	struct radeon_bo **gtt_obj = NULL;
 	struct radeon_fence *fence = NULL;
 	uint64_t gtt_addr, vram_addr;
 	unsigned i, n, size;
@@ -52,38 +52,42 @@ void radeon_test_moves(struct radeon_device *rdev)
 		goto out_cleanup;
 	}
 
-	r = radeon_object_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM,
-				 false, &vram_obj);
+	r = radeon_bo_create(rdev, NULL, size, true, RADEON_GEM_DOMAIN_VRAM,
+				&vram_obj);
 	if (r) {
 		DRM_ERROR("Failed to create VRAM object\n");
 		goto out_cleanup;
 	}
-
-	r = radeon_object_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
+	r = radeon_bo_reserve(vram_obj, false);
+	if (unlikely(r != 0))
+		goto out_cleanup;
+	r = radeon_bo_pin(vram_obj, RADEON_GEM_DOMAIN_VRAM, &vram_addr);
 	if (r) {
 		DRM_ERROR("Failed to pin VRAM object\n");
 		goto out_cleanup;
 	}
-
 	for (i = 0; i < n; i++) {
 		void *gtt_map, *vram_map;
 		void **gtt_start, **gtt_end;
 		void **vram_start, **vram_end;
 
-		r = radeon_object_create(rdev, NULL, size, true,
-					 RADEON_GEM_DOMAIN_GTT, false, gtt_obj + i);
+		r = radeon_bo_create(rdev, NULL, size, true,
+					 RADEON_GEM_DOMAIN_GTT, gtt_obj + i);
 		if (r) {
 			DRM_ERROR("Failed to create GTT object %d\n", i);
 			goto out_cleanup;
 		}
 
-		r = radeon_object_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
+		r = radeon_bo_reserve(gtt_obj[i], false);
+		if (unlikely(r != 0))
+			goto out_cleanup;
+		r = radeon_bo_pin(gtt_obj[i], RADEON_GEM_DOMAIN_GTT, &gtt_addr);
 		if (r) {
 			DRM_ERROR("Failed to pin GTT object %d\n", i);
 			goto out_cleanup;
 		}
 
-		r = radeon_object_kmap(gtt_obj[i], &gtt_map);
+		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
 		if (r) {
 			DRM_ERROR("Failed to map GTT object %d\n", i);
 			goto out_cleanup;
@@ -94,7 +98,7 @@ void radeon_test_moves(struct radeon_device *rdev)
 		     gtt_start++)
 			*gtt_start = gtt_start;
 
-		radeon_object_kunmap(gtt_obj[i]);
+		radeon_bo_kunmap(gtt_obj[i]);
 
 		r = radeon_fence_create(rdev, &fence);
 		if (r) {
@@ -116,7 +120,7 @@ void radeon_test_moves(struct radeon_device *rdev)
 
 		radeon_fence_unref(&fence);
 
-		r = radeon_object_kmap(vram_obj, &vram_map);
+		r = radeon_bo_kmap(vram_obj, &vram_map);
 		if (r) {
 			DRM_ERROR("Failed to map VRAM object after copy %d\n", i);
 			goto out_cleanup;
@@ -131,13 +135,13 @@ void radeon_test_moves(struct radeon_device *rdev)
 					  "expected 0x%p (GTT map 0x%p-0x%p)\n",
 					  i, *vram_start, gtt_start, gtt_map,
 					  gtt_end);
-				radeon_object_kunmap(vram_obj);
+				radeon_bo_kunmap(vram_obj);
 				goto out_cleanup;
 			}
 			*vram_start = vram_start;
 		}
 
-		radeon_object_kunmap(vram_obj);
+		radeon_bo_kunmap(vram_obj);
 
 		r = radeon_fence_create(rdev, &fence);
 		if (r) {
@@ -159,7 +163,7 @@ void radeon_test_moves(struct radeon_device *rdev)
 
 		radeon_fence_unref(&fence);
 
-		r = radeon_object_kmap(gtt_obj[i], &gtt_map);
+		r = radeon_bo_kmap(gtt_obj[i], &gtt_map);
 		if (r) {
 			DRM_ERROR("Failed to map GTT object after copy %d\n", i);
 			goto out_cleanup;
@@ -174,12 +178,12 @@ void radeon_test_moves(struct radeon_device *rdev)
 					  "expected 0x%p (VRAM map 0x%p-0x%p)\n",
 					  i, *gtt_start, vram_start, vram_map,
 					  vram_end);
-				radeon_object_kunmap(gtt_obj[i]);
+				radeon_bo_kunmap(gtt_obj[i]);
 				goto out_cleanup;
 			}
 		}
 
-		radeon_object_kunmap(gtt_obj[i]);
+		radeon_bo_kunmap(gtt_obj[i]);
 
 		DRM_INFO("Tested GTT->VRAM and VRAM->GTT copy for GTT offset 0x%llx\n",
 			 gtt_addr - rdev->mc.gtt_location);
@@ -187,14 +191,20 @@ void radeon_test_moves(struct radeon_device *rdev)
 
 out_cleanup:
 	if (vram_obj) {
-		radeon_object_unpin(vram_obj);
-		radeon_object_unref(&vram_obj);
+		if (radeon_bo_is_reserved(vram_obj)) {
+			radeon_bo_unpin(vram_obj);
+			radeon_bo_unreserve(vram_obj);
+		}
+		radeon_bo_unref(&vram_obj);
 	}
 	if (gtt_obj) {
 		for (i = 0; i < n; i++) {
 			if (gtt_obj[i]) {
-				radeon_object_unpin(gtt_obj[i]);
-				radeon_object_unref(&gtt_obj[i]);
+				if (radeon_bo_is_reserved(gtt_obj[i])) {
+					radeon_bo_unpin(gtt_obj[i]);
+					radeon_bo_unreserve(gtt_obj[i]);
+				}
+				radeon_bo_unref(&gtt_obj[i]);
 			}
 		}
 		kfree(gtt_obj);
@@ -206,4 +216,3 @@ out_cleanup:
 		printk(KERN_WARNING "Error while testing BO move.\n");
 	}
 }
-
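
The test's verification trick is worth spelling out: before each GPU copy it stores every slot's own kernel address into that slot (*gtt_start = gtt_start), so after the copy each destination slot must contain the address of its source slot, and any mismatch identifies the exact page that was copied wrongly. A stripped-down sketch of the idea, with all names invented for illustration:

    /* Self-addressing copy check: fill with each slot's address, copy, compare. */
    static int example_verify_copy(void **src_map, void **dst_map, size_t size)
    {
            void **src, **dst;
            size_t n = size / sizeof(void *);

            for (src = src_map; src < src_map + n; src++)
                    *src = src;                     /* slot stores its own address */

            /* ... GPU copies the src buffer into the dst buffer here ... */

            for (src = src_map, dst = dst_map; src < src_map + n; src++, dst++)
                    if (*dst != src)                /* dst must name its src slot */
                            return -EINVAL;
            return 0;
    }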

+ 52 - 42
drivers/gpu/drm/radeon/radeon_ttm.c

@@ -150,7 +150,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	case TTM_PL_TT:
-		man->gpu_offset = 0;
+		man->gpu_offset = rdev->mc.gtt_location;
 		man->available_caching = TTM_PL_MASK_CACHING;
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA;
@@ -180,7 +180,7 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		break;
 	case TTM_PL_VRAM:
 		/* "On-card" video ram */
-		man->gpu_offset = 0;
+		man->gpu_offset = rdev->mc.vram_location;
 		man->flags = TTM_MEMTYPE_FLAG_FIXED |
 			     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP |
 			     TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -197,16 +197,19 @@ static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 	return 0;
 }
 
-static uint32_t radeon_evict_flags(struct ttm_buffer_object *bo)
+static void radeon_evict_flags(struct ttm_buffer_object *bo,
+				struct ttm_placement *placement)
 {
-	uint32_t cur_placement = bo->mem.placement & ~TTM_PL_MASK_MEMTYPE;
-
+	struct radeon_bo *rbo = container_of(bo, struct radeon_bo, tbo);
 	switch (bo->mem.mem_type) {
+	case TTM_PL_VRAM:
+		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT);
+		break;
+	case TTM_PL_TT:
 	default:
-		return (cur_placement & ~TTM_PL_MASK_CACHING) |
-			TTM_PL_FLAG_SYSTEM |
-			TTM_PL_FLAG_CACHED;
+		radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_CPU);
 	}
+	*placement = rbo->placement;
 }
 
 static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp)
@@ -283,14 +286,21 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
 	struct radeon_device *rdev;
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	struct ttm_mem_reg tmp_mem;
-	uint32_t proposed_placement;
+	u32 placements;
+	struct ttm_placement placement;
 	int r;
 
 	rdev = radeon_get_rdev(bo->bdev);
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
-	proposed_placement = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
-	r = ttm_bo_mem_space(bo, proposed_placement, &tmp_mem,
+	placement.fpfn = 0;
+	placement.lpfn = 0;
+	placement.num_placement = 1;
+	placement.placement = &placements;
+	placement.num_busy_placement = 1;
+	placement.busy_placement = &placements;
+	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
 			     interruptible, no_wait);
 	if (unlikely(r)) {
 		return r;
@@ -329,15 +339,21 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
 	struct radeon_device *rdev;
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	struct ttm_mem_reg tmp_mem;
-	uint32_t proposed_flags;
+	struct ttm_placement placement;
+	u32 placements;
 	int r;
 
 	rdev = radeon_get_rdev(bo->bdev);
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
-	proposed_flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
-	r = ttm_bo_mem_space(bo, proposed_flags, &tmp_mem,
-			     interruptible, no_wait);
+	placement.fpfn = 0;
+	placement.lpfn = 0;
+	placement.num_placement = 1;
+	placement.placement = &placements;
+	placement.num_busy_placement = 1;
+	placement.busy_placement = &placements;
+	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
+	r = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
 	if (unlikely(r)) {
 		return r;
 	}
@@ -407,18 +423,6 @@ memcpy:
 	return r;
 }
 
-const uint32_t radeon_mem_prios[] = {
-	TTM_PL_VRAM,
-	TTM_PL_TT,
-	TTM_PL_SYSTEM,
-};
-
-const uint32_t radeon_busy_prios[] = {
-	TTM_PL_TT,
-	TTM_PL_VRAM,
-	TTM_PL_SYSTEM,
-};
-
 static int radeon_sync_obj_wait(void *sync_obj, void *sync_arg,
 				bool lazy, bool interruptible)
 {
@@ -446,10 +450,6 @@ static bool radeon_sync_obj_signaled(void *sync_obj, void *sync_arg)
 }
 
 static struct ttm_bo_driver radeon_bo_driver = {
-	.mem_type_prio = radeon_mem_prios,
-	.mem_busy_prio = radeon_busy_prios,
-	.num_mem_type_prio = ARRAY_SIZE(radeon_mem_prios),
-	.num_mem_busy_prio = ARRAY_SIZE(radeon_busy_prios),
 	.create_ttm_backend_entry = &radeon_create_ttm_backend_entry,
 	.invalidate_caches = &radeon_invalidate_caches,
 	.init_mem_type = &radeon_init_mem_type,
@@ -482,27 +482,31 @@ int radeon_ttm_init(struct radeon_device *rdev)
 		DRM_ERROR("failed initializing buffer object driver(%d).\n", r);
 		return r;
 	}
-	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM, 0,
-			   ((rdev->mc.real_vram_size) >> PAGE_SHIFT));
+	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_VRAM,
+				rdev->mc.real_vram_size >> PAGE_SHIFT);
 	if (r) {
 		DRM_ERROR("Failed initializing VRAM heap.\n");
 		return r;
 	}
-	r = radeon_object_create(rdev, NULL, 256 * 1024, true,
-				 RADEON_GEM_DOMAIN_VRAM, false,
-				 &rdev->stollen_vga_memory);
+	r = radeon_bo_create(rdev, NULL, 256 * 1024, true,
+				RADEON_GEM_DOMAIN_VRAM,
+				&rdev->stollen_vga_memory);
 	if (r) {
 		return r;
 	}
-	r = radeon_object_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
+	r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
+	if (r)
+		return r;
+	r = radeon_bo_pin(rdev->stollen_vga_memory, RADEON_GEM_DOMAIN_VRAM, NULL);
+	radeon_bo_unreserve(rdev->stollen_vga_memory);
 	if (r) {
-		radeon_object_unref(&rdev->stollen_vga_memory);
+		radeon_bo_unref(&rdev->stollen_vga_memory);
 		return r;
 	}
 	DRM_INFO("radeon: %uM of VRAM memory ready\n",
 		 (unsigned)rdev->mc.real_vram_size / (1024 * 1024));
-	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT, 0,
-			   ((rdev->mc.gtt_size) >> PAGE_SHIFT));
+	r = ttm_bo_init_mm(&rdev->mman.bdev, TTM_PL_TT,
+				rdev->mc.gtt_size >> PAGE_SHIFT);
 	if (r) {
 		DRM_ERROR("Failed initializing GTT heap.\n");
 		return r;
@@ -523,9 +527,15 @@ int radeon_ttm_init(struct radeon_device *rdev)
 
 void radeon_ttm_fini(struct radeon_device *rdev)
 {
+	int r;
+
 	if (rdev->stollen_vga_memory) {
-		radeon_object_unpin(rdev->stollen_vga_memory);
-		radeon_object_unref(&rdev->stollen_vga_memory);
+		r = radeon_bo_reserve(rdev->stollen_vga_memory, false);
+		if (r == 0) {
+			radeon_bo_unpin(rdev->stollen_vga_memory);
+			radeon_bo_unreserve(rdev->stollen_vga_memory);
+		}
+		radeon_bo_unref(&rdev->stollen_vga_memory);
 	}
 	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_VRAM);
 	ttm_bo_clean_mm(&rdev->mman.bdev, TTM_PL_TT);
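
Both move helpers above now describe the interim placement with a one-entry struct ttm_placement on the stack instead of a raw flag word. The fields mirror what the hunks show: an fpfn/lpfn page range (0 meaning unrestricted) plus parallel arrays for the preferred and busy cases. A minimal sketch of that setup, factored into a helper purely for illustration:

    /* Illustrative helper: wrap one flag word in the new placement layout.
     * The flags storage must outlive the placement, hence the caller-supplied u32. */
    static void example_single_placement(struct ttm_placement *placement,
                                         u32 *flags, u32 value)
    {
            *flags = value;                 /* e.g. TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT */
            placement->fpfn = 0;            /* no page-range restriction */
            placement->lpfn = 0;
            placement->num_placement = 1;
            placement->placement = flags;
            placement->num_busy_placement = 1;
            placement->busy_placement = flags;
    }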

+ 9 - 8
drivers/gpu/drm/radeon/rs400.c

@@ -352,7 +352,7 @@ static int rs400_mc_init(struct radeon_device *rdev)
 	u32 tmp;
 
 	/* Setup GPU memory space */
-	tmp = G_00015C_MC_FB_START(RREG32(R_00015C_NB_TOM));
+	tmp = RREG32(R_00015C_NB_TOM);
 	rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16;
 	rdev->mc.gtt_location = 0xFFFFFFFFUL;
 	r = radeon_mc_setup(rdev);
@@ -387,13 +387,13 @@ static int rs400_startup(struct radeon_device *rdev)
 	r300_clock_startup(rdev);
 	/* Initialize GPU configuration (# pipes, ...) */
 	rs400_gpu_init(rdev);
+	r100_enable_bm(rdev);
 	/* Initialize GART (initialize after TTM so we can allocate
 	 * memory through TTM but finalize after TTM) */
 	r = rs400_gart_enable(rdev);
 	if (r)
 		return r;
 	/* Enable IRQ */
-	rdev->irq.sw_int = true;
 	r100_irq_set(rdev);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
@@ -430,6 +430,8 @@ int rs400_resume(struct radeon_device *rdev)
 	radeon_combios_asic_init(rdev->ddev);
 	/* Resume clock after posting */
 	r300_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return rs400_startup(rdev);
 }
 
@@ -452,7 +454,7 @@ void rs400_fini(struct radeon_device *rdev)
 	rs400_gart_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	radeon_fence_driver_fini(rdev);
-	radeon_object_fini(rdev);
+	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
 	rdev->bios = NULL;
@@ -490,10 +492,9 @@ int rs400_init(struct radeon_device *rdev)
 			RREG32(R_0007C0_CP_STAT));
 	}
 	/* check if cards are posted or not */
-	if (!radeon_card_posted(rdev) && rdev->bios) {
-		DRM_INFO("GPU not posted. posting now...\n");
-		radeon_combios_asic_init(rdev->ddev);
-	}
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
+
 	/* Initialize clocks */
 	radeon_get_clock_info(rdev->ddev);
 	/* Get vram informations */
@@ -510,7 +511,7 @@ int rs400_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 	/* Memory manager */
-	r = radeon_object_init(rdev);
+	r = radeon_bo_init(rdev);
 	if (r)
 		return r;
 	r = rs400_gart_init(rdev);

+ 200 - 36
drivers/gpu/drm/radeon/rs600.c

@@ -45,6 +45,122 @@
 void rs600_gpu_init(struct radeon_device *rdev);
 int rs600_mc_wait_for_idle(struct radeon_device *rdev);
 
+int rs600_mc_init(struct radeon_device *rdev)
+{
+	/* read back the MC value from the hw */
+	int r;
+	u32 tmp;
+
+	/* Setup GPU memory space */
+	tmp = RREG32_MC(R_000004_MC_FB_LOCATION);
+	rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16;
+	rdev->mc.gtt_location = 0xffffffffUL;
+	r = radeon_mc_setup(rdev);
+	if (r)
+		return r;
+	return 0;
+}
+
+/* hpd for digital panel detect/disconnect */
+bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd)
+{
+	u32 tmp;
+	bool connected = false;
+
+	switch (hpd) {
+	case RADEON_HPD_1:
+		tmp = RREG32(R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS);
+		if (G_007D04_DC_HOT_PLUG_DETECT1_SENSE(tmp))
+			connected = true;
+		break;
+	case RADEON_HPD_2:
+		tmp = RREG32(R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS);
+		if (G_007D14_DC_HOT_PLUG_DETECT2_SENSE(tmp))
+			connected = true;
+		break;
+	default:
+		break;
+	}
+	return connected;
+}
+
+void rs600_hpd_set_polarity(struct radeon_device *rdev,
+			    enum radeon_hpd_id hpd)
+{
+	u32 tmp;
+	bool connected = rs600_hpd_sense(rdev, hpd);
+
+	switch (hpd) {
+	case RADEON_HPD_1:
+		tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
+		if (connected)
+			tmp &= ~S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
+		else
+			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(1);
+		WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+		break;
+	case RADEON_HPD_2:
+		tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
+		if (connected)
+			tmp &= ~S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
+		else
+			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(1);
+		WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+		break;
+	default:
+		break;
+	}
+}
+
+void rs600_hpd_init(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		switch (radeon_connector->hpd.hpd) {
+		case RADEON_HPD_1:
+			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
+			       S_007D00_DC_HOT_PLUG_DETECT1_EN(1));
+			rdev->irq.hpd[0] = true;
+			break;
+		case RADEON_HPD_2:
+			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
+			       S_007D10_DC_HOT_PLUG_DETECT2_EN(1));
+			rdev->irq.hpd[1] = true;
+			break;
+		default:
+			break;
+		}
+	}
+	rs600_irq_set(rdev);
+}
+
+void rs600_hpd_fini(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	struct drm_connector *connector;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+		switch (radeon_connector->hpd.hpd) {
+		case RADEON_HPD_1:
+			WREG32(R_007D00_DC_HOT_PLUG_DETECT1_CONTROL,
+			       S_007D00_DC_HOT_PLUG_DETECT1_EN(0));
+			rdev->irq.hpd[0] = false;
+			break;
+		case RADEON_HPD_2:
+			WREG32(R_007D10_DC_HOT_PLUG_DETECT2_CONTROL,
+			       S_007D10_DC_HOT_PLUG_DETECT2_EN(0));
+			rdev->irq.hpd[1] = false;
+			break;
+		default:
+			break;
+		}
+	}
+}
+
 /*
  * GART.
  */
@@ -100,40 +216,40 @@ int rs600_gart_enable(struct radeon_device *rdev)
 	WREG32(R_00004C_BUS_CNTL, tmp);
 	/* FIXME: setup default page */
 	WREG32_MC(R_000100_MC_PT0_CNTL,
-		 (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
-		  S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));
+		  (S_000100_EFFECTIVE_L2_CACHE_SIZE(6) |
+		   S_000100_EFFECTIVE_L2_QUEUE_SIZE(6)));
+
 	for (i = 0; i < 19; i++) {
 		WREG32_MC(R_00016C_MC_PT0_CLIENT0_CNTL + i,
-			S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
-			S_00016C_SYSTEM_ACCESS_MODE_MASK(
-				V_00016C_SYSTEM_ACCESS_MODE_IN_SYS) |
-			S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
-				V_00016C_SYSTEM_APERTURE_UNMAPPED_DEFAULT_PAGE) |
-			S_00016C_EFFECTIVE_L1_CACHE_SIZE(1) |
-			S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
-			S_00016C_EFFECTIVE_L1_QUEUE_SIZE(1));
+			  S_00016C_ENABLE_TRANSLATION_MODE_OVERRIDE(1) |
+			  S_00016C_SYSTEM_ACCESS_MODE_MASK(
+				  V_00016C_SYSTEM_ACCESS_MODE_NOT_IN_SYS) |
+			  S_00016C_SYSTEM_APERTURE_UNMAPPED_ACCESS(
+				  V_00016C_SYSTEM_APERTURE_UNMAPPED_PASSTHROUGH) |
+			  S_00016C_EFFECTIVE_L1_CACHE_SIZE(3) |
+			  S_00016C_ENABLE_FRAGMENT_PROCESSING(1) |
+			  S_00016C_EFFECTIVE_L1_QUEUE_SIZE(3));
 	}
-
-	/* System context map to GART space */
-	WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.gtt_start);
-	WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.gtt_end);
-
 	/* enable first context */
-	WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
-	WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
 	WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL,
-			S_000102_ENABLE_PAGE_TABLE(1) |
-			S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));
+		  S_000102_ENABLE_PAGE_TABLE(1) |
+		  S_000102_PAGE_TABLE_DEPTH(V_000102_PAGE_TABLE_FLAT));
+
 	/* disable all other contexts */
-	for (i = 1; i < 8; i++) {
+	for (i = 1; i < 8; i++)
 		WREG32_MC(R_000102_MC_PT0_CONTEXT0_CNTL + i, 0);
-	}
 
 	/* setup the page table */
 	WREG32_MC(R_00012C_MC_PT0_CONTEXT0_FLAT_BASE_ADDR,
-			rdev->gart.table_addr);
+		  rdev->gart.table_addr);
+	WREG32_MC(R_00013C_MC_PT0_CONTEXT0_FLAT_START_ADDR, rdev->mc.gtt_start);
+	WREG32_MC(R_00014C_MC_PT0_CONTEXT0_FLAT_END_ADDR, rdev->mc.gtt_end);
 	WREG32_MC(R_00011C_MC_PT0_CONTEXT0_DEFAULT_READ_ADDR, 0);
 
+	/* System context maps to VRAM space */
+	WREG32_MC(R_000112_MC_PT0_SYSTEM_APERTURE_LOW_ADDR, rdev->mc.vram_start);
+	WREG32_MC(R_000114_MC_PT0_SYSTEM_APERTURE_HIGH_ADDR, rdev->mc.vram_end);
+
 	/* enable page tables */
 	tmp = RREG32_MC(R_000100_MC_PT0_CNTL);
 	WREG32_MC(R_000100_MC_PT0_CNTL, (tmp | S_000100_ENABLE_PT(1)));
@@ -146,15 +262,20 @@ int rs600_gart_enable(struct radeon_device *rdev)
 
 void rs600_gart_disable(struct radeon_device *rdev)
 {
-	uint32_t tmp;
+	u32 tmp;
+	int r;
 
 	/* FIXME: disable out of gart access */
 	WREG32_MC(R_000100_MC_PT0_CNTL, 0);
 	tmp = RREG32_MC(R_000009_MC_CNTL1);
 	WREG32_MC(R_000009_MC_CNTL1, tmp & C_000009_ENABLE_PAGE_TABLES);
 	if (rdev->gart.table.vram.robj) {
-		radeon_object_kunmap(rdev->gart.table.vram.robj);
-		radeon_object_unpin(rdev->gart.table.vram.robj);
+		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+		if (r == 0) {
+			radeon_bo_kunmap(rdev->gart.table.vram.robj);
+			radeon_bo_unpin(rdev->gart.table.vram.robj);
+			radeon_bo_unreserve(rdev->gart.table.vram.robj);
+		}
 	}
 }
 
@@ -189,6 +310,10 @@ int rs600_irq_set(struct radeon_device *rdev)
 {
 	uint32_t tmp = 0;
 	uint32_t mode_int = 0;
+	u32 hpd1 = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL) &
+		~S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
+	u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
+		~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
 
 	if (rdev->irq.sw_int) {
 		tmp |= S_000040_SW_INT_EN(1);
@@ -199,8 +324,16 @@ int rs600_irq_set(struct radeon_device *rdev)
 	if (rdev->irq.crtc_vblank_int[1]) {
 		mode_int |= S_006540_D2MODE_VBLANK_INT_MASK(1);
 	}
+	if (rdev->irq.hpd[0]) {
+		hpd1 |= S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(1);
+	}
+	if (rdev->irq.hpd[1]) {
+		hpd2 |= S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
+	}
 	WREG32(R_000040_GEN_INT_CNTL, tmp);
 	WREG32(R_006540_DxMODE_INT_MASK, mode_int);
+	WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, hpd1);
+	WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, hpd2);
 	return 0;
 }
 
@@ -208,6 +341,7 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_
 {
 	uint32_t irqs = RREG32(R_000044_GEN_INT_STATUS);
 	uint32_t irq_mask = ~C_000044_SW_INT;
+	u32 tmp;
 
 	if (G_000044_DISPLAY_INT_STAT(irqs)) {
 		*r500_disp_int = RREG32(R_007EDC_DISP_INTERRUPT_STATUS);
@@ -219,6 +353,16 @@ static inline uint32_t rs600_irq_ack(struct radeon_device *rdev, u32 *r500_disp_
 			WREG32(R_006D34_D2MODE_VBLANK_STATUS,
 				S_006D34_D2MODE_VBLANK_ACK(1));
 		}
+		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(*r500_disp_int)) {
+			tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);
+			tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(1);
+			WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
+		}
+		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(*r500_disp_int)) {
+			tmp = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL);
+			tmp |= S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(1);
+			WREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
+		}
 	} else {
 		*r500_disp_int = 0;
 	}
@@ -244,6 +388,7 @@ int rs600_irq_process(struct radeon_device *rdev)
 {
 	uint32_t status, msi_rearm;
 	uint32_t r500_disp_int;
+	bool queue_hotplug = false;
 
 	status = rs600_irq_ack(rdev, &r500_disp_int);
 	if (!status && !r500_disp_int) {
@@ -258,8 +403,18 @@ int rs600_irq_process(struct radeon_device *rdev)
 			drm_handle_vblank(rdev->ddev, 0);
 		if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int))
 			drm_handle_vblank(rdev->ddev, 1);
+		if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) {
+			queue_hotplug = true;
+			DRM_DEBUG("HPD1\n");
+		}
+		if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(r500_disp_int)) {
+			queue_hotplug = true;
+			DRM_DEBUG("HPD2\n");
+		}
 		status = rs600_irq_ack(rdev, &r500_disp_int);
 	}
+	if (queue_hotplug)
+		queue_work(rdev->wq, &rdev->hotplug_work);
 	if (rdev->msi_enabled) {
 		switch (rdev->family) {
 		case CHIP_RS600:
@@ -301,9 +456,7 @@ int rs600_mc_wait_for_idle(struct radeon_device *rdev)
 
 void rs600_gpu_init(struct radeon_device *rdev)
 {
-	/* FIXME: HDP same place on rs600 ? */
 	r100_hdp_reset(rdev);
-	/* FIXME: is this correct ? */
 	r420_pipes_init(rdev);
 	/* Wait for mc idle */
 	if (rs600_mc_wait_for_idle(rdev))
@@ -312,9 +465,20 @@ void rs600_gpu_init(struct radeon_device *rdev)
 
 void rs600_vram_info(struct radeon_device *rdev)
 {
-	/* FIXME: to do or is these values sane ? */
 	rdev->mc.vram_is_ddr = true;
 	rdev->mc.vram_width = 128;
+
+	rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
+	rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
+
+	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
+	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+
+	if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
+		rdev->mc.mc_vram_size = rdev->mc.aper_size;
+
+	if (rdev->mc.real_vram_size > rdev->mc.aper_size)
+		rdev->mc.real_vram_size = rdev->mc.aper_size;
 }
 
 void rs600_bandwidth_update(struct radeon_device *rdev)
@@ -388,7 +552,6 @@ static int rs600_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 	/* Enable IRQ */
-	rdev->irq.sw_int = true;
 	rs600_irq_set(rdev);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
@@ -423,6 +586,8 @@ int rs600_resume(struct radeon_device *rdev)
 	atom_asic_init(rdev->mode_info.atom_context);
 	/* Resume clock after posting */
 	rv515_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return rs600_startup(rdev);
 }
 
@@ -445,7 +610,7 @@ void rs600_fini(struct radeon_device *rdev)
 	rs600_gart_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	radeon_fence_driver_fini(rdev);
-	radeon_object_fini(rdev);
+	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
 	rdev->bios = NULL;
@@ -482,10 +647,9 @@ int rs600_init(struct radeon_device *rdev)
 			RREG32(R_0007C0_CP_STAT));
 	}
 	/* check if cards are posted or not */
-	if (!radeon_card_posted(rdev) && rdev->bios) {
-		DRM_INFO("GPU not posted. posting now...\n");
-		atom_asic_init(rdev->mode_info.atom_context);
-	}
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
+
 	/* Initialize clocks */
 	radeon_get_clock_info(rdev->ddev);
 	/* Initialize power management */
@@ -493,7 +657,7 @@ int rs600_init(struct radeon_device *rdev)
 	/* Get vram informations */
 	rs600_vram_info(rdev);
 	/* Initialize memory controller (also test AGP) */
-	r = r420_mc_init(rdev);
+	r = rs600_mc_init(rdev);
 	if (r)
 		return r;
 	rs600_debugfs(rdev);
@@ -505,7 +669,7 @@ int rs600_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 	/* Memory manager */
-	r = radeon_object_init(rdev);
+	r = radeon_bo_init(rdev);
 	if (r)
 		return r;
 	r = rs600_gart_init(rdev);
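
Note how rs600_irq_process() keeps the interrupt path cheap: it only acks the HPD status bits and records that something changed, then defers the expensive connector re-probe to process context through queue_work(). A sketch of that split, with the status decoding elided; hotplug_work and wq mirror the rdev fields used above:

    /* Top-half/bottom-half split for hotplug: detect in IRQ context,
     * re-probe connectors later from the work queue. */
    static void example_handle_disp_int(struct radeon_device *rdev, u32 disp_int)
    {
            bool queue_hotplug = false;

            if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(disp_int))
                    queue_hotplug = true;   /* connector 1 changed */
            if (G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(disp_int))
                    queue_hotplug = true;   /* connector 2 changed */
            if (queue_hotplug)
                    queue_work(rdev->wq, &rdev->hotplug_work);
    }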

+ 90 - 22
drivers/gpu/drm/radeon/rs600d.h

@@ -30,27 +30,12 @@
 
 /* Registers */
 #define R_000040_GEN_INT_CNTL                        0x000040
-#define   S_000040_DISPLAY_INT_STATUS(x)               (((x) & 0x1) << 0)
-#define   G_000040_DISPLAY_INT_STATUS(x)               (((x) >> 0) & 0x1)
-#define   C_000040_DISPLAY_INT_STATUS                  0xFFFFFFFE
-#define   S_000040_DMA_VIPH0_INT_EN(x)                 (((x) & 0x1) << 12)
-#define   G_000040_DMA_VIPH0_INT_EN(x)                 (((x) >> 12) & 0x1)
-#define   C_000040_DMA_VIPH0_INT_EN                    0xFFFFEFFF
-#define   S_000040_CRTC2_VSYNC(x)                      (((x) & 0x1) << 6)
-#define   G_000040_CRTC2_VSYNC(x)                      (((x) >> 6) & 0x1)
-#define   C_000040_CRTC2_VSYNC                         0xFFFFFFBF
-#define   S_000040_SNAPSHOT2(x)                        (((x) & 0x1) << 7)
-#define   G_000040_SNAPSHOT2(x)                        (((x) >> 7) & 0x1)
-#define   C_000040_SNAPSHOT2                           0xFFFFFF7F
-#define   S_000040_CRTC2_VBLANK(x)                     (((x) & 0x1) << 9)
-#define   G_000040_CRTC2_VBLANK(x)                     (((x) >> 9) & 0x1)
-#define   C_000040_CRTC2_VBLANK                        0xFFFFFDFF
-#define   S_000040_FP2_DETECT(x)                       (((x) & 0x1) << 10)
-#define   G_000040_FP2_DETECT(x)                       (((x) >> 10) & 0x1)
-#define   C_000040_FP2_DETECT                          0xFFFFFBFF
-#define   S_000040_VSYNC_DIFF_OVER_LIMIT(x)            (((x) & 0x1) << 11)
-#define   G_000040_VSYNC_DIFF_OVER_LIMIT(x)            (((x) >> 11) & 0x1)
-#define   C_000040_VSYNC_DIFF_OVER_LIMIT               0xFFFFF7FF
+#define   S_000040_SCRATCH_INT_MASK(x)                 (((x) & 0x1) << 18)
+#define   G_000040_SCRATCH_INT_MASK(x)                 (((x) >> 18) & 0x1)
+#define   C_000040_SCRATCH_INT_MASK                    0xFFFBFFFF
+#define   S_000040_GUI_IDLE_MASK(x)                    (((x) & 0x1) << 19)
+#define   G_000040_GUI_IDLE_MASK(x)                    (((x) >> 19) & 0x1)
+#define   C_000040_GUI_IDLE_MASK                       0xFFF7FFFF
 #define   S_000040_DMA_VIPH1_INT_EN(x)                 (((x) & 0x1) << 13)
 #define   G_000040_DMA_VIPH1_INT_EN(x)                 (((x) >> 13) & 0x1)
 #define   C_000040_DMA_VIPH1_INT_EN                    0xFFFFDFFF
@@ -370,7 +355,90 @@
 #define   S_007EDC_LB_D2_VBLANK_INTERRUPT(x)           (((x) & 0x1) << 5)
 #define   G_007EDC_LB_D2_VBLANK_INTERRUPT(x)           (((x) >> 5) & 0x1)
 #define   C_007EDC_LB_D2_VBLANK_INTERRUPT              0xFFFFFFDF
-
+#define   S_007EDC_DACA_AUTODETECT_INTERRUPT(x)        (((x) & 0x1) << 16)
+#define   G_007EDC_DACA_AUTODETECT_INTERRUPT(x)        (((x) >> 16) & 0x1)
+#define   C_007EDC_DACA_AUTODETECT_INTERRUPT           0xFFFEFFFF
+#define   S_007EDC_DACB_AUTODETECT_INTERRUPT(x)        (((x) & 0x1) << 17)
+#define   G_007EDC_DACB_AUTODETECT_INTERRUPT(x)        (((x) >> 17) & 0x1)
+#define   C_007EDC_DACB_AUTODETECT_INTERRUPT           0xFFFDFFFF
+#define   S_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(x)    (((x) & 0x1) << 18)
+#define   G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(x)    (((x) >> 18) & 0x1)
+#define   C_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT       0xFFFBFFFF
+#define   S_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(x)    (((x) & 0x1) << 19)
+#define   G_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT(x)    (((x) >> 19) & 0x1)
+#define   C_007EDC_DC_HOT_PLUG_DETECT2_INTERRUPT       0xFFF7FFFF
+#define R_007828_DACA_AUTODETECT_CONTROL               0x007828
+#define   S_007828_DACA_AUTODETECT_MODE(x)             (((x) & 0x3) << 0)
+#define   G_007828_DACA_AUTODETECT_MODE(x)             (((x) >> 0) & 0x3)
+#define   C_007828_DACA_AUTODETECT_MODE                0xFFFFFFFC
+#define   S_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) & 0xff) << 8)
+#define   G_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) >> 8) & 0xff)
+#define   C_007828_DACA_AUTODETECT_FRAME_TIME_COUNTER  0xFFFF00FF
+#define   S_007828_DACA_AUTODETECT_CHECK_MASK(x)       (((x) & 0x3) << 16)
+#define   G_007828_DACA_AUTODETECT_CHECK_MASK(x)       (((x) >> 16) & 0x3)
+#define   C_007828_DACA_AUTODETECT_CHECK_MASK          0xFFFCFFFF
+#define R_007838_DACA_AUTODETECT_INT_CONTROL           0x007838
+#define   S_007838_DACA_AUTODETECT_ACK(x)              (((x) & 0x1) << 0)
+#define   C_007838_DACA_DACA_AUTODETECT_ACK            0xFFFFFFFE
+#define   S_007838_DACA_AUTODETECT_INT_ENABLE(x)       (((x) & 0x1) << 16)
+#define   G_007838_DACA_AUTODETECT_INT_ENABLE(x)       (((x) >> 16) & 0x1)
+#define   C_007838_DACA_AUTODETECT_INT_ENABLE          0xFFFCFFFF
+#define R_007A28_DACB_AUTODETECT_CONTROL               0x007A28
+#define   S_007A28_DACB_AUTODETECT_MODE(x)             (((x) & 0x3) << 0)
+#define   G_007A28_DACB_AUTODETECT_MODE(x)             (((x) >> 0) & 0x3)
+#define   C_007A28_DACB_AUTODETECT_MODE                0xFFFFFFFC
+#define   S_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) & 0xff) << 8)
+#define   G_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER(x) (((x) >> 8) & 0xff)
+#define   C_007A28_DACB_AUTODETECT_FRAME_TIME_COUNTER  0xFFFF00FF
+#define   S_007A28_DACB_AUTODETECT_CHECK_MASK(x)       (((x) & 0x3) << 16)
+#define   G_007A28_DACB_AUTODETECT_CHECK_MASK(x)       (((x) >> 16) & 0x3)
+#define   C_007A28_DACB_AUTODETECT_CHECK_MASK          0xFFFCFFFF
+#define R_007A38_DACB_AUTODETECT_INT_CONTROL           0x007A38
+#define   S_007A38_DACB_AUTODETECT_ACK(x)              (((x) & 0x1) << 0)
+#define   C_007A38_DACB_DACA_AUTODETECT_ACK            0xFFFFFFFE
+#define   S_007A38_DACB_AUTODETECT_INT_ENABLE(x)       (((x) & 0x1) << 16)
+#define   G_007A38_DACB_AUTODETECT_INT_ENABLE(x)       (((x) >> 16) & 0x1)
+#define   C_007A38_DACB_AUTODETECT_INT_ENABLE          0xFFFCFFFF
+#define R_007D00_DC_HOT_PLUG_DETECT1_CONTROL           0x007D00
+#define   S_007D00_DC_HOT_PLUG_DETECT1_EN(x)           (((x) & 0x1) << 0)
+#define   G_007D00_DC_HOT_PLUG_DETECT1_EN(x)           (((x) >> 0) & 0x1)
+#define   C_007D00_DC_HOT_PLUG_DETECT1_EN              0xFFFFFFFE
+#define R_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS        0x007D04
+#define   S_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS(x)   (((x) & 0x1) << 0)
+#define   G_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS(x)   (((x) >> 0) & 0x1)
+#define   C_007D04_DC_HOT_PLUG_DETECT1_INT_STATUS      0xFFFFFFFE
+#define   S_007D04_DC_HOT_PLUG_DETECT1_SENSE(x)        (((x) & 0x1) << 1)
+#define   G_007D04_DC_HOT_PLUG_DETECT1_SENSE(x)        (((x) >> 1) & 0x1)
+#define   C_007D04_DC_HOT_PLUG_DETECT1_SENSE           0xFFFFFFFD
+#define R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL       0x007D08
+#define   S_007D08_DC_HOT_PLUG_DETECT1_INT_ACK(x)      (((x) & 0x1) << 0)
+#define   C_007D08_DC_HOT_PLUG_DETECT1_INT_ACK         0xFFFFFFFE
+#define   S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(x) (((x) & 0x1) << 8)
+#define   G_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(x) (((x) >> 8) & 0x1)
+#define   C_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY    0xFFFFFEFF
+#define   S_007D08_DC_HOT_PLUG_DETECT1_INT_EN(x)       (((x) & 0x1) << 16)
+#define   G_007D08_DC_HOT_PLUG_DETECT1_INT_EN(x)       (((x) >> 16) & 0x1)
+#define   C_007D08_DC_HOT_PLUG_DETECT1_INT_EN          0xFFFEFFFF
+#define R_007D10_DC_HOT_PLUG_DETECT2_CONTROL           0x007D10
+#define   S_007D10_DC_HOT_PLUG_DETECT2_EN(x)           (((x) & 0x1) << 0)
+#define   G_007D10_DC_HOT_PLUG_DETECT2_EN(x)           (((x) >> 0) & 0x1)
+#define   C_007D10_DC_HOT_PLUG_DETECT2_EN              0xFFFFFFFE
+#define R_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS        0x007D14
+#define   S_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS(x)   (((x) & 0x1) << 0)
+#define   G_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS(x)   (((x) >> 0) & 0x1)
+#define   C_007D14_DC_HOT_PLUG_DETECT2_INT_STATUS      0xFFFFFFFE
+#define   S_007D14_DC_HOT_PLUG_DETECT2_SENSE(x)        (((x) & 0x1) << 1)
+#define   G_007D14_DC_HOT_PLUG_DETECT2_SENSE(x)        (((x) >> 1) & 0x1)
+#define   C_007D14_DC_HOT_PLUG_DETECT2_SENSE           0xFFFFFFFD
+#define R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL       0x007D18
+#define   S_007D18_DC_HOT_PLUG_DETECT2_INT_ACK(x)      (((x) & 0x1) << 0)
+#define   C_007D18_DC_HOT_PLUG_DETECT2_INT_ACK         0xFFFFFFFE
+#define   S_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(x) (((x) & 0x1) << 8)
+#define   G_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY(x) (((x) >> 8) & 0x1)
+#define   C_007D18_DC_HOT_PLUG_DETECT2_INT_POLARITY    0xFFFFFEFF
+#define   S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x)       (((x) & 0x1) << 16)
+#define   G_007D18_DC_HOT_PLUG_DETECT2_INT_EN(x)       (((x) >> 16) & 0x1)
+#define   C_007D18_DC_HOT_PLUG_DETECT2_INT_EN          0xFFFEFFFF
 
 /* MC registers */
 #define R_000000_MC_STATUS                           0x000000
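
All of these generated defines follow one convention: R_ is the register offset, S_FIELD(x) shifts a value into the field, G_FIELD(x) extracts it, and C_FIELD is the AND-mask that clears it. A read-modify-write therefore composes as clear-then-set; a sketch using the hot-plug polarity field from above (the function name is illustrative):

    /* Convention sketch: update one field via the S_/G_/C_ macro triplet. */
    static void example_set_hpd1_polarity(struct radeon_device *rdev, int high)
    {
            u32 tmp = RREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL);

            tmp &= C_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY;       /* clear field */
            tmp |= S_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(high); /* set value  */
            WREG32(R_007D08_DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
            /* G_007D08_DC_HOT_PLUG_DETECT1_INT_POLARITY(tmp) reads it back */
    }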

+ 38 - 19
drivers/gpu/drm/radeon/rs690.c

@@ -131,24 +131,25 @@ void rs690_pm_info(struct radeon_device *rdev)
 
 void rs690_vram_info(struct radeon_device *rdev)
 {
-	uint32_t tmp;
 	fixed20_12 a;
 
 	rs400_gart_adjust_size(rdev);
-	/* DDR for all card after R300 & IGP */
+
 	rdev->mc.vram_is_ddr = true;
-	/* FIXME: is this correct for RS690/RS740 ? */
-	tmp = RREG32(RADEON_MEM_CNTL);
-	if (tmp & R300_MEM_NUM_CHANNELS_MASK) {
-		rdev->mc.vram_width = 128;
-	} else {
-		rdev->mc.vram_width = 64;
-	}
+	rdev->mc.vram_width = 128;
+
 	rdev->mc.real_vram_size = RREG32(RADEON_CONFIG_MEMSIZE);
 	rdev->mc.mc_vram_size = rdev->mc.real_vram_size;
 
 	rdev->mc.aper_base = drm_get_resource_start(rdev->ddev, 0);
 	rdev->mc.aper_size = drm_get_resource_len(rdev->ddev, 0);
+
+	if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
+		rdev->mc.mc_vram_size = rdev->mc.aper_size;
+
+	if (rdev->mc.real_vram_size > rdev->mc.aper_size)
+		rdev->mc.real_vram_size = rdev->mc.aper_size;
+
 	rs690_pm_info(rdev);
 	/* FIXME: we should enforce default clock in case GPU is not in
 	 * default setup
@@ -161,6 +162,21 @@ void rs690_vram_info(struct radeon_device *rdev)
 	rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
 }
 
+static int rs690_mc_init(struct radeon_device *rdev)
+{
+	int r;
+	u32 tmp;
+
+	/* Setup GPU memory space */
+	tmp = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
+	rdev->mc.vram_location = G_000100_MC_FB_START(tmp) << 16;
+	rdev->mc.gtt_location = 0xFFFFFFFFUL;
+	r = radeon_mc_setup(rdev);
+	if (r)
+		return r;
+	return 0;
+}
+
 void rs690_line_buffer_adjust(struct radeon_device *rdev,
 			      struct drm_display_mode *mode1,
 			      struct drm_display_mode *mode2)
@@ -244,8 +260,9 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
 
 	b.full = rfixed_const(mode->crtc_hdisplay);
 	c.full = rfixed_const(256);
-	a.full = rfixed_mul(wm->num_line_pair, b);
-	request_fifo_depth.full = rfixed_div(a, c);
+	a.full = rfixed_div(b, c);
+	request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
+	request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
 	if (a.full < rfixed_const(4)) {
 		wm->lb_request_fifo_depth = 4;
 	} else {
@@ -374,6 +391,7 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
 	a.full = rfixed_const(16);
 	wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
 	wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
+	wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
 
 	/* Determine estimated width */
 	estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
@@ -383,6 +401,7 @@ void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
 	} else {
 		a.full = rfixed_const(16);
 		wm->priority_mark.full = rfixed_div(estimated_width, a);
+		wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
 		wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
 	}
 }
@@ -605,7 +624,6 @@ static int rs690_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 	/* Enable IRQ */
-	rdev->irq.sw_int = true;
 	rs600_irq_set(rdev);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
@@ -640,6 +658,8 @@ int rs690_resume(struct radeon_device *rdev)
 	atom_asic_init(rdev->mode_info.atom_context);
 	/* Resume clock after posting */
 	rv515_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return rs690_startup(rdev);
 }
 
@@ -662,7 +682,7 @@ void rs690_fini(struct radeon_device *rdev)
 	rs400_gart_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	radeon_fence_driver_fini(rdev);
-	radeon_object_fini(rdev);
+	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
 	rdev->bios = NULL;
@@ -700,10 +720,9 @@ int rs690_init(struct radeon_device *rdev)
 			RREG32(R_0007C0_CP_STAT));
 	}
 	/* check if cards are posted or not */
-	if (!radeon_card_posted(rdev) && rdev->bios) {
-		DRM_INFO("GPU not posted. posting now...\n");
-		atom_asic_init(rdev->mode_info.atom_context);
-	}
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
+
 	/* Initialize clocks */
 	radeon_get_clock_info(rdev->ddev);
 	/* Initialize power management */
@@ -711,7 +730,7 @@ int rs690_init(struct radeon_device *rdev)
 	/* Get vram informations */
 	rs690_vram_info(rdev);
 	/* Initialize memory controller (also test AGP) */
-	r = r420_mc_init(rdev);
+	r = rs690_mc_init(rdev);
 	if (r)
 		return r;
 	rv515_debugfs(rdev);
@@ -723,7 +742,7 @@ int rs690_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 	/* Memory manager */
-	r = radeon_object_init(rdev);
+	r = radeon_bo_init(rdev);
 	if (r)
 		return r;
 	r = rs400_gart_init(rdev);
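
The bandwidth hunks here and in rv515.c make two fixes to the fixed-point math: the divide now happens before the multiply (hdisplay/256 first, then times num_line_pair), and intermediate results are rounded up with rfixed_ceil so a fractional FIFO depth or priority mark is never silently truncated down. As a reference point, fixed20_12 is the driver's 20.12 fixed-point type; a sketch of what ceil means there, assuming the 12-bit fractional layout (the real helpers live in the radeon fixed-point header):

    /* Sketch of 20.12 fixed point and a ceil operation on it. */
    typedef struct { u32 full; } example_fixed20_12;

    #define EX_FRAC_BITS 12
    #define EX_FRAC_MASK ((1u << EX_FRAC_BITS) - 1)

    static example_fixed20_12 example_rfixed_ceil(example_fixed20_12 a)
    {
            if (a.full & EX_FRAC_MASK)  /* any fractional part rounds up */
                    a.full = (a.full & ~EX_FRAC_MASK) + (1u << EX_FRAC_BITS);
            return a;
    }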

+ 13 - 11
drivers/gpu/drm/radeon/rv515.c

@@ -478,7 +478,6 @@ static int rv515_startup(struct radeon_device *rdev)
 			return r;
 	}
 	/* Enable IRQ */
-	rdev->irq.sw_int = true;
 	rs600_irq_set(rdev);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
@@ -514,6 +513,8 @@ int rv515_resume(struct radeon_device *rdev)
 	atom_asic_init(rdev->mode_info.atom_context);
 	/* Resume clock after posting */
 	rv515_clock_startup(rdev);
+	/* Initialize surface registers */
+	radeon_surface_init(rdev);
 	return rv515_startup(rdev);
 }
 
@@ -540,11 +541,11 @@ void rv515_fini(struct radeon_device *rdev)
 	r100_wb_fini(rdev);
 	r100_ib_fini(rdev);
 	radeon_gem_fini(rdev);
-    rv370_pcie_gart_fini(rdev);
+	rv370_pcie_gart_fini(rdev);
 	radeon_agp_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	radeon_fence_driver_fini(rdev);
-	radeon_object_fini(rdev);
+	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
 	rdev->bios = NULL;
@@ -580,10 +581,8 @@ int rv515_init(struct radeon_device *rdev)
 			RREG32(R_0007C0_CP_STAT));
 	}
 	/* check if cards are posted or not */
-	if (!radeon_card_posted(rdev) && rdev->bios) {
-		DRM_INFO("GPU not posted. posting now...\n");
-		atom_asic_init(rdev->mode_info.atom_context);
-	}
+	if (radeon_boot_test_post_card(rdev) == false)
+		return -EINVAL;
 	/* Initialize clocks */
 	radeon_get_clock_info(rdev->ddev);
 	/* Initialize power management */
@@ -603,7 +602,7 @@ int rv515_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 	/* Memory manager */
-	r = radeon_object_init(rdev);
+	r = radeon_bo_init(rdev);
 	if (r)
 		return r;
 	r = rv370_pcie_gart_init(rdev);
@@ -892,8 +891,9 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
 
 	b.full = rfixed_const(mode->crtc_hdisplay);
 	c.full = rfixed_const(256);
-	a.full = rfixed_mul(wm->num_line_pair, b);
-	request_fifo_depth.full = rfixed_div(a, c);
+	a.full = rfixed_div(b, c);
+	request_fifo_depth.full = rfixed_mul(a, wm->num_line_pair);
+	request_fifo_depth.full = rfixed_ceil(request_fifo_depth);
 	if (a.full < rfixed_const(4)) {
 		wm->lb_request_fifo_depth = 4;
 	} else {
@@ -995,15 +995,17 @@ void rv515_crtc_bandwidth_compute(struct radeon_device *rdev,
 	a.full = rfixed_const(16);
 	wm->priority_mark_max.full = rfixed_const(crtc->base.mode.crtc_hdisplay);
 	wm->priority_mark_max.full = rfixed_div(wm->priority_mark_max, a);
+	wm->priority_mark_max.full = rfixed_ceil(wm->priority_mark_max);
 
 	/* Determine estimated width */
 	estimated_width.full = tolerable_latency.full - wm->worst_case_latency.full;
 	estimated_width.full = rfixed_div(estimated_width, consumption_time);
 	if (rfixed_trunc(estimated_width) > crtc->base.mode.crtc_hdisplay) {
-		wm->priority_mark.full = rfixed_const(10);
+		wm->priority_mark.full = wm->priority_mark_max.full;
 	} else {
 		a.full = rfixed_const(16);
 		wm->priority_mark.full = rfixed_div(estimated_width, a);
+		wm->priority_mark.full = rfixed_ceil(wm->priority_mark);
 		wm->priority_mark.full = wm->priority_mark_max.full - wm->priority_mark.full;
 	}
 }

+ 58 - 21
drivers/gpu/drm/radeon/rv770.c

@@ -92,7 +92,7 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
 void rv770_pcie_gart_disable(struct radeon_device *rdev)
 {
 	u32 tmp;
-	int i;
+	int i, r;
 
 	/* Disable all tables */
 	for (i = 0; i < 7; i++)
@@ -113,8 +113,12 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev)
 	WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
 	WREG32(MC_VM_MB_L1_TLB3_CNTL, tmp);
 	if (rdev->gart.table.vram.robj) {
-		radeon_object_kunmap(rdev->gart.table.vram.robj);
-		radeon_object_unpin(rdev->gart.table.vram.robj);
+		r = radeon_bo_reserve(rdev->gart.table.vram.robj, false);
+		if (likely(r == 0)) {
+			radeon_bo_kunmap(rdev->gart.table.vram.robj);
+			radeon_bo_unpin(rdev->gart.table.vram.robj);
+			radeon_bo_unreserve(rdev->gart.table.vram.robj);
+		}
 	}
 }
 
@@ -870,6 +874,14 @@ static int rv770_startup(struct radeon_device *rdev)
 {
 	int r;
 
+	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
+		r = r600_init_microcode(rdev);
+		if (r) {
+			DRM_ERROR("Failed to load firmware!\n");
+			return r;
+		}
+	}
+
 	rv770_mc_program(rdev);
 	if (rdev->flags & RADEON_IS_AGP) {
 		rv770_agp_enable(rdev);
@@ -880,13 +892,26 @@ static int rv770_startup(struct radeon_device *rdev)
 	}
 	rv770_gpu_init(rdev);
 
-	r = radeon_object_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
-			      &rdev->r600_blit.shader_gpu_addr);
+	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+	if (unlikely(r != 0))
+		return r;
+	r = radeon_bo_pin(rdev->r600_blit.shader_obj, RADEON_GEM_DOMAIN_VRAM,
+			&rdev->r600_blit.shader_gpu_addr);
+	radeon_bo_unreserve(rdev->r600_blit.shader_obj);
 	if (r) {
 		DRM_ERROR("failed to pin blit object %d\n", r);
 		return r;
 	}
 
+	/* Enable IRQ */
+	r = r600_irq_init(rdev);
+	if (r) {
+		DRM_ERROR("radeon: IH init failed (%d).\n", r);
+		radeon_irq_kms_fini(rdev);
+		return r;
+	}
+	r600_irq_set(rdev);
+
 	r = radeon_ring_init(rdev, rdev->cp.ring_size);
 	if (r)
 		return r;
@@ -934,13 +959,19 @@ int rv770_resume(struct radeon_device *rdev)
 
 int rv770_suspend(struct radeon_device *rdev)
 {
+	int r;
+
 	/* FIXME: we should wait for ring to be empty */
 	r700_cp_stop(rdev);
 	rdev->cp.ready = false;
 	r600_wb_disable(rdev);
 	rv770_pcie_gart_disable(rdev);
 	/* unpin shaders bo */
-        radeon_object_unpin(rdev->r600_blit.shader_obj);
+	r = radeon_bo_reserve(rdev->r600_blit.shader_obj, false);
+	if (likely(r == 0)) {
+		radeon_bo_unpin(rdev->r600_blit.shader_obj);
+		radeon_bo_unreserve(rdev->r600_blit.shader_obj);
+	}
 	return 0;
 }
 
@@ -975,7 +1006,11 @@ int rv770_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 	/* Post card if necessary */
-	if (!r600_card_posted(rdev) && rdev->bios) {
+	if (!r600_card_posted(rdev)) {
+		if (!rdev->bios) {
+			dev_err(rdev->dev, "Card not posted and no BIOS - ignoring\n");
+			return -EINVAL;
+		}
 		DRM_INFO("GPU not posted. posting now...\n");
 		atom_asic_init(rdev->mode_info.atom_context);
 	}
@@ -998,31 +1033,31 @@ int rv770_init(struct radeon_device *rdev)
 	if (r)
 		return r;
 	/* Memory manager */
-	r = radeon_object_init(rdev);
+	r = radeon_bo_init(rdev);
+	if (r)
+		return r;
+
+	r = radeon_irq_kms_init(rdev);
 	if (r)
 		return r;
+
 	rdev->cp.ring_obj = NULL;
 	r600_ring_init(rdev, 1024 * 1024);
 
-	if (!rdev->me_fw || !rdev->pfp_fw) {
-		r = r600_cp_init_microcode(rdev);
-		if (r) {
-			DRM_ERROR("Failed to load firmware!\n");
-			return r;
-		}
-	}
+	rdev->ih.ring_obj = NULL;
+	r600_ih_ring_init(rdev, 64 * 1024);
 
 	r = r600_pcie_gart_init(rdev);
 	if (r)
 		return r;
 
-	rdev->accel_working = true;
 	r = r600_blit_init(rdev);
 	if (r) {
-		DRM_ERROR("radeon: failled blitter (%d).\n", r);
-		rdev->accel_working = false;
+		DRM_ERROR("radeon: failed blitter (%d).\n", r);
+		return r;
 	}
 
+	rdev->accel_working = true;
 	r = rv770_startup(rdev);
 	if (r) {
 		rv770_suspend(rdev);
@@ -1034,12 +1069,12 @@ int rv770_init(struct radeon_device *rdev)
 	if (rdev->accel_working) {
 		r = radeon_ib_pool_init(rdev);
 		if (r) {
-			DRM_ERROR("radeon: failled initializing IB pool (%d).\n", r);
+			DRM_ERROR("radeon: failed initializing IB pool (%d).\n", r);
 			rdev->accel_working = false;
 		}
 		r = r600_ib_test(rdev);
 		if (r) {
-			DRM_ERROR("radeon: failled testing IB (%d).\n", r);
+			DRM_ERROR("radeon: failed testing IB (%d).\n", r);
 			rdev->accel_working = false;
 		}
 	}
@@ -1051,6 +1086,8 @@ void rv770_fini(struct radeon_device *rdev)
 	rv770_suspend(rdev);
 
 	r600_blit_fini(rdev);
+	r600_irq_fini(rdev);
+	radeon_irq_kms_fini(rdev);
 	radeon_ring_fini(rdev);
 	r600_wb_fini(rdev);
 	rv770_pcie_gart_fini(rdev);
@@ -1059,7 +1096,7 @@ void rv770_fini(struct radeon_device *rdev)
 	radeon_clocks_fini(rdev);
 	if (rdev->flags & RADEON_IS_AGP)
 		radeon_agp_fini(rdev);
-	radeon_object_fini(rdev);
+	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
 	rdev->bios = NULL;

+ 2 - 1
drivers/gpu/drm/ttm/Makefile

@@ -3,6 +3,7 @@
 
 ccflags-y := -Iinclude/drm
 ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
-	ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o
+	ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o \
+	ttm_object.o ttm_lock.o ttm_execbuf_util.o
 
 obj-$(CONFIG_DRM_TTM) += ttm.o

+ 283 - 262
drivers/gpu/drm/ttm/ttm_bo.c

@@ -27,6 +27,14 @@
 /*
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */
+/* Notes:
+ *
+ * We store a bo pointer in the drm_mm_node struct so we know which bo
+ * owns a specific node. There is no protection on the pointer, so to
+ * keep things sane you must access it only while holding the global
+ * lru lock, and you must reset it to NULL whenever you free a node.
+ */
 
 #include "ttm/ttm_module.h"
 #include "ttm/ttm_bo_driver.h"
@@ -51,6 +59,60 @@ static struct attribute ttm_bo_count = {
 	.mode = S_IRUGO
 };
 
+static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
+{
+	int i;
+
+	for (i = 0; i <= TTM_PL_PRIV5; i++)
+		if (flags & (1 << i)) {
+			*mem_type = i;
+			return 0;
+		}
+	return -EINVAL;
+}
+
+static void ttm_mem_type_manager_debug(struct ttm_bo_global *glob,
+					struct ttm_mem_type_manager *man)
+{
+	printk(KERN_ERR TTM_PFX "    has_type: %d\n", man->has_type);
+	printk(KERN_ERR TTM_PFX "    use_type: %d\n", man->use_type);
+	printk(KERN_ERR TTM_PFX "    flags: 0x%08X\n", man->flags);
+	printk(KERN_ERR TTM_PFX "    gpu_offset: 0x%08lX\n", man->gpu_offset);
+	printk(KERN_ERR TTM_PFX "    io_offset: 0x%08lX\n", man->io_offset);
+	printk(KERN_ERR TTM_PFX "    io_size: %ld\n", man->io_size);
+	printk(KERN_ERR TTM_PFX "    size: %ld\n", (unsigned long)man->size);
+	printk(KERN_ERR TTM_PFX "    available_caching: 0x%08X\n",
+		man->available_caching);
+	printk(KERN_ERR TTM_PFX "    default_caching: 0x%08X\n",
+		man->default_caching);
+	spin_lock(&glob->lru_lock);
+	drm_mm_debug_table(&man->manager, TTM_PFX);
+	spin_unlock(&glob->lru_lock);
+}
+
+static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
+					struct ttm_placement *placement)
+{
+	struct ttm_bo_device *bdev = bo->bdev;
+	struct ttm_bo_global *glob = bo->glob;
+	struct ttm_mem_type_manager *man;
+	int i, ret, mem_type;
+
+	printk(KERN_ERR TTM_PFX "No space for %p (%ld pages, %ldK, %ldM)\n",
+		bo, bo->mem.num_pages, bo->mem.size >> 10,
+		bo->mem.size >> 20);
+	for (i = 0; i < placement->num_placement; i++) {
+		ret = ttm_mem_type_from_flags(placement->placement[i],
+						&mem_type);
+		if (ret)
+			return;
+		man = &bdev->man[mem_type];
+		printk(KERN_ERR TTM_PFX "  placement[%d]=0x%08X (%d)\n",
+			i, placement->placement[i], mem_type);
+		ttm_mem_type_manager_debug(glob, man);
+	}
+}
+
 static ssize_t ttm_bo_global_show(struct kobject *kobj,
 				  struct attribute *attr,
 				  char *buffer)
@@ -117,7 +179,7 @@ int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
 		ret = wait_event_interruptible(bo->event_queue,
 					       atomic_read(&bo->reserved) == 0);
 		if (unlikely(ret != 0))
-			return -ERESTART;
+			return ret;
 	} else {
 		wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
 	}
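
This hunk is the heart of the -ERESTART to -ERESTARTSYS conversion: wait_event_interruptible() already returns -ERESTARTSYS when a signal arrives, and the core kernel knows how to handle that value, restarting the syscall transparently when the signal has no user handler instead of leaking a spurious error to userspace. The wait helper therefore propagates ret as-is rather than remapping it. A sketch of the propagation path; the ioctl-side function is hypothetical:

    /* -ERESTARTSYS must travel unmodified from the wait primitive up to
     * the syscall layer, which may then restart the call. */
    static int example_wait_unreserved(struct ttm_buffer_object *bo)
    {
            return wait_event_interruptible(bo->event_queue,
                                            atomic_read(&bo->reserved) == 0);
    }

    static int example_ioctl_path(struct ttm_buffer_object *bo)
    {
            int ret = example_wait_unreserved(bo);

            if (ret)        /* don't remap; the signal code inspects it */
                    return ret;
            /* ... proceed with the reserved buffer ... */
            return 0;
    }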
@@ -247,7 +309,6 @@ EXPORT_SYMBOL(ttm_bo_unreserve);
 /*
  * Call bo->mutex locked.
  */
-
 static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
@@ -275,9 +336,10 @@ static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
 		bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
 					page_flags | TTM_PAGE_FLAG_USER,
 					glob->dummy_read_page);
-		if (unlikely(bo->ttm == NULL))
+		if (unlikely(bo->ttm == NULL)) {
 			ret = -ENOMEM;
-		break;
+			break;
+		}
 
 		ret = ttm_tt_set_user(bo->ttm, current,
 				      bo->buffer_start, bo->num_pages);
@@ -328,14 +390,8 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 		}
 
 		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
-
-			struct ttm_mem_reg *old_mem = &bo->mem;
-			uint32_t save_flags = old_mem->placement;
-
-			*old_mem = *mem;
+			bo->mem = *mem;
 			mem->mm_node = NULL;
-			ttm_flag_masked(&save_flags, mem->placement,
-					TTM_PL_MASK_MEMTYPE);
 			goto moved;
 		}
 
@@ -418,6 +474,7 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
 			kref_put(&bo->list_kref, ttm_bo_ref_bug);
 		}
 		if (bo->mem.mm_node) {
+			bo->mem.mm_node->private = NULL;
 			drm_mm_put_block(bo->mem.mm_node);
 			bo->mem.mm_node = NULL;
 		}
@@ -554,24 +611,21 @@ void ttm_bo_unref(struct ttm_buffer_object **p_bo)
 }
 EXPORT_SYMBOL(ttm_bo_unref);
 
-static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
-			bool interruptible, bool no_wait)
+static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
+			bool no_wait)
 {
-	int ret = 0;
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bo->glob;
 	struct ttm_mem_reg evict_mem;
-	uint32_t proposed_placement;
-
-	if (bo->mem.mem_type != mem_type)
-		goto out;
+	struct ttm_placement placement;
+	int ret = 0;
 
 	spin_lock(&bo->lock);
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
 	spin_unlock(&bo->lock);
 
 	if (unlikely(ret != 0)) {
-		if (ret != -ERESTART) {
+		if (ret != -ERESTARTSYS) {
 			printk(KERN_ERR TTM_PFX
 			       "Failed to expire sync object before "
 			       "buffer eviction.\n");
@@ -584,116 +638,139 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
 	evict_mem = bo->mem;
 	evict_mem.mm_node = NULL;
 
-	proposed_placement = bdev->driver->evict_flags(bo);
-
-	ret = ttm_bo_mem_space(bo, proposed_placement,
-			       &evict_mem, interruptible, no_wait);
-	if (unlikely(ret != 0 && ret != -ERESTART))
-		ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM,
-				       &evict_mem, interruptible, no_wait);
-
+	placement.fpfn = 0;
+	placement.lpfn = 0;
+	placement.num_placement = 0;
+	placement.num_busy_placement = 0;
+	bdev->driver->evict_flags(bo, &placement);
+	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
+				no_wait);
 	if (ret) {
-		if (ret != -ERESTART)
+		if (ret != -ERESTARTSYS) {
 			printk(KERN_ERR TTM_PFX
 			       "Failed to find memory space for "
 			       "buffer 0x%p eviction.\n", bo);
+			ttm_bo_mem_space_debug(bo, &placement);
+		}
 		goto out;
 	}
 
 	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
 				     no_wait);
 	if (ret) {
-		if (ret != -ERESTART)
+		if (ret != -ERESTARTSYS)
 			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
+		spin_lock(&glob->lru_lock);
+		if (evict_mem.mm_node) {
+			evict_mem.mm_node->private = NULL;
+			drm_mm_put_block(evict_mem.mm_node);
+			evict_mem.mm_node = NULL;
+		}
+		spin_unlock(&glob->lru_lock);
 		goto out;
 	}
+	bo->evicted = true;
+out:
+	return ret;
+}
+
+static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
+				uint32_t mem_type,
+				bool interruptible, bool no_wait)
+{
+	struct ttm_bo_global *glob = bdev->glob;
+	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+	struct ttm_buffer_object *bo;
+	int ret, put_count = 0;
 
 	spin_lock(&glob->lru_lock);
-	if (evict_mem.mm_node) {
-		drm_mm_put_block(evict_mem.mm_node);
-		evict_mem.mm_node = NULL;
-	}
+	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
+	kref_get(&bo->list_kref);
+	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, false, 0);
+	if (likely(ret == 0))
+		put_count = ttm_bo_del_from_lru(bo);
 	spin_unlock(&glob->lru_lock);
-	bo->evicted = true;
-out:
+	if (unlikely(ret != 0))
+		return ret;
+	while (put_count--)
+		kref_put(&bo->list_kref, ttm_bo_ref_bug);
+	ret = ttm_bo_evict(bo, interruptible, no_wait);
+	ttm_bo_unreserve(bo);
+	kref_put(&bo->list_kref, ttm_bo_release_list);
 	return ret;
 }
 
+static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
+				struct ttm_mem_type_manager *man,
+				struct ttm_placement *placement,
+				struct ttm_mem_reg *mem,
+				struct drm_mm_node **node)
+{
+	struct ttm_bo_global *glob = bo->glob;
+	unsigned long lpfn;
+	int ret;
+
+	lpfn = placement->lpfn;
+	if (!lpfn)
+		lpfn = man->size;
+	*node = NULL;
+	do {
+		ret = drm_mm_pre_get(&man->manager);
+		if (unlikely(ret))
+			return ret;
+
+		spin_lock(&glob->lru_lock);
+		*node = drm_mm_search_free_in_range(&man->manager,
+					mem->num_pages, mem->page_alignment,
+					placement->fpfn, lpfn, 1);
+		if (unlikely(*node == NULL)) {
+			spin_unlock(&glob->lru_lock);
+			return 0;
+		}
+		*node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
+							mem->page_alignment,
+							placement->fpfn,
+							lpfn);
+		spin_unlock(&glob->lru_lock);
+	} while (*node == NULL);
+	return 0;
+}
+
 /**
  * Repeatedly evict memory from the LRU for @mem_type until we create enough
  * space, or we've evicted everything and there isn't enough space.
  */
-static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
-				  struct ttm_mem_reg *mem,
-				  uint32_t mem_type,
-				  bool interruptible, bool no_wait)
+static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
+					uint32_t mem_type,
+					struct ttm_placement *placement,
+					struct ttm_mem_reg *mem,
+					bool interruptible, bool no_wait)
 {
+	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_global *glob = bdev->glob;
-	struct drm_mm_node *node;
-	struct ttm_buffer_object *entry;
 	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
-	struct list_head *lru;
-	unsigned long num_pages = mem->num_pages;
-	int put_count = 0;
+	struct drm_mm_node *node;
 	int ret;
 
-retry_pre_get:
-	ret = drm_mm_pre_get(&man->manager);
-	if (unlikely(ret != 0))
-		return ret;
-
-	spin_lock(&glob->lru_lock);
 	do {
-		node = drm_mm_search_free(&man->manager, num_pages,
-					  mem->page_alignment, 1);
+		ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
+		if (unlikely(ret != 0))
+			return ret;
 		if (node)
 			break;
-
-		lru = &man->lru;
-		if (list_empty(lru))
+		spin_lock(&glob->lru_lock);
+		if (list_empty(&man->lru)) {
+			spin_unlock(&glob->lru_lock);
 			break;
-
-		entry = list_first_entry(lru, struct ttm_buffer_object, lru);
-		kref_get(&entry->list_kref);
-
-		ret =
-		    ttm_bo_reserve_locked(entry, interruptible, no_wait,
-					  false, 0);
-
-		if (likely(ret == 0))
-			put_count = ttm_bo_del_from_lru(entry);
-
+		}
 		spin_unlock(&glob->lru_lock);
-
+		ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
+						no_wait);
 		if (unlikely(ret != 0))
 			return ret;
-
-		while (put_count--)
-			kref_put(&entry->list_kref, ttm_bo_ref_bug);
-
-		ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);
-
-		ttm_bo_unreserve(entry);
-
-		kref_put(&entry->list_kref, ttm_bo_release_list);
-		if (ret)
-			return ret;
-
-		spin_lock(&glob->lru_lock);
 	} while (1);
-
-	if (!node) {
-		spin_unlock(&glob->lru_lock);
+	if (node == NULL)
 		return -ENOMEM;
-	}
-
-	node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
-	if (unlikely(!node)) {
-		spin_unlock(&glob->lru_lock);
-		goto retry_pre_get;
-	}
-
-	spin_unlock(&glob->lru_lock);
 	mem->mm_node = node;
 	mem->mem_type = mem_type;
 	return 0;
@@ -724,7 +801,6 @@ static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
 	return result;
 }
 
-
 static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
 				 bool disallow_fixed,
 				 uint32_t mem_type,
@@ -757,66 +833,55 @@ static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
  * space.
  */
 int ttm_bo_mem_space(struct ttm_buffer_object *bo,
-		     uint32_t proposed_placement,
-		     struct ttm_mem_reg *mem,
-		     bool interruptible, bool no_wait)
+			struct ttm_placement *placement,
+			struct ttm_mem_reg *mem,
+			bool interruptible, bool no_wait)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	struct ttm_bo_global *glob = bo->glob;
 	struct ttm_mem_type_manager *man;
-
-	uint32_t num_prios = bdev->driver->num_mem_type_prio;
-	const uint32_t *prios = bdev->driver->mem_type_prio;
-	uint32_t i;
 	uint32_t mem_type = TTM_PL_SYSTEM;
 	uint32_t cur_flags = 0;
 	bool type_found = false;
 	bool type_ok = false;
-	bool has_eagain = false;
+	bool has_erestartsys = false;
 	struct drm_mm_node *node = NULL;
-	int ret;
+	int i, ret;
 
 	mem->mm_node = NULL;
-	for (i = 0; i < num_prios; ++i) {
-		mem_type = prios[i];
+	for (i = 0; i < placement->num_placement; ++i) {
+		ret = ttm_mem_type_from_flags(placement->placement[i],
+						&mem_type);
+		if (ret)
+			return ret;
 		man = &bdev->man[mem_type];
 
 		type_ok = ttm_bo_mt_compatible(man,
-					       bo->type == ttm_bo_type_user,
-					       mem_type, proposed_placement,
-					       &cur_flags);
+						bo->type == ttm_bo_type_user,
+						mem_type,
+						placement->placement[i],
+						&cur_flags);
 
 		if (!type_ok)
 			continue;
 
 		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
 						  cur_flags);
+		/*
+		 * Merge the access and other non-mapping-related flag bits from
+		 * the memory placement flags into the current flags.
+		 */
+		ttm_flag_masked(&cur_flags, placement->placement[i],
+				~TTM_PL_MASK_MEMTYPE);
 
 		if (mem_type == TTM_PL_SYSTEM)
 			break;
 
 		if (man->has_type && man->use_type) {
 			type_found = true;
-			do {
-				ret = drm_mm_pre_get(&man->manager);
-				if (unlikely(ret))
-					return ret;
-
-				spin_lock(&glob->lru_lock);
-				node = drm_mm_search_free(&man->manager,
-							  mem->num_pages,
-							  mem->page_alignment,
-							  1);
-				if (unlikely(!node)) {
-					spin_unlock(&glob->lru_lock);
-					break;
-				}
-				node = drm_mm_get_block_atomic(node,
-							       mem->num_pages,
-							       mem->
-							       page_alignment);
-				spin_unlock(&glob->lru_lock);
-			} while (!node);
+			ret = ttm_bo_man_get_node(bo, man, placement, mem,
+							&node);
+			if (unlikely(ret))
+				return ret;
 		}
 		if (node)
 			break;
@@ -826,67 +891,65 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 		mem->mm_node = node;
 		mem->mem_type = mem_type;
 		mem->placement = cur_flags;
+		if (node)
+			node->private = bo;
 		return 0;
 	}
 
 	if (!type_found)
 		return -EINVAL;
 
-	num_prios = bdev->driver->num_mem_busy_prio;
-	prios = bdev->driver->mem_busy_prio;
-
-	for (i = 0; i < num_prios; ++i) {
-		mem_type = prios[i];
+	for (i = 0; i < placement->num_busy_placement; ++i) {
+		ret = ttm_mem_type_from_flags(placement->busy_placement[i],
+						&mem_type);
+		if (ret)
+			return ret;
 		man = &bdev->man[mem_type];
-
 		if (!man->has_type)
 			continue;
-
 		if (!ttm_bo_mt_compatible(man,
-					  bo->type == ttm_bo_type_user,
-					  mem_type,
-					  proposed_placement, &cur_flags))
+						bo->type == ttm_bo_type_user,
+						mem_type,
+						placement->busy_placement[i],
+						&cur_flags))
 			continue;
 
 		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
 						  cur_flags);
+		/*
+		 * Merge the access and other non-mapping-related flag bits from
+		 * the memory placement flags into the current flags.
+		 */
+		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
+				~TTM_PL_MASK_MEMTYPE);
 
-		ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
-					     interruptible, no_wait);
-
+		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
+						interruptible, no_wait);
 		if (ret == 0 && mem->mm_node) {
 			mem->placement = cur_flags;
+			mem->mm_node->private = bo;
 			return 0;
 		}
-
-		if (ret == -ERESTART)
-			has_eagain = true;
+		if (ret == -ERESTARTSYS)
+			has_erestartsys = true;
 	}
-
-	ret = (has_eagain) ? -ERESTART : -ENOMEM;
+	ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_mem_space);
 
 int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
 {
-	int ret = 0;
-
 	if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
 		return -EBUSY;
 
-	ret = wait_event_interruptible(bo->event_queue,
-				       atomic_read(&bo->cpu_writers) == 0);
-
-	if (ret == -ERESTARTSYS)
-		ret = -ERESTART;
-
-	return ret;
+	return wait_event_interruptible(bo->event_queue,
+					atomic_read(&bo->cpu_writers) == 0);
 }
 
 int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
-		       uint32_t proposed_placement,
-		       bool interruptible, bool no_wait)
+			struct ttm_placement *placement,
+			bool interruptible, bool no_wait)
 {
 	struct ttm_bo_global *glob = bo->glob;
 	int ret = 0;
@@ -899,101 +962,82 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 	 * Have the driver move function wait for idle when necessary,
 	 * instead of doing it here.
 	 */
-
 	spin_lock(&bo->lock);
 	ret = ttm_bo_wait(bo, false, interruptible, no_wait);
 	spin_unlock(&bo->lock);
-
 	if (ret)
 		return ret;
-
 	mem.num_pages = bo->num_pages;
 	mem.size = mem.num_pages << PAGE_SHIFT;
 	mem.page_alignment = bo->mem.page_alignment;
-
 	/*
 	 * Determine where to move the buffer.
 	 */
-
-	ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
-			       interruptible, no_wait);
+	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
 	if (ret)
 		goto out_unlock;
-
 	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
-
 out_unlock:
 	if (ret && mem.mm_node) {
 		spin_lock(&glob->lru_lock);
+		mem.mm_node->private = NULL;
 		drm_mm_put_block(mem.mm_node);
 		spin_unlock(&glob->lru_lock);
 	}
 	return ret;
 }
 
-static int ttm_bo_mem_compat(uint32_t proposed_placement,
+static int ttm_bo_mem_compat(struct ttm_placement *placement,
 			     struct ttm_mem_reg *mem)
 {
-	if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0)
-		return 0;
-	if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0)
-		return 0;
-
-	return 1;
+	int i;
+
+	for (i = 0; i < placement->num_placement; i++) {
+		if ((placement->placement[i] & mem->placement &
+			TTM_PL_MASK_CACHING) &&
+			(placement->placement[i] & mem->placement &
+			TTM_PL_MASK_MEM))
+			return i;
+	}
+	return -1;
 }
 
 int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
-			       uint32_t proposed_placement,
-			       bool interruptible, bool no_wait)
+				struct ttm_placement *placement,
+				bool interruptible, bool no_wait)
 {
 	int ret;
 
 	BUG_ON(!atomic_read(&bo->reserved));
-	bo->proposed_placement = proposed_placement;
-
-	TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n",
-		  (unsigned long)proposed_placement,
-		  (unsigned long)bo->mem.placement);
-
+	/* Check that range is valid */
+	if (placement->lpfn || placement->fpfn)
+		if (placement->fpfn > placement->lpfn ||
+			(placement->lpfn - placement->fpfn) < bo->num_pages)
+			return -EINVAL;
 	/*
 	 * Check whether we need to move buffer.
 	 */
-
-	if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) {
-		ret = ttm_bo_move_buffer(bo, bo->proposed_placement,
-					 interruptible, no_wait);
-		if (ret) {
-			if (ret != -ERESTART)
-				printk(KERN_ERR TTM_PFX
-				       "Failed moving buffer. "
-				       "Proposed placement 0x%08x\n",
-				       bo->proposed_placement);
-			if (ret == -ENOMEM)
-				printk(KERN_ERR TTM_PFX
-				       "Out of aperture space or "
-				       "DRM memory quota.\n");
+	ret = ttm_bo_mem_compat(placement, &bo->mem);
+	if (ret < 0) {
+		ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
+		if (ret)
 			return ret;
-		}
+	} else {
+		/*
+		 * Merge the access and other non-mapping-related flag bits from
+		 * the compatible memory placement flags into the active flags.
+		 */
+		ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
+				~TTM_PL_MASK_MEMTYPE);
 	}
-
 	/*
 	 * We might need to add a TTM.
 	 */
-
 	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
 		ret = ttm_bo_add_ttm(bo, true);
 		if (ret)
 			return ret;
 	}
-	/*
-	 * Validation has succeeded, move the access and other
-	 * non-mapping-related flag bits from the proposed flags to
-	 * the active flags
-	 */
-
-	ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
-			~TTM_PL_MASK_MEMTYPE);
-
 	return 0;
 }
 EXPORT_SYMBOL(ttm_buffer_object_validate);
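
The reworked validation entry point takes a struct ttm_placement naming every
domain the buffer may occupy, an optional page range (fpfn/lpfn), and a
separate busy list tried when the preferred domains are contested. A minimal
caller sketch, assuming the bo is already reserved; my_pin() and the chosen
flag combinations are hypothetical, not part of this patch:

	static int my_pin(struct ttm_buffer_object *bo)
	{
		uint32_t flags[] = {
			TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
			TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED,
		};
		struct ttm_placement placement = {
			.fpfn = 0,		/* start of allowed range */
			.lpfn = 0,		/* 0 == no upper limit */
			.placement = flags,
			.num_placement = 2,
			.busy_placement = flags,
			.num_busy_placement = 2,
		};

		return ttm_buffer_object_validate(bo, &placement, true, false);
	}
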
@@ -1041,8 +1085,10 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
 			   size_t acc_size,
 			   void (*destroy) (struct ttm_buffer_object *))
 {
-	int ret = 0;
+	int i, c, ret = 0;
 	unsigned long num_pages;
+	uint32_t placements[TTM_PL_PRIV5 + 1];
+	struct ttm_placement placement;
 
 	size += buffer_start & ~PAGE_MASK;
 	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
@@ -1099,7 +1145,16 @@ int ttm_buffer_object_init(struct ttm_bo_device *bdev,
 			goto out_err;
 	}
 
-	ret = ttm_buffer_object_validate(bo, flags, interruptible, false);
+	placement.fpfn = 0;
+	placement.lpfn = 0;
+	for (i = 0, c = 0; i <= TTM_PL_PRIV5; i++)
+		if (flags & (1 << i))
+			placements[c++] = (flags & ~TTM_PL_MASK_MEM) | (1 << i);
+	placement.placement = placements;
+	placement.num_placement = c;
+	placement.busy_placement = placements;
+	placement.num_busy_placement = c;
+	ret = ttm_buffer_object_validate(bo, &placement, interruptible, false);
 	if (ret)
 		goto out_err;
 
@@ -1134,8 +1189,8 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
 			     struct ttm_buffer_object **p_bo)
 {
 	struct ttm_buffer_object *bo;
-	int ret;
 	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+	int ret;
 
 	size_t acc_size =
 	    ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
@@ -1160,66 +1215,32 @@ int ttm_buffer_object_create(struct ttm_bo_device *bdev,
 	return ret;
 }
 
-static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
-			     uint32_t mem_type, bool allow_errors)
-{
-	int ret;
-
-	spin_lock(&bo->lock);
-	ret = ttm_bo_wait(bo, false, false, false);
-	spin_unlock(&bo->lock);
-
-	if (ret && allow_errors)
-		goto out;
-
-	if (bo->mem.mem_type == mem_type)
-		ret = ttm_bo_evict(bo, mem_type, false, false);
-
-	if (ret) {
-		if (allow_errors) {
-			goto out;
-		} else {
-			ret = 0;
-			printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
-		}
-	}
-
-out:
-	return ret;
-}
-
 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
-				   struct list_head *head,
-				   unsigned mem_type, bool allow_errors)
+					unsigned mem_type, bool allow_errors)
 {
+	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
 	struct ttm_bo_global *glob = bdev->glob;
-	struct ttm_buffer_object *entry;
 	int ret;
-	int put_count;
 
 	/*
 	 * Can't use standard list traversal since we're unlocking.
 	 */
 
 	spin_lock(&glob->lru_lock);
-
-	while (!list_empty(head)) {
-		entry = list_first_entry(head, struct ttm_buffer_object, lru);
-		kref_get(&entry->list_kref);
-		ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
-		put_count = ttm_bo_del_from_lru(entry);
+	while (!list_empty(&man->lru)) {
 		spin_unlock(&glob->lru_lock);
-		while (put_count--)
-			kref_put(&entry->list_kref, ttm_bo_ref_bug);
-		BUG_ON(ret);
-		ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
-		ttm_bo_unreserve(entry);
-		kref_put(&entry->list_kref, ttm_bo_release_list);
+		ret = ttm_mem_evict_first(bdev, mem_type, false, false);
+		if (ret) {
+			if (allow_errors) {
+				return ret;
+			} else {
+				printk(KERN_ERR TTM_PFX
+					"Cleanup eviction failed\n");
+			}
+		}
 		spin_lock(&glob->lru_lock);
 	}
-
 	spin_unlock(&glob->lru_lock);
-
 	return 0;
 }
 
@@ -1246,7 +1267,7 @@ int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 
 	ret = 0;
 	if (mem_type > 0) {
-		ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
+		ttm_bo_force_list_clean(bdev, mem_type, false);
 
 		spin_lock(&glob->lru_lock);
 		if (drm_mm_clean(&man->manager))
@@ -1279,12 +1300,12 @@ int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
 		return 0;
 	}
 
-	return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
+	return ttm_bo_force_list_clean(bdev, mem_type, true);
 }
 EXPORT_SYMBOL(ttm_bo_evict_mm);
 
 int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
-		   unsigned long p_offset, unsigned long p_size)
+			unsigned long p_size)
 {
 	int ret = -EINVAL;
 	struct ttm_mem_type_manager *man;
@@ -1314,7 +1335,7 @@ int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
 			       type);
 			return ret;
 		}
-		ret = drm_mm_init(&man->manager, p_offset, p_size);
+		ret = drm_mm_init(&man->manager, 0, p_size);
 		if (ret)
 			return ret;
 	}
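
With the offset parameter gone, ttm_bo_init_mm() takes only the size of the
space to manage; the drm_mm range now always starts at 0, and per-buffer
offset constraints are expressed through fpfn/lpfn in struct ttm_placement
instead. A driver-side sketch of the new call (vram_size is an illustrative
variable, not from this patch):

	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
	if (unlikely(ret != 0))
		return ret;
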
@@ -1463,7 +1484,7 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
 	 * Initialize the system memory buffer type.
 	 * Other types need to be driver / IOCTL initialized.
 	 */
-	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
+	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
 	if (unlikely(ret != 0))
 		goto out_no_sys;
 
@@ -1693,7 +1714,7 @@ int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
 			ret = wait_event_interruptible
 			    (bo->event_queue, atomic_read(&bo->reserved) == 0);
 			if (unlikely(ret != 0))
-				return -ERESTART;
+				return ret;
 		} else {
 			wait_event(bo->event_queue,
 				   atomic_read(&bo->reserved) == 0);
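
The driver evict_flags() callback changes shape in the same way: instead of
returning a single flags word, it fills in the struct ttm_placement passed by
ttm_bo_evict() above. A sketch of a callback under the new prototype; the
names and the choice of system memory with any caching mode are illustrative
assumptions, not part of this patch:

	static uint32_t evict_flag = TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;

	static void my_evict_flags(struct ttm_buffer_object *bo,
				   struct ttm_placement *placement)
	{
		/* Send evicted buffers to system memory, any caching mode. */
		placement->fpfn = 0;
		placement->lpfn = 0;		/* 0 == no upper bound */
		placement->placement = &evict_flag;
		placement->num_placement = 1;
		placement->busy_placement = &evict_flag;
		placement->num_busy_placement = 1;
	}
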

+ 1 - 0
drivers/gpu/drm/ttm/ttm_bo_util.c

@@ -369,6 +369,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
 #endif
 	return tmp;
 }
+EXPORT_SYMBOL(ttm_io_prot);
 
 static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
 			  unsigned long bus_base,
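
Exporting ttm_io_prot() lets drivers derive a pgprot_t matching a buffer's
caching flags when they construct mappings of their own; TTM's kmap path uses
the same call. A one-line sketch:

	/* Write-combined vs. cached protection, per the placement flags. */
	pgprot_t prot = ttm_io_prot(bo->mem.placement, PAGE_KERNEL);
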

+ 1 - 6
drivers/gpu/drm/ttm/ttm_bo_vm.c

@@ -114,7 +114,7 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
 		ret = ttm_bo_wait(bo, false, true, false);
 		spin_unlock(&bo->lock);
 		if (unlikely(ret != 0)) {
-			retval = (ret != -ERESTART) ?
+			retval = (ret != -ERESTARTSYS) ?
 			    VM_FAULT_SIGBUS : VM_FAULT_NOPAGE;
 			goto out_unlock;
 		}
@@ -349,9 +349,6 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
 	switch (ret) {
 	case 0:
 		break;
-	case -ERESTART:
-		ret = -EINTR;
-		goto out_unref;
 	case -EBUSY:
 		ret = -EAGAIN;
 		goto out_unref;
@@ -421,8 +418,6 @@ ssize_t ttm_bo_fbdev_io(struct ttm_buffer_object *bo, const char __user *wbuf,
 	switch (ret) {
 	case 0:
 		break;
-	case -ERESTART:
-		return -EINTR;
 	case -EBUSY:
 		return -EAGAIN;
 	default:
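
The -ERESTART cases deleted here follow from the core change: interrupted TTM
waits now return the standard -ERESTARTSYS, which the signal code either
restarts transparently or turns into -EINTR before userspace sees it, so the
per-call translation is dead code. The surviving pattern is simply to
propagate the wait primitive's result, e.g.:

	ret = wait_event_interruptible(bo->event_queue,
				       atomic_read(&bo->reserved) == 0);
	if (unlikely(ret != 0))
		return ret;	/* -ERESTARTSYS */
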

+ 117 - 0
drivers/gpu/drm/ttm/ttm_execbuf_util.c

@@ -0,0 +1,117 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "ttm/ttm_execbuf_util.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/module.h>
+
+void ttm_eu_backoff_reservation(struct list_head *list)
+{
+	struct ttm_validate_buffer *entry;
+
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+		if (!entry->reserved)
+			continue;
+
+		entry->reserved = false;
+		ttm_bo_unreserve(bo);
+	}
+}
+EXPORT_SYMBOL(ttm_eu_backoff_reservation);
+
+/*
+ * Reserve buffers for validation.
+ *
+ * If a buffer in the list is marked for CPU access, we back off and
+ * wait for that buffer to become free for GPU access.
+ *
+ * If a buffer is reserved for another validation, the validator with
+ * the highest validation sequence backs off and waits for that buffer
+ * to become unreserved. This prevents deadlocks when validating multiple
+ * buffers in different orders.
+ */
+
+int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
+{
+	struct ttm_validate_buffer *entry;
+	int ret;
+
+retry:
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+
+		entry->reserved = false;
+		ret = ttm_bo_reserve(bo, true, false, true, val_seq);
+		if (ret != 0) {
+			ttm_eu_backoff_reservation(list);
+			if (ret == -EAGAIN) {
+				ret = ttm_bo_wait_unreserved(bo, true);
+				if (unlikely(ret != 0))
+					return ret;
+				goto retry;
+			} else
+				return ret;
+		}
+
+		entry->reserved = true;
+		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+			ttm_eu_backoff_reservation(list);
+			ret = ttm_bo_wait_cpu(bo, false);
+			if (ret)
+				return ret;
+			goto retry;
+		}
+	}
+	return 0;
+}
+EXPORT_SYMBOL(ttm_eu_reserve_buffers);
+
+void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
+{
+	struct ttm_validate_buffer *entry;
+
+	list_for_each_entry(entry, list, head) {
+		struct ttm_buffer_object *bo = entry->bo;
+		struct ttm_bo_driver *driver = bo->bdev->driver;
+		void *old_sync_obj;
+
+		spin_lock(&bo->lock);
+		old_sync_obj = bo->sync_obj;
+		bo->sync_obj = driver->sync_obj_ref(sync_obj);
+		bo->sync_obj_arg = entry->new_sync_obj_arg;
+		spin_unlock(&bo->lock);
+		ttm_bo_unreserve(bo);
+		entry->reserved = false;
+		if (old_sync_obj)
+			driver->sync_obj_unref(&old_sync_obj);
+	}
+}
+EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
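
Taken together, the ttm_eu_* helpers package the reserve/submit/fence
sequence described in the comment above. A sketch of a command-submission
path using them; val_list, val_seq, fence and submit_commands() stand in for
the caller's own state and are not part of this patch:

	ret = ttm_eu_reserve_buffers(&val_list, val_seq);
	if (unlikely(ret != 0))
		return ret;

	ret = submit_commands();	/* hypothetical driver step */
	if (unlikely(ret != 0)) {
		ttm_eu_backoff_reservation(&val_list);
		return ret;
	}

	/* Attach the fence to each buffer and drop all reservations. */
	ttm_eu_fence_buffer_objects(&val_list, fence);
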

Some files were not shown because too many files changed in this diff