Merge branch 'drm-core-next' of git://people.freedesktop.org/~airlied/linux

* 'drm-core-next' of git://people.freedesktop.org/~airlied/linux: (290 commits)
  Revert "drm/ttm: add a way to bo_wait for either the last read or last write"
  Revert "drm/radeon/kms: add a new gem_wait ioctl with read/write flags"
  vmwgfx: Don't pass unused arguments to do_dirty functions
  vmwgfx: Emulate depth 32 framebuffers
  drm/radeon: Lower the severity of the radeon lockup messages.
  drm/i915/dp: Fix eDP on PCH DP on CPT/PPT
  drm/i915/dp: Introduce is_cpu_edp()
  drm/i915: use correct SPD type value
  drm/i915: fix ILK+ infoframe support
  drm/i915: add DP test request handling
  drm/i915: read full receiver capability field during DP hot plug
  drm/i915/dp: Remove eDP special cases from bandwidth checks
  drm/i915/dp: Fix the math in intel_dp_link_required
  drm/i915/panel: Always record the backlight level again (but cleverly)
  i915: Move i915_read/write out of line
  drm/i915: remove transcoder PLL mashing from mode_set per specs
  drm/i915: if transcoder disable fails, say which
  drm/i915: set watermarks for third pipe on IVB
  drm/i915: export a CPT mode set verification function
  drm/i915: fix transcoder PLL select masking
  ...
Linus Torvalds
commit 37be944a02
100 changed files with 8350 additions and 2571 deletions
  1. drivers/char/agp/intel-gtt.c (+28 -0)
  2. drivers/gpu/drm/Kconfig (+4 -0)
  3. drivers/gpu/drm/Makefile (+1 -0)
  4. drivers/gpu/drm/drm_crtc_helper.c (+2 -0)
  5. drivers/gpu/drm/drm_debugfs.c (+1 -4)
  6. drivers/gpu/drm/drm_drv.c (+2 -0)
  7. drivers/gpu/drm/drm_edid.c (+171 -0)
  8. drivers/gpu/drm/drm_gem.c (+88 -0)
  9. drivers/gpu/drm/drm_proc.c (+1 -2)
  10. drivers/gpu/drm/exynos/Kconfig (+20 -0)
  11. drivers/gpu/drm/exynos/Makefile (+11 -0)
  12. drivers/gpu/drm/exynos/exynos_drm_buf.c (+110 -0)
  13. drivers/gpu/drm/exynos/exynos_drm_buf.h (+53 -0)
  14. drivers/gpu/drm/exynos/exynos_drm_connector.c (+293 -0)
  15. drivers/gpu/drm/exynos/exynos_drm_connector.h (+34 -0)
  16. drivers/gpu/drm/exynos/exynos_drm_core.c (+272 -0)
  17. drivers/gpu/drm/exynos/exynos_drm_crtc.c (+381 -0)
  18. drivers/gpu/drm/exynos/exynos_drm_crtc.h (+38 -0)
  19. drivers/gpu/drm/exynos/exynos_drm_drv.c (+244 -0)
  20. drivers/gpu/drm/exynos/exynos_drm_drv.h (+254 -0)
  21. drivers/gpu/drm/exynos/exynos_drm_encoder.c (+271 -0)
  22. drivers/gpu/drm/exynos/exynos_drm_encoder.h (+45 -0)
  23. drivers/gpu/drm/exynos/exynos_drm_fb.c (+265 -0)
  24. drivers/gpu/drm/exynos/exynos_drm_fb.h (+37 -0)
  25. drivers/gpu/drm/exynos/exynos_drm_fbdev.c (+456 -0)
  26. drivers/gpu/drm/exynos/exynos_drm_fbdev.h (+37 -0)
  27. drivers/gpu/drm/exynos/exynos_drm_fimd.c (+811 -0)
  28. drivers/gpu/drm/exynos/exynos_drm_gem.c (+415 -0)
  29. drivers/gpu/drm/exynos/exynos_drm_gem.h (+107 -0)
  30. drivers/gpu/drm/i915/dvo_ch7017.c (+1 -1)
  31. drivers/gpu/drm/i915/dvo_ch7xxx.c (+2 -2)
  32. drivers/gpu/drm/i915/dvo_ivch.c (+3 -3)
  33. drivers/gpu/drm/i915/dvo_sil164.c (+1 -1)
  34. drivers/gpu/drm/i915/dvo_tfp410.c (+7 -7)
  35. drivers/gpu/drm/i915/i915_debugfs.c (+19 -19)
  36. drivers/gpu/drm/i915/i915_dma.c (+25 -23)
  37. drivers/gpu/drm/i915/i915_drv.c (+53 -10)
  38. drivers/gpu/drm/i915/i915_drv.h (+44 -59)
  39. drivers/gpu/drm/i915/i915_gem.c (+14 -99)
  40. drivers/gpu/drm/i915/i915_gem_debug.c (+4 -4)
  41. drivers/gpu/drm/i915/i915_gem_evict.c (+1 -1)
  42. drivers/gpu/drm/i915/i915_gem_execbuffer.c (+2 -1)
  43. drivers/gpu/drm/i915/i915_gem_gtt.c (+30 -0)
  44. drivers/gpu/drm/i915/i915_gem_tiling.c (+4 -11)
  45. drivers/gpu/drm/i915/i915_irq.c (+36 -6)
  46. drivers/gpu/drm/i915/i915_mem.c (+7 -7)
  47. drivers/gpu/drm/i915/i915_reg.h (+94 -17)
  48. drivers/gpu/drm/i915/i915_suspend.c (+4 -4)
  49. drivers/gpu/drm/i915/i915_trace.h (+23 -23)
  50. drivers/gpu/drm/i915/intel_acpi.c (+1 -1)
  51. drivers/gpu/drm/i915/intel_bios.c (+14 -9)
  52. drivers/gpu/drm/i915/intel_bios.h (+13 -9)
  53. drivers/gpu/drm/i915/intel_crt.c (+8 -12)
  54. drivers/gpu/drm/i915/intel_display.c (+477 -218)
  55. drivers/gpu/drm/i915/intel_dp.c (+405 -136)
  56. drivers/gpu/drm/i915/intel_drv.h (+14 -7)
  57. drivers/gpu/drm/i915/intel_hdmi.c (+19 -14)
  58. drivers/gpu/drm/i915/intel_i2c.c (+1 -7)
  59. drivers/gpu/drm/i915/intel_lvds.c (+5 -3)
  60. drivers/gpu/drm/i915/intel_modes.c (+2 -0)
  61. drivers/gpu/drm/i915/intel_opregion.c (+45 -45)
  62. drivers/gpu/drm/i915/intel_overlay.c (+73 -73)
  63. drivers/gpu/drm/i915/intel_panel.c (+16 -11)
  64. drivers/gpu/drm/i915/intel_ringbuffer.c (+273 -96)
  65. drivers/gpu/drm/i915/intel_ringbuffer.h (+9 -7)
  66. drivers/gpu/drm/i915/intel_sdvo.c (+123 -126)
  67. drivers/gpu/drm/i915/intel_sdvo_regs.h (+279 -279)
  68. drivers/gpu/drm/i915/intel_tv.c (+29 -29)
  69. drivers/gpu/drm/nouveau/Makefile (+5 -4)
  70. drivers/gpu/drm/nouveau/nouveau_backlight.c (+126 -43)
  71. drivers/gpu/drm/nouveau/nouveau_bios.c (+139 -136)
  72. drivers/gpu/drm/nouveau/nouveau_bios.h (+1 -1)
  73. drivers/gpu/drm/nouveau/nouveau_bo.c (+1 -1)
  74. drivers/gpu/drm/nouveau/nouveau_channel.c (+10 -6)
  75. drivers/gpu/drm/nouveau/nouveau_connector.c (+14 -38)
  76. drivers/gpu/drm/nouveau/nouveau_crtc.h (+1 -2)
  77. drivers/gpu/drm/nouveau/nouveau_display.c (+6 -3)
  78. drivers/gpu/drm/nouveau/nouveau_dp.c (+531 -419)
  79. drivers/gpu/drm/nouveau/nouveau_drv.c (+1 -1)
  80. drivers/gpu/drm/nouveau/nouveau_drv.h (+91 -32)
  81. drivers/gpu/drm/nouveau/nouveau_encoder.h (+5 -22)
  82. drivers/gpu/drm/nouveau/nouveau_fence.c (+1 -1)
  83. drivers/gpu/drm/nouveau/nouveau_i2c.c (+39 -17)
  84. drivers/gpu/drm/nouveau/nouveau_mem.c (+151 -133)
  85. drivers/gpu/drm/nouveau/nouveau_mm.c (+43 -46)
  86. drivers/gpu/drm/nouveau/nouveau_mm.h (+3 -2)
  87. drivers/gpu/drm/nouveau/nouveau_object.c (+11 -7)
  88. drivers/gpu/drm/nouveau/nouveau_perf.c (+95 -23)
  89. drivers/gpu/drm/nouveau/nouveau_pm.c (+60 -40)
  90. drivers/gpu/drm/nouveau/nouveau_pm.h (+11 -4)
  91. drivers/gpu/drm/nouveau/nouveau_reg.h (+7 -4)
  92. drivers/gpu/drm/nouveau/nouveau_sgdma.c (+21 -45)
  93. drivers/gpu/drm/nouveau/nouveau_state.c (+163 -47)
  94. drivers/gpu/drm/nouveau/nouveau_vm.c (+20 -20)
  95. drivers/gpu/drm/nouveau/nouveau_vm.h (+1 -1)
  96. drivers/gpu/drm/nouveau/nouveau_volt.c (+37 -9)
  97. drivers/gpu/drm/nouveau/nv04_display.c (+0 -23)
  98. drivers/gpu/drm/nouveau/nv04_pm.c (+4 -0)
  99. drivers/gpu/drm/nouveau/nv04_timer.c (+58 -26)
  100. drivers/gpu/drm/nouveau/nv31_mpeg.c (+62 -29)

+ 28 - 0
drivers/char/agp/intel-gtt.c

@@ -923,6 +923,9 @@ static int intel_fake_agp_insert_entries(struct agp_memory *mem,
 {
 	int ret = -EINVAL;
 
+	if (intel_private.base.do_idle_maps)
+		return -ENODEV;
+
 	if (intel_private.clear_fake_agp) {
 		int start = intel_private.base.stolen_size / PAGE_SIZE;
 		int end = intel_private.base.gtt_mappable_entries;
@@ -985,6 +988,9 @@ static int intel_fake_agp_remove_entries(struct agp_memory *mem,
 	if (mem->page_count == 0)
 		return 0;
 
+	if (intel_private.base.do_idle_maps)
+		return -ENODEV;
+
 	intel_gtt_clear_range(pg_start, mem->page_count);
 
 	if (intel_private.base.needs_dmar) {
@@ -1177,6 +1183,25 @@ static void gen6_cleanup(void)
 {
 }
 
+/* Certain Gen5 chipsets require idling the GPU before
+ * unmapping anything from the GTT when VT-d is enabled.
+ */
+extern int intel_iommu_gfx_mapped;
+static inline int needs_idle_maps(void)
+{
+	const unsigned short gpu_devid = intel_private.pcidev->device;
+
+	/* Query intel_iommu to see if we need the workaround. Presumably that
+	 * was loaded first.
+	 */
+	if ((gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB ||
+	     gpu_devid == PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG) &&
+	     intel_iommu_gfx_mapped)
+		return 1;
+
+	return 0;
+}
+
 static int i9xx_setup(void)
 {
 	u32 reg_addr;
@@ -1211,6 +1236,9 @@ static int i9xx_setup(void)
 		intel_private.gtt_bus_addr = reg_addr + gtt_offset;
 	}
 
+	if (needs_idle_maps())
+		intel_private.base.do_idle_maps = 1;
+
 	intel_i9xx_setup_flush();
 
 	return 0;
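
The hunks above make the fake-AGP insert/remove paths bail out with -ENODEV when do_idle_maps is set, and i9xx_setup() sets that flag only for an Ironlake mobile GPU running with VT-d active. Below is a minimal standalone model of just that decision, as a sketch: the numeric PCI IDs (0x0044 host bridge, 0x0046 IGP) and the iommu flag are illustrative stand-ins supplied by the caller, not values read from real hardware or from intel_iommu.

/* Userspace model of the needs_idle_maps() check above; IDs are assumed
 * Ironlake-mobile values and only illustrative. */
#include <stdio.h>

#define EXAMPLE_ID_IRONLAKE_M_HB 0x0044
#define EXAMPLE_ID_IRONLAKE_M_IG 0x0046

static int needs_idle_maps(unsigned short gpu_devid, int iommu_gfx_mapped)
{
	if ((gpu_devid == EXAMPLE_ID_IRONLAKE_M_HB ||
	     gpu_devid == EXAMPLE_ID_IRONLAKE_M_IG) && iommu_gfx_mapped)
		return 1;		/* workaround: idle GPU around GTT unmaps */
	return 0;
}

int main(void)
{
	printf("0x0046 with VT-d:    %d\n", needs_idle_maps(0x0046, 1));
	printf("0x0046 without VT-d: %d\n", needs_idle_maps(0x0046, 0));
	return 0;
}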

+ 4 - 0
drivers/gpu/drm/Kconfig

@@ -158,3 +158,7 @@ config DRM_SAVAGE
 	help
 	  Choose this option if you have a Savage3D/4/SuperSavage/Pro/Twister
 	  chipset. If M is selected the module will be called savage.
+
+source "drivers/gpu/drm/exynos/Kconfig"
+
+source "drivers/gpu/drm/vmwgfx/Kconfig"

+ 1 - 0
drivers/gpu/drm/Makefile

@@ -35,4 +35,5 @@ obj-$(CONFIG_DRM_SAVAGE)+= savage/
 obj-$(CONFIG_DRM_VMWGFX)+= vmwgfx/
 obj-$(CONFIG_DRM_VIA)	+=via/
 obj-$(CONFIG_DRM_NOUVEAU) +=nouveau/
+obj-$(CONFIG_DRM_EXYNOS) +=exynos/
 obj-y			+= i2c/

+ 2 - 0
drivers/gpu/drm/drm_crtc_helper.c

@@ -372,11 +372,13 @@ bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
 		encoder_funcs = encoder->helper_private;
 		if (!(ret = encoder_funcs->mode_fixup(encoder, mode,
 						      adjusted_mode))) {
+			DRM_DEBUG_KMS("Encoder fixup failed\n");
 			goto done;
 		}
 	}
 
 	if (!(ret = crtc_funcs->mode_fixup(crtc, mode, adjusted_mode))) {
+		DRM_DEBUG_KMS("CRTC fixup failed\n");
 		goto done;
 	}
 	DRM_DEBUG_KMS("[CRTC:%d]\n", crtc->base.id);

+ 1 - 4
drivers/gpu/drm/drm_debugfs.c

@@ -107,11 +107,8 @@ int drm_debugfs_create_files(struct drm_info_list *files, int count,
 		ent = debugfs_create_file(files[i].name, S_IFREG | S_IRUGO,
 					  root, tmp, &drm_debugfs_fops);
 		if (!ent) {
-			char name[64];
-			strncpy(name, root->d_name.name,
-						min(root->d_name.len, 64U));
 			DRM_ERROR("Cannot create /sys/kernel/debug/dri/%s/%s\n",
-				  name, files[i].name);
+				  root->d_name.name, files[i].name);
 			kfree(tmp);
 			ret = -1;
 			goto fail;

+ 2 - 0
drivers/gpu/drm/drm_drv.c

@@ -438,6 +438,8 @@ long drm_ioctl(struct file *filp,
 					goto err_i1;
 				}
 			}
+			if (asize > usize)
+				memset(kdata + usize, 0, asize - usize);
 		}
 
 		if (cmd & IOC_IN) {
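
The two added lines close a hole in drm_ioctl(): when the kernel's ioctl structure (asize) is larger than what an older userspace passed in (usize), the tail of kdata would otherwise reach the driver uninitialized. A small userspace model of that copy-in path, as a sketch with invented sizes:

/* Model of the copy-in fix: usize bytes come from "userspace", the
 * remaining asize - usize bytes must be zeroed before the "driver" looks
 * at the buffer.  Sizes and contents here are invented for the demo. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	size_t usize = 8;                       /* what old userspace knows about */
	size_t asize = 16;                      /* what the current kernel expects */
	unsigned char user_arg[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };

	unsigned char *kdata = malloc(asize);
	if (!kdata)
		return 1;

	memcpy(kdata, user_arg, usize);         /* copy_from_user() stand-in */
	if (asize > usize)
		memset(kdata + usize, 0, asize - usize);   /* the fix */

	for (size_t i = 0; i < asize; i++)
		printf("%u ", kdata[i]);        /* tail prints as zeros */
	printf("\n");

	free(kdata);
	return 0;
}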

+ 171 - 0
drivers/gpu/drm/drm_edid.c

@@ -1319,6 +1319,7 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
 #define HDMI_IDENTIFIER 0x000C03
 #define AUDIO_BLOCK	0x01
 #define VENDOR_BLOCK    0x03
+#define SPEAKER_BLOCK	0x04
 #define EDID_BASIC_AUDIO	(1 << 6)
 
 /**
@@ -1347,6 +1348,176 @@ u8 *drm_find_cea_extension(struct edid *edid)
 }
 EXPORT_SYMBOL(drm_find_cea_extension);
 
+static void
+parse_hdmi_vsdb(struct drm_connector *connector, uint8_t *db)
+{
+	connector->eld[5] |= (db[6] >> 7) << 1;  /* Supports_AI */
+
+	connector->dvi_dual = db[6] & 1;
+	connector->max_tmds_clock = db[7] * 5;
+
+	connector->latency_present[0] = db[8] >> 7;
+	connector->latency_present[1] = (db[8] >> 6) & 1;
+	connector->video_latency[0] = db[9];
+	connector->audio_latency[0] = db[10];
+	connector->video_latency[1] = db[11];
+	connector->audio_latency[1] = db[12];
+
+	DRM_LOG_KMS("HDMI: DVI dual %d, "
+		    "max TMDS clock %d, "
+		    "latency present %d %d, "
+		    "video latency %d %d, "
+		    "audio latency %d %d\n",
+		    connector->dvi_dual,
+		    connector->max_tmds_clock,
+	      (int) connector->latency_present[0],
+	      (int) connector->latency_present[1],
+		    connector->video_latency[0],
+		    connector->video_latency[1],
+		    connector->audio_latency[0],
+		    connector->audio_latency[1]);
+}
+
+static void
+monitor_name(struct detailed_timing *t, void *data)
+{
+	if (t->data.other_data.type == EDID_DETAIL_MONITOR_NAME)
+		*(u8 **)data = t->data.other_data.data.str.str;
+}
+
+/**
+ * drm_edid_to_eld - build ELD from EDID
+ * @connector: connector corresponding to the HDMI/DP sink
+ * @edid: EDID to parse
+ *
+ * Fill the ELD (EDID-Like Data) buffer for passing to the audio driver.
+ * Some ELD fields are left to the graphics driver caller:
+ * - Conn_Type
+ * - HDCP
+ * - Port_ID
+ */
+void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
+{
+	uint8_t *eld = connector->eld;
+	u8 *cea;
+	u8 *name;
+	u8 *db;
+	int sad_count = 0;
+	int mnl;
+	int dbl;
+
+	memset(eld, 0, sizeof(connector->eld));
+
+	cea = drm_find_cea_extension(edid);
+	if (!cea) {
+		DRM_DEBUG_KMS("ELD: no CEA Extension found\n");
+		return;
+	}
+
+	name = NULL;
+	drm_for_each_detailed_block((u8 *)edid, monitor_name, &name);
+	for (mnl = 0; name && mnl < 13; mnl++) {
+		if (name[mnl] == 0x0a)
+			break;
+		eld[20 + mnl] = name[mnl];
+	}
+	eld[4] = (cea[1] << 5) | mnl;
+	DRM_DEBUG_KMS("ELD monitor %s\n", eld + 20);
+
+	eld[0] = 2 << 3;		/* ELD version: 2 */
+
+	eld[16] = edid->mfg_id[0];
+	eld[17] = edid->mfg_id[1];
+	eld[18] = edid->prod_code[0];
+	eld[19] = edid->prod_code[1];
+
+	for (db = cea + 4; db < cea + cea[2]; db += dbl + 1) {
+		dbl = db[0] & 0x1f;
+
+		switch ((db[0] & 0xe0) >> 5) {
+		case AUDIO_BLOCK:	/* Audio Data Block, contains SADs */
+			sad_count = dbl / 3;
+			memcpy(eld + 20 + mnl, &db[1], dbl);
+			break;
+		case SPEAKER_BLOCK:	/* Speaker Allocation Data Block */
+			eld[7] = db[1];
+			break;
+		case VENDOR_BLOCK:
+			/* HDMI Vendor-Specific Data Block */
+			if (db[1] == 0x03 && db[2] == 0x0c && db[3] == 0)
+				parse_hdmi_vsdb(connector, db);
+			break;
+		default:
+			break;
+		}
+	}
+	eld[5] |= sad_count << 4;
+	eld[2] = (20 + mnl + sad_count * 3 + 3) / 4;
+
+	DRM_DEBUG_KMS("ELD size %d, SAD count %d\n", (int)eld[2], sad_count);
+}
+EXPORT_SYMBOL(drm_edid_to_eld);
+
+/**
+ * drm_av_sync_delay - HDMI/DP sink audio-video sync delay in millisecond
+ * @connector: connector associated with the HDMI/DP sink
+ * @mode: the display mode
+ */
+int drm_av_sync_delay(struct drm_connector *connector,
+		      struct drm_display_mode *mode)
+{
+	int i = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
+	int a, v;
+
+	if (!connector->latency_present[0])
+		return 0;
+	if (!connector->latency_present[1])
+		i = 0;
+
+	a = connector->audio_latency[i];
+	v = connector->video_latency[i];
+
+	/*
+	 * HDMI/DP sink doesn't support audio or video?
+	 */
+	if (a == 255 || v == 255)
+		return 0;
+
+	/*
+	 * Convert raw EDID values to millisecond.
+	 * Treat unknown latency as 0ms.
+	 */
+	if (a)
+		a = min(2 * (a - 1), 500);
+	if (v)
+		v = min(2 * (v - 1), 500);
+
+	return max(v - a, 0);
+}
+EXPORT_SYMBOL(drm_av_sync_delay);
+
+/**
+ * drm_select_eld - select one ELD from multiple HDMI/DP sinks
+ * @encoder: the encoder just changed display mode
+ * @mode: the adjusted display mode
+ *
+ * It's possible for one encoder to be associated with multiple HDMI/DP sinks.
+ * The policy is now hard coded to simply use the first HDMI/DP sink's ELD.
+ */
+struct drm_connector *drm_select_eld(struct drm_encoder *encoder,
+				     struct drm_display_mode *mode)
+{
+	struct drm_connector *connector;
+	struct drm_device *dev = encoder->dev;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head)
+		if (connector->encoder == encoder && connector->eld[0])
+			return connector;
+
+	return NULL;
+}
+EXPORT_SYMBOL(drm_select_eld);
+
 /**
  * drm_detect_hdmi_monitor - detect whether monitor is hdmi.
  * @edid: monitor EDID information
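
drm_av_sync_delay() above turns the raw EDID latency bytes cached by parse_hdmi_vsdb() into a millisecond delay: 0 means unknown, 255 means the sink lacks audio or video, and any other value v encodes 2*(v-1) ms, capped at 500 ms; the return value is how much the audio should be delayed relative to video. A standalone model of that arithmetic, with made-up latency bytes:

/* Standalone model of the drm_av_sync_delay() math; the latency bytes in
 * main() are invented example values, not real EDID data. */
#include <stdio.h>

static int edid_latency_to_ms(int raw)
{
	if (raw == 0 || raw == 255)	/* unknown / unsupported */
		return 0;
	int ms = 2 * (raw - 1);
	return ms > 500 ? 500 : ms;	/* cap at 500 ms as the kernel does */
}

static int av_sync_delay_ms(int audio_raw, int video_raw)
{
	if (audio_raw == 255 || video_raw == 255)
		return 0;		/* sink doesn't support audio or video */
	int a = edid_latency_to_ms(audio_raw);
	int v = edid_latency_to_ms(video_raw);
	return v > a ? v - a : 0;	/* delay audio by the difference */
}

int main(void)
{
	/* video byte 21 -> 40 ms, audio byte 11 -> 20 ms: delay audio 20 ms */
	printf("delay = %d ms\n", av_sync_delay_ms(11, 21));
	return 0;
}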

+ 88 - 0
drivers/gpu/drm/drm_gem.c

@@ -285,6 +285,94 @@ again:
 }
 EXPORT_SYMBOL(drm_gem_handle_create);
 
+
+/**
+ * drm_gem_free_mmap_offset - release a fake mmap offset for an object
+ * @obj: obj in question
+ *
+ * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
+ */
+void
+drm_gem_free_mmap_offset(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_gem_mm *mm = dev->mm_private;
+	struct drm_map_list *list = &obj->map_list;
+
+	drm_ht_remove_item(&mm->offset_hash, &list->hash);
+	drm_mm_put_block(list->file_offset_node);
+	kfree(list->map);
+	list->map = NULL;
+}
+EXPORT_SYMBOL(drm_gem_free_mmap_offset);
+
+/**
+ * drm_gem_create_mmap_offset - create a fake mmap offset for an object
+ * @obj: obj in question
+ *
+ * GEM memory mapping works by handing back to userspace a fake mmap offset
+ * it can use in a subsequent mmap(2) call.  The DRM core code then looks
+ * up the object based on the offset and sets up the various memory mapping
+ * structures.
+ *
+ * This routine allocates and attaches a fake offset for @obj.
+ */
+int
+drm_gem_create_mmap_offset(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_gem_mm *mm = dev->mm_private;
+	struct drm_map_list *list;
+	struct drm_local_map *map;
+	int ret = 0;
+
+	/* Set the object up for mmap'ing */
+	list = &obj->map_list;
+	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
+	if (!list->map)
+		return -ENOMEM;
+
+	map = list->map;
+	map->type = _DRM_GEM;
+	map->size = obj->size;
+	map->handle = obj;
+
+	/* Get a DRM GEM mmap offset allocated... */
+	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
+			obj->size / PAGE_SIZE, 0, 0);
+
+	if (!list->file_offset_node) {
+		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
+		ret = -ENOSPC;
+		goto out_free_list;
+	}
+
+	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
+			obj->size / PAGE_SIZE, 0);
+	if (!list->file_offset_node) {
+		ret = -ENOMEM;
+		goto out_free_list;
+	}
+
+	list->hash.key = list->file_offset_node->start;
+	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
+	if (ret) {
+		DRM_ERROR("failed to add to map hash\n");
+		goto out_free_mm;
+	}
+
+	return 0;
+
+out_free_mm:
+	drm_mm_put_block(list->file_offset_node);
+out_free_list:
+	kfree(list->map);
+	list->map = NULL;
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_gem_create_mmap_offset);
+
 /** Returns a reference to the object named by the handle. */
 struct drm_gem_object *
 drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
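
drm_gem_create_mmap_offset()/drm_gem_free_mmap_offset() lift the fake-offset bookkeeping that drivers used to open-code into the GEM core. The kernel-context sketch below shows the intended call pattern from a driver's side; only the two exported helpers and the map_list fields come from the patch, the example_* functions are hypothetical.

/* Hypothetical driver helper: hand userspace the fake offset it should
 * pass to mmap(2) for a GEM object, allocating it on first use. */
static int example_gem_map_offset(struct drm_gem_object *obj, u64 *offset)
{
	int ret;

	if (!obj->map_list.map) {		/* no fake offset allocated yet */
		ret = drm_gem_create_mmap_offset(obj);
		if (ret)
			return ret;
	}

	/* the hash key is in pages; userspace expects a byte offset */
	*offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
	return 0;
}

/* ...and on object teardown the driver releases the offset again: */
static void example_gem_release(struct drm_gem_object *obj)
{
	if (obj->map_list.map)
		drm_gem_free_mmap_offset(obj);
}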

+ 1 - 2
drivers/gpu/drm/drm_proc.c

@@ -95,7 +95,6 @@ int drm_proc_create_files(struct drm_info_list *files, int count,
 	struct drm_device *dev = minor->dev;
 	struct proc_dir_entry *ent;
 	struct drm_info_node *tmp;
-	char name[64];
 	int i, ret;
 
 	for (i = 0; i < count; i++) {
@@ -118,7 +117,7 @@ int drm_proc_create_files(struct drm_info_list *files, int count,
 				       &drm_proc_fops, tmp);
 		if (!ent) {
 			DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
-				  name, files[i].name);
+				  root->name, files[i].name);
 			list_del(&tmp->list);
 			kfree(tmp);
 			ret = -1;

+ 20 - 0
drivers/gpu/drm/exynos/Kconfig

@@ -0,0 +1,20 @@
+config DRM_EXYNOS
+	tristate "DRM Support for Samsung SoC EXYNOS Series"
+	depends on DRM && PLAT_SAMSUNG
+	default	n
+	select DRM_KMS_HELPER
+	select FB_CFB_FILLRECT
+	select FB_CFB_COPYAREA
+	select FB_CFB_IMAGEBLIT
+	select VT_HW_CONSOLE_BINDING if FRAMEBUFFER_CONSOLE
+	help
+	  Choose this option if you have a Samsung SoC EXYNOS chipset.
+	  If M is selected the module will be called exynosdrm.
+
+config DRM_EXYNOS_FIMD
+	tristate "Exynos DRM FIMD"
+	depends on DRM_EXYNOS
+	default n
+	help
+	  Choose this option if you want to use Exynos FIMD for DRM.
+	  If M is selected, the module will be called exynos_drm_fimd

+ 11 - 0
drivers/gpu/drm/exynos/Makefile

@@ -0,0 +1,11 @@
+#
+# Makefile for the drm device driver.  This driver provides support for the
+# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
+
+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/exynos
+exynosdrm-y := exynos_drm_drv.o exynos_drm_encoder.o exynos_drm_connector.o \
+		exynos_drm_crtc.o exynos_drm_fbdev.o exynos_drm_fb.o \
+		exynos_drm_buf.o exynos_drm_gem.o exynos_drm_core.o
+
+obj-$(CONFIG_DRM_EXYNOS) += exynosdrm.o
+obj-$(CONFIG_DRM_EXYNOS_FIMD) += exynos_drm_fimd.o

+ 110 - 0
drivers/gpu/drm/exynos/exynos_drm_buf.c

@@ -0,0 +1,110 @@
+/* exynos_drm_buf.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_buf.h"
+
+static DEFINE_MUTEX(exynos_drm_buf_lock);
+
+static int lowlevel_buffer_allocate(struct drm_device *dev,
+		struct exynos_drm_buf_entry *entry)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	entry->vaddr = dma_alloc_writecombine(dev->dev, entry->size,
+			(dma_addr_t *)&entry->paddr, GFP_KERNEL);
+	if (!entry->paddr) {
+		DRM_ERROR("failed to allocate buffer.\n");
+		return -ENOMEM;
+	}
+
+	DRM_DEBUG_KMS("allocated : vaddr(0x%x), paddr(0x%x), size(0x%x)\n",
+			(unsigned int)entry->vaddr, entry->paddr, entry->size);
+
+	return 0;
+}
+
+static void lowlevel_buffer_deallocate(struct drm_device *dev,
+		struct exynos_drm_buf_entry *entry)
+{
+	DRM_DEBUG_KMS("%s.\n", __FILE__);
+
+	if (entry->paddr && entry->vaddr && entry->size)
+		dma_free_writecombine(dev->dev, entry->size, entry->vaddr,
+				entry->paddr);
+	else
+		DRM_DEBUG_KMS("entry data is null.\n");
+}
+
+struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev,
+		unsigned int size)
+{
+	struct exynos_drm_buf_entry *entry;
+
+	DRM_DEBUG_KMS("%s.\n", __FILE__);
+
+	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry) {
+		DRM_ERROR("failed to allocate exynos_drm_buf_entry.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	entry->size = size;
+
+	/*
+	 * allocate memory region with size and set the memory information
+	 * to vaddr and paddr of a entry object.
+	 */
+	if (lowlevel_buffer_allocate(dev, entry) < 0) {
+		kfree(entry);
+		entry = NULL;
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return entry;
+}
+
+void exynos_drm_buf_destroy(struct drm_device *dev,
+		struct exynos_drm_buf_entry *entry)
+{
+	DRM_DEBUG_KMS("%s.\n", __FILE__);
+
+	if (!entry) {
+		DRM_DEBUG_KMS("entry is null.\n");
+		return;
+	}
+
+	lowlevel_buffer_deallocate(dev, entry);
+
+	kfree(entry);
+	entry = NULL;
+}
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DRM Buffer Management Module");
+MODULE_LICENSE("GPL");
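
exynos_drm_buf_create()/exynos_drm_buf_destroy() wrap dma_alloc_writecombine() so the rest of the driver handles one entry carrying paddr/vaddr/size. A hedged kernel-context sketch of how a caller might use the pair; the surrounding function and fb_size are hypothetical, only the two buf helpers come from this file.

/* Hypothetical caller: allocate a contiguous scanout buffer, use it,
 * and free it again on teardown. */
static int example_alloc_scanout(struct drm_device *dev, unsigned int fb_size)
{
	struct exynos_drm_buf_entry *entry;

	entry = exynos_drm_buf_create(dev, PAGE_ALIGN(fb_size));
	if (IS_ERR(entry))
		return PTR_ERR(entry);

	/* entry->paddr is what the display controller scans out from,
	 * entry->vaddr is the CPU's write-combined mapping. */

	/* ... program hardware, attach to a framebuffer, etc. ... */

	exynos_drm_buf_destroy(dev, entry);	/* teardown */
	return 0;
}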

+ 53 - 0
drivers/gpu/drm/exynos/exynos_drm_buf.h

@@ -0,0 +1,53 @@
+/* exynos_drm_buf.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_BUF_H_
+#define _EXYNOS_DRM_BUF_H_
+
+/*
+ * exynos drm buffer entry structure.
+ *
+ * @paddr: physical address of allocated memory.
+ * @vaddr: kernel virtual address of allocated memory.
+ * @size: size of allocated memory.
+ */
+struct exynos_drm_buf_entry {
+	dma_addr_t paddr;
+	void __iomem *vaddr;
+	unsigned int size;
+};
+
+/* allocate physical memory. */
+struct exynos_drm_buf_entry *exynos_drm_buf_create(struct drm_device *dev,
+		unsigned int size);
+
+/* get physical memory information of a drm framebuffer. */
+struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb);
+
+/* remove allocated physical memory. */
+void exynos_drm_buf_destroy(struct drm_device *dev,
+		struct exynos_drm_buf_entry *entry);
+
+#endif

+ 293 - 0
drivers/gpu/drm/exynos/exynos_drm_connector.c

@@ -0,0 +1,293 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ *	Inki Dae <inki.dae@samsung.com>
+ *	Joonyoung Shim <jy0922.shim@samsung.com>
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_encoder.h"
+
+#define MAX_EDID 256
+#define to_exynos_connector(x)	container_of(x, struct exynos_drm_connector,\
+				drm_connector)
+
+struct exynos_drm_connector {
+	struct drm_connector	drm_connector;
+};
+
+/* convert exynos_video_timings to drm_display_mode */
+static inline void
+convert_to_display_mode(struct drm_display_mode *mode,
+			struct fb_videomode *timing)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	mode->clock = timing->pixclock / 1000;
+
+	mode->hdisplay = timing->xres;
+	mode->hsync_start = mode->hdisplay + timing->left_margin;
+	mode->hsync_end = mode->hsync_start + timing->hsync_len;
+	mode->htotal = mode->hsync_end + timing->right_margin;
+
+	mode->vdisplay = timing->yres;
+	mode->vsync_start = mode->vdisplay + timing->upper_margin;
+	mode->vsync_end = mode->vsync_start + timing->vsync_len;
+	mode->vtotal = mode->vsync_end + timing->lower_margin;
+}
+
+/* convert drm_display_mode to exynos_video_timings */
+static inline void
+convert_to_video_timing(struct fb_videomode *timing,
+			struct drm_display_mode *mode)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	memset(timing, 0, sizeof(*timing));
+
+	timing->pixclock = mode->clock * 1000;
+	timing->refresh = mode->vrefresh;
+
+	timing->xres = mode->hdisplay;
+	timing->left_margin = mode->hsync_start - mode->hdisplay;
+	timing->hsync_len = mode->hsync_end - mode->hsync_start;
+	timing->right_margin = mode->htotal - mode->hsync_end;
+
+	timing->yres = mode->vdisplay;
+	timing->upper_margin = mode->vsync_start - mode->vdisplay;
+	timing->vsync_len = mode->vsync_end - mode->vsync_start;
+	timing->lower_margin = mode->vtotal - mode->vsync_end;
+
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		timing->vmode = FB_VMODE_INTERLACED;
+	else
+		timing->vmode = FB_VMODE_NONINTERLACED;
+
+	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+		timing->vmode |= FB_VMODE_DOUBLE;
+}
+
+static int exynos_drm_connector_get_modes(struct drm_connector *connector)
+{
+	struct exynos_drm_manager *manager =
+				exynos_drm_get_manager(connector->encoder);
+	struct exynos_drm_display *display = manager->display;
+	unsigned int count;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (!display) {
+		DRM_DEBUG_KMS("display is null.\n");
+		return 0;
+	}
+
+	/*
+	 * if get_edid() exists then get_edid() callback of hdmi side
+	 * is called to get edid data through i2c interface else
+	 * get timing from the FIMD driver(display controller).
+	 *
+	 * P.S. in case of lcd panel, count is always 1 if success
+	 * because lcd panel has only one mode.
+	 */
+	if (display->get_edid) {
+		int ret;
+		void *edid;
+
+		edid = kzalloc(MAX_EDID, GFP_KERNEL);
+		if (!edid) {
+			DRM_ERROR("failed to allocate edid\n");
+			return 0;
+		}
+
+		ret = display->get_edid(manager->dev, connector,
+						edid, MAX_EDID);
+		if (ret < 0) {
+			DRM_ERROR("failed to get edid data.\n");
+			kfree(edid);
+			edid = NULL;
+			return 0;
+		}
+
+		drm_mode_connector_update_edid_property(connector, edid);
+		count = drm_add_edid_modes(connector, edid);
+
+		kfree(connector->display_info.raw_edid);
+		connector->display_info.raw_edid = edid;
+	} else {
+		struct drm_display_mode *mode = drm_mode_create(connector->dev);
+		struct fb_videomode *timing;
+
+		if (display->get_timing)
+			timing = display->get_timing(manager->dev);
+		else {
+			drm_mode_destroy(connector->dev, mode);
+			return 0;
+		}
+
+		convert_to_display_mode(mode, timing);
+
+		mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+		drm_mode_set_name(mode);
+		drm_mode_probed_add(connector, mode);
+
+		count = 1;
+	}
+
+	return count;
+}
+
+static int exynos_drm_connector_mode_valid(struct drm_connector *connector,
+					    struct drm_display_mode *mode)
+{
+	struct exynos_drm_manager *manager =
+				exynos_drm_get_manager(connector->encoder);
+	struct exynos_drm_display *display = manager->display;
+	struct fb_videomode timing;
+	int ret = MODE_BAD;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	convert_to_video_timing(&timing, mode);
+
+	if (display && display->check_timing)
+		if (!display->check_timing(manager->dev, (void *)&timing))
+			ret = MODE_OK;
+
+	return ret;
+}
+
+struct drm_encoder *exynos_drm_best_encoder(struct drm_connector *connector)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	return connector->encoder;
+}
+
+static struct drm_connector_helper_funcs exynos_connector_helper_funcs = {
+	.get_modes	= exynos_drm_connector_get_modes,
+	.mode_valid	= exynos_drm_connector_mode_valid,
+	.best_encoder	= exynos_drm_best_encoder,
+};
+
+/* get detection status of display device. */
+static enum drm_connector_status
+exynos_drm_connector_detect(struct drm_connector *connector, bool force)
+{
+	struct exynos_drm_manager *manager =
+				exynos_drm_get_manager(connector->encoder);
+	struct exynos_drm_display *display = manager->display;
+	enum drm_connector_status status = connector_status_disconnected;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (display && display->is_connected) {
+		if (display->is_connected(manager->dev))
+			status = connector_status_connected;
+		else
+			status = connector_status_disconnected;
+	}
+
+	return status;
+}
+
+static void exynos_drm_connector_destroy(struct drm_connector *connector)
+{
+	struct exynos_drm_connector *exynos_connector =
+		to_exynos_connector(connector);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	drm_sysfs_connector_remove(connector);
+	drm_connector_cleanup(connector);
+	kfree(exynos_connector);
+}
+
+static struct drm_connector_funcs exynos_connector_funcs = {
+	.dpms		= drm_helper_connector_dpms,
+	.fill_modes	= drm_helper_probe_single_connector_modes,
+	.detect		= exynos_drm_connector_detect,
+	.destroy	= exynos_drm_connector_destroy,
+};
+
+struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
+						   struct drm_encoder *encoder)
+{
+	struct exynos_drm_connector *exynos_connector;
+	struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
+	struct drm_connector *connector;
+	int type;
+	int err;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	exynos_connector = kzalloc(sizeof(*exynos_connector), GFP_KERNEL);
+	if (!exynos_connector) {
+		DRM_ERROR("failed to allocate connector\n");
+		return NULL;
+	}
+
+	connector = &exynos_connector->drm_connector;
+
+	switch (manager->display->type) {
+	case EXYNOS_DISPLAY_TYPE_HDMI:
+		type = DRM_MODE_CONNECTOR_HDMIA;
+		break;
+	default:
+		type = DRM_MODE_CONNECTOR_Unknown;
+		break;
+	}
+
+	drm_connector_init(dev, connector, &exynos_connector_funcs, type);
+	drm_connector_helper_add(connector, &exynos_connector_helper_funcs);
+
+	err = drm_sysfs_connector_add(connector);
+	if (err)
+		goto err_connector;
+
+	connector->encoder = encoder;
+	err = drm_mode_connector_attach_encoder(connector, encoder);
+	if (err) {
+		DRM_ERROR("failed to attach a connector to a encoder\n");
+		goto err_sysfs;
+	}
+
+	DRM_DEBUG_KMS("connector has been created\n");
+
+	return connector;
+
+err_sysfs:
+	drm_sysfs_connector_remove(connector);
+err_connector:
+	drm_connector_cleanup(connector);
+	kfree(exynos_connector);
+	return NULL;
+}
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DRM Connector Driver");
+MODULE_LICENSE("GPL");
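
When a panel has no EDID, exynos_drm_connector_get_modes() builds a mode from the FIMD timing by converting fb_videomode margins into DRM's absolute sync positions (hsync_start = hdisplay + left_margin, and so on, exactly as convert_to_display_mode() does above). A standalone model of the horizontal half of that conversion for one invented 1024x768 timing; the margin and sync values are illustrative only.

/* Standalone model of convert_to_display_mode()'s horizontal math. */
#include <stdio.h>

struct timing {	/* subset of struct fb_videomode used above */
	unsigned int xres, left_margin, hsync_len, right_margin;
};

int main(void)
{
	struct timing t = { .xres = 1024, .left_margin = 24,
			    .hsync_len = 136, .right_margin = 160 };

	unsigned int hdisplay    = t.xres;
	unsigned int hsync_start = hdisplay + t.left_margin;       /* 1048 */
	unsigned int hsync_end   = hsync_start + t.hsync_len;      /* 1184 */
	unsigned int htotal      = hsync_end + t.right_margin;     /* 1344 */

	printf("hdisplay %u, hsync %u-%u, htotal %u\n",
	       hdisplay, hsync_start, hsync_end, htotal);
	return 0;
}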

+ 34 - 0
drivers/gpu/drm/exynos/exynos_drm_connector.h

@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ *	Inki Dae <inki.dae@samsung.com>
+ *	Joonyoung Shim <jy0922.shim@samsung.com>
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_CONNECTOR_H_
+#define _EXYNOS_DRM_CONNECTOR_H_
+
+struct drm_connector *exynos_drm_connector_create(struct drm_device *dev,
+						   struct drm_encoder *encoder);
+
+#endif

+ 272 - 0
drivers/gpu/drm/exynos/exynos_drm_core.c

@@ -0,0 +1,272 @@
+/* exynos_drm_core.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Author:
+ *	Inki Dae <inki.dae@samsung.com>
+ *	Joonyoung Shim <jy0922.shim@samsung.com>
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "exynos_drm_drv.h"
+#include "exynos_drm_encoder.h"
+#include "exynos_drm_connector.h"
+#include "exynos_drm_fbdev.h"
+
+static DEFINE_MUTEX(exynos_drm_mutex);
+static LIST_HEAD(exynos_drm_subdrv_list);
+static struct drm_device *drm_dev;
+
+static int exynos_drm_subdrv_probe(struct drm_device *dev,
+					struct exynos_drm_subdrv *subdrv)
+{
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	if (subdrv->probe) {
+		int ret;
+
+		/*
+		 * this probe callback would be called by sub driver
+		 * after setting of all resources to this sub driver,
+		 * such as clock, irq and register map are done or by load()
+		 * of exynos drm driver.
+		 *
+		 * P.S. note that this driver is considered for modularization.
+		 */
+		ret = subdrv->probe(dev, subdrv->manager.dev);
+		if (ret)
+			return ret;
+	}
+
+	/* create and initialize a encoder for this sub driver. */
+	encoder = exynos_drm_encoder_create(dev, &subdrv->manager,
+			(1 << MAX_CRTC) - 1);
+	if (!encoder) {
+		DRM_ERROR("failed to create encoder\n");
+		return -EFAULT;
+	}
+
+	/*
+	 * create and initialize a connector for this sub driver and
+	 * attach the encoder created above to the connector.
+	 */
+	connector = exynos_drm_connector_create(dev, encoder);
+	if (!connector) {
+		DRM_ERROR("failed to create connector\n");
+		encoder->funcs->destroy(encoder);
+		return -EFAULT;
+	}
+
+	subdrv->encoder = encoder;
+	subdrv->connector = connector;
+
+	return 0;
+}
+
+static void exynos_drm_subdrv_remove(struct drm_device *dev,
+				      struct exynos_drm_subdrv *subdrv)
+{
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	if (subdrv->remove)
+		subdrv->remove(dev);
+
+	if (subdrv->encoder) {
+		struct drm_encoder *encoder = subdrv->encoder;
+		encoder->funcs->destroy(encoder);
+		subdrv->encoder = NULL;
+	}
+
+	if (subdrv->connector) {
+		struct drm_connector *connector = subdrv->connector;
+		connector->funcs->destroy(connector);
+		subdrv->connector = NULL;
+	}
+}
+
+int exynos_drm_device_register(struct drm_device *dev)
+{
+	struct exynos_drm_subdrv *subdrv, *n;
+	int err;
+
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	if (!dev)
+		return -EINVAL;
+
+	if (drm_dev) {
+		DRM_ERROR("Already drm device were registered\n");
+		return -EBUSY;
+	}
+
+	mutex_lock(&exynos_drm_mutex);
+	list_for_each_entry_safe(subdrv, n, &exynos_drm_subdrv_list, list) {
+		err = exynos_drm_subdrv_probe(dev, subdrv);
+		if (err) {
+			DRM_DEBUG("exynos drm subdrv probe failed.\n");
+			list_del(&subdrv->list);
+		}
+	}
+
+	drm_dev = dev;
+	mutex_unlock(&exynos_drm_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(exynos_drm_device_register);
+
+int exynos_drm_device_unregister(struct drm_device *dev)
+{
+	struct exynos_drm_subdrv *subdrv;
+
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	if (!dev || dev != drm_dev) {
+		WARN(1, "Unexpected drm device unregister!\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&exynos_drm_mutex);
+	list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list)
+		exynos_drm_subdrv_remove(dev, subdrv);
+
+	drm_dev = NULL;
+	mutex_unlock(&exynos_drm_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(exynos_drm_device_unregister);
+
+static int exynos_drm_mode_group_reinit(struct drm_device *dev)
+{
+	struct drm_mode_group *group = &dev->primary->mode_group;
+	uint32_t *id_list = group->id_list;
+	int ret;
+
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	ret = drm_mode_group_init_legacy_group(dev, group);
+	if (ret < 0)
+		return ret;
+
+	kfree(id_list);
+	return 0;
+}
+
+int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
+{
+	int err;
+
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	if (!subdrv)
+		return -EINVAL;
+
+	mutex_lock(&exynos_drm_mutex);
+	if (drm_dev) {
+		err = exynos_drm_subdrv_probe(drm_dev, subdrv);
+		if (err) {
+			DRM_ERROR("failed to probe exynos drm subdrv\n");
+			mutex_unlock(&exynos_drm_mutex);
+			return err;
+		}
+
+		/*
+		 * if any specific driver such as fimd or hdmi driver called
+		 * exynos_drm_subdrv_register() later than drm_load(),
+		 * the fb helper should be re-initialized and re-configured.
+		 */
+		err = exynos_drm_fbdev_reinit(drm_dev);
+		if (err) {
+			DRM_ERROR("failed to reinitialize exynos drm fbdev\n");
+			exynos_drm_subdrv_remove(drm_dev, subdrv);
+			mutex_unlock(&exynos_drm_mutex);
+			return err;
+		}
+
+		err = exynos_drm_mode_group_reinit(drm_dev);
+		if (err) {
+			DRM_ERROR("failed to reinitialize mode group\n");
+			exynos_drm_fbdev_fini(drm_dev);
+			exynos_drm_subdrv_remove(drm_dev, subdrv);
+			mutex_unlock(&exynos_drm_mutex);
+			return err;
+		}
+	}
+
+	subdrv->drm_dev = drm_dev;
+
+	list_add_tail(&subdrv->list, &exynos_drm_subdrv_list);
+	mutex_unlock(&exynos_drm_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register);
+
+int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
+{
+	int ret = -EFAULT;
+
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	if (!subdrv) {
+		DRM_DEBUG("Unexpected exynos drm subdrv unregister!\n");
+		return ret;
+	}
+
+	mutex_lock(&exynos_drm_mutex);
+	if (drm_dev) {
+		exynos_drm_subdrv_remove(drm_dev, subdrv);
+		list_del(&subdrv->list);
+
+		/*
+		 * fb helper should be updated once a sub driver is released
+		 * to re-configure crtc and connector and also to re-setup
+		 * drm framebuffer.
+		 */
+		ret = exynos_drm_fbdev_reinit(drm_dev);
+		if (ret < 0) {
+			DRM_ERROR("failed fb helper reinit.\n");
+			goto fail;
+		}
+
+		ret = exynos_drm_mode_group_reinit(drm_dev);
+		if (ret < 0) {
+			DRM_ERROR("failed drm mode group reinit.\n");
+			goto fail;
+		}
+	}
+
+fail:
+	mutex_unlock(&exynos_drm_mutex);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister);
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DRM Core Driver");
+MODULE_LICENSE("GPL");
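
The core keeps a list of sub-drivers (FIMD, HDMI, ...) and probes each one either at drm load() time or later, whenever exynos_drm_subdrv_register() is called. A hedged sketch of the registration side as a sub-driver's platform probe might do it; the callback signatures follow the calls made in exynos_drm_core.c above, everything named example_* is a hypothetical placeholder.

/* Hypothetical sub-driver (e.g. a display controller) registering itself
 * with the exynos drm core from its platform-driver probe. */
static int example_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
{
	/* clocks, irq and register mappings would be set up here */
	return 0;
}

static void example_subdrv_remove(struct drm_device *drm_dev)
{
	/* undo whatever probe set up */
}

static struct exynos_drm_subdrv example_subdrv = {
	.probe	= example_subdrv_probe,
	.remove	= example_subdrv_remove,
};

static int __devinit example_platform_probe(struct platform_device *pdev)
{
	example_subdrv.manager.dev = &pdev->dev;

	return exynos_drm_subdrv_register(&example_subdrv);
}

static int __devexit example_platform_remove(struct platform_device *pdev)
{
	return exynos_drm_subdrv_unregister(&example_subdrv);
}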

+ 381 - 0
drivers/gpu/drm/exynos/exynos_drm_crtc.c

@@ -0,0 +1,381 @@
+/* exynos_drm_crtc.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ *	Inki Dae <inki.dae@samsung.com>
+ *	Joonyoung Shim <jy0922.shim@samsung.com>
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_fb.h"
+#include "exynos_drm_encoder.h"
+#include "exynos_drm_buf.h"
+
+#define to_exynos_crtc(x)	container_of(x, struct exynos_drm_crtc,\
+				drm_crtc)
+
+/*
+ * Exynos specific crtc position structure.
+ *
+ * @fb_x: offset x on a framebuffer to be displayed
+ *	- the unit is screen coordinates.
+ * @fb_y: offset y on a framebuffer to be displayed
+ *	- the unit is screen coordinates.
+ * @crtc_x: offset x on hardware screen.
+ * @crtc_y: offset y on hardware screen.
+ * @crtc_w: width of hardware screen.
+ * @crtc_h: height of hardware screen.
+ */
+struct exynos_drm_crtc_pos {
+	unsigned int fb_x;
+	unsigned int fb_y;
+	unsigned int crtc_x;
+	unsigned int crtc_y;
+	unsigned int crtc_w;
+	unsigned int crtc_h;
+};
+
+/*
+ * Exynos specific crtc structure.
+ *
+ * @drm_crtc: crtc object.
+ * @overlay: contain information common to display controller and hdmi and
+ *	contents of this overlay object would be copied to sub driver size.
+ * @pipe: a crtc index created at load() with a new crtc object creation
+ *	and the crtc object would be set to private->crtc array
+ *	to get a crtc object corresponding to this pipe from private->crtc
+ *	array when irq interrupt occurred. the reason of using this pipe is that
+ *	drm framework doesn't support multiple irq yet.
+ *	we can refer to the crtc to current hardware interrupt occurred through
+ *	this pipe value.
+ */
+struct exynos_drm_crtc {
+	struct drm_crtc			drm_crtc;
+	struct exynos_drm_overlay	overlay;
+	unsigned int			pipe;
+};
+
+static void exynos_drm_crtc_apply(struct drm_crtc *crtc)
+{
+	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+	struct exynos_drm_overlay *overlay = &exynos_crtc->overlay;
+
+	exynos_drm_fn_encoder(crtc, overlay,
+			exynos_drm_encoder_crtc_mode_set);
+	exynos_drm_fn_encoder(crtc, NULL, exynos_drm_encoder_crtc_commit);
+}
+
+static int exynos_drm_overlay_update(struct exynos_drm_overlay *overlay,
+				       struct drm_framebuffer *fb,
+				       struct drm_display_mode *mode,
+				       struct exynos_drm_crtc_pos *pos)
+{
+	struct exynos_drm_buf_entry *entry;
+	unsigned int actual_w;
+	unsigned int actual_h;
+
+	entry = exynos_drm_fb_get_buf(fb);
+	if (!entry) {
+		DRM_LOG_KMS("entry is null.\n");
+		return -EFAULT;
+	}
+
+	overlay->paddr = entry->paddr;
+	overlay->vaddr = entry->vaddr;
+
+	DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n",
+			(unsigned long)overlay->vaddr,
+			(unsigned long)overlay->paddr);
+
+	actual_w = min((mode->hdisplay - pos->crtc_x), pos->crtc_w);
+	actual_h = min((mode->vdisplay - pos->crtc_y), pos->crtc_h);
+
+	/* set drm framebuffer data. */
+	overlay->fb_x = pos->fb_x;
+	overlay->fb_y = pos->fb_y;
+	overlay->fb_width = fb->width;
+	overlay->fb_height = fb->height;
+	overlay->bpp = fb->bits_per_pixel;
+	overlay->pitch = fb->pitch;
+
+	/* set overlay range to be displayed. */
+	overlay->crtc_x = pos->crtc_x;
+	overlay->crtc_y = pos->crtc_y;
+	overlay->crtc_width = actual_w;
+	overlay->crtc_height = actual_h;
+
+	/* set drm mode data. */
+	overlay->mode_width = mode->hdisplay;
+	overlay->mode_height = mode->vdisplay;
+	overlay->refresh = mode->vrefresh;
+	overlay->scan_flag = mode->flags;
+
+	DRM_DEBUG_KMS("overlay : offset_x/y(%d,%d), width/height(%d,%d)",
+			overlay->crtc_x, overlay->crtc_y,
+			overlay->crtc_width, overlay->crtc_height);
+
+	return 0;
+}
+
+static int exynos_drm_crtc_update(struct drm_crtc *crtc)
+{
+	struct exynos_drm_crtc *exynos_crtc;
+	struct exynos_drm_overlay *overlay;
+	struct exynos_drm_crtc_pos pos;
+	struct drm_display_mode *mode = &crtc->mode;
+	struct drm_framebuffer *fb = crtc->fb;
+
+	if (!mode || !fb)
+		return -EINVAL;
+
+	exynos_crtc = to_exynos_crtc(crtc);
+	overlay = &exynos_crtc->overlay;
+
+	memset(&pos, 0, sizeof(struct exynos_drm_crtc_pos));
+
+	/* it means the offset of framebuffer to be displayed. */
+	pos.fb_x = crtc->x;
+	pos.fb_y = crtc->y;
+
+	/* OSD position to be displayed. */
+	pos.crtc_x = 0;
+	pos.crtc_y = 0;
+	pos.crtc_w = fb->width - crtc->x;
+	pos.crtc_h = fb->height - crtc->y;
+
+	return exynos_drm_overlay_update(overlay, crtc->fb, mode, &pos);
+}
+
+static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/* TODO */
+}
+
+static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/* drm framework doesn't check NULL. */
+}
+
+static void exynos_drm_crtc_commit(struct drm_crtc *crtc)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/* drm framework doesn't check NULL. */
+}
+
+static bool
+exynos_drm_crtc_mode_fixup(struct drm_crtc *crtc,
+			    struct drm_display_mode *mode,
+			    struct drm_display_mode *adjusted_mode)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/* drm framework doesn't check NULL */
+	return true;
+}
+
+static int
+exynos_drm_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
+			  struct drm_display_mode *adjusted_mode, int x, int y,
+			  struct drm_framebuffer *old_fb)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	mode = adjusted_mode;
+
+	return exynos_drm_crtc_update(crtc);
+}
+
+static int exynos_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
+					  struct drm_framebuffer *old_fb)
+{
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	ret = exynos_drm_crtc_update(crtc);
+	if (ret)
+		return ret;
+
+	exynos_drm_crtc_apply(crtc);
+
+	return ret;
+}
+
+static void exynos_drm_crtc_load_lut(struct drm_crtc *crtc)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+	/* drm framework doesn't check NULL */
+}
+
+static struct drm_crtc_helper_funcs exynos_crtc_helper_funcs = {
+	.dpms		= exynos_drm_crtc_dpms,
+	.prepare	= exynos_drm_crtc_prepare,
+	.commit		= exynos_drm_crtc_commit,
+	.mode_fixup	= exynos_drm_crtc_mode_fixup,
+	.mode_set	= exynos_drm_crtc_mode_set,
+	.mode_set_base	= exynos_drm_crtc_mode_set_base,
+	.load_lut	= exynos_drm_crtc_load_lut,
+};
+
+static int exynos_drm_crtc_page_flip(struct drm_crtc *crtc,
+				      struct drm_framebuffer *fb,
+				      struct drm_pending_vblank_event *event)
+{
+	struct drm_device *dev = crtc->dev;
+	struct exynos_drm_private *dev_priv = dev->dev_private;
+	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+	struct drm_framebuffer *old_fb = crtc->fb;
+	int ret = -EINVAL;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (event) {
+		/*
+		 * the pipe from user always is 0 so we can set pipe number
+		 * of current owner to event.
+		 */
+		event->pipe = exynos_crtc->pipe;
+
+		list_add_tail(&event->base.link,
+				&dev_priv->pageflip_event_list);
+
+		ret = drm_vblank_get(dev, exynos_crtc->pipe);
+		if (ret) {
+			DRM_DEBUG("failed to acquire vblank counter\n");
+			list_del(&event->base.link);
+
+			goto out;
+		}
+
+		crtc->fb = fb;
+		ret = exynos_drm_crtc_update(crtc);
+		if (ret) {
+			crtc->fb = old_fb;
+			drm_vblank_put(dev, exynos_crtc->pipe);
+			list_del(&event->base.link);
+
+			goto out;
+		}
+
+		/*
+		 * the values related to a buffer of the drm framebuffer
+		 * to be applied should be set at here. because these values
+		 * first, are set to shadow registers and then to
+		 * real registers at vsync front porch period.
+		 */
+		exynos_drm_crtc_apply(crtc);
+	}
+out:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+static void exynos_drm_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+	struct exynos_drm_private *private = crtc->dev->dev_private;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	private->crtc[exynos_crtc->pipe] = NULL;
+
+	drm_crtc_cleanup(crtc);
+	kfree(exynos_crtc);
+}
+
+static struct drm_crtc_funcs exynos_crtc_funcs = {
+	.set_config	= drm_crtc_helper_set_config,
+	.page_flip	= exynos_drm_crtc_page_flip,
+	.destroy	= exynos_drm_crtc_destroy,
+};
+
+struct exynos_drm_overlay *get_exynos_drm_overlay(struct drm_device *dev,
+		struct drm_crtc *crtc)
+{
+	struct exynos_drm_crtc *exynos_crtc = to_exynos_crtc(crtc);
+
+	return &exynos_crtc->overlay;
+}
+
+int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr)
+{
+	struct exynos_drm_crtc *exynos_crtc;
+	struct exynos_drm_private *private = dev->dev_private;
+	struct drm_crtc *crtc;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	exynos_crtc = kzalloc(sizeof(*exynos_crtc), GFP_KERNEL);
+	if (!exynos_crtc) {
+		DRM_ERROR("failed to allocate exynos crtc\n");
+		return -ENOMEM;
+	}
+
+	exynos_crtc->pipe = nr;
+	crtc = &exynos_crtc->drm_crtc;
+
+	private->crtc[nr] = crtc;
+
+	drm_crtc_init(dev, crtc, &exynos_crtc_funcs);
+	drm_crtc_helper_add(crtc, &exynos_crtc_helper_funcs);
+
+	return 0;
+}
+
+int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc)
+{
+	struct exynos_drm_private *private = dev->dev_private;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	exynos_drm_fn_encoder(private->crtc[crtc], &crtc,
+			exynos_drm_enable_vblank);
+
+	return 0;
+}
+
+void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc)
+{
+	struct exynos_drm_private *private = dev->dev_private;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	exynos_drm_fn_encoder(private->crtc[crtc], &crtc,
+			exynos_drm_disable_vblank);
+}
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DRM CRTC Driver");
+MODULE_LICENSE("GPL");
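
exynos_drm_crtc_update() derives an exynos_drm_crtc_pos from crtc->x/y and the framebuffer size, and exynos_drm_overlay_update() then clips the overlay against the active mode. A standalone model of that position/clipping math with one invented set of numbers:

/* Model of the math in exynos_drm_crtc_update() / exynos_drm_overlay_update();
 * all sizes below are invented examples. */
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

int main(void)
{
	/* a 1920x1200 framebuffer panned by (100, 50) on a 1280x720 mode */
	unsigned int fb_w = 1920, fb_h = 1200;
	unsigned int pan_x = 100, pan_y = 50;       /* crtc->x, crtc->y */
	unsigned int hdisplay = 1280, vdisplay = 720;

	/* exynos_drm_crtc_update(): visible size requested from the fb,
	 * with pos->crtc_x/crtc_y left at 0 */
	unsigned int crtc_w = fb_w - pan_x;         /* 1820 */
	unsigned int crtc_h = fb_h - pan_y;         /* 1150 */

	/* exynos_drm_overlay_update(): clip to the active display mode */
	unsigned int actual_w = MIN(hdisplay - 0, crtc_w);   /* 1280 */
	unsigned int actual_h = MIN(vdisplay - 0, crtc_h);   /* 720 */

	printf("overlay %ux%u of a %ux%u fb\n", actual_w, actual_h, fb_w, fb_h);
	return 0;
}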

+ 38 - 0
drivers/gpu/drm/exynos/exynos_drm_crtc.h

@@ -0,0 +1,38 @@
+/* exynos_drm_crtc.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ *	Inki Dae <inki.dae@samsung.com>
+ *	Joonyoung Shim <jy0922.shim@samsung.com>
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_CRTC_H_
+#define _EXYNOS_DRM_CRTC_H_
+
+struct exynos_drm_overlay *get_exynos_drm_overlay(struct drm_device *dev,
+		struct drm_crtc *crtc);
+int exynos_drm_crtc_create(struct drm_device *dev, unsigned int nr);
+int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int crtc);
+void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int crtc);
+
+#endif

+ 244 - 0
drivers/gpu/drm/exynos/exynos_drm_drv.c

@@ -0,0 +1,244 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ *	Inki Dae <inki.dae@samsung.com>
+ *	Joonyoung Shim <jy0922.shim@samsung.com>
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_crtc.h"
+#include "exynos_drm_fbdev.h"
+#include "exynos_drm_fb.h"
+#include "exynos_drm_gem.h"
+
+#define DRIVER_NAME	"exynos-drm"
+#define DRIVER_DESC	"Samsung SoC DRM"
+#define DRIVER_DATE	"20110530"
+#define DRIVER_MAJOR	1
+#define DRIVER_MINOR	0
+
+static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
+{
+	struct exynos_drm_private *private;
+	int ret;
+	int nr;
+
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	private = kzalloc(sizeof(struct exynos_drm_private), GFP_KERNEL);
+	if (!private) {
+		DRM_ERROR("failed to allocate private\n");
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&private->pageflip_event_list);
+	dev->dev_private = (void *)private;
+
+	drm_mode_config_init(dev);
+
+	exynos_drm_mode_config_init(dev);
+
+	/*
+	 * EXYNOS4 supports two CRTCs and each crtc can be used
+	 * independently of the hardware.
+	 */
+	for (nr = 0; nr < MAX_CRTC; nr++) {
+		ret = exynos_drm_crtc_create(dev, nr);
+		if (ret)
+			goto err_crtc;
+	}
+
+	ret = drm_vblank_init(dev, MAX_CRTC);
+	if (ret)
+		goto err_crtc;
+
+	/*
+	 * probe sub drivers such as the display controller and hdmi
+	 * drivers, which registered themselves at their platform driver
+	 * probe(), and create an encoder and connector for each of them.
+	 */
+	ret = exynos_drm_device_register(dev);
+	if (ret)
+		goto err_vblank;
+
+	/*
+	 * create and configure fb helper and also exynos specific
+	 * fbdev object.
+	 */
+	ret = exynos_drm_fbdev_init(dev);
+	if (ret) {
+		DRM_ERROR("failed to initialize drm fbdev\n");
+		goto err_drm_device;
+	}
+
+	return 0;
+
+err_drm_device:
+	exynos_drm_device_unregister(dev);
+err_vblank:
+	drm_vblank_cleanup(dev);
+err_crtc:
+	drm_mode_config_cleanup(dev);
+	kfree(private);
+
+	return ret;
+}
+
+static int exynos_drm_unload(struct drm_device *dev)
+{
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	exynos_drm_fbdev_fini(dev);
+	exynos_drm_device_unregister(dev);
+	drm_vblank_cleanup(dev);
+	drm_mode_config_cleanup(dev);
+	kfree(dev->dev_private);
+
+	dev->dev_private = NULL;
+
+	return 0;
+}
+
+static void exynos_drm_preclose(struct drm_device *dev,
+					struct drm_file *file_priv)
+{
+	struct exynos_drm_private *dev_priv = dev->dev_private;
+
+	/*
+	 * the drm framework frees all events at release time,
+	 * so the private event list just needs to be cleared here.
+	 */
+	if (!list_empty(&dev_priv->pageflip_event_list))
+		INIT_LIST_HEAD(&dev_priv->pageflip_event_list);
+}
+
+static void exynos_drm_lastclose(struct drm_device *dev)
+{
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	exynos_drm_fbdev_restore_mode(dev);
+}
+
+static struct vm_operations_struct exynos_drm_gem_vm_ops = {
+	.fault = exynos_drm_gem_fault,
+	.open = drm_gem_vm_open,
+	.close = drm_gem_vm_close,
+};
+
+static struct drm_ioctl_desc exynos_ioctls[] = {
+	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_CREATE, exynos_drm_gem_create_ioctl,
+			DRM_UNLOCKED | DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MAP_OFFSET,
+			exynos_drm_gem_map_offset_ioctl, DRM_UNLOCKED |
+			DRM_AUTH),
+	DRM_IOCTL_DEF_DRV(EXYNOS_GEM_MMAP,
+			exynos_drm_gem_mmap_ioctl, DRM_UNLOCKED | DRM_AUTH),
+};
+
+static struct drm_driver exynos_drm_driver = {
+	.driver_features	= DRIVER_HAVE_IRQ | DRIVER_BUS_PLATFORM |
+				  DRIVER_MODESET | DRIVER_GEM,
+	.load			= exynos_drm_load,
+	.unload			= exynos_drm_unload,
+	.preclose		= exynos_drm_preclose,
+	.lastclose		= exynos_drm_lastclose,
+	.get_vblank_counter	= drm_vblank_count,
+	.enable_vblank		= exynos_drm_crtc_enable_vblank,
+	.disable_vblank		= exynos_drm_crtc_disable_vblank,
+	.gem_init_object	= exynos_drm_gem_init_object,
+	.gem_free_object	= exynos_drm_gem_free_object,
+	.gem_vm_ops		= &exynos_drm_gem_vm_ops,
+	.dumb_create		= exynos_drm_gem_dumb_create,
+	.dumb_map_offset	= exynos_drm_gem_dumb_map_offset,
+	.dumb_destroy		= exynos_drm_gem_dumb_destroy,
+	.ioctls			= exynos_ioctls,
+	.fops = {
+		.owner		= THIS_MODULE,
+		.open		= drm_open,
+		.mmap		= exynos_drm_gem_mmap,
+		.poll		= drm_poll,
+		.read		= drm_read,
+		.unlocked_ioctl	= drm_ioctl,
+		.release	= drm_release,
+	},
+	.name	= DRIVER_NAME,
+	.desc	= DRIVER_DESC,
+	.date	= DRIVER_DATE,
+	.major	= DRIVER_MAJOR,
+	.minor	= DRIVER_MINOR,
+};
+
+static int exynos_drm_platform_probe(struct platform_device *pdev)
+{
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	exynos_drm_driver.num_ioctls = DRM_ARRAY_SIZE(exynos_ioctls);
+
+	return drm_platform_init(&exynos_drm_driver, pdev);
+}
+
+static int exynos_drm_platform_remove(struct platform_device *pdev)
+{
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	drm_platform_exit(&exynos_drm_driver, pdev);
+
+	return 0;
+}
+
+static struct platform_driver exynos_drm_platform_driver = {
+	.probe		= exynos_drm_platform_probe,
+	.remove		= __devexit_p(exynos_drm_platform_remove),
+	.driver		= {
+		.owner	= THIS_MODULE,
+		.name	= DRIVER_NAME,
+	},
+};
+
+static int __init exynos_drm_init(void)
+{
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	return platform_driver_register(&exynos_drm_platform_driver);
+}
+
+static void __exit exynos_drm_exit(void)
+{
+	DRM_DEBUG_DRIVER("%s\n", __FILE__);
+
+	platform_driver_unregister(&exynos_drm_platform_driver);
+}
+
+module_init(exynos_drm_init);
+module_exit(exynos_drm_exit);
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DRM Driver");
+MODULE_LICENSE("GPL");
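
For context: the platform driver above binds by name, so it only probes once a platform device called "exynos-drm" (DRIVER_NAME) has been registered from machine or board code. A minimal, illustrative sketch of such a registration follows; the device name is the only detail taken from this patch, everything else is assumed.

#include <linux/init.h>
#include <linux/platform_device.h>

/* hypothetical board-file snippet, not part of this patch */
static struct platform_device exynos_drm_device = {
	.name	= "exynos-drm",		/* must match DRIVER_NAME above */
	.id	= -1,
};

static int __init board_add_exynos_drm(void)
{
	return platform_device_register(&exynos_drm_device);
}
device_initcall(board_add_exynos_drm);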

+ 254 - 0
drivers/gpu/drm/exynos/exynos_drm_drv.h

@@ -0,0 +1,254 @@
+/* exynos_drm_drv.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ *	Inki Dae <inki.dae@samsung.com>
+ *	Joonyoung Shim <jy0922.shim@samsung.com>
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_DRV_H_
+#define _EXYNOS_DRM_DRV_H_
+
+#include "drm.h"
+
+#define MAX_CRTC	2
+
+struct drm_device;
+struct exynos_drm_overlay;
+struct drm_connector;
+
+/* this enumerates display type. */
+enum exynos_drm_output_type {
+	EXYNOS_DISPLAY_TYPE_NONE,
+	/* RGB or CPU Interface. */
+	EXYNOS_DISPLAY_TYPE_LCD,
+	/* HDMI Interface. */
+	EXYNOS_DISPLAY_TYPE_HDMI,
+};
+
+/*
+ * Exynos drm overlay ops structure.
+ *
+ * @mode_set: copy drm overlay info to hw specific overlay info.
+ * @commit: apply hardware specific overlay data to registers.
+ * @disable: disable hardware specific overlay.
+ */
+struct exynos_drm_overlay_ops {
+	void (*mode_set)(struct device *subdrv_dev,
+			 struct exynos_drm_overlay *overlay);
+	void (*commit)(struct device *subdrv_dev);
+	void (*disable)(struct device *subdrv_dev);
+};
+
+/*
+ * Exynos drm common overlay structure.
+ *
+ * @fb_x: offset x on a framebuffer to be displayed.
+ *	- the unit is screen coordinates.
+ * @fb_y: offset y on a framebuffer to be displayed.
+ *	- the unit is screen coordinates.
+ * @fb_width: width of a framebuffer.
+ * @fb_height: height of a framebuffer.
+ * @crtc_x: offset x on hardware screen.
+ * @crtc_y: offset y on hardware screen.
+ * @crtc_width: window width to be displayed (hardware screen).
+ * @crtc_height: window height to be displayed (hardware screen).
+ * @mode_width: width of screen mode.
+ * @mode_height: height of screen mode.
+ * @refresh: refresh rate.
+ * @scan_flag: interlaced or progressive scan mode.
+ *	(it could be DRM_MODE_FLAG_*)
+ * @bpp: pixel size (in bits).
+ * @pitch: stride of one framebuffer line (in bytes).
+ * @paddr: bus (dma-accessible) physical memory address of this overlay;
+ *		the memory is physically contiguous.
+ * @vaddr: virtual memory address of this overlay.
+ * @default_win: a window to be enabled.
+ * @color_key: color key on or off.
+ * @index_color: if the color key feature is used then this value is used
+ *			as the index color.
+ * @local_path: in case of lcd type, local path mode on or off.
+ * @transparency: transparency on or off.
+ * @activated: activated or not.
+ *
+ * this structure is common to exynos SoCs and its contents are copied
+ * to the hardware specific overlay info.
+ */
+struct exynos_drm_overlay {
+	unsigned int fb_x;
+	unsigned int fb_y;
+	unsigned int fb_width;
+	unsigned int fb_height;
+	unsigned int crtc_x;
+	unsigned int crtc_y;
+	unsigned int crtc_width;
+	unsigned int crtc_height;
+	unsigned int mode_width;
+	unsigned int mode_height;
+	unsigned int refresh;
+	unsigned int scan_flag;
+	unsigned int bpp;
+	unsigned int pitch;
+	dma_addr_t paddr;
+	void __iomem *vaddr;
+
+	bool default_win;
+	bool color_key;
+	unsigned int index_color;
+	bool local_path;
+	bool transparency;
+	bool activated;
+};
+
+/*
+ * Exynos DRM Display Structure.
+ *	- this structure is common to analog tv, digital tv and lcd panel.
+ *
+ * @type: one of EXYNOS_DISPLAY_TYPE_LCD and HDMI.
+ * @is_connected: check whether the display is connected or not.
+ * @get_edid: get edid modes from display driver.
+ * @get_timing: get timing object from display driver.
+ * @check_timing: check if timing is valid or not.
+ * @power_on: turn the display device on or off.
+ */
+struct exynos_drm_display {
+	enum exynos_drm_output_type type;
+	bool (*is_connected)(struct device *dev);
+	int (*get_edid)(struct device *dev, struct drm_connector *connector,
+				u8 *edid, int len);
+	void *(*get_timing)(struct device *dev);
+	int (*check_timing)(struct device *dev, void *timing);
+	int (*power_on)(struct device *dev, int mode);
+};
+
+/*
+ * Exynos drm manager ops
+ *
+ * @mode_set: convert drm_display_mode to hw specific display mode and
+ *	      would be called by encoder->mode_set().
+ * @commit: set current hw specific display mode to hw.
+ * @enable_vblank: specific driver callback for enabling vblank interrupt.
+ * @disable_vblank: specific driver callback for disabling vblank interrupt.
+ */
+struct exynos_drm_manager_ops {
+	void (*mode_set)(struct device *subdrv_dev, void *mode);
+	void (*commit)(struct device *subdrv_dev);
+	int (*enable_vblank)(struct device *subdrv_dev);
+	void (*disable_vblank)(struct device *subdrv_dev);
+};
+
+/*
+ * Exynos drm common manager structure.
+ *
+ * @dev: pointer to device object for subdrv device driver.
+ *	sub drivers such as display controller or hdmi driver,
+ *	have their own device object.
+ * @pipe: the pipe (crtc index) this manager is bound to, or -1 if none.
+ * @ops: pointer to callbacks for exynos drm specific manager operations.
+ *	these callbacks should be set by specific drivers such as fimd
+ *	or hdmi driver and are used to control hardware global registers.
+ * @overlay_ops: pointer to callbacks for exynos drm specific overlays.
+ *	these callbacks should be set by specific drivers such as fimd
+ *	or hdmi driver and are used to control hardware overlay registers.
+ * @display: pointer to callbacks for exynos drm specific display devices.
+ *	these callbacks should be set by specific drivers such as fimd
+ *	or hdmi driver and are used to control display devices such as
+ *	analog tv, digital tv and lcd panel and also get timing data for them.
+ */
+struct exynos_drm_manager {
+	struct device *dev;
+	int pipe;
+	struct exynos_drm_manager_ops *ops;
+	struct exynos_drm_overlay_ops *overlay_ops;
+	struct exynos_drm_display *display;
+};
+
+/*
+ * Exynos drm private structure.
+ */
+struct exynos_drm_private {
+	struct drm_fb_helper *fb_helper;
+
+	/* list head for new event to be added. */
+	struct list_head pageflip_event_list;
+
+	/*
+	 * created crtc objects are kept in this array, which is used
+	 * to find out which crtc requested a vblank.
+	 */
+	struct drm_crtc *crtc[MAX_CRTC];
+};
+
+/*
+ * Exynos drm sub driver structure.
+ *
+ * @list: list entry used to register this sub driver to exynos drm driver.
+ * @drm_dev: pointer to drm_device and this pointer would be set
+ *	when sub driver calls exynos_drm_subdrv_register().
+ * @probe: this callback would be called by exynos drm driver after
+ *	subdrv is registered to it.
+ * @remove: this callback is used to release resources created
+ *	by probe callback.
+ * @manager: the sub driver's own manager, used to control its hardware
+ *	appropriately and to access its hardware drawing operations.
+ * @encoder: encoder object owned by this sub driver.
+ * @connector: connector object owned by this sub driver.
+ */
+struct exynos_drm_subdrv {
+	struct list_head list;
+	struct drm_device *drm_dev;
+
+	int (*probe)(struct drm_device *drm_dev, struct device *dev);
+	void (*remove)(struct drm_device *dev);
+
+	struct exynos_drm_manager manager;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+};
+
+/*
+ * this function calls the probe callback of each sub driver registered in
+ * the sub driver list, creates its encoder and connector and then sets
+ * the drm_device object on each of them.
+ */
+int exynos_drm_device_register(struct drm_device *dev);
+/*
+ * this function calls the remove callback of each sub driver registered in
+ * the sub driver list and destroys its encoder and connector.
+ */
+int exynos_drm_device_unregister(struct drm_device *dev);
+
+/*
+ * this function is called by sub drivers such as the display controller
+ * or hdmi driver to register a sub driver object with the exynos drm
+ * driver.  when a sub driver is registered, its probe callback is called,
+ * its encoder and connector are created and then the fb helper and
+ * drm mode group are re-initialized.
+ */
+int exynos_drm_subdrv_register(struct exynos_drm_subdrv *drm_subdrv);
+
+/*
+ * this function removes the subdrv from the exynos drm driver's list, and
+ * the fb helper and drm mode group are re-initialized.
+ */
+int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *drm_subdrv);
+
+#endif
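
To make the sub driver interface above concrete, here is a minimal sketch of how a hypothetical sub driver might fill in struct exynos_drm_subdrv and register it. The "foo" names and the empty commit callback are assumptions for illustration only; real sub drivers such as the FIMD driver later in this series do this from their platform probe().

#include "exynos_drm_drv.h"

static void foo_commit(struct device *subdrv_dev)
{
	/* a real sub driver would program its display hardware here. */
}

static struct exynos_drm_manager_ops foo_manager_ops = {
	.commit = foo_commit,
};

static struct exynos_drm_subdrv foo_subdrv = {
	.manager = {
		.pipe	= -1,			/* no crtc assigned yet */
		.ops	= &foo_manager_ops,
	},
};

static int foo_register_subdrv(struct device *dev)
{
	/* the manager keeps the sub driver's own device object. */
	foo_subdrv.manager.dev = dev;

	/* add foo to the sub driver list; its probe() runs when the core loads. */
	return exynos_drm_subdrv_register(&foo_subdrv);
}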

+ 271 - 0
drivers/gpu/drm/exynos/exynos_drm_encoder.c

@@ -0,0 +1,271 @@
+/* exynos_drm_encoder.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ *	Inki Dae <inki.dae@samsung.com>
+ *	Joonyoung Shim <jy0922.shim@samsung.com>
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_crtc_helper.h"
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_crtc.h"
+#include "exynos_drm_encoder.h"
+
+#define to_exynos_encoder(x)	container_of(x, struct exynos_drm_encoder,\
+				drm_encoder)
+
+/*
+ * exynos specific encoder structure.
+ *
+ * @drm_encoder: encoder object.
+ * @manager: each encoder has its own manager, used to control its hardware
+ *	appropriately and to access its hardware drawing operations.
+ */
+struct exynos_drm_encoder {
+	struct drm_encoder		drm_encoder;
+	struct exynos_drm_manager	*manager;
+};
+
+static void exynos_drm_encoder_dpms(struct drm_encoder *encoder, int mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_connector *connector;
+	struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
+
+	DRM_DEBUG_KMS("%s, encoder dpms: %d\n", __FILE__, mode);
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (connector->encoder == encoder) {
+			struct exynos_drm_display *display = manager->display;
+
+			if (display && display->power_on)
+				display->power_on(manager->dev, mode);
+		}
+	}
+}
+
+static bool
+exynos_drm_encoder_mode_fixup(struct drm_encoder *encoder,
+			       struct drm_display_mode *mode,
+			       struct drm_display_mode *adjusted_mode)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/* drm framework doesn't check NULL. */
+
+	return true;
+}
+
+static void exynos_drm_encoder_mode_set(struct drm_encoder *encoder,
+					 struct drm_display_mode *mode,
+					 struct drm_display_mode *adjusted_mode)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_connector *connector;
+	struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
+	struct exynos_drm_manager_ops *manager_ops = manager->ops;
+	struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+	struct exynos_drm_overlay *overlay = get_exynos_drm_overlay(dev,
+						encoder->crtc);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	mode = adjusted_mode;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (connector->encoder == encoder) {
+			if (manager_ops && manager_ops->mode_set)
+				manager_ops->mode_set(manager->dev, mode);
+
+			if (overlay_ops && overlay_ops->mode_set)
+				overlay_ops->mode_set(manager->dev, overlay);
+		}
+	}
+}
+
+static void exynos_drm_encoder_prepare(struct drm_encoder *encoder)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/* drm framework doesn't check NULL. */
+}
+
+static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
+{
+	struct exynos_drm_manager *manager = exynos_drm_get_manager(encoder);
+	struct exynos_drm_manager_ops *manager_ops = manager->ops;
+	struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (manager_ops && manager_ops->commit)
+		manager_ops->commit(manager->dev);
+
+	if (overlay_ops && overlay_ops->commit)
+		overlay_ops->commit(manager->dev);
+}
+
+static struct drm_crtc *
+exynos_drm_encoder_get_crtc(struct drm_encoder *encoder)
+{
+	return encoder->crtc;
+}
+
+static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = {
+	.dpms		= exynos_drm_encoder_dpms,
+	.mode_fixup	= exynos_drm_encoder_mode_fixup,
+	.mode_set	= exynos_drm_encoder_mode_set,
+	.prepare	= exynos_drm_encoder_prepare,
+	.commit		= exynos_drm_encoder_commit,
+	.get_crtc	= exynos_drm_encoder_get_crtc,
+};
+
+static void exynos_drm_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct exynos_drm_encoder *exynos_encoder =
+		to_exynos_encoder(encoder);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	exynos_encoder->manager->pipe = -1;
+
+	drm_encoder_cleanup(encoder);
+	encoder->dev->mode_config.num_encoder--;
+	kfree(exynos_encoder);
+}
+
+static struct drm_encoder_funcs exynos_encoder_funcs = {
+	.destroy = exynos_drm_encoder_destroy,
+};
+
+struct drm_encoder *
+exynos_drm_encoder_create(struct drm_device *dev,
+			   struct exynos_drm_manager *manager,
+			   unsigned int possible_crtcs)
+{
+	struct drm_encoder *encoder;
+	struct exynos_drm_encoder *exynos_encoder;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (!manager || !possible_crtcs)
+		return NULL;
+
+	if (!manager->dev)
+		return NULL;
+
+	exynos_encoder = kzalloc(sizeof(*exynos_encoder), GFP_KERNEL);
+	if (!exynos_encoder) {
+		DRM_ERROR("failed to allocate encoder\n");
+		return NULL;
+	}
+
+	exynos_encoder->manager = manager;
+	encoder = &exynos_encoder->drm_encoder;
+	encoder->possible_crtcs = possible_crtcs;
+
+	DRM_DEBUG_KMS("possible_crtcs = 0x%x\n", encoder->possible_crtcs);
+
+	drm_encoder_init(dev, encoder, &exynos_encoder_funcs,
+			DRM_MODE_ENCODER_TMDS);
+
+	drm_encoder_helper_add(encoder, &exynos_encoder_helper_funcs);
+
+	DRM_DEBUG_KMS("encoder has been created\n");
+
+	return encoder;
+}
+
+struct exynos_drm_manager *exynos_drm_get_manager(struct drm_encoder *encoder)
+{
+	return to_exynos_encoder(encoder)->manager;
+}
+
+void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data,
+			    void (*fn)(struct drm_encoder *, void *))
+{
+	struct drm_device *dev = crtc->dev;
+	struct drm_encoder *encoder;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc != crtc)
+			continue;
+
+		fn(encoder, data);
+	}
+}
+
+void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data)
+{
+	struct exynos_drm_manager *manager =
+		to_exynos_encoder(encoder)->manager;
+	struct exynos_drm_manager_ops *manager_ops = manager->ops;
+	int crtc = *(int *)data;
+
+	if (manager->pipe == -1)
+		manager->pipe = crtc;
+
+	if (manager_ops->enable_vblank)
+		manager_ops->enable_vblank(manager->dev);
+}
+
+void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data)
+{
+	struct exynos_drm_manager *manager =
+		to_exynos_encoder(encoder)->manager;
+	struct exynos_drm_manager_ops *manager_ops = manager->ops;
+	int crtc = *(int *)data;
+
+	if (manager->pipe == -1)
+		manager->pipe = crtc;
+
+	if (manager_ops->disable_vblank)
+		manager_ops->disable_vblank(manager->dev);
+}
+
+void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data)
+{
+	struct exynos_drm_manager *manager =
+		to_exynos_encoder(encoder)->manager;
+	struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+
+	overlay_ops->commit(manager->dev);
+}
+
+void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data)
+{
+	struct exynos_drm_manager *manager =
+		to_exynos_encoder(encoder)->manager;
+	struct exynos_drm_overlay_ops *overlay_ops = manager->overlay_ops;
+	struct exynos_drm_overlay *overlay = data;
+
+	overlay_ops->mode_set(manager->dev, overlay);
+}
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DRM Encoder Driver");
+MODULE_LICENSE("GPL");
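
As a usage note, exynos_drm_fn_encoder() is the hook the CRTC side uses to run one of the per-encoder callbacks above (such as exynos_drm_enable_vblank()) on every encoder attached to a given CRTC. A rough sketch, assuming the exynos_drm_private layout from exynos_drm_drv.h; the function name here is illustrative.

#include "drmP.h"

#include "exynos_drm_drv.h"
#include "exynos_drm_encoder.h"

static int example_crtc_enable_vblank(struct drm_device *dev, int crtc)
{
	struct exynos_drm_private *private = dev->dev_private;

	if (crtc >= MAX_CRTC)
		return -EINVAL;

	/* pass the pipe number down so each manager can record it. */
	exynos_drm_fn_encoder(private->crtc[crtc], &crtc,
			exynos_drm_enable_vblank);

	return 0;
}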

+ 45 - 0
drivers/gpu/drm/exynos/exynos_drm_encoder.h

@@ -0,0 +1,45 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ *	Inki Dae <inki.dae@samsung.com>
+ *	Joonyoung Shim <jy0922.shim@samsung.com>
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_ENCODER_H_
+#define _EXYNOS_DRM_ENCODER_H_
+
+struct exynos_drm_manager;
+
+struct drm_encoder *exynos_drm_encoder_create(struct drm_device *dev,
+					       struct exynos_drm_manager *mgr,
+					       unsigned int possible_crtcs);
+struct exynos_drm_manager *
+exynos_drm_get_manager(struct drm_encoder *encoder);
+void exynos_drm_fn_encoder(struct drm_crtc *crtc, void *data,
+			    void (*fn)(struct drm_encoder *, void *));
+void exynos_drm_enable_vblank(struct drm_encoder *encoder, void *data);
+void exynos_drm_disable_vblank(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_crtc_commit(struct drm_encoder *encoder, void *data);
+void exynos_drm_encoder_crtc_mode_set(struct drm_encoder *encoder, void *data);
+
+#endif

+ 265 - 0
drivers/gpu/drm/exynos/exynos_drm_fb.c

@@ -0,0 +1,265 @@
+/* exynos_drm_fb.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ *	Inki Dae <inki.dae@samsung.com>
+ *	Joonyoung Shim <jy0922.shim@samsung.com>
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+#include "exynos_drm_fb.h"
+#include "exynos_drm_buf.h"
+#include "exynos_drm_gem.h"
+
+#define to_exynos_fb(x)	container_of(x, struct exynos_drm_fb, fb)
+
+/*
+ * exynos specific framebuffer structure.
+ *
+ * @fb: drm framebuffer object.
+ * @exynos_gem_obj: exynos specific gem object containing a gem object.
+ * @entry: pointer to exynos drm buffer entry object.
+ *	- contains only the information on the physically contiguous memory
+ *	region allocated at default framebuffer creation.
+ */
+struct exynos_drm_fb {
+	struct drm_framebuffer		fb;
+	struct exynos_drm_gem_obj	*exynos_gem_obj;
+	struct exynos_drm_buf_entry	*entry;
+};
+
+static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
+{
+	struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	drm_framebuffer_cleanup(fb);
+
+	/*
+	 * the default framebuffer has no gem object, so
+	 * its buffer should be released here.
+	 */
+	if (!exynos_fb->exynos_gem_obj && exynos_fb->entry)
+		exynos_drm_buf_destroy(fb->dev, exynos_fb->entry);
+
+	kfree(exynos_fb);
+	exynos_fb = NULL;
+}
+
+static int exynos_drm_fb_create_handle(struct drm_framebuffer *fb,
+					struct drm_file *file_priv,
+					unsigned int *handle)
+{
+	struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	return drm_gem_handle_create(file_priv,
+			&exynos_fb->exynos_gem_obj->base, handle);
+}
+
+static int exynos_drm_fb_dirty(struct drm_framebuffer *fb,
+				struct drm_file *file_priv, unsigned flags,
+				unsigned color, struct drm_clip_rect *clips,
+				unsigned num_clips)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/* TODO */
+
+	return 0;
+}
+
+static struct drm_framebuffer_funcs exynos_drm_fb_funcs = {
+	.destroy	= exynos_drm_fb_destroy,
+	.create_handle	= exynos_drm_fb_create_handle,
+	.dirty		= exynos_drm_fb_dirty,
+};
+
+static struct drm_framebuffer *
+exynos_drm_fb_init(struct drm_file *file_priv, struct drm_device *dev,
+		    struct drm_mode_fb_cmd *mode_cmd)
+{
+	struct exynos_drm_fb *exynos_fb;
+	struct drm_framebuffer *fb;
+	struct exynos_drm_gem_obj *exynos_gem_obj = NULL;
+	struct drm_gem_object *obj;
+	unsigned int size;
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	mode_cmd->pitch = max(mode_cmd->pitch,
+			mode_cmd->width * (mode_cmd->bpp >> 3));
+
+	DRM_LOG_KMS("drm fb create(%dx%d)\n",
+			mode_cmd->width, mode_cmd->height);
+
+	exynos_fb = kzalloc(sizeof(*exynos_fb), GFP_KERNEL);
+	if (!exynos_fb) {
+		DRM_ERROR("failed to allocate exynos drm framebuffer.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	fb = &exynos_fb->fb;
+	ret = drm_framebuffer_init(dev, fb, &exynos_drm_fb_funcs);
+	if (ret) {
+		DRM_ERROR("failed to initialize framebuffer.\n");
+		goto err_init;
+	}
+
+	DRM_LOG_KMS("create: fb id: %d\n", fb->base.id);
+
+	size = mode_cmd->pitch * mode_cmd->height;
+
+	/*
+	 * mode_cmd->handle could be NULL at boot time or on
+	 * user request. if NULL, a new buffer or a gem object
+	 * is allocated.
+	 */
+	if (!mode_cmd->handle) {
+		if (!file_priv) {
+			struct exynos_drm_buf_entry *entry;
+
+			/*
+			 * if file_priv is NULL, only a buffer is
+			 * allocated and this buffer is used for the
+			 * default framebuffer.
+			 */
+			entry = exynos_drm_buf_create(dev, size);
+			if (IS_ERR(entry)) {
+				ret = PTR_ERR(entry);
+				goto err_buffer;
+			}
+
+			exynos_fb->entry = entry;
+
+			DRM_LOG_KMS("default fb: paddr = 0x%lx, size = 0x%x\n",
+					(unsigned long)entry->paddr, size);
+
+			goto out;
+		} else {
+			exynos_gem_obj = exynos_drm_gem_create(file_priv, dev,
+							size,
+							&mode_cmd->handle);
+			if (IS_ERR(exynos_gem_obj)) {
+				ret = PTR_ERR(exynos_gem_obj);
+				goto err_buffer;
+			}
+		}
+	} else {
+		obj = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
+		if (!obj) {
+			DRM_ERROR("failed to lookup gem object.\n");
+			ret = -ENOENT;
+			goto err_buffer;
+		}
+
+		exynos_gem_obj = to_exynos_gem_obj(obj);
+
+		drm_gem_object_unreference_unlocked(obj);
+	}
+
+	/*
+	 * at this point an exynos_gem_obj was obtained from either a handle
+	 * or a new creation; only the default framebuffer path above leaves
+	 * exynos_fb->exynos_gem_obj NULL, so the default framebuffer has no
+	 * gem object of its own, only its own buffer object.
+	 */
+	exynos_fb->entry = exynos_gem_obj->entry;
+
+	DRM_LOG_KMS("paddr = 0x%lx, size = 0x%x, gem object = 0x%x\n",
+			(unsigned long)exynos_fb->entry->paddr, size,
+			(unsigned int)&exynos_gem_obj->base);
+
+out:
+	exynos_fb->exynos_gem_obj = exynos_gem_obj;
+
+	drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+
+	return fb;
+
+err_buffer:
+	drm_framebuffer_cleanup(fb);
+
+err_init:
+	kfree(exynos_fb);
+
+	return ERR_PTR(ret);
+}
+
+struct drm_framebuffer *exynos_drm_fb_create(struct drm_device *dev,
+					      struct drm_file *file_priv,
+					      struct drm_mode_fb_cmd *mode_cmd)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	return exynos_drm_fb_init(file_priv, dev, mode_cmd);
+}
+
+struct exynos_drm_buf_entry *exynos_drm_fb_get_buf(struct drm_framebuffer *fb)
+{
+	struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
+	struct exynos_drm_buf_entry *entry;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	entry = exynos_fb->entry;
+	if (!entry)
+		return NULL;
+
+	DRM_DEBUG_KMS("vaddr = 0x%lx, paddr = 0x%lx\n",
+			(unsigned long)entry->vaddr,
+			(unsigned long)entry->paddr);
+
+	return entry;
+}
+
+static struct drm_mode_config_funcs exynos_drm_mode_config_funcs = {
+	.fb_create = exynos_drm_fb_create,
+};
+
+void exynos_drm_mode_config_init(struct drm_device *dev)
+{
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+
+	/*
+	 * set max width and height as default values (4096x4096).
+	 * these values are used to check the framebuffer size limitation
+	 * at drm_mode_addfb().
+	 */
+	dev->mode_config.max_width = 4096;
+	dev->mode_config.max_height = 4096;
+
+	dev->mode_config.funcs = &exynos_drm_mode_config_funcs;
+}
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DRM FB Driver");
+MODULE_LICENSE("GPL");
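
A quick worked example of the pitch and size computation in exynos_drm_fb_init() above, with assumed values (a 1024x600 surface at 32 bpp and no pitch supplied by userspace):

#include <linux/kernel.h>

static unsigned int example_fb_alloc_size(void)
{
	unsigned int width = 1024, height = 600, bpp = 32;
	unsigned int pitch = 0;		/* nothing supplied by userspace */

	/* pitch is at least width * bytes-per-pixel: 1024 * 4 = 4096 bytes */
	pitch = max(pitch, width * (bpp >> 3));

	/* allocation size is pitch * height: 4096 * 600 = 2457600 bytes */
	return pitch * height;
}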

+ 37 - 0
drivers/gpu/drm/exynos/exynos_drm_fb.h

@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ *	Inki Dae <inki.dae@samsung.com>
+ *	Joonyoung Shim <jy0922.shim@samsung.com>
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_FB_H_
+#define _EXYNOS_DRM_FB_H_
+
+struct drm_framebuffer *exynos_drm_fb_create(struct drm_device *dev,
+					      struct drm_file *filp,
+					      struct drm_mode_fb_cmd *mode_cmd);
+
+void exynos_drm_mode_config_init(struct drm_device *dev);
+
+#endif

+ 456 - 0
drivers/gpu/drm/exynos/exynos_drm_fbdev.c

@@ -0,0 +1,456 @@
+/* exynos_drm_fbdev.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Authors:
+ *	Inki Dae <inki.dae@samsung.com>
+ *	Joonyoung Shim <jy0922.shim@samsung.com>
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm_crtc.h"
+#include "drm_fb_helper.h"
+#include "drm_crtc_helper.h"
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_fb.h"
+#include "exynos_drm_buf.h"
+
+#define MAX_CONNECTOR		4
+#define PREFERRED_BPP		32
+
+#define to_exynos_fbdev(x)	container_of(x, struct exynos_drm_fbdev,\
+				drm_fb_helper)
+
+struct exynos_drm_fbdev {
+	struct drm_fb_helper	drm_fb_helper;
+	struct drm_framebuffer	*fb;
+};
+
+static int exynos_drm_fbdev_set_par(struct fb_info *info)
+{
+	struct fb_var_screeninfo *var = &info->var;
+
+	switch (var->bits_per_pixel) {
+	case 32:
+	case 24:
+	case 18:
+	case 16:
+	case 12:
+		info->fix.visual = FB_VISUAL_TRUECOLOR;
+		break;
+	case 1:
+		info->fix.visual = FB_VISUAL_MONO01;
+		break;
+	default:
+		info->fix.visual = FB_VISUAL_PSEUDOCOLOR;
+		break;
+	}
+
+	info->fix.line_length = (var->xres_virtual * var->bits_per_pixel) / 8;
+
+	return drm_fb_helper_set_par(info);
+}
+
+
+static struct fb_ops exynos_drm_fb_ops = {
+	.owner		= THIS_MODULE,
+	.fb_fillrect	= cfb_fillrect,
+	.fb_copyarea	= cfb_copyarea,
+	.fb_imageblit	= cfb_imageblit,
+	.fb_check_var	= drm_fb_helper_check_var,
+	.fb_set_par	= exynos_drm_fbdev_set_par,
+	.fb_blank	= drm_fb_helper_blank,
+	.fb_pan_display	= drm_fb_helper_pan_display,
+	.fb_setcmap	= drm_fb_helper_setcmap,
+};
+
+static int exynos_drm_fbdev_update(struct drm_fb_helper *helper,
+				     struct drm_framebuffer *fb,
+				     unsigned int fb_width,
+				     unsigned int fb_height)
+{
+	struct fb_info *fbi = helper->fbdev;
+	struct drm_device *dev = helper->dev;
+	struct exynos_drm_fbdev *exynos_fb = to_exynos_fbdev(helper);
+	struct exynos_drm_buf_entry *entry;
+	unsigned int size = fb_width * fb_height * (fb->bits_per_pixel >> 3);
+	unsigned long offset;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	exynos_fb->fb = fb;
+
+	drm_fb_helper_fill_fix(fbi, fb->pitch, fb->depth);
+	drm_fb_helper_fill_var(fbi, helper, fb_width, fb_height);
+
+	entry = exynos_drm_fb_get_buf(fb);
+	if (!entry) {
+		DRM_LOG_KMS("entry is null.\n");
+		return -EFAULT;
+	}
+
+	offset = fbi->var.xoffset * (fb->bits_per_pixel >> 3);
+	offset += fbi->var.yoffset * fb->pitch;
+
+	dev->mode_config.fb_base = entry->paddr;
+	fbi->screen_base = entry->vaddr + offset;
+	fbi->fix.smem_start = entry->paddr + offset;
+	fbi->screen_size = size;
+	fbi->fix.smem_len = size;
+
+	return 0;
+}
+
+static int exynos_drm_fbdev_create(struct drm_fb_helper *helper,
+				    struct drm_fb_helper_surface_size *sizes)
+{
+	struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
+	struct drm_device *dev = helper->dev;
+	struct fb_info *fbi;
+	struct drm_mode_fb_cmd mode_cmd = { 0 };
+	struct platform_device *pdev = dev->platformdev;
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	DRM_DEBUG_KMS("surface width(%d), height(%d) and bpp(%d)\n",
+			sizes->surface_width, sizes->surface_height,
+			sizes->surface_bpp);
+
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+	mode_cmd.bpp = sizes->surface_bpp;
+	mode_cmd.depth = sizes->surface_depth;
+
+	mutex_lock(&dev->struct_mutex);
+
+	fbi = framebuffer_alloc(0, &pdev->dev);
+	if (!fbi) {
+		DRM_ERROR("failed to allocate fb info.\n");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	exynos_fbdev->fb = exynos_drm_fb_create(dev, NULL, &mode_cmd);
+	if (IS_ERR_OR_NULL(exynos_fbdev->fb)) {
+		DRM_ERROR("failed to create drm framebuffer.\n");
+		ret = PTR_ERR(exynos_fbdev->fb);
+		goto out;
+	}
+
+	helper->fb = exynos_fbdev->fb;
+	helper->fbdev = fbi;
+
+	fbi->par = helper;
+	fbi->flags = FBINFO_FLAG_DEFAULT;
+	fbi->fbops = &exynos_drm_fb_ops;
+
+	ret = fb_alloc_cmap(&fbi->cmap, 256, 0);
+	if (ret) {
+		DRM_ERROR("failed to allocate cmap.\n");
+		goto out;
+	}
+
+	ret = exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width,
+			sizes->fb_height);
+	if (ret < 0)
+		fb_dealloc_cmap(&fbi->cmap);
+
+/*
+ * on failure, all resources allocated above are released by
+ * drm_mode_config_cleanup() because drm_load() was called prior
+ * to any specific driver such as the fimd or hdmi driver.
+ */
+out:
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+static bool
+exynos_drm_fbdev_is_samefb(struct drm_framebuffer *fb,
+			    struct drm_fb_helper_surface_size *sizes)
+{
+	if (fb->width != sizes->surface_width)
+		return false;
+	if (fb->height != sizes->surface_height)
+		return false;
+	if (fb->bits_per_pixel != sizes->surface_bpp)
+		return false;
+	if (fb->depth != sizes->surface_depth)
+		return false;
+
+	return true;
+}
+
+static int exynos_drm_fbdev_recreate(struct drm_fb_helper *helper,
+				      struct drm_fb_helper_surface_size *sizes)
+{
+	struct drm_device *dev = helper->dev;
+	struct exynos_drm_fbdev *exynos_fbdev = to_exynos_fbdev(helper);
+	struct drm_framebuffer *fb = exynos_fbdev->fb;
+	struct drm_mode_fb_cmd mode_cmd = { 0 };
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (helper->fb != fb) {
+		DRM_ERROR("drm framebuffer is different\n");
+		return -EINVAL;
+	}
+
+	if (exynos_drm_fbdev_is_samefb(fb, sizes))
+		return 0;
+
+	mode_cmd.width = sizes->surface_width;
+	mode_cmd.height = sizes->surface_height;
+	mode_cmd.bpp = sizes->surface_bpp;
+	mode_cmd.depth = sizes->surface_depth;
+
+	if (fb->funcs->destroy)
+		fb->funcs->destroy(fb);
+
+	exynos_fbdev->fb = exynos_drm_fb_create(dev, NULL, &mode_cmd);
+	if (IS_ERR(exynos_fbdev->fb)) {
+		DRM_ERROR("failed to allocate fb.\n");
+		return PTR_ERR(exynos_fbdev->fb);
+	}
+
+	helper->fb = exynos_fbdev->fb;
+	return exynos_drm_fbdev_update(helper, helper->fb, sizes->fb_width,
+			sizes->fb_height);
+}
+
+static int exynos_drm_fbdev_probe(struct drm_fb_helper *helper,
+				   struct drm_fb_helper_surface_size *sizes)
+{
+	int ret = 0;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (!helper->fb) {
+		ret = exynos_drm_fbdev_create(helper, sizes);
+		if (ret < 0) {
+			DRM_ERROR("failed to create fbdev.\n");
+			return ret;
+		}
+
+		/*
+		 * fb_helper expects a positive value on success
+		 * because register_framebuffer() should then be called.
+		 */
+		ret = 1;
+	} else {
+		ret = exynos_drm_fbdev_recreate(helper, sizes);
+		if (ret < 0) {
+			DRM_ERROR("failed to reconfigure fbdev\n");
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+static struct drm_fb_helper_funcs exynos_drm_fb_helper_funcs = {
+	.fb_probe =	exynos_drm_fbdev_probe,
+};
+
+int exynos_drm_fbdev_init(struct drm_device *dev)
+{
+	struct exynos_drm_fbdev *fbdev;
+	struct exynos_drm_private *private = dev->dev_private;
+	struct drm_fb_helper *helper;
+	unsigned int num_crtc;
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (!dev->mode_config.num_crtc || !dev->mode_config.num_connector)
+		return 0;
+
+	fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
+	if (!fbdev) {
+		DRM_ERROR("failed to allocate drm fbdev.\n");
+		return -ENOMEM;
+	}
+
+	private->fb_helper = helper = &fbdev->drm_fb_helper;
+	helper->funcs = &exynos_drm_fb_helper_funcs;
+
+	num_crtc = dev->mode_config.num_crtc;
+
+	ret = drm_fb_helper_init(dev, helper, num_crtc, MAX_CONNECTOR);
+	if (ret < 0) {
+		DRM_ERROR("failed to initialize drm fb helper.\n");
+		goto err_init;
+	}
+
+	ret = drm_fb_helper_single_add_all_connectors(helper);
+	if (ret < 0) {
+		DRM_ERROR("failed to register drm_fb_helper_connector.\n");
+		goto err_setup;
+
+	}
+
+	ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
+	if (ret < 0) {
+		DRM_ERROR("failed to set up hw configuration.\n");
+		goto err_setup;
+	}
+
+	return 0;
+
+err_setup:
+	drm_fb_helper_fini(helper);
+
+err_init:
+	private->fb_helper = NULL;
+	kfree(fbdev);
+
+	return ret;
+}
+
+static void exynos_drm_fbdev_destroy(struct drm_device *dev,
+				      struct drm_fb_helper *fb_helper)
+{
+	struct drm_framebuffer *fb;
+
+	/* release drm framebuffer and real buffer */
+	if (fb_helper->fb && fb_helper->fb->funcs) {
+		fb = fb_helper->fb;
+		if (fb && fb->funcs->destroy)
+			fb->funcs->destroy(fb);
+	}
+
+	/* release linux framebuffer */
+	if (fb_helper->fbdev) {
+		struct fb_info *info;
+		int ret;
+
+		info = fb_helper->fbdev;
+		ret = unregister_framebuffer(info);
+		if (ret < 0)
+			DRM_DEBUG_KMS("failed unregister_framebuffer()\n");
+
+		if (info->cmap.len)
+			fb_dealloc_cmap(&info->cmap);
+
+		framebuffer_release(info);
+	}
+
+	drm_fb_helper_fini(fb_helper);
+}
+
+void exynos_drm_fbdev_fini(struct drm_device *dev)
+{
+	struct exynos_drm_private *private = dev->dev_private;
+	struct exynos_drm_fbdev *fbdev;
+
+	if (!private || !private->fb_helper)
+		return;
+
+	fbdev = to_exynos_fbdev(private->fb_helper);
+
+	exynos_drm_fbdev_destroy(dev, private->fb_helper);
+	kfree(fbdev);
+	private->fb_helper = NULL;
+}
+
+void exynos_drm_fbdev_restore_mode(struct drm_device *dev)
+{
+	struct exynos_drm_private *private = dev->dev_private;
+
+	if (!private || !private->fb_helper)
+		return;
+
+	drm_fb_helper_restore_fbdev_mode(private->fb_helper);
+}
+
+int exynos_drm_fbdev_reinit(struct drm_device *dev)
+{
+	struct exynos_drm_private *private = dev->dev_private;
+	struct drm_fb_helper *fb_helper;
+	int ret;
+
+	if (!private)
+		return -EINVAL;
+
+	/*
+	 * if all sub drivers were unloaded then num_connector is 0,
+	 * in which case the framebuffers should also be destroyed.
+	 */
+	if (!dev->mode_config.num_connector) {
+		exynos_drm_fbdev_fini(dev);
+		return 0;
+	}
+
+	fb_helper = private->fb_helper;
+
+	if (fb_helper) {
+		drm_fb_helper_fini(fb_helper);
+
+		ret = drm_fb_helper_init(dev, fb_helper,
+				dev->mode_config.num_crtc, MAX_CONNECTOR);
+		if (ret < 0) {
+			DRM_ERROR("failed to initialize drm fb helper\n");
+			return ret;
+		}
+
+		ret = drm_fb_helper_single_add_all_connectors(fb_helper);
+		if (ret < 0) {
+			DRM_ERROR("failed to add fb helper to connectors\n");
+			goto err;
+		}
+
+		ret = drm_fb_helper_initial_config(fb_helper, PREFERRED_BPP);
+		if (ret < 0) {
+			DRM_ERROR("failed to set up hw configuration.\n");
+			goto err;
+		}
+	} else {
+		/*
+		 * if drm_load() failed when it was called prior
+		 * to the specific drivers, fb_helper must be NULL and so
+		 * the fb helper has to be initialized and configured
+		 * again here. it means that this function has been
+		 * called by one of the specific drivers.
+		 */
+		ret = exynos_drm_fbdev_init(dev);
+	}
+
+	return ret;
+
+err:
+	/*
+	 * if drm_load() failed when it was called prior
+	 * to the specific drivers, fb_helper can be NULL, so check it.
+	 */
+	if (fb_helper)
+		drm_fb_helper_fini(fb_helper);
+
+	return ret;
+}
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_AUTHOR("Seung-Woo Kim <sw0312.kim@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DRM FBDEV Driver");
+MODULE_LICENSE("GPL");
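
Similarly, a small sketch of the byte-offset math in exynos_drm_fbdev_update() above, with illustrative values; the fbdev panning offsets are converted into a byte offset from the start of the buffer before screen_base and smem_start are set.

static unsigned long example_pan_offset(void)
{
	unsigned long xoffset = 0, yoffset = 16;	/* pan down 16 lines */
	unsigned long bpp = 32, pitch = 4096;		/* 1024 px * 4 bytes */

	/* horizontal bytes plus whole scanlines skipped: 0 + 16 * 4096 = 65536 */
	return xoffset * (bpp >> 3) + yoffset * pitch;
}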

+ 37 - 0
drivers/gpu/drm/exynos/exynos_drm_fbdev.h

@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ *
+ * Authors:
+ *	Inki Dae <inki.dae@samsung.com>
+ *	Joonyoung Shim <jy0922.shim@samsung.com>
+ *	Seung-Woo Kim <sw0312.kim@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_FBDEV_H_
+#define _EXYNOS_DRM_FBDEV_H_
+
+int exynos_drm_fbdev_init(struct drm_device *dev);
+int exynos_drm_fbdev_reinit(struct drm_device *dev);
+void exynos_drm_fbdev_fini(struct drm_device *dev);
+void exynos_drm_fbdev_restore_mode(struct drm_device *dev);
+
+#endif

+ 811 - 0
drivers/gpu/drm/exynos/exynos_drm_fimd.c

@@ -0,0 +1,811 @@
+/* exynos_drm_fimd.c
+ *
+ * Copyright (C) 2011 Samsung Electronics Co.Ltd
+ * Authors:
+ *	Joonyoung Shim <jy0922.shim@samsung.com>
+ *	Inki Dae <inki.dae@samsung.com>
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ */
+#include "drmP.h"
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+
+#include <drm/exynos_drm.h>
+#include <plat/regs-fb-v4.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_fbdev.h"
+#include "exynos_drm_crtc.h"
+
+/*
+ * FIMD stands for Fully Interactive Mobile Display and,
+ * as a display controller, it transfers contents drawn in memory
+ * to an LCD panel through display interfaces such as the RGB or
+ * CPU interface.
+ */
+
+/* position control register for hardware window 0, 2 ~ 4.*/
+#define VIDOSD_A(win)		(VIDOSD_BASE + 0x00 + (win) * 16)
+#define VIDOSD_B(win)		(VIDOSD_BASE + 0x04 + (win) * 16)
+/* size control register for hardware window 0. */
+#define VIDOSD_C_SIZE_W0	(VIDOSD_BASE + 0x08)
+/* alpha control register for hardware window 1 ~ 4. */
+#define VIDOSD_C(win)		(VIDOSD_BASE + 0x18 + (win) * 16)
+/* size control register for hardware window 1 ~ 4. */
+#define VIDOSD_D(win)		(VIDOSD_BASE + 0x0C + (win) * 16)
+
+#define VIDWx_BUF_START(win, buf)	(VIDW_BUF_START(buf) + (win) * 8)
+#define VIDWx_BUF_END(win, buf)		(VIDW_BUF_END(buf) + (win) * 8)
+#define VIDWx_BUF_SIZE(win, buf)	(VIDW_BUF_SIZE(buf) + (win) * 4)
+
+/* color key control register for hardware window 1 ~ 4. */
+#define WKEYCON0_BASE(x)		((WKEYCON0 + 0x140) + (x * 8))
+/* color key value register for hardware window 1 ~ 4. */
+#define WKEYCON1_BASE(x)		((WKEYCON1 + 0x140) + (x * 8))
+
+/* FIMD has totally five hardware windows. */
+#define WINDOWS_NR	5
+
+#define get_fimd_context(dev)	platform_get_drvdata(to_platform_device(dev))
+
+struct fimd_win_data {
+	unsigned int		offset_x;
+	unsigned int		offset_y;
+	unsigned int		ovl_width;
+	unsigned int		ovl_height;
+	unsigned int		fb_width;
+	unsigned int		fb_height;
+	unsigned int		bpp;
+	dma_addr_t		paddr;
+	void __iomem		*vaddr;
+	unsigned int		buf_offsize;
+	unsigned int		line_size;	/* bytes */
+};
+
+struct fimd_context {
+	struct exynos_drm_subdrv	subdrv;
+	int				irq;
+	struct drm_crtc			*crtc;
+	struct clk			*bus_clk;
+	struct clk			*lcd_clk;
+	struct resource			*regs_res;
+	void __iomem			*regs;
+	struct fimd_win_data		win_data[WINDOWS_NR];
+	unsigned int			clkdiv;
+	unsigned int			default_win;
+	unsigned long			irq_flags;
+	u32				vidcon0;
+	u32				vidcon1;
+
+	struct fb_videomode		*timing;
+};
+
+static bool fimd_display_is_connected(struct device *dev)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/* TODO. */
+
+	return true;
+}
+
+static void *fimd_get_timing(struct device *dev)
+{
+	struct fimd_context *ctx = get_fimd_context(dev);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	return ctx->timing;
+}
+
+static int fimd_check_timing(struct device *dev, void *timing)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/* TODO. */
+
+	return 0;
+}
+
+static int fimd_display_power_on(struct device *dev, int mode)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/* TODO. */
+
+	return 0;
+}
+
+static struct exynos_drm_display fimd_display = {
+	.type = EXYNOS_DISPLAY_TYPE_LCD,
+	.is_connected = fimd_display_is_connected,
+	.get_timing = fimd_get_timing,
+	.check_timing = fimd_check_timing,
+	.power_on = fimd_display_power_on,
+};
+
+static void fimd_commit(struct device *dev)
+{
+	struct fimd_context *ctx = get_fimd_context(dev);
+	struct fb_videomode *timing = ctx->timing;
+	u32 val;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/* setup polarity values from machine code. */
+	writel(ctx->vidcon1, ctx->regs + VIDCON1);
+
+	/* setup vertical timing values. */
+	val = VIDTCON0_VBPD(timing->upper_margin - 1) |
+	       VIDTCON0_VFPD(timing->lower_margin - 1) |
+	       VIDTCON0_VSPW(timing->vsync_len - 1);
+	writel(val, ctx->regs + VIDTCON0);
+
+	/* setup horizontal timing values.  */
+	val = VIDTCON1_HBPD(timing->left_margin - 1) |
+	       VIDTCON1_HFPD(timing->right_margin - 1) |
+	       VIDTCON1_HSPW(timing->hsync_len - 1);
+	writel(val, ctx->regs + VIDTCON1);
+
+	/* setup horizontal and vertical display size. */
+	val = VIDTCON2_LINEVAL(timing->yres - 1) |
+	       VIDTCON2_HOZVAL(timing->xres - 1);
+	writel(val, ctx->regs + VIDTCON2);
+
+	/* setup clock source, clock divider, enable dma. */
+	val = ctx->vidcon0;
+	val &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR);
+
+	if (ctx->clkdiv > 1)
+		val |= VIDCON0_CLKVAL_F(ctx->clkdiv - 1) | VIDCON0_CLKDIR;
+	else
+		val &= ~VIDCON0_CLKDIR;	/* 1:1 clock */
+
+	/*
+	 * register fields with the '_F' suffix are updated
+	 * at vsync (same as dma start)
+	 */
+	val |= VIDCON0_ENVID | VIDCON0_ENVID_F;
+	writel(val, ctx->regs + VIDCON0);
+}
+
+static int fimd_enable_vblank(struct device *dev)
+{
+	struct fimd_context *ctx = get_fimd_context(dev);
+	u32 val;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (!test_and_set_bit(0, &ctx->irq_flags)) {
+		val = readl(ctx->regs + VIDINTCON0);
+
+		val |= VIDINTCON0_INT_ENABLE;
+		val |= VIDINTCON0_INT_FRAME;
+
+		val &= ~VIDINTCON0_FRAMESEL0_MASK;
+		val |= VIDINTCON0_FRAMESEL0_VSYNC;
+		val &= ~VIDINTCON0_FRAMESEL1_MASK;
+		val |= VIDINTCON0_FRAMESEL1_NONE;
+
+		writel(val, ctx->regs + VIDINTCON0);
+	}
+
+	return 0;
+}
+
+static void fimd_disable_vblank(struct device *dev)
+{
+	struct fimd_context *ctx = get_fimd_context(dev);
+	u32 val;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (test_and_clear_bit(0, &ctx->irq_flags)) {
+		val = readl(ctx->regs + VIDINTCON0);
+
+		val &= ~VIDINTCON0_INT_FRAME;
+		val &= ~VIDINTCON0_INT_ENABLE;
+
+		writel(val, ctx->regs + VIDINTCON0);
+	}
+}
+
+static struct exynos_drm_manager_ops fimd_manager_ops = {
+	.commit = fimd_commit,
+	.enable_vblank = fimd_enable_vblank,
+	.disable_vblank = fimd_disable_vblank,
+};
+
+static void fimd_win_mode_set(struct device *dev,
+			      struct exynos_drm_overlay *overlay)
+{
+	struct fimd_context *ctx = get_fimd_context(dev);
+	struct fimd_win_data *win_data;
+	unsigned long offset;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (!overlay) {
+		dev_err(dev, "overlay is NULL\n");
+		return;
+	}
+
+	offset = overlay->fb_x * (overlay->bpp >> 3);
+	offset += overlay->fb_y * overlay->pitch;
+
+	DRM_DEBUG_KMS("offset = 0x%lx, pitch = %x\n", offset, overlay->pitch);
+
+	win_data = &ctx->win_data[ctx->default_win];
+
+	win_data->offset_x = overlay->crtc_x;
+	win_data->offset_y = overlay->crtc_y;
+	win_data->ovl_width = overlay->crtc_width;
+	win_data->ovl_height = overlay->crtc_height;
+	win_data->fb_width = overlay->fb_width;
+	win_data->fb_height = overlay->fb_height;
+	win_data->paddr = overlay->paddr + offset;
+	win_data->vaddr = overlay->vaddr + offset;
+	win_data->bpp = overlay->bpp;
+	win_data->buf_offsize = (overlay->fb_width - overlay->crtc_width) *
+				(overlay->bpp >> 3);
+	win_data->line_size = overlay->crtc_width * (overlay->bpp >> 3);
+
+	DRM_DEBUG_KMS("offset_x = %d, offset_y = %d\n",
+			win_data->offset_x, win_data->offset_y);
+	DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
+			win_data->ovl_width, win_data->ovl_height);
+	DRM_DEBUG_KMS("paddr = 0x%lx, vaddr = 0x%lx\n",
+			(unsigned long)win_data->paddr,
+			(unsigned long)win_data->vaddr);
+	DRM_DEBUG_KMS("fb_width = %d, crtc_width = %d\n",
+			overlay->fb_width, overlay->crtc_width);
+}
+
+static void fimd_win_set_pixfmt(struct device *dev, unsigned int win)
+{
+	struct fimd_context *ctx = get_fimd_context(dev);
+	struct fimd_win_data *win_data = &ctx->win_data[win];
+	unsigned long val;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	val = WINCONx_ENWIN;
+
+	switch (win_data->bpp) {
+	case 1:
+		val |= WINCON0_BPPMODE_1BPP;
+		val |= WINCONx_BITSWP;
+		val |= WINCONx_BURSTLEN_4WORD;
+		break;
+	case 2:
+		val |= WINCON0_BPPMODE_2BPP;
+		val |= WINCONx_BITSWP;
+		val |= WINCONx_BURSTLEN_8WORD;
+		break;
+	case 4:
+		val |= WINCON0_BPPMODE_4BPP;
+		val |= WINCONx_BITSWP;
+		val |= WINCONx_BURSTLEN_8WORD;
+		break;
+	case 8:
+		val |= WINCON0_BPPMODE_8BPP_PALETTE;
+		val |= WINCONx_BURSTLEN_8WORD;
+		val |= WINCONx_BYTSWP;
+		break;
+	case 16:
+		val |= WINCON0_BPPMODE_16BPP_565;
+		val |= WINCONx_HAWSWP;
+		val |= WINCONx_BURSTLEN_16WORD;
+		break;
+	case 24:
+		val |= WINCON0_BPPMODE_24BPP_888;
+		val |= WINCONx_WSWP;
+		val |= WINCONx_BURSTLEN_16WORD;
+		break;
+	case 32:
+		val |= WINCON1_BPPMODE_28BPP_A4888
+			| WINCON1_BLD_PIX | WINCON1_ALPHA_SEL;
+		val |= WINCONx_WSWP;
+		val |= WINCONx_BURSTLEN_16WORD;
+		break;
+	default:
+		DRM_DEBUG_KMS("invalid pixel size, so using unpacked 24bpp.\n");
+
+		val |= WINCON0_BPPMODE_24BPP_888;
+		val |= WINCONx_WSWP;
+		val |= WINCONx_BURSTLEN_16WORD;
+		break;
+	}
+
+	DRM_DEBUG_KMS("bpp = %d\n", win_data->bpp);
+
+	writel(val, ctx->regs + WINCON(win));
+}
+
+static void fimd_win_set_colkey(struct device *dev, unsigned int win)
+{
+	struct fimd_context *ctx = get_fimd_context(dev);
+	unsigned int keycon0 = 0, keycon1 = 0;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	keycon0 = ~(WxKEYCON0_KEYBL_EN | WxKEYCON0_KEYEN_F |
+			WxKEYCON0_DIRCON) | WxKEYCON0_COMPKEY(0);
+
+	keycon1 = WxKEYCON1_COLVAL(0xffffffff);
+
+	writel(keycon0, ctx->regs + WKEYCON0_BASE(win));
+	writel(keycon1, ctx->regs + WKEYCON1_BASE(win));
+}
+
+static void fimd_win_commit(struct device *dev)
+{
+	struct fimd_context *ctx = get_fimd_context(dev);
+	struct fimd_win_data *win_data;
+	int win = ctx->default_win;
+	unsigned long val, alpha, size;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (win < 0 || win >= WINDOWS_NR)
+		return;
+
+	win_data = &ctx->win_data[win];
+
+	/*
+	 * The SHADOWCON register controls when window settings take effect.
+	 *
+	 * For example, if DMA is started while only the width field of a
+	 * register has been programmed, the FIMD hardware could malfunction.
+	 * While a window is protected, its register fields marked with '_F'
+	 * are not latched at vsync; they are applied only once the window
+	 * is unprotected again.
+	 */
+
+	/* protect windows */
+	val = readl(ctx->regs + SHADOWCON);
+	val |= SHADOWCON_WINx_PROTECT(win);
+	writel(val, ctx->regs + SHADOWCON);
+
+	/* buffer start address */
+	val = win_data->paddr;
+	writel(val, ctx->regs + VIDWx_BUF_START(win, 0));
+
+	/* buffer end address */
+	size = win_data->fb_width * win_data->ovl_height * (win_data->bpp >> 3);
+	val = win_data->paddr + size;
+	writel(val, ctx->regs + VIDWx_BUF_END(win, 0));
+
+	DRM_DEBUG_KMS("start addr = 0x%lx, end addr = 0x%lx, size = 0x%lx\n",
+			(unsigned long)win_data->paddr, val, size);
+	DRM_DEBUG_KMS("ovl_width = %d, ovl_height = %d\n",
+			win_data->ovl_width, win_data->ovl_height);
+
+	/* buffer size */
+	val = VIDW_BUF_SIZE_OFFSET(win_data->buf_offsize) |
+		VIDW_BUF_SIZE_PAGEWIDTH(win_data->line_size);
+	writel(val, ctx->regs + VIDWx_BUF_SIZE(win, 0));
+
+	/* OSD position */
+	val = VIDOSDxA_TOPLEFT_X(win_data->offset_x) |
+		VIDOSDxA_TOPLEFT_Y(win_data->offset_y);
+	writel(val, ctx->regs + VIDOSD_A(win));
+
+	val = VIDOSDxB_BOTRIGHT_X(win_data->offset_x +
+					win_data->ovl_width - 1) |
+		VIDOSDxB_BOTRIGHT_Y(win_data->offset_y +
+					win_data->ovl_height - 1);
+	writel(val, ctx->regs + VIDOSD_B(win));
+
+	DRM_DEBUG_KMS("osd pos: tx = %d, ty = %d, bx = %d, by = %d\n",
+			win_data->offset_x, win_data->offset_y,
+			win_data->offset_x + win_data->ovl_width - 1,
+			win_data->offset_y + win_data->ovl_height - 1);
+
+	/* hardware window 0 doesn't support alpha channel. */
+	if (win != 0) {
+		/* OSD alpha */
+		alpha = VIDISD14C_ALPHA1_R(0xf) |
+			VIDISD14C_ALPHA1_G(0xf) |
+			VIDISD14C_ALPHA1_B(0xf);
+
+		writel(alpha, ctx->regs + VIDOSD_C(win));
+	}
+
+	/* OSD size */
+	if (win != 3 && win != 4) {
+		u32 offset = VIDOSD_D(win);
+		if (win == 0)
+			offset = VIDOSD_C_SIZE_W0;
+		val = win_data->ovl_width * win_data->ovl_height;
+		writel(val, ctx->regs + offset);
+
+		DRM_DEBUG_KMS("osd size = 0x%x\n", (unsigned int)val);
+	}
+
+	fimd_win_set_pixfmt(dev, win);
+
+	/* hardware window 0 doesn't support color key. */
+	if (win != 0)
+		fimd_win_set_colkey(dev, win);
+
+	/* Enable DMA channel and unprotect windows */
+	val = readl(ctx->regs + SHADOWCON);
+	val |= SHADOWCON_CHx_ENABLE(win);
+	val &= ~SHADOWCON_WINx_PROTECT(win);
+	writel(val, ctx->regs + SHADOWCON);
+}
+
+static void fimd_win_disable(struct device *dev)
+{
+	struct fimd_context *ctx = get_fimd_context(dev);
+	struct fimd_win_data *win_data;
+	int win = ctx->default_win;
+	u32 val;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (win < 0 || win >= WINDOWS_NR)
+		return;
+
+	win_data = &ctx->win_data[win];
+
+	/* protect windows */
+	val = readl(ctx->regs + SHADOWCON);
+	val |= SHADOWCON_WINx_PROTECT(win);
+	writel(val, ctx->regs + SHADOWCON);
+
+	/* wincon */
+	val = readl(ctx->regs + WINCON(win));
+	val &= ~WINCONx_ENWIN;
+	writel(val, ctx->regs + WINCON(win));
+
+	/* unprotect windows */
+	val = readl(ctx->regs + SHADOWCON);
+	val &= ~SHADOWCON_CHx_ENABLE(win);
+	val &= ~SHADOWCON_WINx_PROTECT(win);
+	writel(val, ctx->regs + SHADOWCON);
+}
+
+static struct exynos_drm_overlay_ops fimd_overlay_ops = {
+	.mode_set = fimd_win_mode_set,
+	.commit = fimd_win_commit,
+	.disable = fimd_win_disable,
+};
+
+static void fimd_finish_pageflip(struct drm_device *drm_dev, int crtc)
+{
+	struct exynos_drm_private *dev_priv = drm_dev->dev_private;
+	struct drm_pending_vblank_event *e, *t;
+	struct timeval now;
+	unsigned long flags;
+	bool is_checked = false;
+
+	spin_lock_irqsave(&drm_dev->event_lock, flags);
+
+	list_for_each_entry_safe(e, t, &dev_priv->pageflip_event_list,
+			base.link) {
+		/* if the event's pipe isn't the same as the crtc, ignore it. */
+		if (crtc != e->pipe)
+			continue;
+
+		is_checked = true;
+
+		do_gettimeofday(&now);
+		e->event.sequence = 0;
+		e->event.tv_sec = now.tv_sec;
+		e->event.tv_usec = now.tv_usec;
+
+		list_move_tail(&e->base.link, &e->base.file_priv->event_list);
+		wake_up_interruptible(&e->base.file_priv->event_wait);
+	}
+
+	if (is_checked)
+		drm_vblank_put(drm_dev, crtc);
+
+	spin_unlock_irqrestore(&drm_dev->event_lock, flags);
+}
+
+static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
+{
+	struct fimd_context *ctx = (struct fimd_context *)dev_id;
+	struct exynos_drm_subdrv *subdrv = &ctx->subdrv;
+	struct drm_device *drm_dev = subdrv->drm_dev;
+	struct exynos_drm_manager *manager = &subdrv->manager;
+	u32 val;
+
+	val = readl(ctx->regs + VIDINTCON1);
+
+	if (val & VIDINTCON1_INT_FRAME)
+		/* VSYNC interrupt */
+		writel(VIDINTCON1_INT_FRAME, ctx->regs + VIDINTCON1);
+
+	drm_handle_vblank(drm_dev, manager->pipe);
+	fimd_finish_pageflip(drm_dev, manager->pipe);
+
+	return IRQ_HANDLED;
+}
+
+static int fimd_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/*
+	 * enable drm irq mode.
+	 * - with irq_enabled = 1, we can use the vblank feature.
+	 *
+	 * Note that we do not use the drm core irq handler; this driver
+	 * installs its own handler instead, because the drm framework
+	 * supports only one irq handler.
+	 */
+	drm_dev->irq_enabled = 1;
+
+	/*
+	 * with vblank_disable_allowed = 1, the vblank interrupt is disabled
+	 * by the drm timer once the current process gives up ownership of
+	 * the vblank event (i.e. after drm_vblank_put() has been called).
+	 */
+	drm_dev->vblank_disable_allowed = 1;
+
+	return 0;
+}
+
+static void fimd_subdrv_remove(struct drm_device *drm_dev)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/* TODO. */
+}
+
+static int fimd_calc_clkdiv(struct fimd_context *ctx,
+			    struct fb_videomode *timing)
+{
+	unsigned long clk = clk_get_rate(ctx->lcd_clk);
+	u32 retrace;
+	u32 clkdiv;
+	u32 best_framerate = 0;
+	u32 framerate;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	retrace = timing->left_margin + timing->hsync_len +
+				timing->right_margin + timing->xres;
+	retrace *= timing->upper_margin + timing->vsync_len +
+				timing->lower_margin + timing->yres;
+
+	/* default framerate is 60Hz */
+	if (!timing->refresh)
+		timing->refresh = 60;
+
+	clk /= retrace;
+
+	for (clkdiv = 1; clkdiv < 0x100; clkdiv++) {
+		int tmp;
+
+		/* get best framerate */
+		framerate = clk / clkdiv;
+		tmp = timing->refresh - framerate;
+		if (tmp < 0) {
+			best_framerate = framerate;
+			continue;
+		} else {
+			if (!best_framerate)
+				best_framerate = framerate;
+			else if (tmp < (best_framerate - framerate))
+				best_framerate = framerate;
+			break;
+		}
+	}
+
+	return clkdiv;
+}
+
+static void fimd_clear_win(struct fimd_context *ctx, int win)
+{
+	u32 val;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	writel(0, ctx->regs + WINCON(win));
+	writel(0, ctx->regs + VIDOSD_A(win));
+	writel(0, ctx->regs + VIDOSD_B(win));
+	writel(0, ctx->regs + VIDOSD_C(win));
+
+	if (win == 1 || win == 2)
+		writel(0, ctx->regs + VIDOSD_D(win));
+
+	val = readl(ctx->regs + SHADOWCON);
+	val &= ~SHADOWCON_WINx_PROTECT(win);
+	writel(val, ctx->regs + SHADOWCON);
+}
+
+static int __devinit fimd_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct fimd_context *ctx;
+	struct exynos_drm_subdrv *subdrv;
+	struct exynos_drm_fimd_pdata *pdata;
+	struct fb_videomode *timing;
+	struct resource *res;
+	int win;
+	int ret = -EINVAL;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	pdata = pdev->dev.platform_data;
+	if (!pdata) {
+		dev_err(dev, "no platform data specified\n");
+		return -EINVAL;
+	}
+
+	timing = &pdata->timing;
+	if (!timing) {
+		dev_err(dev, "timing is null.\n");
+		return -EINVAL;
+	}
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (!ctx)
+		return -ENOMEM;
+
+	ctx->bus_clk = clk_get(dev, "fimd");
+	if (IS_ERR(ctx->bus_clk)) {
+		dev_err(dev, "failed to get bus clock\n");
+		ret = PTR_ERR(ctx->bus_clk);
+		goto err_clk_get;
+	}
+
+	clk_enable(ctx->bus_clk);
+
+	ctx->lcd_clk = clk_get(dev, "sclk_fimd");
+	if (IS_ERR(ctx->lcd_clk)) {
+		dev_err(dev, "failed to get lcd clock\n");
+		ret = PTR_ERR(ctx->lcd_clk);
+		goto err_bus_clk;
+	}
+
+	clk_enable(ctx->lcd_clk);
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res) {
+		dev_err(dev, "failed to find registers\n");
+		ret = -ENOENT;
+		goto err_clk;
+	}
+
+	ctx->regs_res = request_mem_region(res->start, resource_size(res),
+					   dev_name(dev));
+	if (!ctx->regs_res) {
+		dev_err(dev, "failed to claim register region\n");
+		ret = -ENOENT;
+		goto err_clk;
+	}
+
+	ctx->regs = ioremap(res->start, resource_size(res));
+	if (!ctx->regs) {
+		dev_err(dev, "failed to map registers\n");
+		ret = -ENXIO;
+		goto err_req_region_io;
+	}
+
+	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+	if (!res) {
+		dev_err(dev, "failed to find irq resource.\n");
+		goto err_req_region_irq;
+	}
+
+	ctx->irq = res->start;
+
+	for (win = 0; win < WINDOWS_NR; win++)
+		fimd_clear_win(ctx, win);
+
+	ret = request_irq(ctx->irq, fimd_irq_handler, 0, "drm_fimd", ctx);
+	if (ret < 0) {
+		dev_err(dev, "irq request failed.\n");
+		goto err_req_irq;
+	}
+
+	ctx->clkdiv = fimd_calc_clkdiv(ctx, timing);
+	ctx->vidcon0 = pdata->vidcon0;
+	ctx->vidcon1 = pdata->vidcon1;
+	ctx->default_win = pdata->default_win;
+	ctx->timing = timing;
+
+	timing->pixclock = clk_get_rate(ctx->lcd_clk) / ctx->clkdiv;
+
+	DRM_DEBUG_KMS("pixel clock = %d, clkdiv = %d\n",
+			timing->pixclock, ctx->clkdiv);
+
+	subdrv = &ctx->subdrv;
+
+	subdrv->probe = fimd_subdrv_probe;
+	subdrv->remove = fimd_subdrv_remove;
+	subdrv->manager.pipe = -1;
+	subdrv->manager.ops = &fimd_manager_ops;
+	subdrv->manager.overlay_ops = &fimd_overlay_ops;
+	subdrv->manager.display = &fimd_display;
+	subdrv->manager.dev = dev;
+
+	platform_set_drvdata(pdev, ctx);
+	exynos_drm_subdrv_register(subdrv);
+
+	return 0;
+
+err_req_irq:
+err_req_region_irq:
+	iounmap(ctx->regs);
+
+err_req_region_io:
+	release_resource(ctx->regs_res);
+	kfree(ctx->regs_res);
+
+err_clk:
+	clk_disable(ctx->lcd_clk);
+	clk_put(ctx->lcd_clk);
+
+err_bus_clk:
+	clk_disable(ctx->bus_clk);
+	clk_put(ctx->bus_clk);
+
+err_clk_get:
+	kfree(ctx);
+	return ret;
+}
+
+static int __devexit fimd_remove(struct platform_device *pdev)
+{
+	struct fimd_context *ctx = platform_get_drvdata(pdev);
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	exynos_drm_subdrv_unregister(&ctx->subdrv);
+
+	clk_disable(ctx->lcd_clk);
+	clk_disable(ctx->bus_clk);
+	clk_put(ctx->lcd_clk);
+	clk_put(ctx->bus_clk);
+
+	iounmap(ctx->regs);
+	release_resource(ctx->regs_res);
+	kfree(ctx->regs_res);
+	free_irq(ctx->irq, ctx);
+
+	kfree(ctx);
+
+	return 0;
+}
+
+static struct platform_driver fimd_driver = {
+	.probe		= fimd_probe,
+	.remove		= __devexit_p(fimd_remove),
+	.driver		= {
+		.name	= "exynos4-fb",
+		.owner	= THIS_MODULE,
+	},
+};
+
+static int __init fimd_init(void)
+{
+	return platform_driver_register(&fimd_driver);
+}
+
+static void __exit fimd_exit(void)
+{
+	platform_driver_unregister(&fimd_driver);
+}
+
+module_init(fimd_init);
+module_exit(fimd_exit);
+
+MODULE_AUTHOR("Joonyoung Shim <jy0922.shim@samsung.com>");
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_DESCRIPTION("Samsung DRM FIMD Driver");
+MODULE_LICENSE("GPL");
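
For orientation, the sketch below walks through the pixel-clock arithmetic used by fimd_calc_clkdiv() above as a standalone program: the total number of pixels scanned per frame (what the driver calls "retrace") is the product of the horizontal and vertical totals, clk / retrace is the frame rate at divider 1, and the loop then picks a divider for the requested refresh rate. The clock rate and panel timings below are assumed, illustrative values (they are not taken from this patch), and the search is simplified to "closest frame rate wins" rather than reproducing the kernel loop exactly.

/* Standalone illustration of the FIMD clock-divider arithmetic.
 * All numbers are assumptions for the example, not values from the patch.
 */
#include <stdio.h>

int main(void)
{
	/* assumed sclk_fimd rate and 1024x600 panel timings (illustrative) */
	unsigned long clk = 133500000UL;	/* 133.5 MHz */
	unsigned int xres = 1024, hsync = 20, left = 36, right = 80;
	unsigned int yres = 600, vsync = 3, upper = 15, lower = 3;
	unsigned int refresh = 60;		/* requested frame rate */

	/* total pixels scanned out per frame (the driver's "retrace") */
	unsigned long retrace = (unsigned long)(xres + hsync + left + right) *
				(yres + vsync + upper + lower);

	/* frame rate with a divider of 1 */
	unsigned long base_fps = clk / retrace;

	unsigned int div, best_div = 1;
	long best_err = -1;

	for (div = 1; div < 0x100; div++) {
		long err = (long)(base_fps / div) - (long)refresh;

		if (err < 0)
			err = -err;
		if (best_err < 0 || err < best_err) {
			best_err = err;
			best_div = div;
		}
	}

	printf("retrace = %lu, fps@div1 = %lu -> clkdiv = %u (%lu fps)\n",
	       retrace, base_fps, best_div, base_fps / best_div);
	return 0;
}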

+ 415 - 0
drivers/gpu/drm/exynos/exynos_drm_gem.c

@@ -0,0 +1,415 @@
+/* exynos_drm_gem.c
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+
+#include <drm/exynos_drm.h>
+
+#include "exynos_drm_drv.h"
+#include "exynos_drm_gem.h"
+#include "exynos_drm_buf.h"
+
+static unsigned int convert_to_vm_err_msg(int msg)
+{
+	unsigned int out_msg;
+
+	switch (msg) {
+	case 0:
+	case -ERESTARTSYS:
+	case -EINTR:
+		out_msg = VM_FAULT_NOPAGE;
+		break;
+
+	case -ENOMEM:
+		out_msg = VM_FAULT_OOM;
+		break;
+
+	default:
+		out_msg = VM_FAULT_SIGBUS;
+		break;
+	}
+
+	return out_msg;
+}
+
+static unsigned int get_gem_mmap_offset(struct drm_gem_object *obj)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	return (unsigned int)obj->map_list.hash.key << PAGE_SHIFT;
+}
+
+struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv,
+		struct drm_device *dev, unsigned int size,
+		unsigned int *handle)
+{
+	struct exynos_drm_gem_obj *exynos_gem_obj;
+	struct exynos_drm_buf_entry *entry;
+	struct drm_gem_object *obj;
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	size = roundup(size, PAGE_SIZE);
+
+	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
+	if (!exynos_gem_obj) {
+		DRM_ERROR("failed to allocate exynos gem object.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* allocate the new buffer object and memory region. */
+	entry = exynos_drm_buf_create(dev, size);
+	if (!entry) {
+		kfree(exynos_gem_obj);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	exynos_gem_obj->entry = entry;
+
+	obj = &exynos_gem_obj->base;
+
+	ret = drm_gem_object_init(dev, obj, size);
+	if (ret < 0) {
+		DRM_ERROR("failed to initialize gem object.\n");
+		goto err_obj_init;
+	}
+
+	DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
+
+	ret = drm_gem_create_mmap_offset(obj);
+	if (ret < 0) {
+		DRM_ERROR("failed to allocate mmap offset.\n");
+		goto err_create_mmap_offset;
+	}
+
+	/*
+	 * allocate an id in the idr table where the obj is registered;
+	 * the handle holds the id that userspace can see.
+	 */
+	ret = drm_gem_handle_create(file_priv, obj, handle);
+	if (ret)
+		goto err_handle_create;
+
+	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
+
+	/* drop reference from allocate - handle holds it now. */
+	drm_gem_object_unreference_unlocked(obj);
+
+	return exynos_gem_obj;
+
+err_handle_create:
+	drm_gem_free_mmap_offset(obj);
+
+err_create_mmap_offset:
+	drm_gem_object_release(obj);
+
+err_obj_init:
+	exynos_drm_buf_destroy(dev, exynos_gem_obj->entry);
+
+	kfree(exynos_gem_obj);
+
+	return ERR_PTR(ret);
+}
+
+int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
+		struct drm_file *file_priv)
+{
+	struct drm_exynos_gem_create *args = data;
+	struct exynos_drm_gem_obj *exynos_gem_obj;
+
+	DRM_DEBUG_KMS("%s : size = 0x%x\n", __FILE__, args->size);
+
+	exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size,
+			&args->handle);
+	if (IS_ERR(exynos_gem_obj))
+		return PTR_ERR(exynos_gem_obj);
+
+	return 0;
+}
+
+int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
+		struct drm_file *file_priv)
+{
+	struct drm_exynos_gem_map_off *args = data;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
+			args->handle, (unsigned long)args->offset);
+
+	if (!(dev->driver->driver_features & DRIVER_GEM)) {
+		DRM_ERROR("does not support GEM.\n");
+		return -ENODEV;
+	}
+
+	return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
+			&args->offset);
+}
+
+static int exynos_drm_gem_mmap_buffer(struct file *filp,
+		struct vm_area_struct *vma)
+{
+	struct drm_gem_object *obj = filp->private_data;
+	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+	struct exynos_drm_buf_entry *entry;
+	unsigned long pfn, vm_size;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	vma->vm_flags |= (VM_IO | VM_RESERVED);
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+	vma->vm_file = filp;
+
+	vm_size = vma->vm_end - vma->vm_start;
+	/*
+	 * an entry contains the information about the physically contiguous
+	 * memory allocated by user request or at framebuffer creation.
+	 */
+	entry = exynos_gem_obj->entry;
+
+	/* check if user-requested size is valid. */
+	if (vm_size > entry->size)
+		return -EINVAL;
+
+	/*
+	 * get the page frame number of the physical memory to be
+	 * mapped to user space.
+	 */
+	pfn = exynos_gem_obj->entry->paddr >> PAGE_SHIFT;
+
+	DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);
+
+	if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
+				vma->vm_page_prot)) {
+		DRM_ERROR("failed to remap pfn range.\n");
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+static const struct file_operations exynos_drm_gem_fops = {
+	.mmap = exynos_drm_gem_mmap_buffer,
+};
+
+int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
+		struct drm_file *file_priv)
+{
+	struct drm_exynos_gem_mmap *args = data;
+	struct drm_gem_object *obj;
+	unsigned int addr;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	if (!(dev->driver->driver_features & DRIVER_GEM)) {
+		DRM_ERROR("does not support GEM.\n");
+		return -ENODEV;
+	}
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (!obj) {
+		DRM_ERROR("failed to lookup gem object.\n");
+		return -EINVAL;
+	}
+
+	obj->filp->f_op = &exynos_drm_gem_fops;
+	obj->filp->private_data = obj;
+
+	down_write(&current->mm->mmap_sem);
+	addr = do_mmap(obj->filp, 0, args->size,
+			PROT_READ | PROT_WRITE, MAP_SHARED, 0);
+	up_write(&current->mm->mmap_sem);
+
+	drm_gem_object_unreference_unlocked(obj);
+
+	if (IS_ERR((void *)addr))
+		return PTR_ERR((void *)addr);
+
+	args->mapped = addr;
+
+	DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);
+
+	return 0;
+}
+
+int exynos_drm_gem_init_object(struct drm_gem_object *obj)
+{
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	return 0;
+}
+
+void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj)
+{
+	struct exynos_drm_gem_obj *exynos_gem_obj;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	DRM_DEBUG_KMS("handle count = %d\n",
+			atomic_read(&gem_obj->handle_count));
+
+	if (gem_obj->map_list.map)
+		drm_gem_free_mmap_offset(gem_obj);
+
+	/* release file pointer to gem object. */
+	drm_gem_object_release(gem_obj);
+
+	exynos_gem_obj = to_exynos_gem_obj(gem_obj);
+
+	exynos_drm_buf_destroy(gem_obj->dev, exynos_gem_obj->entry);
+
+	kfree(exynos_gem_obj);
+}
+
+int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
+		struct drm_device *dev, struct drm_mode_create_dumb *args)
+{
+	struct exynos_drm_gem_obj *exynos_gem_obj;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/*
+	 * allocate memory to be used for the framebuffer.
+	 * - this callback is called when a user application issues
+	 *	the DRM_IOCTL_MODE_CREATE_DUMB command.
+	 */
+
+	args->pitch = args->width * args->bpp >> 3;
+	args->size = args->pitch * args->height;
+
+	exynos_gem_obj = exynos_drm_gem_create(file_priv, dev, args->size,
+							&args->handle);
+	if (IS_ERR(exynos_gem_obj))
+		return PTR_ERR(exynos_gem_obj);
+
+	return 0;
+}
+
+int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
+		struct drm_device *dev, uint32_t handle, uint64_t *offset)
+{
+	struct exynos_drm_gem_obj *exynos_gem_obj;
+	struct drm_gem_object *obj;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	mutex_lock(&dev->struct_mutex);
+
+	/*
+	 * get the offset of the memory allocated for the drm framebuffer.
+	 * - this callback is called when a user application issues
+	 *	the DRM_IOCTL_MODE_MAP_DUMB command.
+	 */
+
+	obj = drm_gem_object_lookup(dev, file_priv, handle);
+	if (!obj) {
+		DRM_ERROR("failed to lookup gem object.\n");
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	exynos_gem_obj = to_exynos_gem_obj(obj);
+
+	*offset = get_gem_mmap_offset(&exynos_gem_obj->base);
+
+	drm_gem_object_unreference(obj);
+
+	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+	struct drm_gem_object *obj = vma->vm_private_data;
+	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
+	struct drm_device *dev = obj->dev;
+	unsigned long pfn;
+	pgoff_t page_offset;
+	int ret;
+
+	page_offset = ((unsigned long)vmf->virtual_address -
+			vma->vm_start) >> PAGE_SHIFT;
+
+	mutex_lock(&dev->struct_mutex);
+
+	pfn = (exynos_gem_obj->entry->paddr >> PAGE_SHIFT) + page_offset;
+
+	ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return convert_to_vm_err_msg(ret);
+}
+
+int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/* set vm_area_struct. */
+	ret = drm_gem_mmap(filp, vma);
+	if (ret < 0) {
+		DRM_ERROR("failed to mmap.\n");
+		return ret;
+	}
+
+	vma->vm_flags &= ~VM_PFNMAP;
+	vma->vm_flags |= VM_MIXEDMAP;
+
+	return ret;
+}
+
+
+int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
+		struct drm_device *dev, unsigned int handle)
+{
+	int ret;
+
+	DRM_DEBUG_KMS("%s\n", __FILE__);
+
+	/*
+	 * obj->refcount and obj->handle_count are decreased and,
+	 * if both of them reach 0, exynos_drm_gem_free_object()
+	 * is called through the callback to release the resources.
+	 */
+	ret = drm_gem_handle_delete(file_priv, handle);
+	if (ret < 0) {
+		DRM_ERROR("failed to delete drm_gem_handle.\n");
+		return ret;
+	}
+
+	return 0;
+}
+
+MODULE_AUTHOR("Inki Dae <inki.dae@samsung.com>");
+MODULE_DESCRIPTION("Samsung SoC DRM GEM Module");
+MODULE_LICENSE("GPL");
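
The dumb-buffer callbacks above, exynos_drm_gem_dumb_create() and exynos_drm_gem_dumb_map_offset(), are reached from userspace through the generic KMS dumb-buffer ioctls. The sketch below is one minimal, hypothetical caller: the device node path and the 1024x600 at 32bpp geometry are assumptions for illustration, error handling is deliberately thin, and it uses only the standard DRM_IOCTL_MODE_CREATE_DUMB / DRM_IOCTL_MODE_MAP_DUMB uapi (build against the libdrm headers, e.g. gcc $(pkg-config --cflags libdrm) example.c).

/* Minimal, hypothetical userspace exercise of the dumb-buffer path.
 * Device node and buffer geometry are assumptions for illustration.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm.h>
#include <drm_mode.h>

int main(void)
{
	struct drm_mode_create_dumb create;
	struct drm_mode_map_dumb map;
	void *fb;
	int fd;

	fd = open("/dev/dri/card0", O_RDWR);	/* assumed device node */
	if (fd < 0)
		return 1;

	memset(&create, 0, sizeof(create));
	create.width = 1024;
	create.height = 600;
	create.bpp = 32;
	/* the driver fills in handle, pitch and size */
	if (ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create) < 0)
		return 1;

	memset(&map, 0, sizeof(map));
	map.handle = create.handle;
	/* the driver returns the fake mmap offset for this handle */
	if (ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map) < 0)
		return 1;

	fb = mmap(0, create.size, PROT_READ | PROT_WRITE, MAP_SHARED,
		  fd, map.offset);
	if (fb == MAP_FAILED)
		return 1;

	memset(fb, 0, create.size);	/* clear the buffer to black */
	printf("handle = %u, pitch = %u, size = %llu\n",
	       create.handle, create.pitch,
	       (unsigned long long)create.size);

	munmap(fb, create.size);
	close(fd);
	return 0;
}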

+ 107 - 0
drivers/gpu/drm/exynos/exynos_drm_gem.h

@@ -0,0 +1,107 @@
+/* exynos_drm_gem.h
+ *
+ * Copyright (c) 2011 Samsung Electronics Co., Ltd.
+ * Author: Inki Dae <inki.dae@samsung.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _EXYNOS_DRM_GEM_H_
+#define _EXYNOS_DRM_GEM_H_
+
+#define to_exynos_gem_obj(x)	container_of(x,\
+			struct exynos_drm_gem_obj, base)
+
+/*
+ * exynos drm buffer structure.
+ *
+ * @base: a gem object.
+ *	- a new handle to this gem object is created
+ *	by drm_gem_handle_create().
+ * @entry: pointer to an exynos drm buffer entry object.
+ *	- contains the information about the physically
+ *	contiguous memory region allocated by user request
+ *	or at framebuffer creation.
+ *
+ * Note: this object is exposed to userspace as kms_bo.handle, so
+ *	userspace can access the buffer through kms_bo.handle.
+ */
+struct exynos_drm_gem_obj {
+	struct drm_gem_object base;
+	struct exynos_drm_buf_entry *entry;
+};
+
+/* create a new buffer and get a new gem handle. */
+struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_file *file_priv,
+		struct drm_device *dev, unsigned int size,
+		unsigned int *handle);
+
+/*
+ * request gem object creation and buffer allocation with a size
+ * calculated from framebuffer information such as width,
+ * height and bpp.
+ */
+int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
+		struct drm_file *file_priv);
+
+/* get buffer offset to map to user space. */
+int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
+		struct drm_file *file_priv);
+
+/* unmap a buffer from user space. */
+int exynos_drm_gem_munmap_ioctl(struct drm_device *dev, void *data,
+		struct drm_file *file_priv);
+
+/* initialize gem object. */
+int exynos_drm_gem_init_object(struct drm_gem_object *obj);
+
+/* free gem object. */
+void exynos_drm_gem_free_object(struct drm_gem_object *gem_obj);
+
+/* create memory region for drm framebuffer. */
+int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
+		struct drm_device *dev, struct drm_mode_create_dumb *args);
+
+/* map memory region for drm framebuffer to user space. */
+int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
+		struct drm_device *dev, uint32_t handle, uint64_t *offset);
+
+/* page fault handler; maps the faulting (virtual) address to physical memory. */
+int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
+
+/*
+ * mmap the physically contiguous memory that a gem object contains
+ * to user space.
+ */
+int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
+		struct drm_file *file_priv);
+
+/* set vm_flags; the vm attributes can also be changed here. */
+int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
+
+/*
+ * destroy the allocated memory region.
+ *	- the gem handle and the physical memory region pointed to by
+ *	the gem object are released by drm_gem_handle_delete().
+ */
+int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
+		struct drm_device *dev, unsigned int handle);
+
+#endif
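
to_exynos_gem_obj() above is the usual container_of() pattern: struct exynos_drm_gem_obj embeds struct drm_gem_object as its base member, so a pointer to the embedded base can be converted back into a pointer to the containing driver object by subtracting the member's offset. The sketch below shows the same pattern as a self-contained program; the struct names are stand-ins, not the kernel definitions.

/* Self-contained illustration of the container_of() pattern behind
 * to_exynos_gem_obj(); the structs here are stand-ins for the real ones.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct gem_object {
	unsigned int size;
};

struct exynos_gem_obj {
	struct gem_object base;	/* embedded, like drm_gem_object base */
	void *entry;		/* driver-private buffer entry */
};

#define to_exynos_gem_obj(x) container_of(x, struct exynos_gem_obj, base)

int main(void)
{
	struct exynos_gem_obj obj = { { 4096 }, NULL };
	struct gem_object *base = &obj.base;	/* what core code passes around */

	/* recover the containing driver object from the embedded base */
	printf("recovered == original: %d\n", to_exynos_gem_obj(base) == &obj);
	return 0;
}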

+ 1 - 1
drivers/gpu/drm/i915/dvo_ch7017.c

@@ -227,7 +227,7 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
 	default:
 		DRM_DEBUG_KMS("ch701x not detected, got %d: from %s "
 			      "slave %d.\n",
-			      val, adapter->name,dvo->slave_addr);
+			      val, adapter->name, dvo->slave_addr);
 		goto fail;
 	}
 

+ 2 - 2
drivers/gpu/drm/i915/dvo_ch7xxx.c

@@ -111,7 +111,7 @@ static char *ch7xxx_get_id(uint8_t vid)
 /** Reads an 8 bit register */
 static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 {
-	struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
+	struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
 	struct i2c_adapter *adapter = dvo->i2c_bus;
 	u8 out_buf[2];
 	u8 in_buf[2];
@@ -303,7 +303,7 @@ static void ch7xxx_dump_regs(struct intel_dvo_device *dvo)
 
 	for (i = 0; i < CH7xxx_NUM_REGS; i++) {
 		uint8_t val;
-		if ((i % 8) == 0 )
+		if ((i % 8) == 0)
 			DRM_LOG_KMS("\n %02X: ", i);
 		ch7xxx_readb(dvo, i, &val);
 		DRM_LOG_KMS("%02X ", val);

+ 3 - 3
drivers/gpu/drm/i915/dvo_ivch.c

@@ -344,8 +344,8 @@ static void ivch_mode_set(struct intel_dvo_device *dvo,
 			   (adjusted_mode->hdisplay - 1)) >> 2;
 		y_ratio = (((mode->vdisplay - 1) << 16) /
 			   (adjusted_mode->vdisplay - 1)) >> 2;
-		ivch_write (dvo, VR42, x_ratio);
-		ivch_write (dvo, VR41, y_ratio);
+		ivch_write(dvo, VR42, x_ratio);
+		ivch_write(dvo, VR41, y_ratio);
 	} else {
 		vr01 &= ~VR01_PANEL_FIT_ENABLE;
 		vr40 &= ~VR40_CLOCK_GATING_ENABLE;
@@ -410,7 +410,7 @@ static void ivch_destroy(struct intel_dvo_device *dvo)
 	}
 }
 
-struct intel_dvo_dev_ops ivch_ops= {
+struct intel_dvo_dev_ops ivch_ops = {
 	.init = ivch_init,
 	.dpms = ivch_dpms,
 	.mode_valid = ivch_mode_valid,

+ 1 - 1
drivers/gpu/drm/i915/dvo_sil164.c

@@ -104,7 +104,7 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
 
 static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
 {
-	struct sil164_priv *sil= dvo->dev_priv;
+	struct sil164_priv *sil = dvo->dev_priv;
 	struct i2c_adapter *adapter = dvo->i2c_bus;
 	uint8_t out_buf[2];
 	struct i2c_msg msg = {

+ 7 - 7
drivers/gpu/drm/i915/dvo_tfp410.c

@@ -56,7 +56,7 @@
 #define TFP410_CTL_2_MDI	(1<<0)
 
 #define TFP410_CTL_3		0x0A
-#define TFP410_CTL_3_DK_MASK 	(0x7<<5)
+#define TFP410_CTL_3_DK_MASK	(0x7<<5)
 #define TFP410_CTL_3_DK		(1<<5)
 #define TFP410_CTL_3_DKEN	(1<<4)
 #define TFP410_CTL_3_CTL_MASK	(0x7<<1)
@@ -225,12 +225,12 @@ static void tfp410_mode_set(struct intel_dvo_device *dvo,
 			    struct drm_display_mode *mode,
 			    struct drm_display_mode *adjusted_mode)
 {
-    /* As long as the basics are set up, since we don't have clock dependencies
-     * in the mode setup, we can just leave the registers alone and everything
-     * will work fine.
-     */
-    /* don't do much */
-    return;
+	/* As long as the basics are set up, since we don't have clock dependencies
+	* in the mode setup, we can just leave the registers alone and everything
+	* will work fine.
+	*/
+	/* don't do much */
+	return;
 }
 
 /* set the tfp410 power state */

+ 19 - 19
drivers/gpu/drm/i915/i915_debugfs.c

@@ -98,12 +98,12 @@ static const char *get_pin_flag(struct drm_i915_gem_object *obj)
 
 static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
 {
-    switch (obj->tiling_mode) {
-    default:
-    case I915_TILING_NONE: return " ";
-    case I915_TILING_X: return "X";
-    case I915_TILING_Y: return "Y";
-    }
+	switch (obj->tiling_mode) {
+	default:
+	case I915_TILING_NONE: return " ";
+	case I915_TILING_X: return "X";
+	case I915_TILING_Y: return "Y";
+	}
 }
 
 static const char *cache_level_str(int type)
@@ -217,7 +217,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
 			++mappable_count; \
 		} \
 	} \
-} while(0)
+} while (0)
 
 static int i915_gem_object_info(struct seq_file *m, void* data)
 {
@@ -1293,12 +1293,12 @@ i915_wedged_read(struct file *filp,
 	char buf[80];
 	int len;
 
-	len = snprintf(buf, sizeof (buf),
+	len = snprintf(buf, sizeof(buf),
 		       "wedged :  %d\n",
 		       atomic_read(&dev_priv->mm.wedged));
 
-	if (len > sizeof (buf))
-		len = sizeof (buf);
+	if (len > sizeof(buf))
+		len = sizeof(buf);
 
 	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
 }
@@ -1314,7 +1314,7 @@ i915_wedged_write(struct file *filp,
 	int val = 1;
 
 	if (cnt > 0) {
-		if (cnt > sizeof (buf) - 1)
+		if (cnt > sizeof(buf) - 1)
 			return -EINVAL;
 
 		if (copy_from_user(buf, ubuf, cnt))
@@ -1357,11 +1357,11 @@ i915_max_freq_read(struct file *filp,
 	char buf[80];
 	int len;
 
-	len = snprintf(buf, sizeof (buf),
+	len = snprintf(buf, sizeof(buf),
 		       "max freq: %d\n", dev_priv->max_delay * 50);
 
-	if (len > sizeof (buf))
-		len = sizeof (buf);
+	if (len > sizeof(buf))
+		len = sizeof(buf);
 
 	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
 }
@@ -1378,7 +1378,7 @@ i915_max_freq_write(struct file *filp,
 	int val = 1;
 
 	if (cnt > 0) {
-		if (cnt > sizeof (buf) - 1)
+		if (cnt > sizeof(buf) - 1)
 			return -EINVAL;
 
 		if (copy_from_user(buf, ubuf, cnt))
@@ -1432,12 +1432,12 @@ i915_cache_sharing_read(struct file *filp,
 	snpcr = I915_READ(GEN6_MBCUNIT_SNPCR);
 	mutex_unlock(&dev_priv->dev->struct_mutex);
 
-	len = snprintf(buf, sizeof (buf),
+	len = snprintf(buf, sizeof(buf),
 		       "%d\n", (snpcr & GEN6_MBC_SNPCR_MASK) >>
 		       GEN6_MBC_SNPCR_SHIFT);
 
-	if (len > sizeof (buf))
-		len = sizeof (buf);
+	if (len > sizeof(buf))
+		len = sizeof(buf);
 
 	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
 }
@@ -1455,7 +1455,7 @@ i915_cache_sharing_write(struct file *filp,
 	int val = 1;
 
 	if (cnt > 0) {
-		if (cnt > sizeof (buf) - 1)
+		if (cnt > sizeof(buf) - 1)
 			return -EINVAL;
 
 		if (copy_from_user(buf, ubuf, cnt))

+ 25 - 23
drivers/gpu/drm/i915/i915_dma.c

@@ -884,7 +884,7 @@ static int i915_get_bridge_dev(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0,0));
+	dev_priv->bridge_dev = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
 	if (!dev_priv->bridge_dev) {
 		DRM_ERROR("bridge device not found\n");
 		return -1;
@@ -1730,10 +1730,10 @@ static DEFINE_SPINLOCK(mchdev_lock);
  */
 unsigned long i915_read_mch_val(void)
 {
-  	struct drm_i915_private *dev_priv;
+	struct drm_i915_private *dev_priv;
 	unsigned long chipset_val, graphics_val, ret = 0;
 
-  	spin_lock(&mchdev_lock);
+	spin_lock(&mchdev_lock);
 	if (!i915_mch_dev)
 		goto out_unlock;
 	dev_priv = i915_mch_dev;
@@ -1744,9 +1744,9 @@ unsigned long i915_read_mch_val(void)
 	ret = chipset_val + graphics_val;
 
 out_unlock:
-  	spin_unlock(&mchdev_lock);
+	spin_unlock(&mchdev_lock);
 
-  	return ret;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(i915_read_mch_val);
 
@@ -1757,10 +1757,10 @@ EXPORT_SYMBOL_GPL(i915_read_mch_val);
  */
 bool i915_gpu_raise(void)
 {
-  	struct drm_i915_private *dev_priv;
+	struct drm_i915_private *dev_priv;
 	bool ret = true;
 
-  	spin_lock(&mchdev_lock);
+	spin_lock(&mchdev_lock);
 	if (!i915_mch_dev) {
 		ret = false;
 		goto out_unlock;
@@ -1771,9 +1771,9 @@ bool i915_gpu_raise(void)
 		dev_priv->max_delay--;
 
 out_unlock:
-  	spin_unlock(&mchdev_lock);
+	spin_unlock(&mchdev_lock);
 
-  	return ret;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(i915_gpu_raise);
 
@@ -1785,10 +1785,10 @@ EXPORT_SYMBOL_GPL(i915_gpu_raise);
  */
 bool i915_gpu_lower(void)
 {
-  	struct drm_i915_private *dev_priv;
+	struct drm_i915_private *dev_priv;
 	bool ret = true;
 
-  	spin_lock(&mchdev_lock);
+	spin_lock(&mchdev_lock);
 	if (!i915_mch_dev) {
 		ret = false;
 		goto out_unlock;
@@ -1799,9 +1799,9 @@ bool i915_gpu_lower(void)
 		dev_priv->max_delay++;
 
 out_unlock:
-  	spin_unlock(&mchdev_lock);
+	spin_unlock(&mchdev_lock);
 
-  	return ret;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(i915_gpu_lower);
 
@@ -1812,10 +1812,10 @@ EXPORT_SYMBOL_GPL(i915_gpu_lower);
  */
 bool i915_gpu_busy(void)
 {
-  	struct drm_i915_private *dev_priv;
+	struct drm_i915_private *dev_priv;
 	bool ret = false;
 
-  	spin_lock(&mchdev_lock);
+	spin_lock(&mchdev_lock);
 	if (!i915_mch_dev)
 		goto out_unlock;
 	dev_priv = i915_mch_dev;
@@ -1823,9 +1823,9 @@ bool i915_gpu_busy(void)
 	ret = dev_priv->busy;
 
 out_unlock:
-  	spin_unlock(&mchdev_lock);
+	spin_unlock(&mchdev_lock);
 
-  	return ret;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(i915_gpu_busy);
 
@@ -1837,10 +1837,10 @@ EXPORT_SYMBOL_GPL(i915_gpu_busy);
  */
 bool i915_gpu_turbo_disable(void)
 {
-  	struct drm_i915_private *dev_priv;
+	struct drm_i915_private *dev_priv;
 	bool ret = true;
 
-  	spin_lock(&mchdev_lock);
+	spin_lock(&mchdev_lock);
 	if (!i915_mch_dev) {
 		ret = false;
 		goto out_unlock;
@@ -1853,9 +1853,9 @@ bool i915_gpu_turbo_disable(void)
 		ret = false;
 
 out_unlock:
-  	spin_unlock(&mchdev_lock);
+	spin_unlock(&mchdev_lock);
 
-  	return ret;
+	return ret;
 }
 EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
 
@@ -1948,7 +1948,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 
 	agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
 
-        dev_priv->mm.gtt_mapping =
+	dev_priv->mm.gtt_mapping =
 		io_mapping_create_wc(dev->agp->base, agp_size);
 	if (dev_priv->mm.gtt_mapping == NULL) {
 		ret = -EIO;
@@ -2035,7 +2035,9 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	spin_lock_init(&dev_priv->error_lock);
 	spin_lock_init(&dev_priv->rps_lock);
 
-	if (IS_MOBILE(dev) || !IS_GEN2(dev))
+	if (IS_IVYBRIDGE(dev))
+		dev_priv->num_pipe = 3;
+	else if (IS_MOBILE(dev) || !IS_GEN2(dev))
 		dev_priv->num_pipe = 2;
 	else
 		dev_priv->num_pipe = 1;

+ 53 - 10
drivers/gpu/drm/i915/i915_drv.c

@@ -79,11 +79,11 @@ MODULE_PARM_DESC(lvds_downclock,
 		"Use panel (LVDS/eDP) downclocking for power savings "
 		"(default: false)");
 
-unsigned int i915_panel_use_ssc __read_mostly = 1;
+unsigned int i915_panel_use_ssc __read_mostly = -1;
 module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600);
 MODULE_PARM_DESC(lvds_use_ssc,
 		"Use Spread Spectrum Clock with panels [LVDS/eDP] "
-		"(default: true)");
+		"(default: auto from VBT)");
 
 int i915_vbt_sdvo_panel_type __read_mostly = -1;
 module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600);
@@ -294,7 +294,7 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
 #define INTEL_PCH_CPT_DEVICE_ID_TYPE	0x1c00
 #define INTEL_PCH_PPT_DEVICE_ID_TYPE	0x1e00
 
-void intel_detect_pch (struct drm_device *dev)
+void intel_detect_pch(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct pci_dev *pch;
@@ -377,7 +377,7 @@ void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
 
 void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
 {
-	if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES ) {
+	if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
 		int loop = 500;
 		u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
 		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
@@ -471,6 +471,9 @@ static int i915_drm_thaw(struct drm_device *dev)
 		error = i915_gem_init_ringbuffer(dev);
 		mutex_unlock(&dev->struct_mutex);
 
+		if (HAS_PCH_SPLIT(dev))
+			ironlake_init_pch_refclk(dev);
+
 		drm_mode_config_reset(dev);
 		drm_irq_install(dev);
 
@@ -770,12 +773,12 @@ static int i915_pm_poweroff(struct device *dev)
 }
 
 static const struct dev_pm_ops i915_pm_ops = {
-     .suspend = i915_pm_suspend,
-     .resume = i915_pm_resume,
-     .freeze = i915_pm_freeze,
-     .thaw = i915_pm_thaw,
-     .poweroff = i915_pm_poweroff,
-     .restore = i915_pm_resume,
+	.suspend = i915_pm_suspend,
+	.resume = i915_pm_resume,
+	.freeze = i915_pm_freeze,
+	.thaw = i915_pm_thaw,
+	.poweroff = i915_pm_poweroff,
+	.restore = i915_pm_resume,
 };
 
 static struct vm_operations_struct i915_gem_vm_ops = {
@@ -895,3 +898,43 @@ module_exit(i915_exit);
 MODULE_AUTHOR(DRIVER_AUTHOR);
 MODULE_DESCRIPTION(DRIVER_DESC);
 MODULE_LICENSE("GPL and additional rights");
+
+/* We give fast paths for the really cool registers */
+#define NEEDS_FORCE_WAKE(dev_priv, reg) \
+	(((dev_priv)->info->gen >= 6) && \
+	((reg) < 0x40000) && \
+	((reg) != FORCEWAKE))
+
+#define __i915_read(x, y) \
+u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
+	u##x val = 0; \
+	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+		gen6_gt_force_wake_get(dev_priv); \
+		val = read##y(dev_priv->regs + reg); \
+		gen6_gt_force_wake_put(dev_priv); \
+	} else { \
+		val = read##y(dev_priv->regs + reg); \
+	} \
+	trace_i915_reg_rw(false, reg, val, sizeof(val)); \
+	return val; \
+}
+
+__i915_read(8, b)
+__i915_read(16, w)
+__i915_read(32, l)
+__i915_read(64, q)
+#undef __i915_read
+
+#define __i915_write(x, y) \
+void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
+	trace_i915_reg_rw(true, reg, val, sizeof(val)); \
+	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
+		__gen6_gt_wait_for_fifo(dev_priv); \
+	} \
+	write##y(val, dev_priv->regs + reg); \
+}
+__i915_write(8, b)
+__i915_write(16, w)
+__i915_write(32, l)
+__i915_write(64, q)
+#undef __i915_write
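
The block above moves the register accessors out of line: the i915_drv.h hunk later in this commit reduces __i915_read/__i915_write to bare declarations, and the definitions now live here behind the NEEDS_FORCE_WAKE() check. For reference, the 32-bit instantiation __i915_read(32, l) expands to roughly the following (written out by hand from the macro above, not an addition to the patch):

u32 i915_read32(struct drm_i915_private *dev_priv, u32 reg)
{
	u32 val = 0;

	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) {
		/* gen6+: hold forcewake around reads of most register ranges */
		gen6_gt_force_wake_get(dev_priv);
		val = readl(dev_priv->regs + reg);
		gen6_gt_force_wake_put(dev_priv);
	} else {
		val = readl(dev_priv->regs + reg);
	}
	trace_i915_reg_rw(false, reg, val, sizeof(val));
	return val;
}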

+ 44 - 59
drivers/gpu/drm/i915/i915_drv.h

@@ -139,7 +139,6 @@ struct sdvo_device_mapping {
 	u8 slave_addr;
 	u8 dvo_wiring;
 	u8 i2c_pin;
-	u8 i2c_speed;
 	u8 ddc_pin;
 };
 
@@ -209,6 +208,8 @@ struct drm_i915_display_funcs {
 			     struct drm_display_mode *adjusted_mode,
 			     int x, int y,
 			     struct drm_framebuffer *old_fb);
+	void (*write_eld)(struct drm_connector *connector,
+			  struct drm_crtc *crtc);
 	void (*fdi_link_train)(struct drm_crtc *crtc);
 	void (*init_clock_gating)(struct drm_device *dev);
 	void (*init_pch_clock_gating)(struct drm_device *dev);
@@ -226,26 +227,26 @@ struct drm_i915_display_funcs {
 
 struct intel_device_info {
 	u8 gen;
-	u8 is_mobile : 1;
-	u8 is_i85x : 1;
-	u8 is_i915g : 1;
-	u8 is_i945gm : 1;
-	u8 is_g33 : 1;
-	u8 need_gfx_hws : 1;
-	u8 is_g4x : 1;
-	u8 is_pineview : 1;
-	u8 is_broadwater : 1;
-	u8 is_crestline : 1;
-	u8 is_ivybridge : 1;
-	u8 has_fbc : 1;
-	u8 has_pipe_cxsr : 1;
-	u8 has_hotplug : 1;
-	u8 cursor_needs_physical : 1;
-	u8 has_overlay : 1;
-	u8 overlay_needs_physical : 1;
-	u8 supports_tv : 1;
-	u8 has_bsd_ring : 1;
-	u8 has_blt_ring : 1;
+	u8 is_mobile:1;
+	u8 is_i85x:1;
+	u8 is_i915g:1;
+	u8 is_i945gm:1;
+	u8 is_g33:1;
+	u8 need_gfx_hws:1;
+	u8 is_g4x:1;
+	u8 is_pineview:1;
+	u8 is_broadwater:1;
+	u8 is_crestline:1;
+	u8 is_ivybridge:1;
+	u8 has_fbc:1;
+	u8 has_pipe_cxsr:1;
+	u8 has_hotplug:1;
+	u8 cursor_needs_physical:1;
+	u8 has_overlay:1;
+	u8 overlay_needs_physical:1;
+	u8 supports_tv:1;
+	u8 has_bsd_ring:1;
+	u8 has_blt_ring:1;
 };
 
 enum no_fbc_reason {
@@ -347,7 +348,6 @@ typedef struct drm_i915_private {
 	/* LVDS info */
 	int backlight_level;  /* restore backlight to this value */
 	bool backlight_enabled;
-	struct drm_display_mode *panel_fixed_mode;
 	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
 	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
 
@@ -357,6 +357,7 @@ typedef struct drm_i915_private {
 	unsigned int lvds_vbt:1;
 	unsigned int int_crt_support:1;
 	unsigned int lvds_use_ssc:1;
+	unsigned int display_clock_mode:1;
 	int lvds_ssc_freq;
 	struct {
 		int rate;
@@ -672,10 +673,9 @@ typedef struct drm_i915_private {
 	unsigned int lvds_border_bits;
 	/* Panel fitter placement and size for Ironlake+ */
 	u32 pch_pf_pos, pch_pf_size;
-	int panel_t3, panel_t12;
 
-	struct drm_crtc *plane_to_crtc_mapping[2];
-	struct drm_crtc *pipe_to_crtc_mapping[2];
+	struct drm_crtc *plane_to_crtc_mapping[3];
+	struct drm_crtc *pipe_to_crtc_mapping[3];
 	wait_queue_head_t pending_flip_queue;
 	bool flip_pending_is_done;
 
@@ -759,19 +759,19 @@ struct drm_i915_gem_object {
 	 * (has pending rendering), and is not set if it's on inactive (ready
 	 * to be unbound).
 	 */
-	unsigned int active : 1;
+	unsigned int active:1;
 
 	/**
 	 * This is set if the object has been written to since last bound
 	 * to the GTT
 	 */
-	unsigned int dirty : 1;
+	unsigned int dirty:1;
 
 	/**
 	 * This is set if the object has been written to since the last
 	 * GPU flush.
 	 */
-	unsigned int pending_gpu_write : 1;
+	unsigned int pending_gpu_write:1;
 
 	/**
 	 * Fence register bits (if any) for this object.  Will be set
@@ -780,18 +780,18 @@ struct drm_i915_gem_object {
 	 *
 	 * Size: 4 bits for 16 fences + sign (for FENCE_REG_NONE)
 	 */
-	signed int fence_reg : 5;
+	signed int fence_reg:5;
 
 	/**
 	 * Advice: are the backing pages purgeable?
 	 */
-	unsigned int madv : 2;
+	unsigned int madv:2;
 
 	/**
 	 * Current tiling mode for the object.
 	 */
-	unsigned int tiling_mode : 2;
-	unsigned int tiling_changed : 1;
+	unsigned int tiling_mode:2;
+	unsigned int tiling_changed:1;
 
 	/** How many users have pinned this object in GTT space. The following
 	 * users can each hold at most one reference: pwrite/pread, pin_ioctl
@@ -802,22 +802,22 @@ struct drm_i915_gem_object {
 	 *
 	 * In the worst case this is 1 + 1 + 1 + 2*2 = 7. That would fit into 3
 	 * bits with absolutely no headroom. So use 4 bits. */
-	unsigned int pin_count : 4;
+	unsigned int pin_count:4;
 #define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
 
 	/**
 	 * Is the object at the current location in the gtt mappable and
 	 * fenceable? Used to avoid costly recalculations.
 	 */
-	unsigned int map_and_fenceable : 1;
+	unsigned int map_and_fenceable:1;
 
 	/**
 	 * Whether the current gtt mapping needs to be mappable (and isn't just
 	 * mappable by accident). Track pin and fault separate for a more
 	 * accurate mappable working set.
 	 */
-	unsigned int fault_mappable : 1;
-	unsigned int pin_mappable : 1;
+	unsigned int fault_mappable:1;
+	unsigned int pin_mappable:1;
 
 	/*
 	 * Is the GPU currently using a fence to access this buffer,
@@ -1056,7 +1056,7 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 void
 i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 
-void intel_enable_asle (struct drm_device *dev);
+void intel_enable_asle(struct drm_device *dev);
 
 #ifdef CONFIG_DEBUG_FS
 extern void i915_destroy_error_state(struct drm_device *dev);
@@ -1146,7 +1146,7 @@ int i915_gem_dumb_create(struct drm_file *file_priv,
 int i915_gem_mmap_gtt(struct drm_file *file_priv, struct drm_device *dev,
 		      uint32_t handle, uint64_t *offset);
 int i915_gem_dumb_destroy(struct drm_file *file_priv, struct drm_device *dev,
-			  uint32_t handle);			  
+			  uint32_t handle);
 /**
  * Returns true if seq1 is later than seq2.
  */
@@ -1301,10 +1301,11 @@ extern int intel_modeset_vga_set_state(struct drm_device *dev, bool state);
 extern bool intel_fbc_enabled(struct drm_device *dev);
 extern void intel_disable_fbc(struct drm_device *dev);
 extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
+extern void ironlake_init_pch_refclk(struct drm_device *dev);
 extern void ironlake_enable_rc6(struct drm_device *dev);
 extern void gen6_set_rps(struct drm_device *dev, u8 val);
-extern void intel_detect_pch (struct drm_device *dev);
-extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
+extern void intel_detect_pch(struct drm_device *dev);
+extern int intel_trans_dp_port_sel(struct drm_crtc *crtc);
 
 /* overlay */
 #ifdef CONFIG_DEBUG_FS
@@ -1354,18 +1355,7 @@ void __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv);
 	((reg) != FORCEWAKE))
 
 #define __i915_read(x, y) \
-static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
-	u##x val = 0; \
-	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
-		gen6_gt_force_wake_get(dev_priv); \
-		val = read##y(dev_priv->regs + reg); \
-		gen6_gt_force_wake_put(dev_priv); \
-	} else { \
-		val = read##y(dev_priv->regs + reg); \
-	} \
-	trace_i915_reg_rw(false, reg, val, sizeof(val)); \
-	return val; \
-}
+	u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg);
 
 __i915_read(8, b)
 __i915_read(16, w)
@@ -1374,13 +1364,8 @@ __i915_read(64, q)
 #undef __i915_read
 
 #define __i915_write(x, y) \
-static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
-	trace_i915_reg_rw(true, reg, val, sizeof(val)); \
-	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
-		__gen6_gt_wait_for_fifo(dev_priv); \
-	} \
-	write##y(val, dev_priv->regs + reg); \
-}
+	void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val);
+
 __i915_write(8, b)
 __i915_write(16, w)
 __i915_write(32, l)

+ 14 - 99
drivers/gpu/drm/i915/i915_gem.c

@@ -179,7 +179,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
 	mutex_unlock(&dev->struct_mutex);
 
 	args->aper_size = dev_priv->mm.gtt_total;
-	args->aper_available_size = args->aper_size -pinned;
+	args->aper_available_size = args->aper_size - pinned;
 
 	return 0;
 }
@@ -195,6 +195,8 @@ i915_gem_create(struct drm_file *file,
 	u32 handle;
 
 	size = roundup(size, PAGE_SIZE);
+	if (size == 0)
+		return -EINVAL;
 
 	/* Allocate the new object */
 	obj = i915_gem_alloc_object(dev, size);
@@ -800,11 +802,11 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev,
 		if (IS_ERR(page))
 			return PTR_ERR(page);
 
-		vaddr = kmap_atomic(page, KM_USER0);
+		vaddr = kmap_atomic(page);
 		ret = __copy_from_user_inatomic(vaddr + page_offset,
 						user_data,
 						page_length);
-		kunmap_atomic(vaddr, KM_USER0);
+		kunmap_atomic(vaddr);
 
 		set_page_dirty(page);
 		mark_page_accessed(page);
@@ -1264,74 +1266,6 @@ out:
 	}
 }
 
-/**
- * i915_gem_create_mmap_offset - create a fake mmap offset for an object
- * @obj: obj in question
- *
- * GEM memory mapping works by handing back to userspace a fake mmap offset
- * it can use in a subsequent mmap(2) call.  The DRM core code then looks
- * up the object based on the offset and sets up the various memory mapping
- * structures.
- *
- * This routine allocates and attaches a fake offset for @obj.
- */
-static int
-i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj)
-{
-	struct drm_device *dev = obj->base.dev;
-	struct drm_gem_mm *mm = dev->mm_private;
-	struct drm_map_list *list;
-	struct drm_local_map *map;
-	int ret = 0;
-
-	/* Set the object up for mmap'ing */
-	list = &obj->base.map_list;
-	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
-	if (!list->map)
-		return -ENOMEM;
-
-	map = list->map;
-	map->type = _DRM_GEM;
-	map->size = obj->base.size;
-	map->handle = obj;
-
-	/* Get a DRM GEM mmap offset allocated... */
-	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
-						    obj->base.size / PAGE_SIZE,
-						    0, 0);
-	if (!list->file_offset_node) {
-		DRM_ERROR("failed to allocate offset for bo %d\n",
-			  obj->base.name);
-		ret = -ENOSPC;
-		goto out_free_list;
-	}
-
-	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
-						  obj->base.size / PAGE_SIZE,
-						  0);
-	if (!list->file_offset_node) {
-		ret = -ENOMEM;
-		goto out_free_list;
-	}
-
-	list->hash.key = list->file_offset_node->start;
-	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
-	if (ret) {
-		DRM_ERROR("failed to add to map hash\n");
-		goto out_free_mm;
-	}
-
-	return 0;
-
-out_free_mm:
-	drm_mm_put_block(list->file_offset_node);
-out_free_list:
-	kfree(list->map);
-	list->map = NULL;
-
-	return ret;
-}
-
 /**
  * i915_gem_release_mmap - remove physical page mappings
  * @obj: obj in question
@@ -1360,19 +1294,6 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
 	obj->fault_mappable = false;
 }
 
-static void
-i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
-{
-	struct drm_device *dev = obj->base.dev;
-	struct drm_gem_mm *mm = dev->mm_private;
-	struct drm_map_list *list = &obj->base.map_list;
-
-	drm_ht_remove_item(&mm->offset_hash, &list->hash);
-	drm_mm_put_block(list->file_offset_node);
-	kfree(list->map);
-	list->map = NULL;
-}
-
 static uint32_t
 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
 {
@@ -1485,7 +1406,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
 	}
 
 	if (!obj->base.map_list.map) {
-		ret = i915_gem_create_mmap_offset(obj);
+		ret = drm_gem_create_mmap_offset(&obj->base);
 		if (ret)
 			goto out;
 	}
@@ -1557,7 +1478,7 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
 		obj->pages[i] = page;
 	}
 
-	if (obj->tiling_mode != I915_TILING_NONE)
+	if (i915_gem_object_needs_bit17_swizzle(obj))
 		i915_gem_object_do_bit_17_swizzle(obj);
 
 	return 0;
@@ -1579,7 +1500,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 
 	BUG_ON(obj->madv == __I915_MADV_PURGED);
 
-	if (obj->tiling_mode != I915_TILING_NONE)
+	if (i915_gem_object_needs_bit17_swizzle(obj))
 		i915_gem_object_save_bit_17_swizzle(obj);
 
 	if (obj->madv == I915_MADV_DONTNEED)
@@ -1856,7 +1777,7 @@ void i915_gem_reset(struct drm_device *dev)
 	 * lost bo to the inactive list.
 	 */
 	while (!list_empty(&dev_priv->mm.flushing_list)) {
-		obj= list_first_entry(&dev_priv->mm.flushing_list,
+		obj = list_first_entry(&dev_priv->mm.flushing_list,
 				      struct drm_i915_gem_object,
 				      mm_list);
 
@@ -1922,7 +1843,7 @@ i915_gem_retire_requests_ring(struct intel_ring_buffer *ring)
 	while (!list_empty(&ring->active_list)) {
 		struct drm_i915_gem_object *obj;
 
-		obj= list_first_entry(&ring->active_list,
+		obj = list_first_entry(&ring->active_list,
 				      struct drm_i915_gem_object,
 				      ring_list);
 
@@ -2272,14 +2193,8 @@ int
 i915_gpu_idle(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	bool lists_empty;
 	int ret, i;
 
-	lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
-		       list_empty(&dev_priv->mm.active_list));
-	if (lists_empty)
-		return 0;
-
 	/* Flush everything onto the inactive list. */
 	for (i = 0; i < I915_NUM_RINGS; i++) {
 		ret = i915_ring_idle(&dev_priv->ring[i]);
@@ -2882,7 +2797,7 @@ i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
 
 	fenceable =
 		obj->gtt_space->size == fence_size &&
-		(obj->gtt_space->start & (fence_alignment -1)) == 0;
+		(obj->gtt_space->start & (fence_alignment - 1)) == 0;
 
 	mappable =
 		obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
@@ -3598,7 +3513,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
 			 */
 			request = kzalloc(sizeof(*request), GFP_KERNEL);
 			if (request)
-				ret = i915_add_request(obj->ring, NULL,request);
+				ret = i915_add_request(obj->ring, NULL, request);
 			else
 				ret = -ENOMEM;
 		}
@@ -3623,7 +3538,7 @@ int
 i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv)
 {
-    return i915_gem_ring_throttle(dev, file_priv);
+	return i915_gem_ring_throttle(dev, file_priv);
 }
 
 int
@@ -3752,7 +3667,7 @@ static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
 	trace_i915_gem_object_destroy(obj);
 
 	if (obj->base.map_list.map)
-		i915_gem_free_mmap_offset(obj);
+		drm_gem_free_mmap_offset(&obj->base);
 
 	drm_gem_object_release(&obj->base);
 	i915_gem_info_remove_obj(dev_priv, obj->base.size);

+ 4 - 4
drivers/gpu/drm/i915/i915_gem_debug.c

@@ -72,7 +72,7 @@ i915_verify_lists(struct drm_device *dev)
 			break;
 		} else if (!obj->active ||
 			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 ||
-			   list_empty(&obj->gpu_write_list)){
+			   list_empty(&obj->gpu_write_list)) {
 			DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n",
 				  obj,
 				  obj->active,
@@ -157,7 +157,7 @@ i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
 	for (page = 0; page < obj->size / PAGE_SIZE; page++) {
 		int i;
 
-		backing_map = kmap_atomic(obj->pages[page], KM_USER0);
+		backing_map = kmap_atomic(obj->pages[page]);
 
 		if (backing_map == NULL) {
 			DRM_ERROR("failed to map backing page\n");
@@ -181,13 +181,13 @@ i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
 				}
 			}
 		}
-		kunmap_atomic(backing_map, KM_USER0);
+		kunmap_atomic(backing_map);
 		backing_map = NULL;
 	}
 
  out:
 	if (backing_map != NULL)
-		kunmap_atomic(backing_map, KM_USER0);
+		kunmap_atomic(backing_map);
 	iounmap(gtt_mapping);
 
 	/* give syslog time to catch up */

+ 1 - 1
drivers/gpu/drm/i915/i915_gem_evict.c

@@ -122,7 +122,7 @@ i915_gem_evict_something(struct drm_device *dev, int min_size,
 			goto found;
 	}
 	list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
-		if (! obj->base.write_domain || obj->pin_count)
+		if (!obj->base.write_domain || obj->pin_count)
 			continue;
 
 		if (mark_free(obj, &unwind_list))

+ 2 - 1
drivers/gpu/drm/i915/i915_gem_execbuffer.c

@@ -784,7 +784,8 @@ i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
 	}
 
 	from->sync_seqno[idx] = seqno;
-	return intel_ring_sync(to, from, seqno - 1);
+
+	return to->sync_to(to, from, seqno - 1);
 }
 
 static int

+ 30 - 0
drivers/gpu/drm/i915/i915_gem_gtt.c

@@ -49,6 +49,28 @@ static unsigned int cache_level_to_agp_type(struct drm_device *dev,
 	}
 }
 
+static bool do_idling(struct drm_i915_private *dev_priv)
+{
+	bool ret = dev_priv->mm.interruptible;
+
+	if (unlikely(dev_priv->mm.gtt->do_idle_maps)) {
+		dev_priv->mm.interruptible = false;
+		if (i915_gpu_idle(dev_priv->dev)) {
+			DRM_ERROR("Couldn't idle GPU\n");
+			/* Wait a bit, in hopes it avoids the hang */
+			udelay(10);
+		}
+	}
+
+	return ret;
+}
+
+static void undo_idling(struct drm_i915_private *dev_priv, bool interruptible)
+{
+	if (unlikely(dev_priv->mm.gtt->do_idle_maps))
+		dev_priv->mm.interruptible = interruptible;
+}
+
 void i915_gem_restore_gtt_mappings(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -117,6 +139,12 @@ void i915_gem_gtt_rebind_object(struct drm_i915_gem_object *obj,
 
 void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 {
+	struct drm_device *dev = obj->base.dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool interruptible;
+
+	interruptible = do_idling(dev_priv);
+
 	intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
 			      obj->base.size >> PAGE_SHIFT);
 
@@ -124,4 +152,6 @@ void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
 		intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
 		obj->sg_list = NULL;
 	}
+
+	undo_idling(dev_priv, interruptible);
 }
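The new do_idling()/undo_idling() helpers above bracket the GTT unbind: save the current interruptible flag, force the GPU idle when the hardware needs idle maps, and restore the flag once the range has been cleared. A minimal sketch of that save/act/restore bracket, using a simplified state structure with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the driver state touched by the helpers. */
struct demo_private {
	bool interruptible;   /* may the caller be interrupted by signals? */
	bool do_idle_maps;    /* quirk: GPU must be idle before unmapping  */
};

static bool demo_do_idling(struct demo_private *priv)
{
	bool was_interruptible = priv->interruptible;

	if (priv->do_idle_maps) {
		priv->interruptible = false;
		puts("idling GPU before touching the GTT");
	}
	return was_interruptible;
}

static void demo_undo_idling(struct demo_private *priv, bool was_interruptible)
{
	if (priv->do_idle_maps)
		priv->interruptible = was_interruptible;
}

static void demo_unbind(struct demo_private *priv)
{
	bool was_interruptible = demo_do_idling(priv);

	puts("clearing GTT range");   /* stands in for intel_gtt_clear_range() */

	demo_undo_idling(priv, was_interruptible);
}

int main(void)
{
	struct demo_private priv = { .interruptible = true, .do_idle_maps = true };

	demo_unbind(&priv);
	return priv.interruptible ? 0 : 1;   /* flag restored, so exit 0 */
}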

+ 4 - 11
drivers/gpu/drm/i915/i915_gem_tiling.c

@@ -92,7 +92,10 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 	uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
 	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
 
-	if (INTEL_INFO(dev)->gen >= 5) {
+	if (INTEL_INFO(dev)->gen >= 6) {
+		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+	} else if (IS_GEN5(dev)) {
 		/* On Ironlake whatever DRAM config, GPU always do
 		 * same swizzling setup.
 		 */
@@ -440,14 +443,9 @@ i915_gem_swizzle_page(struct page *page)
 void
 i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->base.dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	int page_count = obj->base.size >> PAGE_SHIFT;
 	int i;
 
-	if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
-		return;
-
 	if (obj->bit_17 == NULL)
 		return;
 
@@ -464,14 +462,9 @@ i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
 void
 i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
 {
-	struct drm_device *dev = obj->base.dev;
-	drm_i915_private_t *dev_priv = dev->dev_private;
 	int page_count = obj->base.size >> PAGE_SHIFT;
 	int i;
 
-	if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
-		return;
-
 	if (obj->bit_17 == NULL) {
 		obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
 					   sizeof(long), GFP_KERNEL);

+ 36 - 6
drivers/gpu/drm/i915/i915_irq.c

@@ -383,6 +383,7 @@ static void gen6_pm_rps_work(struct work_struct *work)
 	pm_iir = dev_priv->pm_iir;
 	dev_priv->pm_iir = 0;
 	pm_imr = I915_READ(GEN6_PMIMR);
+	I915_WRITE(GEN6_PMIMR, 0);
 	spin_unlock_irq(&dev_priv->rps_lock);
 
 	if (!pm_iir)
@@ -420,7 +421,6 @@ static void gen6_pm_rps_work(struct work_struct *work)
 	 * an *extremely* unlikely race with gen6_rps_enable() that is prevented
 	 * by holding struct_mutex for the duration of the write.
 	 */
-	I915_WRITE(GEN6_PMIMR, pm_imr & ~pm_iir);
 	mutex_unlock(&dev_priv->dev->struct_mutex);
 }
 
@@ -536,8 +536,9 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
 		unsigned long flags;
 		spin_lock_irqsave(&dev_priv->rps_lock, flags);
 		WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
-		I915_WRITE(GEN6_PMIMR, pm_iir);
 		dev_priv->pm_iir |= pm_iir;
+		I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
+		POSTING_READ(GEN6_PMIMR);
 		spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
 		queue_work(dev_priv->wq, &dev_priv->rps_work);
 	}
@@ -649,8 +650,9 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
 		unsigned long flags;
 		spin_lock_irqsave(&dev_priv->rps_lock, flags);
 		WARN(dev_priv->pm_iir & pm_iir, "Missed a PM interrupt\n");
-		I915_WRITE(GEN6_PMIMR, pm_iir);
 		dev_priv->pm_iir |= pm_iir;
+		I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir);
+		POSTING_READ(GEN6_PMIMR);
 		spin_unlock_irqrestore(&dev_priv->rps_lock, flags);
 		queue_work(dev_priv->wq, &dev_priv->rps_work);
 	}
@@ -711,7 +713,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
 
 	page_count = src->base.size / PAGE_SIZE;
 
-	dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC);
+	dst = kmalloc(sizeof(*dst) + page_count * sizeof(u32 *), GFP_ATOMIC);
 	if (dst == NULL)
 		return NULL;
 
@@ -1493,7 +1495,7 @@ static int ironlake_enable_vblank(struct drm_device *dev, int pipe)
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
-				    DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
+				    DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 
 	return 0;
@@ -1541,7 +1543,7 @@ static void ironlake_disable_vblank(struct drm_device *dev, int pipe)
 
 	spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
 	ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
-				     DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
+				     DE_PIPEA_VBLANK : DE_PIPEB_VBLANK);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
 
@@ -1777,6 +1779,26 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
 	POSTING_READ(SDEIER);
 }
 
+/*
+ * Enable digital hotplug on the PCH, and configure the DP short pulse
+ * duration to 2ms (which is the minimum in the Display Port spec)
+ *
+ * This register is the same on all known PCH chips.
+ */
+
+static void ironlake_enable_pch_hotplug(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32	hotplug;
+
+	hotplug = I915_READ(PCH_PORT_HOTPLUG);
+	hotplug &= ~(PORTD_PULSE_DURATION_MASK|PORTC_PULSE_DURATION_MASK|PORTB_PULSE_DURATION_MASK);
+	hotplug |= PORTD_HOTPLUG_ENABLE | PORTD_PULSE_DURATION_2ms;
+	hotplug |= PORTC_HOTPLUG_ENABLE | PORTC_PULSE_DURATION_2ms;
+	hotplug |= PORTB_HOTPLUG_ENABLE | PORTB_PULSE_DURATION_2ms;
+	I915_WRITE(PCH_PORT_HOTPLUG, hotplug);
+}
+
 static int ironlake_irq_postinstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -1839,6 +1861,8 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 	I915_WRITE(SDEIER, hotplug_mask);
 	POSTING_READ(SDEIER);
 
+	ironlake_enable_pch_hotplug(dev);
+
 	if (IS_IRONLAKE_M(dev)) {
 		/* Clear & enable PCU event interrupts */
 		I915_WRITE(DEIIR, DE_PCU_EVENT);
@@ -1896,6 +1920,8 @@ static int ivybridge_irq_postinstall(struct drm_device *dev)
 	I915_WRITE(SDEIER, hotplug_mask);
 	POSTING_READ(SDEIER);
 
+	ironlake_enable_pch_hotplug(dev);
+
 	return 0;
 }
 
@@ -2020,6 +2046,10 @@ static void ironlake_irq_uninstall(struct drm_device *dev)
 	I915_WRITE(GTIMR, 0xffffffff);
 	I915_WRITE(GTIER, 0x0);
 	I915_WRITE(GTIIR, I915_READ(GTIIR));
+
+	I915_WRITE(SDEIMR, 0xffffffff);
+	I915_WRITE(SDEIER, 0x0);
+	I915_WRITE(SDEIIR, I915_READ(SDEIIR));
 }
 
 static void i915_driver_irq_uninstall(struct drm_device * dev)
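The interrupt changes above move all GEN6_PMIMR bookkeeping under rps_lock: the handler accumulates the new bits into dev_priv->pm_iir, writes that accumulated value to the mask register with a posting read and queues the work item, while the worker latches pm_iir, clears it, and unmasks by writing 0. A small single-threaded sketch of that mask-in-handler / unmask-in-worker handshake, with plain variables standing in for the MMIO register and the spinlock omitted (all names hypothetical):

#include <stdio.h>

/* Stand-ins for the interrupt mask register and the deferred-work state. */
static unsigned int pmimr;        /* bits set here are masked (ignored)   */
static unsigned int pending_iir;  /* bits latched for the worker          */

/* Top half: latch the new bits, mask them, let the worker do the rest. */
static void demo_irq_handler(unsigned int pm_iir)
{
	/* spin_lock_irqsave(&rps_lock, flags) in the real driver */
	pending_iir |= pm_iir;
	pmimr = pending_iir;           /* I915_WRITE(GEN6_PMIMR, dev_priv->pm_iir) */
	/* POSTING_READ(GEN6_PMIMR); spin_unlock_irqrestore(...) */
	printf("handler: masked 0x%x\n", pmimr);
}

/* Bottom half: take the latched bits, unmask, then process them. */
static void demo_rps_work(void)
{
	unsigned int pm_iir;

	/* spin_lock_irq(&rps_lock) in the real driver */
	pm_iir = pending_iir;
	pending_iir = 0;
	pmimr = 0;                     /* I915_WRITE(GEN6_PMIMR, 0) */
	/* spin_unlock_irq(...) */

	printf("worker: handling 0x%x, mask now 0x%x\n", pm_iir, pmimr);
}

int main(void)
{
	demo_irq_handler(0x30);   /* e.g. up/down threshold events */
	demo_rps_work();
	return 0;
}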

+ 7 - 7
drivers/gpu/drm/i915/i915_mem.c

@@ -202,7 +202,7 @@ static int init_heap(struct mem_block **heap, int start, int size)
 	blocks->next = blocks->prev = *heap;
 
 	memset(*heap, 0, sizeof(**heap));
-	(*heap)->file_priv = (struct drm_file *) - 1;
+	(*heap)->file_priv = (struct drm_file *) -1;
 	(*heap)->next = (*heap)->prev = blocks;
 	return 0;
 }
@@ -359,19 +359,19 @@ int i915_mem_init_heap(struct drm_device *dev, void *data,
 	return init_heap(heap, initheap->start, initheap->size);
 }
 
-int i915_mem_destroy_heap( struct drm_device *dev, void *data,
-			   struct drm_file *file_priv )
+int i915_mem_destroy_heap(struct drm_device *dev, void *data,
+			   struct drm_file *file_priv)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_mem_destroy_heap_t *destroyheap = data;
 	struct mem_block **heap;
 
-	if ( !dev_priv ) {
-		DRM_ERROR( "called with no initialization\n" );
+	if (!dev_priv) {
+		DRM_ERROR("called with no initialization\n");
 		return -EINVAL;
 	}
 
-	heap = get_heap( dev_priv, destroyheap->region );
+	heap = get_heap(dev_priv, destroyheap->region);
 	if (!heap) {
 		DRM_ERROR("get_heap failed");
 		return -EFAULT;
@@ -382,6 +382,6 @@ int i915_mem_destroy_heap( struct drm_device *dev, void *data,
 		return -EFAULT;
 	}
 
-	i915_mem_takedown( heap );
+	i915_mem_takedown(heap);
 	return 0;
 }

+ 94 - 17
drivers/gpu/drm/i915/i915_reg.h

@@ -156,7 +156,7 @@
 #define MI_SUSPEND_FLUSH	MI_INSTR(0x0b, 0)
 #define   MI_SUSPEND_FLUSH_EN	(1<<0)
 #define MI_REPORT_HEAD		MI_INSTR(0x07, 0)
-#define MI_OVERLAY_FLIP		MI_INSTR(0x11,0)
+#define MI_OVERLAY_FLIP		MI_INSTR(0x11, 0)
 #define   MI_OVERLAY_CONTINUE	(0x0<<21)
 #define   MI_OVERLAY_ON		(0x1<<21)
 #define   MI_OVERLAY_OFF	(0x2<<21)
@@ -194,6 +194,13 @@
 #define  MI_SEMAPHORE_UPDATE	    (1<<21)
 #define  MI_SEMAPHORE_COMPARE	    (1<<20)
 #define  MI_SEMAPHORE_REGISTER	    (1<<18)
+#define  MI_SEMAPHORE_SYNC_RV	    (2<<16)
+#define  MI_SEMAPHORE_SYNC_RB	    (0<<16)
+#define  MI_SEMAPHORE_SYNC_VR	    (0<<16)
+#define  MI_SEMAPHORE_SYNC_VB	    (2<<16)
+#define  MI_SEMAPHORE_SYNC_BR	    (2<<16)
+#define  MI_SEMAPHORE_SYNC_BV	    (0<<16)
+#define  MI_SEMAPHORE_SYNC_INVALID  (1<<0)
 /*
  * 3D instructions used by the kernel
  */
@@ -235,16 +242,22 @@
 #define   ASYNC_FLIP                (1<<22)
 #define   DISPLAY_PLANE_A           (0<<20)
 #define   DISPLAY_PLANE_B           (1<<20)
-#define GFX_OP_PIPE_CONTROL	((0x3<<29)|(0x3<<27)|(0x2<<24)|2)
-#define   PIPE_CONTROL_QW_WRITE	(1<<14)
-#define   PIPE_CONTROL_DEPTH_STALL (1<<13)
-#define   PIPE_CONTROL_WC_FLUSH	(1<<12)
-#define   PIPE_CONTROL_IS_FLUSH	(1<<11) /* MBZ on Ironlake */
-#define   PIPE_CONTROL_TC_FLUSH (1<<10) /* GM45+ only */
-#define   PIPE_CONTROL_ISP_DIS	(1<<9)
-#define   PIPE_CONTROL_NOTIFY	(1<<8)
+#define GFX_OP_PIPE_CONTROL(len)	((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))
+#define   PIPE_CONTROL_CS_STALL				(1<<20)
+#define   PIPE_CONTROL_QW_WRITE				(1<<14)
+#define   PIPE_CONTROL_DEPTH_STALL			(1<<13)
+#define   PIPE_CONTROL_WRITE_FLUSH			(1<<12)
+#define   PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH	(1<<12) /* gen6+ */
+#define   PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE	(1<<11) /* MBZ on Ironlake */
+#define   PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE		(1<<10) /* GM45+ only */
+#define   PIPE_CONTROL_INDIRECT_STATE_DISABLE		(1<<9)
+#define   PIPE_CONTROL_NOTIFY				(1<<8)
+#define   PIPE_CONTROL_VF_CACHE_INVALIDATE		(1<<4)
+#define   PIPE_CONTROL_CONST_CACHE_INVALIDATE		(1<<3)
+#define   PIPE_CONTROL_STATE_CACHE_INVALIDATE		(1<<2)
+#define   PIPE_CONTROL_STALL_AT_SCOREBOARD		(1<<1)
+#define   PIPE_CONTROL_DEPTH_CACHE_FLUSH		(1<<0)
 #define   PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
-#define   PIPE_CONTROL_STALL_EN	(1<<1) /* in addr word, Ironlake+ only */
 
 
 /*
@@ -296,6 +309,12 @@
 #define RING_CTL(base)		((base)+0x3c)
 #define RING_SYNC_0(base)	((base)+0x40)
 #define RING_SYNC_1(base)	((base)+0x44)
+#define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE))
+#define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE))
+#define GEN6_VRSYNC (RING_SYNC_1(GEN6_BSD_RING_BASE))
+#define GEN6_VBSYNC (RING_SYNC_0(GEN6_BSD_RING_BASE))
+#define GEN6_BRSYNC (RING_SYNC_0(BLT_RING_BASE))
+#define GEN6_BVSYNC (RING_SYNC_1(BLT_RING_BASE))
 #define RING_MAX_IDLE(base)	((base)+0x54)
 #define RING_HWS_PGA(base)	((base)+0x80)
 #define RING_HWS_PGA_GEN6(base)	((base)+0x2080)
@@ -470,7 +489,7 @@
 
 /* Enables non-sequential data reads through arbiter
  */
-#define   MI_ARB_DUAL_DATA_PHASE_DISABLE       	(1 << 9)
+#define   MI_ARB_DUAL_DATA_PHASE_DISABLE	(1 << 9)
 
 /* Disable FSB snooping of cacheable write cycles from binner/render
  * command stream
@@ -626,7 +645,7 @@
 
 #define ILK_DISPLAY_CHICKEN1	0x42000
 #define   ILK_FBCQ_DIS		(1<<22)
-#define   ILK_PABSTRETCH_DIS 	(1<<21)
+#define	  ILK_PABSTRETCH_DIS	(1<<21)
 
 
 /*
@@ -2358,7 +2377,7 @@
 
 #define DSPFW1			0x70034
 #define   DSPFW_SR_SHIFT	23
-#define   DSPFW_SR_MASK 	(0x1ff<<23)
+#define   DSPFW_SR_MASK		(0x1ff<<23)
 #define   DSPFW_CURSORB_SHIFT	16
 #define   DSPFW_CURSORB_MASK	(0x3f<<16)
 #define   DSPFW_PLANEB_SHIFT	8
@@ -2416,6 +2435,7 @@
 #define  WM0_PIPE_CURSOR_MASK	(0x1f)
 
 #define WM0_PIPEB_ILK		0x45104
+#define WM0_PIPEC_IVB		0x45200
 #define WM1_LP_ILK		0x45108
 #define  WM1_LP_SR_EN		(1<<31)
 #define  WM1_LP_LATENCY_SHIFT	24
@@ -2554,10 +2574,18 @@
 #define _CURBBASE		0x700c4
 #define _CURBPOS			0x700c8
 
+#define _CURBCNTR_IVB		0x71080
+#define _CURBBASE_IVB		0x71084
+#define _CURBPOS_IVB		0x71088
+
 #define CURCNTR(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR)
 #define CURBASE(pipe) _PIPE(pipe, _CURABASE, _CURBBASE)
 #define CURPOS(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS)
 
+#define CURCNTR_IVB(pipe) _PIPE(pipe, _CURACNTR, _CURBCNTR_IVB)
+#define CURBASE_IVB(pipe) _PIPE(pipe, _CURABASE, _CURBBASE_IVB)
+#define CURPOS_IVB(pipe) _PIPE(pipe, _CURAPOS, _CURBPOS_IVB)
+
 /* Display A control */
 #define _DSPACNTR                0x70180
 #define   DISPLAY_PLANE_ENABLE			(1<<31)
@@ -2903,12 +2931,13 @@
 #define SDEIER  0xc400c
 
 /* digital port hotplug */
-#define PCH_PORT_HOTPLUG        0xc4030
+#define PCH_PORT_HOTPLUG        0xc4030		/* SHOTPLUG_CTL */
 #define PORTD_HOTPLUG_ENABLE            (1 << 20)
 #define PORTD_PULSE_DURATION_2ms        (0)
 #define PORTD_PULSE_DURATION_4_5ms      (1 << 18)
 #define PORTD_PULSE_DURATION_6ms        (2 << 18)
 #define PORTD_PULSE_DURATION_100ms      (3 << 18)
+#define PORTD_PULSE_DURATION_MASK	(3 << 18)
 #define PORTD_HOTPLUG_NO_DETECT         (0)
 #define PORTD_HOTPLUG_SHORT_DETECT      (1 << 16)
 #define PORTD_HOTPLUG_LONG_DETECT       (1 << 17)
@@ -2917,6 +2946,7 @@
 #define PORTC_PULSE_DURATION_4_5ms      (1 << 10)
 #define PORTC_PULSE_DURATION_6ms        (2 << 10)
 #define PORTC_PULSE_DURATION_100ms      (3 << 10)
+#define PORTC_PULSE_DURATION_MASK	(3 << 10)
 #define PORTC_HOTPLUG_NO_DETECT         (0)
 #define PORTC_HOTPLUG_SHORT_DETECT      (1 << 8)
 #define PORTC_HOTPLUG_LONG_DETECT       (1 << 9)
@@ -2925,6 +2955,7 @@
 #define PORTB_PULSE_DURATION_4_5ms      (1 << 2)
 #define PORTB_PULSE_DURATION_6ms        (2 << 2)
 #define PORTB_PULSE_DURATION_100ms      (3 << 2)
+#define PORTB_PULSE_DURATION_MASK	(3 << 2)
 #define PORTB_HOTPLUG_NO_DETECT         (0)
 #define PORTB_HOTPLUG_SHORT_DETECT      (1 << 0)
 #define PORTB_HOTPLUG_LONG_DETECT       (1 << 1)
@@ -2945,15 +2976,15 @@
 
 #define _PCH_DPLL_A              0xc6014
 #define _PCH_DPLL_B              0xc6018
-#define PCH_DPLL(pipe) _PIPE(pipe, _PCH_DPLL_A, _PCH_DPLL_B)
+#define PCH_DPLL(pipe) (pipe == 0 ?  _PCH_DPLL_A : _PCH_DPLL_B)
 
 #define _PCH_FPA0                0xc6040
 #define  FP_CB_TUNE		(0x3<<22)
 #define _PCH_FPA1                0xc6044
 #define _PCH_FPB0                0xc6048
 #define _PCH_FPB1                0xc604c
-#define PCH_FP0(pipe) _PIPE(pipe, _PCH_FPA0, _PCH_FPB0)
-#define PCH_FP1(pipe) _PIPE(pipe, _PCH_FPA1, _PCH_FPB1)
+#define PCH_FP0(pipe) (pipe == 0 ? _PCH_FPA0 : _PCH_FPB0)
+#define PCH_FP1(pipe) (pipe == 0 ? _PCH_FPA1 : _PCH_FPB1)
 
 #define PCH_DPLL_TEST           0xc606c
 
@@ -3167,6 +3198,7 @@
 #define  FDI_LINK_TRAIN_NONE_IVB            (3<<8)
 
 /* both Tx and Rx */
+#define  FDI_COMPOSITE_SYNC		(1<<11)
 #define  FDI_LINK_TRAIN_AUTO		(1<<10)
 #define  FDI_SCRAMBLING_ENABLE          (0<<7)
 #define  FDI_SCRAMBLING_DISABLE         (1<<7)
@@ -3308,15 +3340,35 @@
 #define PCH_PP_STATUS		0xc7200
 #define PCH_PP_CONTROL		0xc7204
 #define  PANEL_UNLOCK_REGS	(0xabcd << 16)
+#define  PANEL_UNLOCK_MASK	(0xffff << 16)
 #define  EDP_FORCE_VDD		(1 << 3)
 #define  EDP_BLC_ENABLE		(1 << 2)
 #define  PANEL_POWER_RESET	(1 << 1)
 #define  PANEL_POWER_OFF	(0 << 0)
 #define  PANEL_POWER_ON		(1 << 0)
 #define PCH_PP_ON_DELAYS	0xc7208
+#define  PANEL_PORT_SELECT_MASK	(3 << 30)
+#define  PANEL_PORT_SELECT_LVDS	(0 << 30)
+#define  PANEL_PORT_SELECT_DPA	(1 << 30)
 #define  EDP_PANEL		(1 << 30)
+#define  PANEL_PORT_SELECT_DPC	(2 << 30)
+#define  PANEL_PORT_SELECT_DPD	(3 << 30)
+#define  PANEL_POWER_UP_DELAY_MASK	(0x1fff0000)
+#define  PANEL_POWER_UP_DELAY_SHIFT	16
+#define  PANEL_LIGHT_ON_DELAY_MASK	(0x1fff)
+#define  PANEL_LIGHT_ON_DELAY_SHIFT	0
+
 #define PCH_PP_OFF_DELAYS	0xc720c
+#define  PANEL_POWER_DOWN_DELAY_MASK	(0x1fff0000)
+#define  PANEL_POWER_DOWN_DELAY_SHIFT	16
+#define  PANEL_LIGHT_OFF_DELAY_MASK	(0x1fff)
+#define  PANEL_LIGHT_OFF_DELAY_SHIFT	0
+
 #define PCH_PP_DIVISOR		0xc7210
+#define  PP_REFERENCE_DIVIDER_MASK	(0xffffff00)
+#define  PP_REFERENCE_DIVIDER_SHIFT	8
+#define  PANEL_POWER_CYCLE_DELAY_MASK	(0x1f)
+#define  PANEL_POWER_CYCLE_DELAY_SHIFT	0
 
 #define PCH_DP_B		0xe4100
 #define PCH_DPB_AUX_CH_CTL	0xe4110
@@ -3470,4 +3522,29 @@
 #define GEN6_PCODE_DATA				0x138128
 #define   GEN6_PCODE_FREQ_IA_RATIO_SHIFT	8
 
+#define G4X_AUD_VID_DID			0x62020
+#define INTEL_AUDIO_DEVCL		0x808629FB
+#define INTEL_AUDIO_DEVBLC		0x80862801
+#define INTEL_AUDIO_DEVCTG		0x80862802
+
+#define G4X_AUD_CNTL_ST			0x620B4
+#define G4X_ELDV_DEVCL_DEVBLC		(1 << 13)
+#define G4X_ELDV_DEVCTG			(1 << 14)
+#define G4X_ELD_ADDR			(0xf << 5)
+#define G4X_ELD_ACK			(1 << 4)
+#define G4X_HDMIW_HDMIEDID		0x6210C
+
+#define GEN5_HDMIW_HDMIEDID_A		0xE2050
+#define GEN5_AUD_CNTL_ST_A		0xE20B4
+#define GEN5_ELD_BUFFER_SIZE		(0x1f << 10)
+#define GEN5_ELD_ADDRESS		(0x1f << 5)
+#define GEN5_ELD_ACK			(1 << 4)
+#define GEN5_AUD_CNTL_ST2		0xE20C0
+#define GEN5_ELD_VALIDB			(1 << 0)
+#define GEN5_CP_READYB			(1 << 1)
+
+#define GEN7_HDMIW_HDMIEDID_A		0xE5050
+#define GEN7_AUD_CNTRL_ST_A		0xE50B4
+#define GEN7_AUD_CNTRL_ST2		0xE50C0
+
 #endif /* _I915_REG_H_ */
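GFX_OP_PIPE_CONTROL is now parameterised by the command length, with (len - 2) in the low bits, which is why the callers in intel_ringbuffer.c below pass the total DWord count (4 or 5) rather than relying on a hard-coded value. A tiny worked example of the encoding, reusing the definition from the hunk above:

#include <stdio.h>

/* Same definition as in the i915_reg.h hunk above. */
#define GFX_OP_PIPE_CONTROL(len)	((0x3<<29)|(0x3<<27)|(0x2<<24)|(len-2))

int main(void)
{
	/* A 4-DWord PIPE_CONTROL (header plus 3 operands) encodes length 2 ... */
	printf("PIPE_CONTROL(4) = 0x%08x\n", (unsigned int)GFX_OP_PIPE_CONTROL(4));
	/* ... and the 5-DWord form used by the gen6 flush encodes length 3.    */
	printf("PIPE_CONTROL(5) = 0x%08x\n", (unsigned int)GFX_OP_PIPE_CONTROL(5));
	return 0;
}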

+ 4 - 4
drivers/gpu/drm/i915/i915_suspend.c

@@ -60,7 +60,7 @@ static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
 	else
 		array = dev_priv->save_palette_b;
 
-	for(i = 0; i < 256; i++)
+	for (i = 0; i < 256; i++)
 		array[i] = I915_READ(reg + (i << 2));
 }
 
@@ -82,7 +82,7 @@ static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
 	else
 		array = dev_priv->save_palette_b;
 
-	for(i = 0; i < 256; i++)
+	for (i = 0; i < 256; i++)
 		I915_WRITE(reg + (i << 2), array[i]);
 }
 
@@ -887,10 +887,10 @@ int i915_restore_state(struct drm_device *dev)
 	mutex_lock(&dev->struct_mutex);
 
 	/* Cache mode state */
-	I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
+	I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
 
 	/* Memory arbitration state */
-	I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
+	I915_WRITE(MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
 
 	for (i = 0; i < 16; i++) {
 		I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);

+ 23 - 23
drivers/gpu/drm/i915/i915_trace.h

@@ -385,29 +385,29 @@ TRACE_EVENT(i915_flip_complete,
 );
 
 TRACE_EVENT(i915_reg_rw,
-           TP_PROTO(bool write, u32 reg, u64 val, int len),
-
-           TP_ARGS(write, reg, val, len),
-
-           TP_STRUCT__entry(
-                   __field(u64, val)
-                   __field(u32, reg)
-                   __field(u16, write)
-                   __field(u16, len)
-                   ),
-
-           TP_fast_assign(
-                   __entry->val = (u64)val;
-                   __entry->reg = reg;
-                   __entry->write = write;
-                   __entry->len = len;
-                   ),
-
-           TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
-                     __entry->write ? "write" : "read",
-		     __entry->reg, __entry->len,
-		     (u32)(__entry->val & 0xffffffff),
-		     (u32)(__entry->val >> 32))
+	TP_PROTO(bool write, u32 reg, u64 val, int len),
+
+	TP_ARGS(write, reg, val, len),
+
+	TP_STRUCT__entry(
+		__field(u64, val)
+		__field(u32, reg)
+		__field(u16, write)
+		__field(u16, len)
+		),
+
+	TP_fast_assign(
+		__entry->val = (u64)val;
+		__entry->reg = reg;
+		__entry->write = write;
+		__entry->len = len;
+		),
+
+	TP_printk("%s reg=0x%x, len=%d, val=(0x%x, 0x%x)",
+		__entry->write ? "write" : "read",
+		__entry->reg, __entry->len,
+		(u32)(__entry->val & 0xffffffff),
+		(u32)(__entry->val >> 32))
 );
 
 #endif /* _I915_TRACE_H_ */

+ 1 - 1
drivers/gpu/drm/i915/intel_acpi.c

@@ -64,7 +64,7 @@ static int intel_dsm(acpi_handle handle, int func, int arg)
 
 	case ACPI_TYPE_BUFFER:
 		if (obj->buffer.length == 4) {
-			result =(obj->buffer.pointer[0] |
+			result = (obj->buffer.pointer[0] |
 				(obj->buffer.pointer[1] <<  8) |
 				(obj->buffer.pointer[2] << 16) |
 				(obj->buffer.pointer[3] << 24));

+ 14 - 9
drivers/gpu/drm/i915/intel_bios.c

@@ -1,5 +1,5 @@
 /*
- * Copyright Â© 2006 Intel Corporation
+ * Copyright © 2006 Intel Corporation
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -309,6 +309,13 @@ parse_general_features(struct drm_i915_private *dev_priv,
 		dev_priv->lvds_use_ssc = general->enable_ssc;
 		dev_priv->lvds_ssc_freq =
 			intel_bios_ssc_frequency(dev, general->ssc_freq);
+		dev_priv->display_clock_mode = general->display_clock_mode;
+		DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d\n",
+			      dev_priv->int_tv_support,
+			      dev_priv->int_crt_support,
+			      dev_priv->lvds_use_ssc,
+			      dev_priv->lvds_ssc_freq,
+			      dev_priv->display_clock_mode);
 	}
 }
 
@@ -381,7 +388,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
 		if (p_child->dvo_port != DEVICE_PORT_DVOB &&
 			p_child->dvo_port != DEVICE_PORT_DVOC) {
 			/* skip the incorrect SDVO port */
-			DRM_DEBUG_KMS("Incorrect SDVO port. Skip it \n");
+			DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
 			continue;
 		}
 		DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
@@ -396,15 +403,13 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
 			p_mapping->dvo_wiring = p_child->dvo_wiring;
 			p_mapping->ddc_pin = p_child->ddc_pin;
 			p_mapping->i2c_pin = p_child->i2c_pin;
-			p_mapping->i2c_speed = p_child->i2c_speed;
 			p_mapping->initialized = 1;
-			DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d, i2c_speed=%d\n",
+			DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
 				      p_mapping->dvo_port,
 				      p_mapping->slave_addr,
 				      p_mapping->dvo_wiring,
 				      p_mapping->ddc_pin,
-				      p_mapping->i2c_pin,
-				      p_mapping->i2c_speed);
+				      p_mapping->i2c_pin);
 		} else {
 			DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
 					 "two SDVO device.\n");
@@ -564,7 +569,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
 		count++;
 	}
 	if (!count) {
-		DRM_DEBUG_KMS("no child dev is parsed from VBT \n");
+		DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
 		return;
 	}
 	dev_priv->child_dev = kzalloc(sizeof(*p_child) * count, GFP_KERNEL);
@@ -610,7 +615,7 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
 	/* Default to using SSC */
 	dev_priv->lvds_use_ssc = 1;
 	dev_priv->lvds_ssc_freq = intel_bios_ssc_frequency(dev, 1);
-	DRM_DEBUG("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
+	DRM_DEBUG_KMS("Set default to SSC at %dMHz\n", dev_priv->lvds_ssc_freq);
 
 	/* eDP data */
 	dev_priv->edp.bpp = 18;
@@ -639,7 +644,7 @@ intel_parse_bios(struct drm_device *dev)
 	if (dev_priv->opregion.vbt) {
 		struct vbt_header *vbt = dev_priv->opregion.vbt;
 		if (memcmp(vbt->signature, "$VBT", 4) == 0) {
-			DRM_DEBUG_DRIVER("Using VBT from OpRegion: %20s\n",
+			DRM_DEBUG_KMS("Using VBT from OpRegion: %20s\n",
 					 vbt->signature);
 			bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset);
 		} else

+ 13 - 9
drivers/gpu/drm/i915/intel_bios.h

@@ -1,5 +1,5 @@
 /*
- * Copyright Â© 2006 Intel Corporation
+ * Copyright © 2006 Intel Corporation
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * copy of this software and associated documentation files (the "Software"),
@@ -120,7 +120,9 @@ struct bdb_general_features {
 	u8 ssc_freq:1;
 	u8 enable_lfp_on_override:1;
 	u8 disable_ssc_ddt:1;
-	u8 rsvd8:3; /* finish byte */
+	u8 rsvd7:1;
+	u8 display_clock_mode:1;
+	u8 rsvd8:1; /* finish byte */
 
         /* bits 3 */
 	u8 disable_smooth_vision:1;
@@ -133,7 +135,10 @@ struct bdb_general_features {
         /* bits 5 */
 	u8 int_crt_support:1;
 	u8 int_tv_support:1;
-	u8 rsvd11:6; /* finish byte */
+	u8 int_efp_support:1;
+	u8 dp_ssc_enb:1;	/* PCH attached eDP supports SSC */
+	u8 dp_ssc_freq:1;	/* SSC freq for PCH attached eDP */
+	u8 rsvd11:3; /* finish byte */
 } __attribute__((packed));
 
 /* pre-915 */
@@ -197,8 +202,7 @@ struct bdb_general_features {
 struct child_device_config {
 	u16 handle;
 	u16 device_type;
-	u8  i2c_speed;
-	u8  rsvd[9];
+	u8  device_id[10]; /* ascii string */
 	u16 addin_offset;
 	u8  dvo_port; /* See Device_PORT_* above */
 	u8  i2c_pin;
@@ -240,7 +244,7 @@ struct bdb_general_definitions {
 	 * And the device num is related with the size of general definition
 	 * block. It is obtained by using the following formula:
 	 * number = (block_size - sizeof(bdb_general_definitions))/
-	 * 		sizeof(child_device_config);
+	 *	     sizeof(child_device_config);
 	 */
 	struct child_device_config devices[0];
 } __attribute__((packed));
@@ -446,11 +450,11 @@ struct bdb_driver_features {
 #define EDP_VSWING_1_2V		3
 
 struct edp_power_seq {
-	u16 t3;
-	u16 t7;
+	u16 t1_t3;
+	u16 t8;
 	u16 t9;
 	u16 t10;
-	u16 t12;
+	u16 t11_t12;
 } __attribute__ ((packed));
 
 struct edp_link_params {

+ 8 - 12
drivers/gpu/drm/i915/intel_crt.c

@@ -69,7 +69,7 @@ static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
 	temp &= ~(ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
 	temp &= ~ADPA_DAC_ENABLE;
 
-	switch(mode) {
+	switch (mode) {
 	case DRM_MODE_DPMS_ON:
 		temp |= ADPA_DAC_ENABLE;
 		break;
@@ -152,17 +152,13 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
 	if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
 		adpa |= ADPA_VSYNC_ACTIVE_HIGH;
 
-	if (intel_crtc->pipe == 0) {
-		if (HAS_PCH_CPT(dev))
-			adpa |= PORT_TRANS_A_SEL_CPT;
-		else
-			adpa |= ADPA_PIPE_A_SELECT;
-	} else {
-		if (HAS_PCH_CPT(dev))
-			adpa |= PORT_TRANS_B_SEL_CPT;
-		else
-			adpa |= ADPA_PIPE_B_SELECT;
-	}
+	/* For CPT allow 3 pipe config, for others just use A or B */
+	if (HAS_PCH_CPT(dev))
+		adpa |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
+	else if (intel_crtc->pipe == 0)
+		adpa |= ADPA_PIPE_A_SELECT;
+	else
+		adpa |= ADPA_PIPE_B_SELECT;
 
 	if (!HAS_PCH_SPLIT(dev))
 		I915_WRITE(BCLRPAT(intel_crtc->pipe), 0);

File diff suppressed because it is too large
+ 477 - 218
drivers/gpu/drm/i915/intel_display.c


File diff suppressed because it is too large
+ 405 - 136
drivers/gpu/drm/i915/intel_dp.c


+ 14 - 7
drivers/gpu/drm/i915/intel_drv.h

@@ -34,7 +34,7 @@
 #define _wait_for(COND, MS, W) ({ \
 	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
 	int ret__ = 0;							\
-	while (! (COND)) {						\
+	while (!(COND)) {						\
 		if (time_after(jiffies, timeout__)) {			\
 			ret__ = -ETIMEDOUT;				\
 			break;						\
@@ -49,10 +49,10 @@
 
 #define MSLEEP(x) do { \
 	if (in_dbg_master()) \
-	       	mdelay(x); \
+		mdelay(x); \
 	else \
 		msleep(x); \
-} while(0)
+} while (0)
 
 #define KHz(x) (1000*x)
 #define MHz(x) KHz(1000*x)
@@ -171,6 +171,9 @@ struct intel_crtc {
 	int16_t cursor_width, cursor_height;
 	bool cursor_visible;
 	unsigned int bpp;
+
+	bool no_pll; /* tertiary pipe for IVB */
+	bool use_pll_a;
 };
 
 #define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
@@ -184,7 +187,7 @@ struct intel_crtc {
 #define DIP_VERSION_AVI 0x2
 #define DIP_LEN_AVI     13
 
-#define DIP_TYPE_SPD	0x3
+#define DIP_TYPE_SPD	0x83
 #define DIP_VERSION_SPD	0x1
 #define DIP_LEN_SPD	25
 #define DIP_SPD_UNKNOWN	0
@@ -284,7 +287,7 @@ void
 intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
 		 struct drm_display_mode *adjusted_mode);
 extern bool intel_dpd_is_edp(struct drm_device *dev);
-extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
+extern void intel_edp_link_config(struct intel_encoder *, int *, int *);
 extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
 
 /* intel_panel.c */
@@ -304,8 +307,8 @@ extern void intel_panel_destroy_backlight(struct drm_device *dev);
 extern enum drm_connector_status intel_panel_detect(struct drm_device *dev);
 
 extern void intel_crtc_load_lut(struct drm_crtc *crtc);
-extern void intel_encoder_prepare (struct drm_encoder *encoder);
-extern void intel_encoder_commit (struct drm_encoder *encoder);
+extern void intel_encoder_prepare(struct drm_encoder *encoder);
+extern void intel_encoder_commit(struct drm_encoder *encoder);
 extern void intel_encoder_destroy(struct drm_encoder *encoder);
 
 static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
@@ -377,4 +380,8 @@ extern void intel_fb_output_poll_changed(struct drm_device *dev);
 extern void intel_fb_restore_mode(struct drm_device *dev);
 
 extern void intel_init_clock_gating(struct drm_device *dev);
+extern void intel_write_eld(struct drm_encoder *encoder,
+			    struct drm_display_mode *mode);
+extern void intel_cpt_verify_modeset(struct drm_device *dev, int pipe);
+
 #endif /* __INTEL_DRV_H__ */

+ 19 - 14
drivers/gpu/drm/i915/intel_hdmi.c

@@ -69,8 +69,7 @@ void intel_dip_infoframe_csum(struct dip_infoframe *frame)
 	frame->checksum = 0;
 	frame->ecc = 0;
 
-	/* Header isn't part of the checksum */
-	for (i = 5; i < frame->len; i++)
+	for (i = 0; i < frame->len + DIP_HEADER_SIZE; i++)
 		sum += data[i];
 
 	frame->checksum = 0x100 - sum;
@@ -104,7 +103,7 @@ static u32 intel_infoframe_flags(struct dip_infoframe *frame)
 		flags |= VIDEO_DIP_ENABLE_AVI | VIDEO_DIP_FREQ_VSYNC;
 		break;
 	case DIP_TYPE_SPD:
-		flags |= VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_FREQ_2VSYNC;
+		flags |= VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_FREQ_VSYNC;
 		break;
 	default:
 		DRM_DEBUG_DRIVER("unknown info frame type %d\n", frame->type);
@@ -165,9 +164,9 @@ static void ironlake_write_infoframe(struct drm_encoder *encoder,
 
 	flags = intel_infoframe_index(frame);
 
-	val &= ~VIDEO_DIP_SELECT_MASK;
+	val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
 
-	I915_WRITE(reg, val | flags);
+	I915_WRITE(reg, VIDEO_DIP_ENABLE | val | flags);
 
 	for (i = 0; i < len; i += 4) {
 		I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
@@ -245,16 +244,17 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
 		sdvox |= HDMI_MODE_SELECT;
 
 	if (intel_hdmi->has_audio) {
+		DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
+				 pipe_name(intel_crtc->pipe));
 		sdvox |= SDVO_AUDIO_ENABLE;
 		sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC;
+		intel_write_eld(encoder, adjusted_mode);
 	}
 
-	if (intel_crtc->pipe == 1) {
-		if (HAS_PCH_CPT(dev))
-			sdvox |= PORT_TRANS_B_SEL_CPT;
-		else
-			sdvox |= SDVO_PIPE_B_SELECT;
-	}
+	if (HAS_PCH_CPT(dev))
+		sdvox |= PORT_TRANS_SEL_CPT(intel_crtc->pipe);
+	else if (intel_crtc->pipe == 1)
+		sdvox |= SDVO_PIPE_B_SELECT;
 
 	I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
 	POSTING_READ(intel_hdmi->sdvox_reg);
@@ -486,6 +486,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
 	struct intel_encoder *intel_encoder;
 	struct intel_connector *intel_connector;
 	struct intel_hdmi *intel_hdmi;
+	int i;
 
 	intel_hdmi = kzalloc(sizeof(struct intel_hdmi), GFP_KERNEL);
 	if (!intel_hdmi)
@@ -511,7 +512,7 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
 	connector->polled = DRM_CONNECTOR_POLL_HPD;
 	connector->interlace_allowed = 0;
 	connector->doublescan_allowed = 0;
-	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+	intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 
 	/* Set up the DDC bus. */
 	if (sdvox_reg == SDVOB) {
@@ -538,10 +539,14 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
 
 	intel_hdmi->sdvox_reg = sdvox_reg;
 
-	if (!HAS_PCH_SPLIT(dev))
+	if (!HAS_PCH_SPLIT(dev)) {
 		intel_hdmi->write_infoframe = i9xx_write_infoframe;
-	else
+		I915_WRITE(VIDEO_DIP_CTL, 0);
+	} else {
 		intel_hdmi->write_infoframe = ironlake_write_infoframe;
+		for_each_pipe(i)
+			I915_WRITE(TVIDEO_DIP_CTL(i), 0);
+	}
 
 	drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
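The checksum fix in intel_dip_infoframe_csum() above sums the header bytes as well as the payload, which matches the general infoframe rule that every transmitted byte, checksum included, must add up to zero modulo 256. A self-contained sketch of that arithmetic; the 3-byte header and dummy payload below are made up for illustration and do not mirror the driver's in-memory dip_infoframe layout:

#include <stdio.h>

/*
 * With the checksum byte zeroed, add up every header and payload byte,
 * then store 0x100 minus that sum (mod 256) so the complete frame sums
 * to zero.
 */
static unsigned char infoframe_checksum(const unsigned char *data, int len)
{
	unsigned int sum = 0;
	int i;

	for (i = 0; i < len; i++)
		sum += data[i];

	return (unsigned char)(0x100 - (sum & 0xff));
}

int main(void)
{
	/* Made-up frame: type/version/length header plus a short payload. */
	unsigned char frame[] = { 0x83, 0x01, 0x05, 'd', 'e', 'm', 'o', 0x00 };
	unsigned char csum = infoframe_checksum(frame, sizeof(frame));
	unsigned int total = csum;
	unsigned int i;

	for (i = 0; i < sizeof(frame); i++)
		total += frame[i];

	printf("checksum = 0x%02x, frame total mod 256 = %u\n",
	       csum, total & 0xff);
	return (total & 0xff) ? 1 : 0;   /* 0: everything sums to zero */
}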
 

+ 1 - 7
drivers/gpu/drm/i915/intel_i2c.c

@@ -422,13 +422,7 @@ void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
 {
 	struct intel_gmbus *bus = to_intel_gmbus(adapter);
 
-	/* speed:
-	 * 0x0 = 100 KHz
-	 * 0x1 = 50 KHz
-	 * 0x2 = 400 KHz
-	 * 0x3 = 1000 Khz
-	 */
-	bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | (speed << 8);
+	bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | speed;
 }
 
 void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)

+ 5 - 3
drivers/gpu/drm/i915/intel_lvds.c

@@ -888,9 +888,11 @@ bool intel_lvds_init(struct drm_device *dev)
 	intel_encoder->type = INTEL_OUTPUT_LVDS;
 
 	intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
-	intel_encoder->crtc_mask = (1 << 1);
-	if (INTEL_INFO(dev)->gen >= 5)
-		intel_encoder->crtc_mask |= (1 << 0);
+	if (HAS_PCH_SPLIT(dev))
+		intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
+	else
+		intel_encoder->crtc_mask = (1 << 1);
+
 	drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
 	drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
 	connector->display_info.subpixel_order = SubPixelHorizontalRGB;

+ 2 - 0
drivers/gpu/drm/i915/intel_modes.c

@@ -26,6 +26,7 @@
 #include <linux/slab.h>
 #include <linux/i2c.h>
 #include <linux/fb.h>
+#include <drm/drm_edid.h>
 #include "drmP.h"
 #include "intel_drv.h"
 #include "i915_drv.h"
@@ -74,6 +75,7 @@ int intel_ddc_get_modes(struct drm_connector *connector,
 	if (edid) {
 		drm_mode_connector_update_edid_property(connector, edid);
 		ret = drm_add_edid_modes(connector, edid);
+		drm_edid_to_eld(connector, edid);
 		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 	}

+ 45 - 45
drivers/gpu/drm/i915/intel_opregion.c

@@ -51,61 +51,61 @@
 #define MBOX_ASLE      (1<<2)
 
 struct opregion_header {
-       u8 signature[16];
-       u32 size;
-       u32 opregion_ver;
-       u8 bios_ver[32];
-       u8 vbios_ver[16];
-       u8 driver_ver[16];
-       u32 mboxes;
-       u8 reserved[164];
+	u8 signature[16];
+	u32 size;
+	u32 opregion_ver;
+	u8 bios_ver[32];
+	u8 vbios_ver[16];
+	u8 driver_ver[16];
+	u32 mboxes;
+	u8 reserved[164];
 } __attribute__((packed));
 
 /* OpRegion mailbox #1: public ACPI methods */
 struct opregion_acpi {
-       u32 drdy;       /* driver readiness */
-       u32 csts;       /* notification status */
-       u32 cevt;       /* current event */
-       u8 rsvd1[20];
-       u32 didl[8];    /* supported display devices ID list */
-       u32 cpdl[8];    /* currently presented display list */
-       u32 cadl[8];    /* currently active display list */
-       u32 nadl[8];    /* next active devices list */
-       u32 aslp;       /* ASL sleep time-out */
-       u32 tidx;       /* toggle table index */
-       u32 chpd;       /* current hotplug enable indicator */
-       u32 clid;       /* current lid state*/
-       u32 cdck;       /* current docking state */
-       u32 sxsw;       /* Sx state resume */
-       u32 evts;       /* ASL supported events */
-       u32 cnot;       /* current OS notification */
-       u32 nrdy;       /* driver status */
-       u8 rsvd2[60];
+	u32 drdy;       /* driver readiness */
+	u32 csts;       /* notification status */
+	u32 cevt;       /* current event */
+	u8 rsvd1[20];
+	u32 didl[8];    /* supported display devices ID list */
+	u32 cpdl[8];    /* currently presented display list */
+	u32 cadl[8];    /* currently active display list */
+	u32 nadl[8];    /* next active devices list */
+	u32 aslp;       /* ASL sleep time-out */
+	u32 tidx;       /* toggle table index */
+	u32 chpd;       /* current hotplug enable indicator */
+	u32 clid;       /* current lid state*/
+	u32 cdck;       /* current docking state */
+	u32 sxsw;       /* Sx state resume */
+	u32 evts;       /* ASL supported events */
+	u32 cnot;       /* current OS notification */
+	u32 nrdy;       /* driver status */
+	u8 rsvd2[60];
 } __attribute__((packed));
 
 /* OpRegion mailbox #2: SWSCI */
 struct opregion_swsci {
-       u32 scic;       /* SWSCI command|status|data */
-       u32 parm;       /* command parameters */
-       u32 dslp;       /* driver sleep time-out */
-       u8 rsvd[244];
+	u32 scic;       /* SWSCI command|status|data */
+	u32 parm;       /* command parameters */
+	u32 dslp;       /* driver sleep time-out */
+	u8 rsvd[244];
 } __attribute__((packed));
 
 /* OpRegion mailbox #3: ASLE */
 struct opregion_asle {
-       u32 ardy;       /* driver readiness */
-       u32 aslc;       /* ASLE interrupt command */
-       u32 tche;       /* technology enabled indicator */
-       u32 alsi;       /* current ALS illuminance reading */
-       u32 bclp;       /* backlight brightness to set */
-       u32 pfit;       /* panel fitting state */
-       u32 cblv;       /* current brightness level */
-       u16 bclm[20];   /* backlight level duty cycle mapping table */
-       u32 cpfm;       /* current panel fitting mode */
-       u32 epfm;       /* enabled panel fitting modes */
-       u8 plut[74];    /* panel LUT and identifier */
-       u32 pfmb;       /* PWM freq and min brightness */
-       u8 rsvd[102];
+	u32 ardy;       /* driver readiness */
+	u32 aslc;       /* ASLE interrupt command */
+	u32 tche;       /* technology enabled indicator */
+	u32 alsi;       /* current ALS illuminance reading */
+	u32 bclp;       /* backlight brightness to set */
+	u32 pfit;       /* panel fitting state */
+	u32 cblv;       /* current brightness level */
+	u16 bclm[20];   /* backlight level duty cycle mapping table */
+	u32 cpfm;       /* current panel fitting mode */
+	u32 epfm;       /* enabled panel fitting modes */
+	u8 plut[74];    /* panel LUT and identifier */
+	u32 pfmb;       /* PWM freq and min brightness */
+	u8 rsvd[102];
 } __attribute__((packed));
 
 /* ASLE irq request bits */
@@ -361,7 +361,7 @@ static void intel_didl_outputs(struct drm_device *dev)
 
 	list_for_each_entry(acpi_cdev, &acpi_video_bus->children, node) {
 		if (i >= 8) {
-			dev_printk (KERN_ERR, &dev->pdev->dev,
+			dev_printk(KERN_ERR, &dev->pdev->dev,
 				    "More than 8 outputs detected\n");
 			return;
 		}
@@ -387,7 +387,7 @@ blind_set:
 	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
 		int output_type = ACPI_OTHER_OUTPUT;
 		if (i >= 8) {
-			dev_printk (KERN_ERR, &dev->pdev->dev,
+			dev_printk(KERN_ERR, &dev->pdev->dev,
 				    "More than 8 outputs detected\n");
 			return;
 		}

+ 73 - 73
drivers/gpu/drm/i915/intel_overlay.c

@@ -117,57 +117,57 @@
 
 /* memory bufferd overlay registers */
 struct overlay_registers {
-    u32 OBUF_0Y;
-    u32 OBUF_1Y;
-    u32 OBUF_0U;
-    u32 OBUF_0V;
-    u32 OBUF_1U;
-    u32 OBUF_1V;
-    u32 OSTRIDE;
-    u32 YRGB_VPH;
-    u32 UV_VPH;
-    u32 HORZ_PH;
-    u32 INIT_PHS;
-    u32 DWINPOS;
-    u32 DWINSZ;
-    u32 SWIDTH;
-    u32 SWIDTHSW;
-    u32 SHEIGHT;
-    u32 YRGBSCALE;
-    u32 UVSCALE;
-    u32 OCLRC0;
-    u32 OCLRC1;
-    u32 DCLRKV;
-    u32 DCLRKM;
-    u32 SCLRKVH;
-    u32 SCLRKVL;
-    u32 SCLRKEN;
-    u32 OCONFIG;
-    u32 OCMD;
-    u32 RESERVED1; /* 0x6C */
-    u32 OSTART_0Y;
-    u32 OSTART_1Y;
-    u32 OSTART_0U;
-    u32 OSTART_0V;
-    u32 OSTART_1U;
-    u32 OSTART_1V;
-    u32 OTILEOFF_0Y;
-    u32 OTILEOFF_1Y;
-    u32 OTILEOFF_0U;
-    u32 OTILEOFF_0V;
-    u32 OTILEOFF_1U;
-    u32 OTILEOFF_1V;
-    u32 FASTHSCALE; /* 0xA0 */
-    u32 UVSCALEV; /* 0xA4 */
-    u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */
-    u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */
-    u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES];
-    u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */
-    u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES];
-    u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */
-    u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES];
-    u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */
-    u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
+	u32 OBUF_0Y;
+	u32 OBUF_1Y;
+	u32 OBUF_0U;
+	u32 OBUF_0V;
+	u32 OBUF_1U;
+	u32 OBUF_1V;
+	u32 OSTRIDE;
+	u32 YRGB_VPH;
+	u32 UV_VPH;
+	u32 HORZ_PH;
+	u32 INIT_PHS;
+	u32 DWINPOS;
+	u32 DWINSZ;
+	u32 SWIDTH;
+	u32 SWIDTHSW;
+	u32 SHEIGHT;
+	u32 YRGBSCALE;
+	u32 UVSCALE;
+	u32 OCLRC0;
+	u32 OCLRC1;
+	u32 DCLRKV;
+	u32 DCLRKM;
+	u32 SCLRKVH;
+	u32 SCLRKVL;
+	u32 SCLRKEN;
+	u32 OCONFIG;
+	u32 OCMD;
+	u32 RESERVED1; /* 0x6C */
+	u32 OSTART_0Y;
+	u32 OSTART_1Y;
+	u32 OSTART_0U;
+	u32 OSTART_0V;
+	u32 OSTART_1U;
+	u32 OSTART_1V;
+	u32 OTILEOFF_0Y;
+	u32 OTILEOFF_1Y;
+	u32 OTILEOFF_0U;
+	u32 OTILEOFF_0V;
+	u32 OTILEOFF_1U;
+	u32 OTILEOFF_1V;
+	u32 FASTHSCALE; /* 0xA0 */
+	u32 UVSCALEV; /* 0xA4 */
+	u32 RESERVEDC[(0x200 - 0xA8) / 4]; /* 0xA8 - 0x1FC */
+	u16 Y_VCOEFS[N_VERT_Y_TAPS * N_PHASES]; /* 0x200 */
+	u16 RESERVEDD[0x100 / 2 - N_VERT_Y_TAPS * N_PHASES];
+	u16 Y_HCOEFS[N_HORIZ_Y_TAPS * N_PHASES]; /* 0x300 */
+	u16 RESERVEDE[0x200 / 2 - N_HORIZ_Y_TAPS * N_PHASES];
+	u16 UV_VCOEFS[N_VERT_UV_TAPS * N_PHASES]; /* 0x500 */
+	u16 RESERVEDF[0x100 / 2 - N_VERT_UV_TAPS * N_PHASES];
+	u16 UV_HCOEFS[N_HORIZ_UV_TAPS * N_PHASES]; /* 0x600 */
+	u16 RESERVEDG[0x100 / 2 - N_HORIZ_UV_TAPS * N_PHASES];
 };
 
 struct intel_overlay {
@@ -192,7 +192,7 @@ struct intel_overlay {
 static struct overlay_registers *
 intel_overlay_map_regs(struct intel_overlay *overlay)
 {
-        drm_i915_private_t *dev_priv = overlay->dev->dev_private;
+	drm_i915_private_t *dev_priv = overlay->dev->dev_private;
 	struct overlay_registers *regs;
 
 	if (OVERLAY_NEEDS_PHYSICAL(overlay->dev))
@@ -264,7 +264,7 @@ i830_activate_pipe_a(struct drm_device *dev)
 
 	mode = drm_mode_duplicate(dev, &vesa_640x480);
 	drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
-	if(!drm_crtc_helper_set_mode(&crtc->base, mode,
+	if (!drm_crtc_helper_set_mode(&crtc->base, mode,
 				       crtc->base.x, crtc->base.y,
 				       crtc->base.fb))
 		return 0;
@@ -332,7 +332,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 				  bool load_polyphase_filter)
 {
 	struct drm_device *dev = overlay->dev;
-        drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_i915_gem_request *request;
 	u32 flip_addr = overlay->flip_addr;
 	u32 tmp;
@@ -359,7 +359,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
 	}
 	OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
 	OUT_RING(flip_addr);
-        ADVANCE_LP_RING();
+	ADVANCE_LP_RING();
 
 	ret = i915_add_request(LP_RING(dev_priv), NULL, request);
 	if (ret) {
@@ -583,7 +583,7 @@ static u32 calc_swidthsw(struct drm_device *dev, u32 offset, u32 width)
 	ret = ((offset + width + mask) >> shift) - (offset >> shift);
 	if (!IS_GEN2(dev))
 		ret <<= 1;
-	ret -=1;
+	ret -= 1;
 	return ret << 2;
 }
 
@@ -817,7 +817,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
 	regs->SWIDTHSW = calc_swidthsw(overlay->dev,
 				       params->offset_Y, tmp_width);
 	regs->SHEIGHT = params->src_h;
-	regs->OBUF_0Y = new_bo->gtt_offset + params-> offset_Y;
+	regs->OBUF_0Y = new_bo->gtt_offset + params->offset_Y;
 	regs->OSTRIDE = params->stride_Y;
 
 	if (params->format & I915_OVERLAY_YUV_PLANAR) {
@@ -917,7 +917,7 @@ static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
 	 * line with the intel documentation for the i965
 	 */
 	if (INTEL_INFO(dev)->gen >= 4) {
-	       	/* on i965 use the PGM reg to read out the autoscaler values */
+		/* on i965 use the PGM reg to read out the autoscaler values */
 		ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
 	} else {
 		if (pfit_control & VERT_AUTO_SCALE)
@@ -1098,7 +1098,7 @@ static int intel_panel_fitter_pipe(struct drm_device *dev)
 }
 
 int intel_overlay_put_image(struct drm_device *dev, void *data,
-                            struct drm_file *file_priv)
+			    struct drm_file *file_priv)
 {
 	struct drm_intel_overlay_put_image *put_image_rec = data;
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -1301,10 +1301,10 @@ static int check_gamma(struct drm_intel_overlay_attrs *attrs)
 }
 
 int intel_overlay_attrs(struct drm_device *dev, void *data,
-                        struct drm_file *file_priv)
+			struct drm_file *file_priv)
 {
 	struct drm_intel_overlay_attrs *attrs = data;
-        drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_overlay *overlay;
 	struct overlay_registers *regs;
 	int ret;
@@ -1393,7 +1393,7 @@ out_unlock:
 
 void intel_setup_overlay(struct drm_device *dev)
 {
-        drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_overlay *overlay;
 	struct drm_i915_gem_object *reg_bo;
 	struct overlay_registers *regs;
@@ -1421,24 +1421,24 @@ void intel_setup_overlay(struct drm_device *dev)
 		ret = i915_gem_attach_phys_object(dev, reg_bo,
 						  I915_GEM_PHYS_OVERLAY_REGS,
 						  PAGE_SIZE);
-                if (ret) {
-                        DRM_ERROR("failed to attach phys overlay regs\n");
-                        goto out_free_bo;
-                }
+		if (ret) {
+			DRM_ERROR("failed to attach phys overlay regs\n");
+			goto out_free_bo;
+		}
 		overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
 	} else {
 		ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true);
 		if (ret) {
-                        DRM_ERROR("failed to pin overlay register bo\n");
-                        goto out_free_bo;
-                }
+			DRM_ERROR("failed to pin overlay register bo\n");
+			goto out_free_bo;
+		}
 		overlay->flip_addr = reg_bo->gtt_offset;
 
 		ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
 		if (ret) {
-                        DRM_ERROR("failed to move overlay register bo into the GTT\n");
-                        goto out_unpin_bo;
-                }
+			DRM_ERROR("failed to move overlay register bo into the GTT\n");
+			goto out_unpin_bo;
+		}
 	}
 
 	/* init all values */
@@ -1525,7 +1525,7 @@ static void intel_overlay_unmap_regs_atomic(struct intel_overlay *overlay,
 struct intel_overlay_error_state *
 intel_overlay_capture_error_state(struct drm_device *dev)
 {
-        drm_i915_private_t *dev_priv = dev->dev_private;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct intel_overlay *overlay = dev_priv->overlay;
 	struct intel_overlay_error_state *error;
 	struct overlay_registers __iomem *regs;

+ 16 - 11
drivers/gpu/drm/i915/intel_panel.c

@@ -84,7 +84,7 @@ intel_pch_panel_fitting(struct drm_device *dev,
 			if (scaled_width > scaled_height) { /* pillar */
 				width = scaled_height / mode->vdisplay;
 				if (width & 1)
-				    	width++;
+					width++;
 				x = (adjusted_mode->hdisplay - width + 1) / 2;
 				y = 0;
 				height = adjusted_mode->vdisplay;
@@ -206,7 +206,7 @@ u32 intel_panel_get_backlight(struct drm_device *dev)
 		if (IS_PINEVIEW(dev))
 			val >>= 1;
 
-		if (is_backlight_combination_mode(dev)){
+		if (is_backlight_combination_mode(dev)) {
 			u8 lbpc;
 
 			val &= ~1;
@@ -226,7 +226,7 @@ static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
 	I915_WRITE(BLC_PWM_CPU_CTL, val | level);
 }
 
-void intel_panel_set_backlight(struct drm_device *dev, u32 level)
+static void intel_panel_actually_set_backlight(struct drm_device *dev, u32 level)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 tmp;
@@ -236,7 +236,7 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
 	if (HAS_PCH_SPLIT(dev))
 		return intel_pch_panel_set_backlight(dev, level);
 
-	if (is_backlight_combination_mode(dev)){
+	if (is_backlight_combination_mode(dev)) {
 		u32 max = intel_panel_get_max_backlight(dev);
 		u8 lbpc;
 
@@ -254,16 +254,21 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level)
 	I915_WRITE(BLC_PWM_CTL, tmp | level);
 }
 
-void intel_panel_disable_backlight(struct drm_device *dev)
+void intel_panel_set_backlight(struct drm_device *dev, u32 level)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (dev_priv->backlight_enabled) {
-		dev_priv->backlight_level = intel_panel_get_backlight(dev);
-		dev_priv->backlight_enabled = false;
-	}
+	dev_priv->backlight_level = level;
+	if (dev_priv->backlight_enabled)
+		intel_panel_actually_set_backlight(dev, level);
+}
+
+void intel_panel_disable_backlight(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	intel_panel_set_backlight(dev, 0);
+	dev_priv->backlight_enabled = false;
+	intel_panel_actually_set_backlight(dev, 0);
 }
 
 void intel_panel_enable_backlight(struct drm_device *dev)
@@ -273,8 +278,8 @@ void intel_panel_enable_backlight(struct drm_device *dev)
 	if (dev_priv->backlight_level == 0)
 		dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
 
-	intel_panel_set_backlight(dev, dev_priv->backlight_level);
 	dev_priv->backlight_enabled = true;
+	intel_panel_actually_set_backlight(dev, dev_priv->backlight_level);
 }
 
 static void intel_panel_init_backlight(struct drm_device *dev)
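The intel_panel.c rework above separates the requested backlight level from the hardware write: intel_panel_set_backlight() always records the level but only programs the PWM while the backlight is enabled, and enable/disable flip the flag before calling the new intel_panel_actually_set_backlight() helper. A minimal sketch of that record-then-maybe-write pattern, with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

/* Simplified backlight state: requested level plus an enabled flag. */
struct demo_backlight {
	unsigned int level;
	bool enabled;
};

static void demo_write_hw(unsigned int level)
{
	printf("PWM register <- %u\n", level);   /* the real hardware write */
}

/* Always remember the request; only touch hardware while enabled. */
static void demo_set_backlight(struct demo_backlight *bl, unsigned int level)
{
	bl->level = level;
	if (bl->enabled)
		demo_write_hw(level);
}

static void demo_disable(struct demo_backlight *bl)
{
	bl->enabled = false;        /* keep bl->level so it can be restored */
	demo_write_hw(0);
}

static void demo_enable(struct demo_backlight *bl)
{
	bl->enabled = true;
	demo_write_hw(bl->level);
}

int main(void)
{
	struct demo_backlight bl = { .level = 80, .enabled = true };

	demo_set_backlight(&bl, 120);  /* user request, written immediately     */
	demo_disable(&bl);             /* panel off: PWM to 0, level kept       */
	demo_set_backlight(&bl, 200);  /* request recorded, hardware untouched  */
	demo_enable(&bl);              /* panel on: last request restored       */
	return 0;
}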

+ 273 - 96
drivers/gpu/drm/i915/intel_ringbuffer.c

@@ -34,6 +34,16 @@
 #include "i915_trace.h"
 #include "intel_drv.h"
 
+/*
+ * 965+ support PIPE_CONTROL commands, which provide finer grained control
+ * over cache flushing.
+ */
+struct pipe_control {
+	struct drm_i915_gem_object *obj;
+	volatile u32 *cpu_page;
+	u32 gtt_offset;
+};
+
 static inline int ring_space(struct intel_ring_buffer *ring)
 {
 	int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
@@ -123,6 +133,118 @@ render_ring_flush(struct intel_ring_buffer *ring,
 	return 0;
 }
 
+/**
+ * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
+ * implementing two workarounds on gen6.  From section 1.4.7.1
+ * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
+ *
+ * [DevSNB-C+{W/A}] Before any depth stall flush (including those
+ * produced by non-pipelined state commands), software needs to first
+ * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
+ * 0.
+ *
+ * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
+ * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
+ *
+ * And the workaround for these two requires this workaround first:
+ *
+ * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
+ * BEFORE the pipe-control with a post-sync op and no write-cache
+ * flushes.
+ *
+ * And this last workaround is tricky because of the requirements on
+ * that bit.  From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
+ * volume 2 part 1:
+ *
+ *     "1 of the following must also be set:
+ *      - Render Target Cache Flush Enable ([12] of DW1)
+ *      - Depth Cache Flush Enable ([0] of DW1)
+ *      - Stall at Pixel Scoreboard ([1] of DW1)
+ *      - Depth Stall ([13] of DW1)
+ *      - Post-Sync Operation ([13] of DW1)
+ *      - Notify Enable ([8] of DW1)"
+ *
+ * The cache flushes require the workaround flush that triggered this
+ * one, so we can't use it.  Depth stall would trigger the same.
+ * Post-sync nonzero is what triggered this second workaround, so we
+ * can't use that one either.  Notify enable is IRQs, which aren't
+ * really our business.  That leaves only stall at scoreboard.
+ */
+static int
+intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
+{
+	struct pipe_control *pc = ring->private;
+	u32 scratch_addr = pc->gtt_offset + 128;
+	int ret;
+
+
+	ret = intel_ring_begin(ring, 6);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
+			PIPE_CONTROL_STALL_AT_SCOREBOARD);
+	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
+	intel_ring_emit(ring, 0); /* low dword */
+	intel_ring_emit(ring, 0); /* high dword */
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
+	ret = intel_ring_begin(ring, 6);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
+	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, 0);
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
+static int
+gen6_render_ring_flush(struct intel_ring_buffer *ring,
+                         u32 invalidate_domains, u32 flush_domains)
+{
+	u32 flags = 0;
+	struct pipe_control *pc = ring->private;
+	u32 scratch_addr = pc->gtt_offset + 128;
+	int ret;
+
+	/* Force SNB workarounds for PIPE_CONTROL flushes */
+	intel_emit_post_sync_nonzero_flush(ring);
+
+	/* Just flush everything.  Experiments have shown that reducing the
+	 * number of bits based on the write domains has little performance
+	 * impact.
+	 */
+	flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
+	flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
+	flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
+	flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
+	flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
+	flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
+	flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
+
+	ret = intel_ring_begin(ring, 6);
+	if (ret)
+		return ret;
+
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
+	intel_ring_emit(ring, flags);
+	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
+	intel_ring_emit(ring, 0); /* lower dword */
+	intel_ring_emit(ring, 0); /* uppwer dword */
+	intel_ring_emit(ring, MI_NOOP);
+	intel_ring_advance(ring);
+
+	return 0;
+}
+
 static void ring_write_tail(struct intel_ring_buffer *ring,
 			    u32 value)
 {
@@ -206,16 +328,6 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 	return 0;
 }
 
-/*
- * 965+ support PIPE_CONTROL commands, which provide finer grained control
- * over cache flushing.
- */
-struct pipe_control {
-	struct drm_i915_gem_object *obj;
-	volatile u32 *cpu_page;
-	u32 gtt_offset;
-};
-
 static int
 init_pipe_control(struct intel_ring_buffer *ring)
 {
@@ -296,8 +408,7 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 				   GFX_MODE_ENABLE(GFX_REPLAY_MODE));
 	}
 
-	if (INTEL_INFO(dev)->gen >= 6) {
-	} else if (IS_GEN5(dev)) {
+	if (INTEL_INFO(dev)->gen >= 5) {
 		ret = init_pipe_control(ring);
 		if (ret)
 			return ret;
@@ -315,83 +426,131 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
 }
 
 static void
-update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
+update_mboxes(struct intel_ring_buffer *ring,
+	    u32 seqno,
+	    u32 mmio_offset)
 {
-	struct drm_device *dev = ring->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int id;
-
-	/*
-	 * cs -> 1 = vcs, 0 = bcs
-	 * vcs -> 1 = bcs, 0 = cs,
-	 * bcs -> 1 = cs, 0 = vcs.
-	 */
-	id = ring - dev_priv->ring;
-	id += 2 - i;
-	id %= 3;
-
-	intel_ring_emit(ring,
-			MI_SEMAPHORE_MBOX |
-			MI_SEMAPHORE_REGISTER |
-			MI_SEMAPHORE_UPDATE);
+	intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
+			      MI_SEMAPHORE_GLOBAL_GTT |
+			      MI_SEMAPHORE_REGISTER |
+			      MI_SEMAPHORE_UPDATE);
 	intel_ring_emit(ring, seqno);
-	intel_ring_emit(ring,
-			RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
+	intel_ring_emit(ring, mmio_offset);
 }
 
+/**
+ * gen6_add_request - Update the semaphore mailbox registers
+ * 
+ * @ring - ring that is adding a request
+ * @seqno - return seqno stuck into the ring
+ *
+ * Update the mailbox registers in the *other* rings with the current seqno.
+ * This acts like a signal in the canonical semaphore.
+ */
 static int
 gen6_add_request(struct intel_ring_buffer *ring,
-		 u32 *result)
+		 u32 *seqno)
 {
-	u32 seqno;
+	u32 mbox1_reg;
+	u32 mbox2_reg;
 	int ret;
 
 	ret = intel_ring_begin(ring, 10);
 	if (ret)
 		return ret;
 
-	seqno = i915_gem_get_seqno(ring->dev);
-	update_semaphore(ring, 0, seqno);
-	update_semaphore(ring, 1, seqno);
+	mbox1_reg = ring->signal_mbox[0];
+	mbox2_reg = ring->signal_mbox[1];
 
+	*seqno = i915_gem_get_seqno(ring->dev);
+
+	update_mboxes(ring, *seqno, mbox1_reg);
+	update_mboxes(ring, *seqno, mbox2_reg);
 	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
 	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
-	intel_ring_emit(ring, seqno);
+	intel_ring_emit(ring, *seqno);
 	intel_ring_emit(ring, MI_USER_INTERRUPT);
 	intel_ring_advance(ring);
 
-	*result = seqno;
 	return 0;
 }
 
-int
-intel_ring_sync(struct intel_ring_buffer *ring,
-		struct intel_ring_buffer *to,
+/**
+ * intel_ring_sync - sync the waiter to the signaller on seqno
+ *
+ * @waiter - ring that is waiting
+ * @signaller - ring which has, or will signal
+ * @seqno - seqno which the waiter will block on
+ */
+static int
+intel_ring_sync(struct intel_ring_buffer *waiter,
+		struct intel_ring_buffer *signaller,
+		int ring,
 		u32 seqno)
 {
 	int ret;
+	u32 dw1 = MI_SEMAPHORE_MBOX |
+		  MI_SEMAPHORE_COMPARE |
+		  MI_SEMAPHORE_REGISTER;
 
-	ret = intel_ring_begin(ring, 4);
+	ret = intel_ring_begin(waiter, 4);
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring,
-			MI_SEMAPHORE_MBOX |
-			MI_SEMAPHORE_REGISTER |
-			intel_ring_sync_index(ring, to) << 17 |
-			MI_SEMAPHORE_COMPARE);
-	intel_ring_emit(ring, seqno);
-	intel_ring_emit(ring, 0);
-	intel_ring_emit(ring, MI_NOOP);
-	intel_ring_advance(ring);
+	intel_ring_emit(waiter, dw1 | signaller->semaphore_register[ring]);
+	intel_ring_emit(waiter, seqno);
+	intel_ring_emit(waiter, 0);
+	intel_ring_emit(waiter, MI_NOOP);
+	intel_ring_advance(waiter);
 
 	return 0;
 }
 
+/* VCS->RCS (RVSYNC) or BCS->RCS (RBSYNC) */
+int
+render_ring_sync_to(struct intel_ring_buffer *waiter,
+		    struct intel_ring_buffer *signaller,
+		    u32 seqno)
+{
+	WARN_ON(signaller->semaphore_register[RCS] == MI_SEMAPHORE_SYNC_INVALID);
+	return intel_ring_sync(waiter,
+			       signaller,
+			       RCS,
+			       seqno);
+}
+
+/* RCS->VCS (VRSYNC) or BCS->VCS (VBSYNC) */
+int
+gen6_bsd_ring_sync_to(struct intel_ring_buffer *waiter,
+		      struct intel_ring_buffer *signaller,
+		      u32 seqno)
+{
+	WARN_ON(signaller->semaphore_register[VCS] == MI_SEMAPHORE_SYNC_INVALID);
+	return intel_ring_sync(waiter,
+			       signaller,
+			       VCS,
+			       seqno);
+}
+
+/* RCS->BCS (BRSYNC) or VCS->BCS (BVSYNC) */
+int
+gen6_blt_ring_sync_to(struct intel_ring_buffer *waiter,
+		      struct intel_ring_buffer *signaller,
+		      u32 seqno)
+{
+	WARN_ON(signaller->semaphore_register[BCS] == MI_SEMAPHORE_SYNC_INVALID);
+	return intel_ring_sync(waiter,
+			       signaller,
+			       BCS,
+			       seqno);
+}
+
+
+
 #define PIPE_CONTROL_FLUSH(ring__, addr__)					\
 do {									\
-	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |		\
-		 PIPE_CONTROL_DEPTH_STALL | 2);				\
+	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |		\
+		 PIPE_CONTROL_DEPTH_STALL);				\
 	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);			\
 	intel_ring_emit(ring__, 0);							\
 	intel_ring_emit(ring__, 0);							\
@@ -419,8 +578,9 @@ pc_render_add_request(struct intel_ring_buffer *ring,
 	if (ret)
 		return ret;
 
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
-			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
+			PIPE_CONTROL_WRITE_FLUSH |
+			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
 	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
 	intel_ring_emit(ring, seqno);
 	intel_ring_emit(ring, 0);
@@ -435,8 +595,9 @@ pc_render_add_request(struct intel_ring_buffer *ring,
 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
 	scratch_addr += 128;
 	PIPE_CONTROL_FLUSH(ring, scratch_addr);
-	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
-			PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
+	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
+			PIPE_CONTROL_WRITE_FLUSH |
+			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
 			PIPE_CONTROL_NOTIFY);
 	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
 	intel_ring_emit(ring, seqno);
@@ -1026,7 +1187,12 @@ static const struct intel_ring_buffer render_ring = {
 	.irq_get		= render_ring_get_irq,
 	.irq_put		= render_ring_put_irq,
 	.dispatch_execbuffer	= render_ring_dispatch_execbuffer,
-       .cleanup			= render_ring_cleanup,
+	.cleanup		= render_ring_cleanup,
+	.sync_to		= render_ring_sync_to,
+	.semaphore_register	= {MI_SEMAPHORE_SYNC_INVALID,
+				   MI_SEMAPHORE_SYNC_RV,
+				   MI_SEMAPHORE_SYNC_RB},
+	.signal_mbox		= {GEN6_VRSYNC, GEN6_BRSYNC},
 };
 
 /* ring buffer for bit-stream decoder */
@@ -1050,23 +1216,23 @@ static const struct intel_ring_buffer bsd_ring = {
 static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
 				     u32 value)
 {
-       drm_i915_private_t *dev_priv = ring->dev->dev_private;
+	drm_i915_private_t *dev_priv = ring->dev->dev_private;
 
        /* Every tail move must follow the sequence below */
-       I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
-	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
-	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
-       I915_WRITE(GEN6_BSD_RNCID, 0x0);
-
-       if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
-                               GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
-                       50))
-               DRM_ERROR("timed out waiting for IDLE Indicator\n");
-
-       I915_WRITE_TAIL(ring, value);
-       I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
-	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
-	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
+	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+		GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
+		GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
+	I915_WRITE(GEN6_BSD_RNCID, 0x0);
+
+	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
+		GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
+		50))
+	DRM_ERROR("timed out waiting for IDLE Indicator\n");
+
+	I915_WRITE_TAIL(ring, value);
+	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+		GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
+		GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
 }
 
 static int gen6_ring_flush(struct intel_ring_buffer *ring,
@@ -1094,18 +1260,18 @@ static int
 gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
 			      u32 offset, u32 len)
 {
-       int ret;
+	int ret;
 
-       ret = intel_ring_begin(ring, 2);
-       if (ret)
-	       return ret;
+	ret = intel_ring_begin(ring, 2);
+	if (ret)
+		return ret;
 
-       intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
-       /* bit0-7 is the length on GEN6+ */
-       intel_ring_emit(ring, offset);
-       intel_ring_advance(ring);
+	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
+	/* bit0-7 is the length on GEN6+ */
+	intel_ring_emit(ring, offset);
+	intel_ring_advance(ring);
 
-       return 0;
+	return 0;
 }
 
 static bool
@@ -1154,6 +1320,11 @@ static const struct intel_ring_buffer gen6_bsd_ring = {
 	.irq_get		= gen6_bsd_ring_get_irq,
 	.irq_put		= gen6_bsd_ring_put_irq,
 	.dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
+	.sync_to		= gen6_bsd_ring_sync_to,
+	.semaphore_register	= {MI_SEMAPHORE_SYNC_VR,
+				   MI_SEMAPHORE_SYNC_INVALID,
+				   MI_SEMAPHORE_SYNC_VB},
+	.signal_mbox		= {GEN6_RVSYNC, GEN6_BVSYNC},
 };
 
 /* Blitter support (SandyBridge+) */
@@ -1272,19 +1443,24 @@ static void blt_ring_cleanup(struct intel_ring_buffer *ring)
 }
 
 static const struct intel_ring_buffer gen6_blt_ring = {
-       .name			= "blt ring",
-       .id			= RING_BLT,
-       .mmio_base		= BLT_RING_BASE,
-       .size			= 32 * PAGE_SIZE,
-       .init			= blt_ring_init,
-       .write_tail		= ring_write_tail,
-       .flush			= blt_ring_flush,
-       .add_request		= gen6_add_request,
-       .get_seqno		= ring_get_seqno,
-       .irq_get			= blt_ring_get_irq,
-       .irq_put			= blt_ring_put_irq,
-       .dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
-       .cleanup			= blt_ring_cleanup,
+	.name			= "blt ring",
+	.id			= RING_BLT,
+	.mmio_base		= BLT_RING_BASE,
+	.size			= 32 * PAGE_SIZE,
+	.init			= blt_ring_init,
+	.write_tail		= ring_write_tail,
+	.flush			= blt_ring_flush,
+	.add_request		= gen6_add_request,
+	.get_seqno		= ring_get_seqno,
+	.irq_get		= blt_ring_get_irq,
+	.irq_put		= blt_ring_put_irq,
+	.dispatch_execbuffer	= gen6_ring_dispatch_execbuffer,
+	.cleanup		= blt_ring_cleanup,
+	.sync_to		= gen6_blt_ring_sync_to,
+	.semaphore_register	= {MI_SEMAPHORE_SYNC_BR,
+				   MI_SEMAPHORE_SYNC_BV,
+				   MI_SEMAPHORE_SYNC_INVALID},
+	.signal_mbox		= {GEN6_RBSYNC, GEN6_VBSYNC},
 };
 
 int intel_init_render_ring_buffer(struct drm_device *dev)
@@ -1295,6 +1471,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 	*ring = render_ring;
 	if (INTEL_INFO(dev)->gen >= 6) {
 		ring->add_request = gen6_add_request;
+		ring->flush = gen6_render_ring_flush;
 		ring->irq_get = gen6_render_ring_get_irq;
 		ring->irq_put = gen6_render_ring_put_irq;
 	} else if (IS_GEN5(dev)) {
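
The hunks above replace update_semaphore()'s modular ring-index arithmetic with explicit mailbox tables: gen6_add_request() writes the new seqno into the two registers listed in the ring's signal_mbox[], and intel_ring_sync() makes a waiter block on the signaller's mailbox slot for that waiter. The standalone C model below only illustrates that signal/wait pairing; the ring names are reused from the patch, but everything else (including the greater-or-equal compare) is an assumption of the sketch, not kernel code:

#include <stdint.h>
#include <stdio.h>

enum { RCS, VCS, BCS, NUM_RINGS };

/* mbox[signaller][waiter]: last seqno 'signaller' advertised to 'waiter' */
static uint32_t mbox[NUM_RINGS][NUM_RINGS];

/* Analogue of gen6_add_request()/update_mboxes(): tell the other rings. */
static void signal_seqno(int signaller, uint32_t seqno)
{
	for (int waiter = 0; waiter < NUM_RINGS; waiter++)
		if (waiter != signaller)
			mbox[signaller][waiter] = seqno;
}

/* Analogue of intel_ring_sync(): the waiter may proceed once the
 * signaller's mailbox for this waiter has reached seqno (modeled
 * here as a greater-or-equal check). */
static int seqno_passed(int waiter, int signaller, uint32_t seqno)
{
	return mbox[signaller][waiter] >= seqno;
}

int main(void)
{
	signal_seqno(RCS, 42);	/* render ring retires a request */

	printf("BCS may proceed past RCS seqno 42: %s\n",
	       seqno_passed(BCS, RCS, 42) ? "yes" : "no");
	printf("BCS may proceed past RCS seqno 43: %s\n",
	       seqno_passed(BCS, RCS, 43) ? "yes" : "no");
	return 0;
}

In the driver the comparison is carried out by the command streamer itself (MI_SEMAPHORE_MBOX with MI_SEMAPHORE_COMPARE), not by software polling as modeled here.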

+ 9 - 7
drivers/gpu/drm/i915/intel_ringbuffer.h

@@ -2,10 +2,10 @@
 #define _INTEL_RINGBUFFER_H_
 
 enum {
-    RCS = 0x0,
-    VCS,
-    BCS,
-    I915_NUM_RINGS,
+	RCS = 0x0,
+	VCS,
+	BCS,
+	I915_NUM_RINGS,
 };
 
 struct  intel_hw_status_page {
@@ -75,7 +75,12 @@ struct  intel_ring_buffer {
 	int		(*dispatch_execbuffer)(struct intel_ring_buffer *ring,
 					       u32 offset, u32 length);
 	void		(*cleanup)(struct intel_ring_buffer *ring);
+	int		(*sync_to)(struct intel_ring_buffer *ring,
+				   struct intel_ring_buffer *to,
+				   u32 seqno);
 
+	u32		semaphore_register[3]; /*our mbox written by others */
+	u32		signal_mbox[2]; /* mboxes this ring signals to */
 	/**
 	 * List of objects currently involved in rendering from the
 	 * ringbuffer.
@@ -180,9 +185,6 @@ static inline void intel_ring_emit(struct intel_ring_buffer *ring,
 void intel_ring_advance(struct intel_ring_buffer *ring);
 
 u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
-int intel_ring_sync(struct intel_ring_buffer *ring,
-		    struct intel_ring_buffer *to,
-		    u32 seqno);
 
 int intel_init_render_ring_buffer(struct drm_device *dev);
 int intel_init_bsd_ring_buffer(struct drm_device *dev);
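
A minimal userspace mock of how the sync_to hook declared above is meant to be called: the ring that has to wait invokes its own sync_to with the signalling ring and the seqno it depends on. The names below (toy_ring, toy_sync_to) are placeholders for this sketch only, not driver code:

#include <stdint.h>
#include <stdio.h>

struct toy_ring {
	const char *name;
	int (*sync_to)(struct toy_ring *waiter, struct toy_ring *signaller,
		       uint32_t seqno);
};

/* Stub standing in for render_ring_sync_to() and friends; a real
 * implementation emits MI_SEMAPHORE_MBOX commands on the waiter. */
static int toy_sync_to(struct toy_ring *waiter, struct toy_ring *signaller,
		       uint32_t seqno)
{
	printf("%s waits until %s reaches seqno %u\n",
	       waiter->name, signaller->name, (unsigned int)seqno);
	return 0;
}

int main(void)
{
	struct toy_ring render = { "render", toy_sync_to };
	struct toy_ring blt    = { "blt",    toy_sync_to };

	/* Blitter work consumes a buffer last written by the render ring. */
	return blt.sync_to(&blt, &render, 42);
}

Which mailbox the waiter actually programs comes from the signaller's semaphore_register[] slot for the waiting ring, which is why each ring keeps MI_SEMAPHORE_SYNC_INVALID in its own slot.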

+ 123 - 126
drivers/gpu/drm/i915/intel_sdvo.c

@@ -43,7 +43,7 @@
 #define SDVO_TV_MASK   (SDVO_OUTPUT_CVBS0 | SDVO_OUTPUT_SVID0)
 
 #define SDVO_OUTPUT_MASK (SDVO_TMDS_MASK | SDVO_RGB_MASK | SDVO_LVDS_MASK |\
-                         SDVO_TV_MASK)
+			SDVO_TV_MASK)
 
 #define IS_TV(c)	(c->output_flag & SDVO_TV_MASK)
 #define IS_TMDS(c)	(c->output_flag & SDVO_TMDS_MASK)
@@ -288,117 +288,117 @@ static const struct _sdvo_cmd_name {
 	u8 cmd;
 	const char *name;
 } sdvo_cmd_names[] = {
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
-
-    /* Add the op code for SDVO enhancements */
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
-
-    /* HDMI op code */
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
-    SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_RESET),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DEVICE_CAPS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FIRMWARE_REV),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TRAINED_INPUTS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_OUTPUTS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_OUTPUTS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_IN_OUT_MAP),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_IN_OUT_MAP),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ATTACHED_DISPLAYS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HOT_PLUG_SUPPORT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ACTIVE_HOT_PLUG),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ACTIVE_HOT_PLUG),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_INPUT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TARGET_OUTPUT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART1),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_TIMINGS_PART2),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART2),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_INPUT_TIMINGS_PART1),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART1),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OUTPUT_TIMINGS_PART2),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART1),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_TIMINGS_PART2),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_CREATE_PREFERRED_INPUT_TIMING),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART1),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PREFERRED_INPUT_TIMING_PART2),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_INPUT_PIXEL_CLOCK_RANGE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OUTPUT_PIXEL_CLOCK_RANGE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_CLOCK_RATE_MULTS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CLOCK_RATE_MULT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CLOCK_RATE_MULT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_TV_FORMATS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_FORMAT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_FORMAT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_POWER_STATES),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_POWER_STATE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODER_POWER_STATE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DISPLAY_POWER_STATE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTROL_BUS_SWITCH),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS),
+
+	/* Add the op code for SDVO enhancements */
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HPOS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HPOS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HPOS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_VPOS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_VPOS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_VPOS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SATURATION),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SATURATION),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SATURATION),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_HUE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HUE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HUE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_CONTRAST),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_CONTRAST),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_CONTRAST),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_BRIGHTNESS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_BRIGHTNESS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_BRIGHTNESS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_H),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_H),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_H),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_OVERSCAN_V),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_OVERSCAN_V),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_OVERSCAN_V),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_ADAPTIVE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_ADAPTIVE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_ADAPTIVE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_FLICKER_FILTER_2D),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_FLICKER_FILTER_2D),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_FLICKER_FILTER_2D),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_SHARPNESS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SHARPNESS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_SHARPNESS),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_DOT_CRAWL),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_DOT_CRAWL),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_CHROMA_FILTER),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_CHROMA_FILTER),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_CHROMA_FILTER),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_MAX_TV_LUMA_FILTER),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_TV_LUMA_FILTER),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_TV_LUMA_FILTER),
+
+	/* HDMI op code */
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_SUPP_ENCODE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_ENCODE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_ENCODE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_PIXEL_REPLI),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_PIXEL_REPLI),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY_CAP),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_COLORIMETRY),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_COLORIMETRY),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_ENCRYPT_PREFER),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_AUDIO_STAT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_AUDIO_STAT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INDEX),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_INDEX),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_INFO),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_AV_SPLIT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_AV_SPLIT),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_TXRATE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_TXRATE),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_SET_HBUF_DATA),
+	SDVO_CMD_NAME_ENTRY(SDVO_CMD_GET_HBUF_DATA),
 };
 
 #define IS_SDVOB(reg)	(reg == SDVOB || reg == PCH_SDVOB)
@@ -1232,8 +1232,7 @@ static bool
 intel_sdvo_multifunc_encoder(struct intel_sdvo *intel_sdvo)
 {
 	/* Is there more than one type of output? */
-	int caps = intel_sdvo->caps.output_flags & 0xf;
-	return caps & -caps;
+	return hweight16(intel_sdvo->caps.output_flags) > 1;
 }
 
 static struct edid *
@@ -1254,7 +1253,7 @@ intel_sdvo_get_analog_edid(struct drm_connector *connector)
 }
 
 enum drm_connector_status
-intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
+intel_sdvo_tmds_sink_detect(struct drm_connector *connector)
 {
 	struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
 	enum drm_connector_status status;
@@ -1349,7 +1348,7 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
 	if ((intel_sdvo_connector->output_flag & response) == 0)
 		ret = connector_status_disconnected;
 	else if (IS_TMDS(intel_sdvo_connector))
-		ret = intel_sdvo_hdmi_sink_detect(connector);
+		ret = intel_sdvo_tmds_sink_detect(connector);
 	else {
 		struct edid *edid;
 
@@ -1896,7 +1895,7 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
 			  struct intel_sdvo *sdvo, u32 reg)
 {
 	struct sdvo_device_mapping *mapping;
-	u8 pin, speed;
+	u8 pin;
 
 	if (IS_SDVOB(reg))
 		mapping = &dev_priv->sdvo_mappings[0];
@@ -1904,18 +1903,16 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
 		mapping = &dev_priv->sdvo_mappings[1];
 
 	pin = GMBUS_PORT_DPB;
-	speed = GMBUS_RATE_1MHZ >> 8;
-	if (mapping->initialized) {
+	if (mapping->initialized)
 		pin = mapping->i2c_pin;
-		speed = mapping->i2c_speed;
-	}
 
 	if (pin < GMBUS_NUM_PORTS) {
 		sdvo->i2c = &dev_priv->gmbus[pin].adapter;
-		intel_gmbus_set_speed(sdvo->i2c, speed);
+		intel_gmbus_set_speed(sdvo->i2c, GMBUS_RATE_1MHZ);
 		intel_gmbus_force_bit(sdvo->i2c, true);
-	} else
+	} else {
 		sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter;
+	}
 }
 
 static bool
@@ -2206,7 +2203,7 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags)
 			      bytes[0], bytes[1]);
 		return false;
 	}
-	intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1);
+	intel_sdvo->base.crtc_mask = (1 << 0) | (1 << 1) | (1 << 2);
 
 	return true;
 }
@@ -2275,7 +2272,7 @@ static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo,
 		DRM_DEBUG_KMS(#name ": max %d, default %d, current %d\n", \
 			      data_value[0], data_value[1], response); \
 	} \
-} while(0)
+} while (0)
 
 static bool
 intel_sdvo_create_enhance_property_tv(struct intel_sdvo *intel_sdvo,
@@ -2442,7 +2439,7 @@ static bool intel_sdvo_create_enhance_property(struct intel_sdvo *intel_sdvo,
 
 	if (IS_TV(intel_sdvo_connector))
 		return intel_sdvo_create_enhance_property_tv(intel_sdvo, intel_sdvo_connector, enhancements.reply);
-	else if(IS_LVDS(intel_sdvo_connector))
+	else if (IS_LVDS(intel_sdvo_connector))
 		return intel_sdvo_create_enhance_property_lvds(intel_sdvo, intel_sdvo_connector, enhancements.reply);
 	else
 		return true;
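
As a standalone illustration of why intel_sdvo_multifunc_encoder() above now uses hweight16(): the old expression (caps & -caps) merely isolates the lowest set bit of the low nibble, so it is non-zero for a single output and ignores outputs above bit 3, whereas a population count answers the "more than one output type?" question directly. popcount16() below stands in for the kernel's hweight16(), and the flag values are arbitrary bit patterns, not real SDVO_OUTPUT_* masks:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's hweight16(): number of set bits in a u16. */
static unsigned int popcount16(uint16_t v)
{
	unsigned int n = 0;

	while (v) {
		v &= v - 1;	/* clear the lowest set bit */
		n++;
	}
	return n;
}

int main(void)
{
	/* one bit set, two low bits set, two bits set above the low nibble */
	const uint16_t flags[] = { 0x0001, 0x0003, 0x0140 };

	for (unsigned int i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) {
		int caps = flags[i] & 0xf;		/* old test */
		int old_multifunc = (caps & -caps) != 0;
		int new_multifunc = popcount16(flags[i]) > 1;

		printf("flags=0x%04x old=%d new=%d\n",
		       (unsigned int)flags[i], old_multifunc, new_multifunc);
	}
	return 0;
}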

+ 279 - 279
drivers/gpu/drm/i915/intel_sdvo_regs.h

@@ -46,63 +46,63 @@
 #define SDVO_OUTPUT_LAST    (14)
 
 struct intel_sdvo_caps {
-    u8 vendor_id;
-    u8 device_id;
-    u8 device_rev_id;
-    u8 sdvo_version_major;
-    u8 sdvo_version_minor;
-    unsigned int sdvo_inputs_mask:2;
-    unsigned int smooth_scaling:1;
-    unsigned int sharp_scaling:1;
-    unsigned int up_scaling:1;
-    unsigned int down_scaling:1;
-    unsigned int stall_support:1;
-    unsigned int pad:1;
-    u16 output_flags;
+	u8 vendor_id;
+	u8 device_id;
+	u8 device_rev_id;
+	u8 sdvo_version_major;
+	u8 sdvo_version_minor;
+	unsigned int sdvo_inputs_mask:2;
+	unsigned int smooth_scaling:1;
+	unsigned int sharp_scaling:1;
+	unsigned int up_scaling:1;
+	unsigned int down_scaling:1;
+	unsigned int stall_support:1;
+	unsigned int pad:1;
+	u16 output_flags;
 } __attribute__((packed));
 
 /** This matches the EDID DTD structure, more or less */
 struct intel_sdvo_dtd {
-    struct {
-	u16 clock;		/**< pixel clock, in 10kHz units */
-	u8 h_active;		/**< lower 8 bits (pixels) */
-	u8 h_blank;		/**< lower 8 bits (pixels) */
-	u8 h_high;		/**< upper 4 bits each h_active, h_blank */
-	u8 v_active;		/**< lower 8 bits (lines) */
-	u8 v_blank;		/**< lower 8 bits (lines) */
-	u8 v_high;		/**< upper 4 bits each v_active, v_blank */
-    } part1;
-
-    struct {
-	u8 h_sync_off;	/**< lower 8 bits, from hblank start */
-	u8 h_sync_width;	/**< lower 8 bits (pixels) */
-	/** lower 4 bits each vsync offset, vsync width */
-	u8 v_sync_off_width;
-	/**
-	 * 2 high bits of hsync offset, 2 high bits of hsync width,
-	 * bits 4-5 of vsync offset, and 2 high bits of vsync width.
-	 */
-	u8 sync_off_width_high;
-	u8 dtd_flags;
-	u8 sdvo_flags;
-	/** bits 6-7 of vsync offset at bits 6-7 */
-	u8 v_sync_off_high;
-	u8 reserved;
-    } part2;
+	struct {
+		u16 clock;	/**< pixel clock, in 10kHz units */
+		u8 h_active;	/**< lower 8 bits (pixels) */
+		u8 h_blank;	/**< lower 8 bits (pixels) */
+		u8 h_high;	/**< upper 4 bits each h_active, h_blank */
+		u8 v_active;	/**< lower 8 bits (lines) */
+		u8 v_blank;	/**< lower 8 bits (lines) */
+		u8 v_high;	/**< upper 4 bits each v_active, v_blank */
+	} part1;
+
+	struct {
+		u8 h_sync_off;	/**< lower 8 bits, from hblank start */
+		u8 h_sync_width;	/**< lower 8 bits (pixels) */
+		/** lower 4 bits each vsync offset, vsync width */
+		u8 v_sync_off_width;
+		/**
+		* 2 high bits of hsync offset, 2 high bits of hsync width,
+		* bits 4-5 of vsync offset, and 2 high bits of vsync width.
+		*/
+		u8 sync_off_width_high;
+		u8 dtd_flags;
+		u8 sdvo_flags;
+		/** bits 6-7 of vsync offset at bits 6-7 */
+		u8 v_sync_off_high;
+		u8 reserved;
+	} part2;
 } __attribute__((packed));
 
 struct intel_sdvo_pixel_clock_range {
-    u16 min;			/**< pixel clock, in 10kHz units */
-    u16 max;			/**< pixel clock, in 10kHz units */
+	u16 min;	/**< pixel clock, in 10kHz units */
+	u16 max;	/**< pixel clock, in 10kHz units */
 } __attribute__((packed));
 
 struct intel_sdvo_preferred_input_timing_args {
-    u16 clock;
-    u16 width;
-    u16 height;
-    u8	interlace:1;
-    u8	scaled:1;
-    u8	pad:6;
+	u16 clock;
+	u16 width;
+	u16 height;
+	u8	interlace:1;
+	u8	scaled:1;
+	u8	pad:6;
 } __attribute__((packed));
 
 /* I2C registers for SDVO */
@@ -154,9 +154,9 @@ struct intel_sdvo_preferred_input_timing_args {
  */
 #define SDVO_CMD_GET_TRAINED_INPUTS			0x03
 struct intel_sdvo_get_trained_inputs_response {
-    unsigned int input0_trained:1;
-    unsigned int input1_trained:1;
-    unsigned int pad:6;
+	unsigned int input0_trained:1;
+	unsigned int input1_trained:1;
+	unsigned int pad:6;
 } __attribute__((packed));
 
 /** Returns a struct intel_sdvo_output_flags of active outputs. */
@@ -177,7 +177,7 @@ struct intel_sdvo_get_trained_inputs_response {
  */
 #define SDVO_CMD_GET_IN_OUT_MAP				0x06
 struct intel_sdvo_in_out_map {
-    u16 in0, in1;
+	u16 in0, in1;
 };
 
 /**
@@ -210,10 +210,10 @@ struct intel_sdvo_in_out_map {
 
 #define SDVO_CMD_GET_INTERRUPT_EVENT_SOURCE		0x0f
 struct intel_sdvo_get_interrupt_event_source_response {
-    u16 interrupt_status;
-    unsigned int ambient_light_interrupt:1;
-    unsigned int hdmi_audio_encrypt_change:1;
-    unsigned int pad:6;
+	u16 interrupt_status;
+	unsigned int ambient_light_interrupt:1;
+	unsigned int hdmi_audio_encrypt_change:1;
+	unsigned int pad:6;
 } __attribute__((packed));
 
 /**
@@ -225,8 +225,8 @@ struct intel_sdvo_get_interrupt_event_source_response {
  */
 #define SDVO_CMD_SET_TARGET_INPUT			0x10
 struct intel_sdvo_set_target_input_args {
-    unsigned int target_1:1;
-    unsigned int pad:7;
+	unsigned int target_1:1;
+	unsigned int pad:7;
 } __attribute__((packed));
 
 /**
@@ -314,57 +314,57 @@ struct intel_sdvo_set_target_input_args {
 #define SDVO_CMD_GET_SUPPORTED_TV_FORMATS		0x27
 /** 6 bytes of bit flags for TV formats shared by all TV format functions */
 struct intel_sdvo_tv_format {
-    unsigned int ntsc_m:1;
-    unsigned int ntsc_j:1;
-    unsigned int ntsc_443:1;
-    unsigned int pal_b:1;
-    unsigned int pal_d:1;
-    unsigned int pal_g:1;
-    unsigned int pal_h:1;
-    unsigned int pal_i:1;
-
-    unsigned int pal_m:1;
-    unsigned int pal_n:1;
-    unsigned int pal_nc:1;
-    unsigned int pal_60:1;
-    unsigned int secam_b:1;
-    unsigned int secam_d:1;
-    unsigned int secam_g:1;
-    unsigned int secam_k:1;
-
-    unsigned int secam_k1:1;
-    unsigned int secam_l:1;
-    unsigned int secam_60:1;
-    unsigned int hdtv_std_smpte_240m_1080i_59:1;
-    unsigned int hdtv_std_smpte_240m_1080i_60:1;
-    unsigned int hdtv_std_smpte_260m_1080i_59:1;
-    unsigned int hdtv_std_smpte_260m_1080i_60:1;
-    unsigned int hdtv_std_smpte_274m_1080i_50:1;
-
-    unsigned int hdtv_std_smpte_274m_1080i_59:1;
-    unsigned int hdtv_std_smpte_274m_1080i_60:1;
-    unsigned int hdtv_std_smpte_274m_1080p_23:1;
-    unsigned int hdtv_std_smpte_274m_1080p_24:1;
-    unsigned int hdtv_std_smpte_274m_1080p_25:1;
-    unsigned int hdtv_std_smpte_274m_1080p_29:1;
-    unsigned int hdtv_std_smpte_274m_1080p_30:1;
-    unsigned int hdtv_std_smpte_274m_1080p_50:1;
-
-    unsigned int hdtv_std_smpte_274m_1080p_59:1;
-    unsigned int hdtv_std_smpte_274m_1080p_60:1;
-    unsigned int hdtv_std_smpte_295m_1080i_50:1;
-    unsigned int hdtv_std_smpte_295m_1080p_50:1;
-    unsigned int hdtv_std_smpte_296m_720p_59:1;
-    unsigned int hdtv_std_smpte_296m_720p_60:1;
-    unsigned int hdtv_std_smpte_296m_720p_50:1;
-    unsigned int hdtv_std_smpte_293m_480p_59:1;
-
-    unsigned int hdtv_std_smpte_170m_480i_59:1;
-    unsigned int hdtv_std_iturbt601_576i_50:1;
-    unsigned int hdtv_std_iturbt601_576p_50:1;
-    unsigned int hdtv_std_eia_7702a_480i_60:1;
-    unsigned int hdtv_std_eia_7702a_480p_60:1;
-    unsigned int pad:3;
+	unsigned int ntsc_m:1;
+	unsigned int ntsc_j:1;
+	unsigned int ntsc_443:1;
+	unsigned int pal_b:1;
+	unsigned int pal_d:1;
+	unsigned int pal_g:1;
+	unsigned int pal_h:1;
+	unsigned int pal_i:1;
+
+	unsigned int pal_m:1;
+	unsigned int pal_n:1;
+	unsigned int pal_nc:1;
+	unsigned int pal_60:1;
+	unsigned int secam_b:1;
+	unsigned int secam_d:1;
+	unsigned int secam_g:1;
+	unsigned int secam_k:1;
+
+	unsigned int secam_k1:1;
+	unsigned int secam_l:1;
+	unsigned int secam_60:1;
+	unsigned int hdtv_std_smpte_240m_1080i_59:1;
+	unsigned int hdtv_std_smpte_240m_1080i_60:1;
+	unsigned int hdtv_std_smpte_260m_1080i_59:1;
+	unsigned int hdtv_std_smpte_260m_1080i_60:1;
+	unsigned int hdtv_std_smpte_274m_1080i_50:1;
+
+	unsigned int hdtv_std_smpte_274m_1080i_59:1;
+	unsigned int hdtv_std_smpte_274m_1080i_60:1;
+	unsigned int hdtv_std_smpte_274m_1080p_23:1;
+	unsigned int hdtv_std_smpte_274m_1080p_24:1;
+	unsigned int hdtv_std_smpte_274m_1080p_25:1;
+	unsigned int hdtv_std_smpte_274m_1080p_29:1;
+	unsigned int hdtv_std_smpte_274m_1080p_30:1;
+	unsigned int hdtv_std_smpte_274m_1080p_50:1;
+
+	unsigned int hdtv_std_smpte_274m_1080p_59:1;
+	unsigned int hdtv_std_smpte_274m_1080p_60:1;
+	unsigned int hdtv_std_smpte_295m_1080i_50:1;
+	unsigned int hdtv_std_smpte_295m_1080p_50:1;
+	unsigned int hdtv_std_smpte_296m_720p_59:1;
+	unsigned int hdtv_std_smpte_296m_720p_60:1;
+	unsigned int hdtv_std_smpte_296m_720p_50:1;
+	unsigned int hdtv_std_smpte_293m_480p_59:1;
+
+	unsigned int hdtv_std_smpte_170m_480i_59:1;
+	unsigned int hdtv_std_iturbt601_576i_50:1;
+	unsigned int hdtv_std_iturbt601_576p_50:1;
+	unsigned int hdtv_std_eia_7702a_480i_60:1;
+	unsigned int hdtv_std_eia_7702a_480p_60:1;
+	unsigned int pad:3;
 } __attribute__((packed));
 
 #define SDVO_CMD_GET_TV_FORMAT				0x28
@@ -374,53 +374,53 @@ struct intel_sdvo_tv_format {
 /** Returns the resolutions that can be used with the given TV format */
 #define SDVO_CMD_GET_SDTV_RESOLUTION_SUPPORT		0x83
 struct intel_sdvo_sdtv_resolution_request {
-    unsigned int ntsc_m:1;
-    unsigned int ntsc_j:1;
-    unsigned int ntsc_443:1;
-    unsigned int pal_b:1;
-    unsigned int pal_d:1;
-    unsigned int pal_g:1;
-    unsigned int pal_h:1;
-    unsigned int pal_i:1;
-
-    unsigned int pal_m:1;
-    unsigned int pal_n:1;
-    unsigned int pal_nc:1;
-    unsigned int pal_60:1;
-    unsigned int secam_b:1;
-    unsigned int secam_d:1;
-    unsigned int secam_g:1;
-    unsigned int secam_k:1;
-
-    unsigned int secam_k1:1;
-    unsigned int secam_l:1;
-    unsigned int secam_60:1;
-    unsigned int pad:5;
+	unsigned int ntsc_m:1;
+	unsigned int ntsc_j:1;
+	unsigned int ntsc_443:1;
+	unsigned int pal_b:1;
+	unsigned int pal_d:1;
+	unsigned int pal_g:1;
+	unsigned int pal_h:1;
+	unsigned int pal_i:1;
+
+	unsigned int pal_m:1;
+	unsigned int pal_n:1;
+	unsigned int pal_nc:1;
+	unsigned int pal_60:1;
+	unsigned int secam_b:1;
+	unsigned int secam_d:1;
+	unsigned int secam_g:1;
+	unsigned int secam_k:1;
+
+	unsigned int secam_k1:1;
+	unsigned int secam_l:1;
+	unsigned int secam_60:1;
+	unsigned int pad:5;
 } __attribute__((packed));
 
 struct intel_sdvo_sdtv_resolution_reply {
-    unsigned int res_320x200:1;
-    unsigned int res_320x240:1;
-    unsigned int res_400x300:1;
-    unsigned int res_640x350:1;
-    unsigned int res_640x400:1;
-    unsigned int res_640x480:1;
-    unsigned int res_704x480:1;
-    unsigned int res_704x576:1;
-
-    unsigned int res_720x350:1;
-    unsigned int res_720x400:1;
-    unsigned int res_720x480:1;
-    unsigned int res_720x540:1;
-    unsigned int res_720x576:1;
-    unsigned int res_768x576:1;
-    unsigned int res_800x600:1;
-    unsigned int res_832x624:1;
-
-    unsigned int res_920x766:1;
-    unsigned int res_1024x768:1;
-    unsigned int res_1280x1024:1;
-    unsigned int pad:5;
+	unsigned int res_320x200:1;
+	unsigned int res_320x240:1;
+	unsigned int res_400x300:1;
+	unsigned int res_640x350:1;
+	unsigned int res_640x400:1;
+	unsigned int res_640x480:1;
+	unsigned int res_704x480:1;
+	unsigned int res_704x576:1;
+
+	unsigned int res_720x350:1;
+	unsigned int res_720x400:1;
+	unsigned int res_720x480:1;
+	unsigned int res_720x540:1;
+	unsigned int res_720x576:1;
+	unsigned int res_768x576:1;
+	unsigned int res_800x600:1;
+	unsigned int res_832x624:1;
+
+	unsigned int res_920x766:1;
+	unsigned int res_1024x768:1;
+	unsigned int res_1280x1024:1;
+	unsigned int pad:5;
 } __attribute__((packed));
 
 /* Get supported resolution with square pixel aspect ratio that can be
@@ -428,90 +428,90 @@ struct intel_sdvo_sdtv_resolution_reply {
 #define SDVO_CMD_GET_SCALED_HDTV_RESOLUTION_SUPPORT		0x85
 
 struct intel_sdvo_hdtv_resolution_request {
-    unsigned int hdtv_std_smpte_240m_1080i_59:1;
-    unsigned int hdtv_std_smpte_240m_1080i_60:1;
-    unsigned int hdtv_std_smpte_260m_1080i_59:1;
-    unsigned int hdtv_std_smpte_260m_1080i_60:1;
-    unsigned int hdtv_std_smpte_274m_1080i_50:1;
-    unsigned int hdtv_std_smpte_274m_1080i_59:1;
-    unsigned int hdtv_std_smpte_274m_1080i_60:1;
-    unsigned int hdtv_std_smpte_274m_1080p_23:1;
-
-    unsigned int hdtv_std_smpte_274m_1080p_24:1;
-    unsigned int hdtv_std_smpte_274m_1080p_25:1;
-    unsigned int hdtv_std_smpte_274m_1080p_29:1;
-    unsigned int hdtv_std_smpte_274m_1080p_30:1;
-    unsigned int hdtv_std_smpte_274m_1080p_50:1;
-    unsigned int hdtv_std_smpte_274m_1080p_59:1;
-    unsigned int hdtv_std_smpte_274m_1080p_60:1;
-    unsigned int hdtv_std_smpte_295m_1080i_50:1;
-
-    unsigned int hdtv_std_smpte_295m_1080p_50:1;
-    unsigned int hdtv_std_smpte_296m_720p_59:1;
-    unsigned int hdtv_std_smpte_296m_720p_60:1;
-    unsigned int hdtv_std_smpte_296m_720p_50:1;
-    unsigned int hdtv_std_smpte_293m_480p_59:1;
-    unsigned int hdtv_std_smpte_170m_480i_59:1;
-    unsigned int hdtv_std_iturbt601_576i_50:1;
-    unsigned int hdtv_std_iturbt601_576p_50:1;
-
-    unsigned int hdtv_std_eia_7702a_480i_60:1;
-    unsigned int hdtv_std_eia_7702a_480p_60:1;
-    unsigned int pad:6;
+	unsigned int hdtv_std_smpte_240m_1080i_59:1;
+	unsigned int hdtv_std_smpte_240m_1080i_60:1;
+	unsigned int hdtv_std_smpte_260m_1080i_59:1;
+	unsigned int hdtv_std_smpte_260m_1080i_60:1;
+	unsigned int hdtv_std_smpte_274m_1080i_50:1;
+	unsigned int hdtv_std_smpte_274m_1080i_59:1;
+	unsigned int hdtv_std_smpte_274m_1080i_60:1;
+	unsigned int hdtv_std_smpte_274m_1080p_23:1;
+
+	unsigned int hdtv_std_smpte_274m_1080p_24:1;
+	unsigned int hdtv_std_smpte_274m_1080p_25:1;
+	unsigned int hdtv_std_smpte_274m_1080p_29:1;
+	unsigned int hdtv_std_smpte_274m_1080p_30:1;
+	unsigned int hdtv_std_smpte_274m_1080p_50:1;
+	unsigned int hdtv_std_smpte_274m_1080p_59:1;
+	unsigned int hdtv_std_smpte_274m_1080p_60:1;
+	unsigned int hdtv_std_smpte_295m_1080i_50:1;
+
+	unsigned int hdtv_std_smpte_295m_1080p_50:1;
+	unsigned int hdtv_std_smpte_296m_720p_59:1;
+	unsigned int hdtv_std_smpte_296m_720p_60:1;
+	unsigned int hdtv_std_smpte_296m_720p_50:1;
+	unsigned int hdtv_std_smpte_293m_480p_59:1;
+	unsigned int hdtv_std_smpte_170m_480i_59:1;
+	unsigned int hdtv_std_iturbt601_576i_50:1;
+	unsigned int hdtv_std_iturbt601_576p_50:1;
+
+	unsigned int hdtv_std_eia_7702a_480i_60:1;
+	unsigned int hdtv_std_eia_7702a_480p_60:1;
+	unsigned int pad:6;
 } __attribute__((packed));
 
 struct intel_sdvo_hdtv_resolution_reply {
-    unsigned int res_640x480:1;
-    unsigned int res_800x600:1;
-    unsigned int res_1024x768:1;
-    unsigned int res_1280x960:1;
-    unsigned int res_1400x1050:1;
-    unsigned int res_1600x1200:1;
-    unsigned int res_1920x1440:1;
-    unsigned int res_2048x1536:1;
-
-    unsigned int res_2560x1920:1;
-    unsigned int res_3200x2400:1;
-    unsigned int res_3840x2880:1;
-    unsigned int pad1:5;
-
-    unsigned int res_848x480:1;
-    unsigned int res_1064x600:1;
-    unsigned int res_1280x720:1;
-    unsigned int res_1360x768:1;
-    unsigned int res_1704x960:1;
-    unsigned int res_1864x1050:1;
-    unsigned int res_1920x1080:1;
-    unsigned int res_2128x1200:1;
-
-    unsigned int res_2560x1400:1;
-    unsigned int res_2728x1536:1;
-    unsigned int res_3408x1920:1;
-    unsigned int res_4264x2400:1;
-    unsigned int res_5120x2880:1;
-    unsigned int pad2:3;
-
-    unsigned int res_768x480:1;
-    unsigned int res_960x600:1;
-    unsigned int res_1152x720:1;
-    unsigned int res_1124x768:1;
-    unsigned int res_1536x960:1;
-    unsigned int res_1680x1050:1;
-    unsigned int res_1728x1080:1;
-    unsigned int res_1920x1200:1;
-
-    unsigned int res_2304x1440:1;
-    unsigned int res_2456x1536:1;
-    unsigned int res_3072x1920:1;
-    unsigned int res_3840x2400:1;
-    unsigned int res_4608x2880:1;
-    unsigned int pad3:3;
-
-    unsigned int res_1280x1024:1;
-    unsigned int pad4:7;
-
-    unsigned int res_1280x768:1;
-    unsigned int pad5:7;
+	unsigned int res_640x480:1;
+	unsigned int res_800x600:1;
+	unsigned int res_1024x768:1;
+	unsigned int res_1280x960:1;
+	unsigned int res_1400x1050:1;
+	unsigned int res_1600x1200:1;
+	unsigned int res_1920x1440:1;
+	unsigned int res_2048x1536:1;
+
+	unsigned int res_2560x1920:1;
+	unsigned int res_3200x2400:1;
+	unsigned int res_3840x2880:1;
+	unsigned int pad1:5;
+
+	unsigned int res_848x480:1;
+	unsigned int res_1064x600:1;
+	unsigned int res_1280x720:1;
+	unsigned int res_1360x768:1;
+	unsigned int res_1704x960:1;
+	unsigned int res_1864x1050:1;
+	unsigned int res_1920x1080:1;
+	unsigned int res_2128x1200:1;
+
+	unsigned int res_2560x1400:1;
+	unsigned int res_2728x1536:1;
+	unsigned int res_3408x1920:1;
+	unsigned int res_4264x2400:1;
+	unsigned int res_5120x2880:1;
+	unsigned int pad2:3;
+
+	unsigned int res_768x480:1;
+	unsigned int res_960x600:1;
+	unsigned int res_1152x720:1;
+	unsigned int res_1124x768:1;
+	unsigned int res_1536x960:1;
+	unsigned int res_1680x1050:1;
+	unsigned int res_1728x1080:1;
+	unsigned int res_1920x1200:1;
+
+	unsigned int res_2304x1440:1;
+	unsigned int res_2456x1536:1;
+	unsigned int res_3072x1920:1;
+	unsigned int res_3840x2400:1;
+	unsigned int res_4608x2880:1;
+	unsigned int pad3:3;
+
+	unsigned int res_1280x1024:1;
+	unsigned int pad4:7;
+
+	unsigned int res_1280x768:1;
+	unsigned int pad5:7;
 } __attribute__((packed));
 
 /* Get supported power state returns info for encoder and monitor, rely on
@@ -539,25 +539,25 @@ struct intel_sdvo_hdtv_resolution_reply {
  * The high fields are bits 8:9 of the 10-bit values.
  */
 struct sdvo_panel_power_sequencing {
-    u8 t0;
-    u8 t1;
-    u8 t2;
-    u8 t3;
-    u8 t4;
-
-    unsigned int t0_high:2;
-    unsigned int t1_high:2;
-    unsigned int t2_high:2;
-    unsigned int t3_high:2;
-
-    unsigned int t4_high:2;
-    unsigned int pad:6;
+	u8 t0;
+	u8 t1;
+	u8 t2;
+	u8 t3;
+	u8 t4;
+
+	unsigned int t0_high:2;
+	unsigned int t1_high:2;
+	unsigned int t2_high:2;
+	unsigned int t3_high:2;
+
+	unsigned int t4_high:2;
+	unsigned int pad:6;
 } __attribute__((packed));
 
 #define SDVO_CMD_GET_MAX_BACKLIGHT_LEVEL		0x30
 struct sdvo_max_backlight_reply {
-    u8 max_value;
-    u8 default_value;
+	u8 max_value;
+	u8 default_value;
 } __attribute__((packed));
 
 #define SDVO_CMD_GET_BACKLIGHT_LEVEL			0x31
@@ -565,16 +565,16 @@ struct sdvo_max_backlight_reply {
 
 #define SDVO_CMD_GET_AMBIENT_LIGHT			0x33
 struct sdvo_get_ambient_light_reply {
-    u16 trip_low;
-    u16 trip_high;
-    u16 value;
+	u16 trip_low;
+	u16 trip_high;
+	u16 value;
 } __attribute__((packed));
 #define SDVO_CMD_SET_AMBIENT_LIGHT			0x34
 struct sdvo_set_ambient_light_reply {
-    u16 trip_low;
-    u16 trip_high;
-    unsigned int enable:1;
-    unsigned int pad:7;
+	u16 trip_low;
+	u16 trip_high;
+	unsigned int enable:1;
+	unsigned int pad:7;
 } __attribute__((packed));
 
 /* Set display power state */
@@ -586,23 +586,23 @@ struct sdvo_set_ambient_light_reply {
 
 #define SDVO_CMD_GET_SUPPORTED_ENHANCEMENTS		0x84
 struct intel_sdvo_enhancements_reply {
-    unsigned int flicker_filter:1;
-    unsigned int flicker_filter_adaptive:1;
-    unsigned int flicker_filter_2d:1;
-    unsigned int saturation:1;
-    unsigned int hue:1;
-    unsigned int brightness:1;
-    unsigned int contrast:1;
-    unsigned int overscan_h:1;
-
-    unsigned int overscan_v:1;
-    unsigned int hpos:1;
-    unsigned int vpos:1;
-    unsigned int sharpness:1;
-    unsigned int dot_crawl:1;
-    unsigned int dither:1;
-    unsigned int tv_chroma_filter:1;
-    unsigned int tv_luma_filter:1;
+	unsigned int flicker_filter:1;
+	unsigned int flicker_filter_adaptive:1;
+	unsigned int flicker_filter_2d:1;
+	unsigned int saturation:1;
+	unsigned int hue:1;
+	unsigned int brightness:1;
+	unsigned int contrast:1;
+	unsigned int overscan_h:1;
+
+	unsigned int overscan_v:1;
+	unsigned int hpos:1;
+	unsigned int vpos:1;
+	unsigned int sharpness:1;
+	unsigned int dot_crawl:1;
+	unsigned int dither:1;
+	unsigned int tv_chroma_filter:1;
+	unsigned int tv_luma_filter:1;
 } __attribute__((packed));
 
 /* Picture enhancement limits below are dependent on the current TV format,
@@ -623,8 +623,8 @@ struct intel_sdvo_enhancements_reply {
 #define SDVO_CMD_GET_MAX_TV_CHROMA_FILTER		0x74
 #define SDVO_CMD_GET_MAX_TV_LUMA_FILTER			0x77
 struct intel_sdvo_enhancement_limits_reply {
-    u16 max_value;
-    u16 default_value;
+	u16 max_value;
+	u16 default_value;
 } __attribute__((packed));
 
 #define SDVO_CMD_GET_LVDS_PANEL_INFORMATION		0x7f
@@ -665,8 +665,8 @@ struct intel_sdvo_enhancement_limits_reply {
 #define SDVO_CMD_GET_TV_LUMA_FILTER			0x78
 #define SDVO_CMD_SET_TV_LUMA_FILTER			0x79
 struct intel_sdvo_enhancements_arg {
-    u16 value;
-}__attribute__((packed));
+	u16 value;
+} __attribute__((packed));
 
 #define SDVO_CMD_GET_DOT_CRAWL				0x70
 #define SDVO_CMD_SET_DOT_CRAWL				0x71
@@ -717,7 +717,7 @@ struct intel_sdvo_enhancements_arg {
 #define SDVO_CMD_GET_AUDIO_TX_INFO	0x9c
 #define SDVO_NEED_TO_STALL  (1 << 7)
 
-struct intel_sdvo_encode{
-    u8 dvi_rev;
-    u8 hdmi_rev;
+struct intel_sdvo_encode {
+	u8 dvi_rev;
+	u8 hdmi_rev;
 } __attribute__ ((packed));

+ 29 - 29
drivers/gpu/drm/i915/intel_tv.c

@@ -194,10 +194,10 @@ static const u32 filter_table[] = {
  *
  *     if (f >= 1) {
  *         exp = 0x7;
- * 	   mant = 1 << 8;
+ *	   mant = 1 << 8;
  *     } else {
  *         for (exp = 0; exp < 3 && f < 0.5; exp++)
- * 	       f *= 2.0;
+ *	   f *= 2.0;
  *         mant = (f * (1 << 9) + 0.5);
  *         if (mant >= (1 << 9))
  *             mant = (1 << 9) - 1;
@@ -430,7 +430,7 @@ static const struct tv_mode tv_modes[] = {
 		.vsync_start_f1	= 6,		    .vsync_start_f2	= 7,
 		.vsync_len	= 6,
 
-		.veq_ena	= true,		    .veq_start_f1    	= 0,
+		.veq_ena	= true,		    .veq_start_f1	= 0,
 		.veq_start_f2	= 1,		    .veq_len		= 18,
 
 		.vi_end_f1	= 20,		    .vi_end_f2		= 21,
@@ -472,7 +472,7 @@ static const struct tv_mode tv_modes[] = {
 		.vsync_start_f1 = 6,		    .vsync_start_f2	= 7,
 		.vsync_len	= 6,
 
-		.veq_ena	= true,		    .veq_start_f1    	= 0,
+		.veq_ena	= true,		    .veq_start_f1	= 0,
 		.veq_start_f2	= 1,		    .veq_len		= 18,
 
 		.vi_end_f1	= 20,		    .vi_end_f2		= 21,
@@ -515,7 +515,7 @@ static const struct tv_mode tv_modes[] = {
 		.vsync_start_f1	= 6,	    .vsync_start_f2	= 7,
 		.vsync_len	= 6,
 
-		.veq_ena	= true,		    .veq_start_f1    	= 0,
+		.veq_ena      = true,	    .veq_start_f1	= 0,
 		.veq_start_f2 = 1,	    .veq_len		= 18,
 
 		.vi_end_f1	= 20,		    .vi_end_f2		= 21,
@@ -558,7 +558,7 @@ static const struct tv_mode tv_modes[] = {
 		.vsync_start_f1	= 6,		    .vsync_start_f2	= 7,
 		.vsync_len	= 6,
 
-		.veq_ena	= true,		    .veq_start_f1    	= 0,
+		.veq_ena	= true,		    .veq_start_f1	= 0,
 		.veq_start_f2	= 1,		    .veq_len		= 18,
 
 		.vi_end_f1	= 20,		    .vi_end_f2		= 21,
@@ -602,14 +602,14 @@ static const struct tv_mode tv_modes[] = {
 		.vsync_start_f1	= 6,	   .vsync_start_f2	= 7,
 		.vsync_len	= 6,
 
-		.veq_ena	= true,		    .veq_start_f1    	= 0,
+		.veq_ena	= true,		    .veq_start_f1	= 0,
 		.veq_start_f2	= 1,		    .veq_len		= 18,
 
 		.vi_end_f1	= 24,		    .vi_end_f2		= 25,
 		.nbr_end	= 286,
 
 		.burst_ena	= true,
-		.hburst_start = 73,	    	    .hburst_len		= 34,
+		.hburst_start = 73,	    .hburst_len		= 34,
 		.vburst_start_f1 = 8,	    .vburst_end_f1	= 285,
 		.vburst_start_f2 = 8,	    .vburst_end_f2	= 286,
 		.vburst_start_f3 = 9,	    .vburst_end_f3	= 286,
@@ -646,7 +646,7 @@ static const struct tv_mode tv_modes[] = {
 		.vsync_start_f1	= 5,	    .vsync_start_f2	= 6,
 		.vsync_len	= 5,
 
-		.veq_ena	= true,		    .veq_start_f1    	= 0,
+		.veq_ena	= true,	    .veq_start_f1	= 0,
 		.veq_start_f2	= 1,	    .veq_len		= 15,
 
 		.vi_end_f1	= 24,		    .vi_end_f2		= 25,
@@ -675,7 +675,7 @@ static const struct tv_mode tv_modes[] = {
 	},
 	{
 		.name       = "480p@59.94Hz",
-		.clock 	= 107520,
+		.clock		= 107520,
 		.refresh	= 59940,
 		.oversample     = TV_OVERSAMPLE_4X,
 		.component_only = 1,
@@ -683,7 +683,7 @@ static const struct tv_mode tv_modes[] = {
 		.hsync_end      = 64,               .hblank_end         = 122,
 		.hblank_start   = 842,              .htotal             = 857,
 
-		.progressive    = true,.trilevel_sync = false,
+		.progressive    = true,		    .trilevel_sync = false,
 
 		.vsync_start_f1 = 12,               .vsync_start_f2     = 12,
 		.vsync_len      = 12,
@@ -699,7 +699,7 @@ static const struct tv_mode tv_modes[] = {
 	},
 	{
 		.name       = "480p@60Hz",
-		.clock 	= 107520,
+		.clock		= 107520,
 		.refresh	= 60000,
 		.oversample     = TV_OVERSAMPLE_4X,
 		.component_only = 1,
@@ -707,7 +707,7 @@ static const struct tv_mode tv_modes[] = {
 		.hsync_end      = 64,               .hblank_end         = 122,
 		.hblank_start   = 842,              .htotal             = 856,
 
-		.progressive    = true,.trilevel_sync = false,
+		.progressive    = true,		    .trilevel_sync = false,
 
 		.vsync_start_f1 = 12,               .vsync_start_f2     = 12,
 		.vsync_len      = 12,
@@ -723,7 +723,7 @@ static const struct tv_mode tv_modes[] = {
 	},
 	{
 		.name       = "576p",
-		.clock 	= 107520,
+		.clock		= 107520,
 		.refresh	= 50000,
 		.oversample     = TV_OVERSAMPLE_4X,
 		.component_only = 1,
@@ -755,7 +755,7 @@ static const struct tv_mode tv_modes[] = {
 		.hsync_end      = 80,               .hblank_end         = 300,
 		.hblank_start   = 1580,             .htotal             = 1649,
 
-		.progressive    = true, 	    .trilevel_sync = true,
+		.progressive	= true,		    .trilevel_sync = true,
 
 		.vsync_start_f1 = 10,               .vsync_start_f2     = 10,
 		.vsync_len      = 10,
@@ -779,7 +779,7 @@ static const struct tv_mode tv_modes[] = {
 		.hsync_end      = 80,               .hblank_end         = 300,
 		.hblank_start   = 1580,             .htotal             = 1651,
 
-		.progressive    = true, 	    .trilevel_sync = true,
+		.progressive	= true,		    .trilevel_sync = true,
 
 		.vsync_start_f1 = 10,               .vsync_start_f2     = 10,
 		.vsync_len      = 10,
@@ -803,7 +803,7 @@ static const struct tv_mode tv_modes[] = {
 		.hsync_end      = 80,               .hblank_end         = 300,
 		.hblank_start   = 1580,             .htotal             = 1979,
 
-		.progressive    = true, 	        .trilevel_sync = true,
+		.progressive	= true,		    .trilevel_sync = true,
 
 		.vsync_start_f1 = 10,               .vsync_start_f2     = 10,
 		.vsync_len      = 10,
@@ -828,12 +828,12 @@ static const struct tv_mode tv_modes[] = {
 		.hsync_end      = 88,               .hblank_end         = 235,
 		.hblank_start   = 2155,             .htotal             = 2639,
 
-		.progressive    = false, 	    .trilevel_sync = true,
+		.progressive	= false,	  .trilevel_sync = true,
 
 		.vsync_start_f1 = 4,              .vsync_start_f2     = 5,
 		.vsync_len      = 10,
 
-		.veq_ena	= true,		    .veq_start_f1    	= 4,
+		.veq_ena	= true,	    .veq_start_f1	= 4,
 		.veq_start_f2   = 4,	    .veq_len		= 10,
 
 
@@ -854,12 +854,12 @@ static const struct tv_mode tv_modes[] = {
 		.hsync_end      = 88,               .hblank_end         = 235,
 		.hblank_start   = 2155,             .htotal             = 2199,
 
-		.progressive    = false, 	    .trilevel_sync = true,
+		.progressive	= false,	    .trilevel_sync = true,
 
 		.vsync_start_f1 = 4,               .vsync_start_f2     = 5,
 		.vsync_len      = 10,
 
-		.veq_ena	= true,		    .veq_start_f1    	= 4,
+		.veq_ena	= true,		    .veq_start_f1	= 4,
 		.veq_start_f2	= 4,		    .veq_len		= 10,
 
 
@@ -880,16 +880,16 @@ static const struct tv_mode tv_modes[] = {
 		.hsync_end      = 88,               .hblank_end         = 235,
 		.hblank_start   = 2155,             .htotal             = 2201,
 
-		.progressive    = false, 	    .trilevel_sync = true,
+		.progressive	= false,	    .trilevel_sync = true,
 
 		.vsync_start_f1 = 4,            .vsync_start_f2    = 5,
 		.vsync_len      = 10,
 
 		.veq_ena	= true,		    .veq_start_f1	= 4,
-		.veq_start_f2 = 4,	    	    .veq_len = 10,
+		.veq_start_f2	= 4,		.veq_len	  = 10,
 
 
-		.vi_end_f1      = 21,           .vi_end_f2         	= 22,
+		.vi_end_f1	= 21,		.vi_end_f2	  = 22,
 		.nbr_end        = 539,
 
 		.burst_ena      = false,
@@ -916,7 +916,7 @@ intel_tv_dpms(struct drm_encoder *encoder, int mode)
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	switch(mode) {
+	switch (mode) {
 	case DRM_MODE_DPMS_ON:
 		I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
 		break;
@@ -933,7 +933,7 @@ intel_tv_mode_lookup(const char *tv_format)
 {
 	int i;
 
-	for (i = 0; i < sizeof(tv_modes) / sizeof (tv_modes[0]); i++) {
+	for (i = 0; i < sizeof(tv_modes) / sizeof(tv_modes[0]); i++) {
 		const struct tv_mode *tv_mode = &tv_modes[i];
 
 		if (!strcmp(tv_format, tv_mode->name))
@@ -1128,7 +1128,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 	if (color_conversion) {
 		I915_WRITE(TV_CSC_Y, (color_conversion->ry << 16) |
 			   color_conversion->gy);
-		I915_WRITE(TV_CSC_Y2,(color_conversion->by << 16) |
+		I915_WRITE(TV_CSC_Y2, (color_conversion->by << 16) |
 			   color_conversion->ay);
 		I915_WRITE(TV_CSC_U, (color_conversion->ru << 16) |
 			   color_conversion->gu);
@@ -1232,7 +1232,7 @@ static const struct drm_display_mode reported_modes[] = {
  * \return false if TV is disconnected.
  */
 static int
-intel_tv_detect_type (struct intel_tv *intel_tv,
+intel_tv_detect_type(struct intel_tv *intel_tv,
 		      struct drm_connector *connector)
 {
 	struct drm_encoder *encoder = &intel_tv->base.base;
@@ -1486,7 +1486,7 @@ intel_tv_get_modes(struct drm_connector *connector)
 }
 
 static void
-intel_tv_destroy (struct drm_connector *connector)
+intel_tv_destroy(struct drm_connector *connector)
 {
 	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
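
The comment fragment retouched near the top of this file (above filter_table[]) documents how a fractional filter coefficient f is reduced to a small exponent and a 9-bit mantissa. The program below is a direct standalone transcription of that pseudo-code as a worked example; the sample inputs are arbitrary and no register packing is attempted:

#include <stdio.h>

/* Transcription of the exp/mant conversion described in the
 * filter_table comment: clamp at f >= 1, otherwise normalise f into
 * [0.5, 1) with at most three doublings and round to 9 mantissa bits. */
static void encode_coeff(double f, unsigned int *exp_out,
			 unsigned int *mant_out)
{
	unsigned int exp, mant;

	if (f >= 1.0) {
		exp = 0x7;
		mant = 1 << 8;
	} else {
		for (exp = 0; exp < 3 && f < 0.5; exp++)
			f *= 2.0;
		mant = (unsigned int)(f * (1 << 9) + 0.5);
		if (mant >= (1 << 9))
			mant = (1 << 9) - 1;
	}
	*exp_out = exp;
	*mant_out = mant;
}

int main(void)
{
	const double samples[] = { 0.125, 0.4375, 0.75, 1.0 };
	unsigned int exp, mant;

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		encode_coeff(samples[i], &exp, &mant);
		printf("f=%.4f -> exp=%u mant=0x%03x\n", samples[i], exp, mant);
	}
	return 0;
}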

+ 5 - 4
drivers/gpu/drm/nouveau/Makefile

@@ -21,16 +21,17 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
              nv40_grctx.o nv50_grctx.o nvc0_grctx.o \
              nv84_crypt.o \
              nva3_copy.o nvc0_copy.o \
-             nv40_mpeg.o nv50_mpeg.o \
+             nv31_mpeg.o nv50_mpeg.o \
              nv04_instmem.o nv50_instmem.o nvc0_instmem.o \
-             nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \
-             nv50_cursor.o nv50_display.o \
              nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
              nv04_crtc.o nv04_display.o nv04_cursor.o \
+             nv50_evo.o nv50_crtc.o nv50_dac.o nv50_sor.o \
+             nv50_cursor.o nv50_display.o \
+             nvd0_display.o \
              nv04_fbcon.o nv50_fbcon.o nvc0_fbcon.o \
              nv10_gpio.o nv50_gpio.o \
 	     nv50_calc.o \
-	     nv04_pm.o nv50_pm.o nva3_pm.o \
+	     nv04_pm.o nv40_pm.o nv50_pm.o nva3_pm.o nvc0_pm.o \
 	     nv50_vram.o nvc0_vram.o \
 	     nv50_vm.o nvc0_vm.o
 

+ 126 - 43
drivers/gpu/drm/nouveau/nouveau_backlight.c

@@ -37,8 +37,10 @@
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"
 #include "nouveau_reg.h"
+#include "nouveau_encoder.h"
 
-static int nv40_get_intensity(struct backlight_device *bd)
+static int
+nv40_get_intensity(struct backlight_device *bd)
 {
 	struct drm_device *dev = bl_get_data(bd);
 	int val = (nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK)
@@ -47,7 +49,8 @@ static int nv40_get_intensity(struct backlight_device *bd)
 	return val;
 }
 
-static int nv40_set_intensity(struct backlight_device *bd)
+static int
+nv40_set_intensity(struct backlight_device *bd)
 {
 	struct drm_device *dev = bl_get_data(bd);
 	int val = bd->props.brightness;
@@ -65,30 +68,8 @@ static const struct backlight_ops nv40_bl_ops = {
 	.update_status = nv40_set_intensity,
 };
 
-static int nv50_get_intensity(struct backlight_device *bd)
-{
-	struct drm_device *dev = bl_get_data(bd);
-
-	return nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT);
-}
-
-static int nv50_set_intensity(struct backlight_device *bd)
-{
-	struct drm_device *dev = bl_get_data(bd);
-	int val = bd->props.brightness;
-
-	nv_wr32(dev, NV50_PDISPLAY_SOR_BACKLIGHT,
-		val | NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE);
-	return 0;
-}
-
-static const struct backlight_ops nv50_bl_ops = {
-	.options = BL_CORE_SUSPENDRESUME,
-	.get_brightness = nv50_get_intensity,
-	.update_status = nv50_set_intensity,
-};
-
-static int nouveau_nv40_backlight_init(struct drm_connector *connector)
+static int
+nv40_backlight_init(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -113,34 +94,129 @@ static int nouveau_nv40_backlight_init(struct drm_connector *connector)
 	return 0;
 }
 
-static int nouveau_nv50_backlight_init(struct drm_connector *connector)
+static int
+nv50_get_intensity(struct backlight_device *bd)
+{
+	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
+	struct drm_device *dev = nv_encoder->base.base.dev;
+	int or = nv_encoder->or;
+	u32 div = 1025;
+	u32 val;
+
+	val  = nv_rd32(dev, NV50_PDISP_SOR_PWM_CTL(or));
+	val &= NV50_PDISP_SOR_PWM_CTL_VAL;
+	return ((val * 100) + (div / 2)) / div;
+}
+
+static int
+nv50_set_intensity(struct backlight_device *bd)
+{
+	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
+	struct drm_device *dev = nv_encoder->base.base.dev;
+	int or = nv_encoder->or;
+	u32 div = 1025;
+	u32 val = (bd->props.brightness * div) / 100;
+
+	nv_wr32(dev, NV50_PDISP_SOR_PWM_CTL(or),
+		     NV50_PDISP_SOR_PWM_CTL_NEW | val);
+	return 0;
+}
+
+static const struct backlight_ops nv50_bl_ops = {
+	.options = BL_CORE_SUSPENDRESUME,
+	.get_brightness = nv50_get_intensity,
+	.update_status = nv50_set_intensity,
+};
+
+static int
+nva3_get_intensity(struct backlight_device *bd)
+{
+	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
+	struct drm_device *dev = nv_encoder->base.base.dev;
+	int or = nv_encoder->or;
+	u32 div, val;
+
+	div  = nv_rd32(dev, NV50_PDISP_SOR_PWM_DIV(or));
+	val  = nv_rd32(dev, NV50_PDISP_SOR_PWM_CTL(or));
+	val &= NVA3_PDISP_SOR_PWM_CTL_VAL;
+	if (div && div >= val)
+		return ((val * 100) + (div / 2)) / div;
+
+	return 100;
+}
+
+static int
+nva3_set_intensity(struct backlight_device *bd)
+{
+	struct nouveau_encoder *nv_encoder = bl_get_data(bd);
+	struct drm_device *dev = nv_encoder->base.base.dev;
+	int or = nv_encoder->or;
+	u32 div, val;
+
+	div = nv_rd32(dev, NV50_PDISP_SOR_PWM_DIV(or));
+	val = (bd->props.brightness * div) / 100;
+	if (div) {
+		nv_wr32(dev, NV50_PDISP_SOR_PWM_CTL(or), val |
+			     NV50_PDISP_SOR_PWM_CTL_NEW |
+			     NVA3_PDISP_SOR_PWM_CTL_UNK);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+static const struct backlight_ops nva3_bl_ops = {
+	.options = BL_CORE_SUSPENDRESUME,
+	.get_brightness = nva3_get_intensity,
+	.update_status = nva3_set_intensity,
+};
+
+static int
+nv50_backlight_init(struct drm_connector *connector)
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_encoder *nv_encoder;
 	struct backlight_properties props;
 	struct backlight_device *bd;
+	const struct backlight_ops *ops;
+
+	nv_encoder = find_encoder(connector, OUTPUT_LVDS);
+	if (!nv_encoder) {
+		nv_encoder = find_encoder(connector, OUTPUT_DP);
+		if (!nv_encoder)
+			return -ENODEV;
+	}
 
-	if (!nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT))
+	if (!nv_rd32(dev, NV50_PDISP_SOR_PWM_CTL(nv_encoder->or)))
 		return 0;
 
+	if (dev_priv->chipset <= 0xa0 ||
+	    dev_priv->chipset == 0xaa ||
+	    dev_priv->chipset == 0xac)
+		ops = &nv50_bl_ops;
+	else
+		ops = &nva3_bl_ops;
+
 	memset(&props, 0, sizeof(struct backlight_properties));
 	props.type = BACKLIGHT_RAW;
-	props.max_brightness = 1025;
-	bd = backlight_device_register("nv_backlight", &connector->kdev, dev,
-				       &nv50_bl_ops, &props);
+	props.max_brightness = 100;
+	bd = backlight_device_register("nv_backlight", &connector->kdev,
+				       nv_encoder, ops, &props);
 	if (IS_ERR(bd))
 		return PTR_ERR(bd);
 
 	dev_priv->backlight = bd;
-	bd->props.brightness = nv50_get_intensity(bd);
+	bd->props.brightness = bd->ops->get_brightness(bd);
 	backlight_update_status(bd);
 	return 0;
 }
 
-int nouveau_backlight_init(struct drm_connector *connector)
+int
+nouveau_backlight_init(struct drm_device *dev)
 {
-	struct drm_device *dev = connector->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct drm_connector *connector;
 
 #ifdef CONFIG_ACPI
 	if (acpi_video_backlight_support()) {
@@ -150,21 +226,28 @@ int nouveau_backlight_init(struct drm_connector *connector)
 	}
 #endif
 
-	switch (dev_priv->card_type) {
-	case NV_40:
-		return nouveau_nv40_backlight_init(connector);
-	case NV_50:
-		return nouveau_nv50_backlight_init(connector);
-	default:
-		break;
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
+		    connector->connector_type != DRM_MODE_CONNECTOR_eDP)
+			continue;
+
+		switch (dev_priv->card_type) {
+		case NV_40:
+			return nv40_backlight_init(connector);
+		case NV_50:
+			return nv50_backlight_init(connector);
+		default:
+			break;
+		}
 	}
 
+
 	return 0;
 }
 
-void nouveau_backlight_exit(struct drm_connector *connector)
+void
+nouveau_backlight_exit(struct drm_device *dev)
 {
-	struct drm_device *dev = connector->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
 	if (dev_priv->backlight) {

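The rewritten nv50/nva3 backlight hooks above expose brightness as a 0..100 percentage and convert to the hardware PWM duty cycle on the way down (brightness * div / 100), and back with round-to-nearest on the way up ((duty * 100 + div/2) / div); the nv50 path uses a fixed divider of 1025 while the nva3 path reads it from the PWM divider register. A minimal standalone sketch of that conversion with the register access stubbed out (the helper names and the main() driver are illustrative only):

#include <stdio.h>
#include <stdint.h>

/* Illustrative only: mirrors the duty<->percent math used by the new
 * nv50_get_intensity()/nv50_set_intensity(), with the PWM register
 * replaced by plain arithmetic. */
#define PWM_DIV 1025u   /* fixed divider assumed on the nv50 path */

static uint32_t duty_to_percent(uint32_t duty)
{
	/* round to nearest: (duty * 100 + div/2) / div */
	return ((duty * 100u) + (PWM_DIV / 2u)) / PWM_DIV;
}

static uint32_t percent_to_duty(uint32_t percent)
{
	return (percent * PWM_DIV) / 100u;
}

int main(void)
{
	unsigned duty = percent_to_duty(40);
	printf("40%% -> duty %u -> %u%%\n", duty, (unsigned)duty_to_percent(duty));
	return 0;
}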
+ 139 - 136
drivers/gpu/drm/nouveau/nouveau_bios.c

@@ -296,6 +296,11 @@ munge_reg(struct nvbios *bios, uint32_t reg)
 	if (dev_priv->card_type < NV_50)
 		return reg;
 
+	if (reg & 0x80000000) {
+		BUG_ON(bios->display.crtc < 0);
+		reg += bios->display.crtc * 0x800;
+	}
+
 	if (reg & 0x40000000) {
 		BUG_ON(!dcbent);
 
@@ -304,7 +309,7 @@ munge_reg(struct nvbios *bios, uint32_t reg)
 			reg += 0x00000080;
 	}
 
-	reg &= ~0x60000000;
+	reg &= ~0xe0000000;
 	return reg;
 }
 
@@ -1174,22 +1179,19 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 	 *
 	 */
 
-	struct bit_displayport_encoder_table *dpe = NULL;
 	struct dcb_entry *dcb = bios->display.output;
 	struct drm_device *dev = bios->dev;
 	uint8_t cond = bios->data[offset + 1];
-	int dummy;
+	uint8_t *table, *entry;
 
 	BIOSLOG(bios, "0x%04X: subop 0x%02X\n", offset, cond);
 
 	if (!iexec->execute)
 		return 3;
 
-	dpe = nouveau_bios_dp_table(dev, dcb, &dummy);
-	if (!dpe) {
-		NV_ERROR(dev, "0x%04X: INIT_3A: no encoder table!!\n", offset);
+	table = nouveau_dp_bios_data(dev, dcb, &entry);
+	if (!table)
 		return 3;
-	}
 
 	switch (cond) {
 	case 0:
@@ -1203,7 +1205,7 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 		break;
 	case 1:
 	case 2:
-		if (!(dpe->unknown & cond))
+		if (!(entry[5] & cond))
 			iexec->execute = false;
 		break;
 	case 5:
@@ -3221,6 +3223,49 @@ init_8d(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 	return 1;
 }
 
+static void
+init_gpio_unknv50(struct nvbios *bios, struct dcb_gpio_entry *gpio)
+{
+	const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c };
+	u32 r, s, v;
+
+	/* Not a clue, needs de-magicing */
+	r = nv50_gpio_ctl[gpio->line >> 4];
+	s = (gpio->line & 0x0f);
+	v = bios_rd32(bios, r) & ~(0x00010001 << s);
+	switch ((gpio->entry & 0x06000000) >> 25) {
+	case 1:
+		v |= (0x00000001 << s);
+		break;
+	case 2:
+		v |= (0x00010000 << s);
+		break;
+	default:
+		break;
+	}
+
+	bios_wr32(bios, r, v);
+}
+
+static void
+init_gpio_unknvd0(struct nvbios *bios, struct dcb_gpio_entry *gpio)
+{
+	u32 v, i;
+
+	v  = bios_rd32(bios, 0x00d610 + (gpio->line * 4));
+	v &= 0xffffff00;
+	v |= (gpio->entry & 0x00ff0000) >> 16;
+	bios_wr32(bios, 0x00d610 + (gpio->line * 4), v);
+
+	i = (gpio->entry & 0x1f000000) >> 24;
+	if (i) {
+		v  = bios_rd32(bios, 0x00d640 + ((i - 1) * 4));
+		v &= 0xffffff00;
+		v |= gpio->line;
+		bios_wr32(bios, 0x00d640 + ((i - 1) * 4), v);
+	}
+}
+
 static int
 init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 {
@@ -3235,7 +3280,6 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 
 	struct drm_nouveau_private *dev_priv = bios->dev->dev_private;
 	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
-	const uint32_t nv50_gpio_ctl[2] = { 0xe100, 0xe28c };
 	int i;
 
 	if (dev_priv->card_type < NV_50) {
@@ -3248,33 +3292,20 @@ init_gpio(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 
 	for (i = 0; i < bios->dcb.gpio.entries; i++) {
 		struct dcb_gpio_entry *gpio = &bios->dcb.gpio.entry[i];
-		uint32_t r, s, v;
 
 		BIOSLOG(bios, "0x%04X: Entry: 0x%08X\n", offset, gpio->entry);
 
 		BIOSLOG(bios, "0x%04X: set gpio 0x%02x, state %d\n",
 			offset, gpio->tag, gpio->state_default);
-		if (bios->execute)
-			pgpio->set(bios->dev, gpio->tag, gpio->state_default);
 
-		/* The NVIDIA binary driver doesn't appear to actually do
-		 * any of this, my VBIOS does however.
-		 */
-		/* Not a clue, needs de-magicing */
-		r = nv50_gpio_ctl[gpio->line >> 4];
-		s = (gpio->line & 0x0f);
-		v = bios_rd32(bios, r) & ~(0x00010001 << s);
-		switch ((gpio->entry & 0x06000000) >> 25) {
-		case 1:
-			v |= (0x00000001 << s);
-			break;
-		case 2:
-			v |= (0x00010000 << s);
-			break;
-		default:
-			break;
-		}
-		bios_wr32(bios, r, v);
+		if (!bios->execute)
+			continue;
+
+		pgpio->set(bios->dev, gpio->tag, gpio->state_default);
+		if (dev_priv->card_type < NV_D0)
+			init_gpio_unknv50(bios, gpio);
+		else
+			init_gpio_unknvd0(bios, gpio);
 	}
 
 	return 1;
@@ -3737,6 +3768,10 @@ parse_init_table(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 	int count = 0, i, ret;
 	uint8_t id;
 
+	/* catch NULL script pointers */
+	if (offset == 0)
+		return 0;
+
 	/*
 	 * Loop until INIT_DONE causes us to break out of the loop
 	 * (or until offset > bios length just in case... )
@@ -4389,86 +4424,37 @@ int nouveau_bios_parse_lvds_table(struct drm_device *dev, int pxclk, bool *dl, b
 	return 0;
 }
 
-static uint8_t *
-bios_output_config_match(struct drm_device *dev, struct dcb_entry *dcbent,
-			 uint16_t record, int record_len, int record_nr,
-			 bool match_link)
+/* BIT 'U'/'d' table encoder subtables have hashes matching them to
+ * a particular set of encoders.
+ *
+ * This function returns true if a particular DCB entry matches.
+ */
+bool
+bios_encoder_match(struct dcb_entry *dcb, u32 hash)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
-	uint32_t entry;
-	uint16_t table;
-	int i, v;
+	if ((hash & 0x000000f0) != (dcb->location << 4))
+		return false;
+	if ((hash & 0x0000000f) != dcb->type)
+		return false;
+	if (!(hash & (dcb->or << 16)))
+		return false;
 
-	switch (dcbent->type) {
+	switch (dcb->type) {
 	case OUTPUT_TMDS:
 	case OUTPUT_LVDS:
 	case OUTPUT_DP:
-		break;
-	default:
-		match_link = false;
-		break;
-	}
-
-	for (i = 0; i < record_nr; i++, record += record_len) {
-		table = ROM16(bios->data[record]);
-		if (!table)
-			continue;
-		entry = ROM32(bios->data[table]);
-
-		if (match_link) {
-			v = (entry & 0x00c00000) >> 22;
-			if (!(v & dcbent->sorconf.link))
-				continue;
+		if (hash & 0x00c00000) {
+			if (!(hash & (dcb->sorconf.link << 22)))
+				return false;
 		}
-
-		v = (entry & 0x000f0000) >> 16;
-		if (!(v & dcbent->or))
-			continue;
-
-		v = (entry & 0x000000f0) >> 4;
-		if (v != dcbent->location)
-			continue;
-
-		v = (entry & 0x0000000f);
-		if (v != dcbent->type)
-			continue;
-
-		return &bios->data[table];
-	}
-
-	return NULL;
-}
-
-void *
-nouveau_bios_dp_table(struct drm_device *dev, struct dcb_entry *dcbent,
-		      int *length)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nvbios *bios = &dev_priv->vbios;
-	uint8_t *table;
-
-	if (!bios->display.dp_table_ptr) {
-		NV_ERROR(dev, "No pointer to DisplayPort table\n");
-		return NULL;
-	}
-	table = &bios->data[bios->display.dp_table_ptr];
-
-	if (table[0] != 0x20 && table[0] != 0x21) {
-		NV_ERROR(dev, "DisplayPort table version 0x%02x unknown\n",
-			 table[0]);
-		return NULL;
+	default:
+		return true;
 	}
-
-	*length = table[4];
-	return bios_output_config_match(dev, dcbent,
-					bios->display.dp_table_ptr + table[1],
-					table[2], table[3], table[0] >= 0x21);
 }
 
 int
-nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
-			       uint32_t sub, int pxclk)
+nouveau_bios_run_display_table(struct drm_device *dev, u16 type, int pclk,
+			       struct dcb_entry *dcbent, int crtc)
 {
 	/*
 	 * The display script table is located by the BIT 'U' table.
@@ -4498,7 +4484,7 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
 	uint8_t *table = &bios->data[bios->display.script_table_ptr];
 	uint8_t *otable = NULL;
 	uint16_t script;
-	int i = 0;
+	int i;
 
 	if (!bios->display.script_table_ptr) {
 		NV_ERROR(dev, "No pointer to output script table\n");
@@ -4550,30 +4536,33 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
 
 	NV_DEBUG_KMS(dev, "Searching for output entry for %d %d %d\n",
 			dcbent->type, dcbent->location, dcbent->or);
-	otable = bios_output_config_match(dev, dcbent, table[1] +
-					  bios->display.script_table_ptr,
-					  table[2], table[3], table[0] >= 0x21);
+	for (i = 0; i < table[3]; i++) {
+		otable = ROMPTR(bios, table[table[1] + (i * table[2])]);
+		if (otable && bios_encoder_match(dcbent, ROM32(otable[0])))
+			break;
+	}
+
 	if (!otable) {
 		NV_DEBUG_KMS(dev, "failed to match any output table\n");
 		return 1;
 	}
 
-	if (pxclk < -2 || pxclk > 0) {
+	if (pclk < -2 || pclk > 0) {
 		/* Try to find matching script table entry */
 		for (i = 0; i < otable[5]; i++) {
-			if (ROM16(otable[table[4] + i*6]) == sub)
+			if (ROM16(otable[table[4] + i*6]) == type)
 				break;
 		}
 
 		if (i == otable[5]) {
 			NV_ERROR(dev, "Table 0x%04x not found for %d/%d, "
 				      "using first\n",
-				 sub, dcbent->type, dcbent->or);
+				 type, dcbent->type, dcbent->or);
 			i = 0;
 		}
 	}
 
-	if (pxclk == 0) {
+	if (pclk == 0) {
 		script = ROM16(otable[6]);
 		if (!script) {
 			NV_DEBUG_KMS(dev, "output script 0 not found\n");
@@ -4581,9 +4570,9 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
 		}
 
 		NV_DEBUG_KMS(dev, "0x%04X: parsing output script 0\n", script);
-		nouveau_bios_run_init_table(dev, script, dcbent);
+		nouveau_bios_run_init_table(dev, script, dcbent, crtc);
 	} else
-	if (pxclk == -1) {
+	if (pclk == -1) {
 		script = ROM16(otable[8]);
 		if (!script) {
 			NV_DEBUG_KMS(dev, "output script 1 not found\n");
@@ -4591,9 +4580,9 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
 		}
 
 		NV_DEBUG_KMS(dev, "0x%04X: parsing output script 1\n", script);
-		nouveau_bios_run_init_table(dev, script, dcbent);
+		nouveau_bios_run_init_table(dev, script, dcbent, crtc);
 	} else
-	if (pxclk == -2) {
+	if (pclk == -2) {
 		if (table[4] >= 12)
 			script = ROM16(otable[10]);
 		else
@@ -4604,31 +4593,31 @@ nouveau_bios_run_display_table(struct drm_device *dev, struct dcb_entry *dcbent,
 		}
 
 		NV_DEBUG_KMS(dev, "0x%04X: parsing output script 2\n", script);
-		nouveau_bios_run_init_table(dev, script, dcbent);
+		nouveau_bios_run_init_table(dev, script, dcbent, crtc);
 	} else
-	if (pxclk > 0) {
+	if (pclk > 0) {
 		script = ROM16(otable[table[4] + i*6 + 2]);
 		if (script)
-			script = clkcmptable(bios, script, pxclk);
+			script = clkcmptable(bios, script, pclk);
 		if (!script) {
 			NV_DEBUG_KMS(dev, "clock script 0 not found\n");
 			return 1;
 		}
 
 		NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 0\n", script);
-		nouveau_bios_run_init_table(dev, script, dcbent);
+		nouveau_bios_run_init_table(dev, script, dcbent, crtc);
 	} else
-	if (pxclk < 0) {
+	if (pclk < 0) {
 		script = ROM16(otable[table[4] + i*6 + 4]);
 		if (script)
-			script = clkcmptable(bios, script, -pxclk);
+			script = clkcmptable(bios, script, -pclk);
 		if (!script) {
 			NV_DEBUG_KMS(dev, "clock script 1 not found\n");
 			return 1;
 		}
 
 		NV_DEBUG_KMS(dev, "0x%04X: parsing clock script 1\n", script);
-		nouveau_bios_run_init_table(dev, script, dcbent);
+		nouveau_bios_run_init_table(dev, script, dcbent, crtc);
 	}
 
 	return 0;
@@ -5478,14 +5467,6 @@ parse_bit_U_tbl_entry(struct drm_device *dev, struct nvbios *bios,
 	return 0;
 }
 
-static int
-parse_bit_displayport_tbl_entry(struct drm_device *dev, struct nvbios *bios,
-				struct bit_entry *bitentry)
-{
-	bios->display.dp_table_ptr = ROM16(bios->data[bitentry->offset]);
-	return 0;
-}
-
 struct bit_table {
 	const char id;
 	int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
@@ -5559,7 +5540,6 @@ parse_bit_structure(struct nvbios *bios, const uint16_t bitoffset)
 	parse_bit_table(bios, bitoffset, &BIT_TABLE('L', lvds));
 	parse_bit_table(bios, bitoffset, &BIT_TABLE('T', tmds));
 	parse_bit_table(bios, bitoffset, &BIT_TABLE('U', U));
-	parse_bit_table(bios, bitoffset, &BIT_TABLE('d', displayport));
 
 	return 0;
 }
@@ -5884,9 +5864,15 @@ parse_dcb_gpio_table(struct nvbios *bios)
 			}
 
 			e->line = (e->entry & 0x0000001f) >> 0;
-			e->state_default = (e->entry & 0x01000000) >> 24;
-			e->state[0] = (e->entry & 0x18000000) >> 27;
-			e->state[1] = (e->entry & 0x60000000) >> 29;
+			if (gpio[0] == 0x40) {
+				e->state_default = (e->entry & 0x01000000) >> 24;
+				e->state[0] = (e->entry & 0x18000000) >> 27;
+				e->state[1] = (e->entry & 0x60000000) >> 29;
+			} else {
+				e->state_default = (e->entry & 0x00000080) >> 7;
+				e->state[0] = (entry[4] >> 4) & 3;
+				e->state[1] = (entry[4] >> 6) & 3;
+			}
 		}
 	}
 
@@ -6156,7 +6142,14 @@ parse_dcb20_entry(struct drm_device *dev, struct dcb_table *dcb,
 	}
 	case OUTPUT_DP:
 		entry->dpconf.sor.link = (conf & 0x00000030) >> 4;
-		entry->dpconf.link_bw = (conf & 0x00e00000) >> 21;
+		switch ((conf & 0x00e00000) >> 21) {
+		case 0:
+			entry->dpconf.link_bw = 162000;
+			break;
+		default:
+			entry->dpconf.link_bw = 270000;
+			break;
+		}
 		switch ((conf & 0x0f000000) >> 24) {
 		case 0xf:
 			entry->dpconf.link_nr = 4;
@@ -6769,7 +6762,7 @@ uint8_t *nouveau_bios_embedded_edid(struct drm_device *dev)
 
 void
 nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
-			    struct dcb_entry *dcbent)
+			    struct dcb_entry *dcbent, int crtc)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nvbios *bios = &dev_priv->vbios;
@@ -6777,11 +6770,22 @@ nouveau_bios_run_init_table(struct drm_device *dev, uint16_t table,
 
 	spin_lock_bh(&bios->lock);
 	bios->display.output = dcbent;
+	bios->display.crtc = crtc;
 	parse_init_table(bios, table, &iexec);
 	bios->display.output = NULL;
 	spin_unlock_bh(&bios->lock);
 }
 
+void
+nouveau_bios_init_exec(struct drm_device *dev, uint16_t table)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvbios *bios = &dev_priv->vbios;
+	struct init_exec iexec = { true, false };
+
+	parse_init_table(bios, table, &iexec);
+}
+
 static bool NVInitVBIOS(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -6863,9 +6867,8 @@ nouveau_run_vbios_init(struct drm_device *dev)
 
 	if (dev_priv->card_type >= NV_50) {
 		for (i = 0; i < bios->dcb.entries; i++) {
-			nouveau_bios_run_display_table(dev,
-						       &bios->dcb.entry[i],
-						       0, 0);
+			nouveau_bios_run_display_table(dev, 0, 0,
+						       &bios->dcb.entry[i], -1);
 		}
 	}
 

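bios_encoder_match() above replaces the old per-field table walk with a single hash compare: bits 3:0 carry the DCB type, bits 7:4 the location, bits 19:16 an OR mask, and bits 23:22 a link mask that is only checked for TMDS/LVDS/DP outputs (and only when the table entry sets it). A self-contained sketch of the same bit tests against a simplified DCB record (the struct and the sample values are illustrative, not the driver's types):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the driver's dcb_entry; field meanings
 * follow the hash layout used by bios_encoder_match() above. */
struct fake_dcb {
	uint8_t type;           /* OUTPUT_* value, low 4 bits of the hash */
	uint8_t location;       /* compared against hash bits 7:4 */
	uint8_t or;             /* OR bitmask, tested against hash bits 19:16 */
	uint8_t link;           /* sorconf.link bitmask, hash bits 23:22 */
	bool    is_sor_digital; /* TMDS/LVDS/DP: link bits are checked */
};

static bool encoder_match(const struct fake_dcb *dcb, uint32_t hash)
{
	if ((hash & 0x000000f0) != ((uint32_t)dcb->location << 4))
		return false;
	if ((hash & 0x0000000f) != dcb->type)
		return false;
	if (!(hash & ((uint32_t)dcb->or << 16)))
		return false;
	/* link bits only matter for digital SOR outputs, and only if the
	 * table entry actually sets them */
	if (dcb->is_sor_digital && (hash & 0x00c00000) &&
	    !(hash & ((uint32_t)dcb->link << 22)))
		return false;
	return true;
}

int main(void)
{
	struct fake_dcb dcb = { .type = 6, .location = 0, .or = 0x2,
				.link = 0x1, .is_sor_digital = true };
	/* hash: type 6, location 0, OR mask 0x2, link mask 0x1 */
	uint32_t hash = (0x1u << 22) | (0x2u << 16) | (0x0u << 4) | 0x6u;
	printf("match: %d\n", encoder_match(&dcb, hash));
	return 0;
}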
+ 1 - 1
drivers/gpu/drm/nouveau/nouveau_bios.h

@@ -289,8 +289,8 @@ struct nvbios {
 
 	struct {
 		struct dcb_entry *output;
+		int crtc;
 		uint16_t script_table_ptr;
-		uint16_t dp_table_ptr;
 	} display;
 
 	struct {

+ 1 - 1
drivers/gpu/drm/nouveau/nouveau_bo.c

@@ -956,7 +956,7 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 			break;
 		}
 
-		if (dev_priv->card_type == NV_C0)
+		if (dev_priv->card_type >= NV_C0)
 			page_shift = node->page_shift;
 		else
 			page_shift = 12;

+ 10 - 6
drivers/gpu/drm/nouveau/nouveau_channel.c

@@ -411,13 +411,17 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
 		return ret;
 	init->channel  = chan->id;
 
-	if (chan->dma.ib_max)
-		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
-					NOUVEAU_GEM_DOMAIN_GART;
-	else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
+	if (nouveau_vram_pushbuf == 0) {
+		if (chan->dma.ib_max)
+			init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM |
+						NOUVEAU_GEM_DOMAIN_GART;
+		else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM)
+			init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
+		else
+			init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
+	} else {
 		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM;
-	else
-		init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART;
+	}
 
 	if (dev_priv->card_type < NV_C0) {
 		init->subchan[0].handle = NvM2MF;

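The nouveau_channel.c change above gates the push-buffer placement hints on the vram_pushbuf module option: when it is 0, the reported domain follows where the buffer actually lives (VRAM|GART for IB-mode channels, VRAM or GART for the others), and when it is set the ioctl always reports VRAM. A small decision-table sketch with stand-in flag values (the constants and parameter names are illustrative, not the UAPI ones):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in domain flags; the real NOUVEAU_GEM_DOMAIN_* values live in
 * the UAPI header and are not reproduced here. */
#define DOM_VRAM 0x1u
#define DOM_GART 0x2u

static uint32_t pushbuf_domains(bool force_vram, bool ib_mode, bool bo_in_vram)
{
	if (force_vram)              /* nouveau_vram_pushbuf != 0 */
		return DOM_VRAM;
	if (ib_mode)                 /* chan->dma.ib_max != 0 */
		return DOM_VRAM | DOM_GART;
	return bo_in_vram ? DOM_VRAM : DOM_GART;
}

int main(void)
{
	printf("%x %x %x\n",
	       (unsigned)pushbuf_domains(false, true,  false),
	       (unsigned)pushbuf_domains(false, false, true),
	       (unsigned)pushbuf_domains(true,  false, false));
	return 0;
}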
+ 14 - 38
drivers/gpu/drm/nouveau/nouveau_connector.c

@@ -39,7 +39,7 @@
 
 static void nouveau_connector_hotplug(void *, int);
 
-static struct nouveau_encoder *
+struct nouveau_encoder *
 find_encoder(struct drm_connector *connector, int type)
 {
 	struct drm_device *dev = connector->dev;
@@ -116,10 +116,6 @@ nouveau_connector_destroy(struct drm_connector *connector)
 				      nouveau_connector_hotplug, connector);
 	}
 
-	if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
-	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
-		nouveau_backlight_exit(connector);
-
 	kfree(nv_connector->edid);
 	drm_sysfs_connector_remove(connector);
 	drm_connector_cleanup(connector);
@@ -712,11 +708,8 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
 	case OUTPUT_TV:
 		return get_slave_funcs(encoder)->mode_valid(encoder, mode);
 	case OUTPUT_DP:
-		if (nv_encoder->dp.link_bw == DP_LINK_BW_2_7)
-			max_clock = nv_encoder->dp.link_nr * 270000;
-		else
-			max_clock = nv_encoder->dp.link_nr * 162000;
-
+		max_clock  = nv_encoder->dp.link_nr;
+		max_clock *= nv_encoder->dp.link_bw;
 		clock = clock * nouveau_connector_bpp(connector) / 8;
 		break;
 	default:
@@ -871,7 +864,6 @@ nouveau_connector_create(struct drm_device *dev, int index)
 					dev->mode_config.scaling_mode_property,
 					nv_connector->scaling_mode);
 		}
-		connector->polled = DRM_CONNECTOR_POLL_CONNECT;
 		/* fall-through */
 	case DCB_CONNECTOR_TV_0:
 	case DCB_CONNECTOR_TV_1:
@@ -888,27 +880,20 @@ nouveau_connector_create(struct drm_device *dev, int index)
 				dev->mode_config.dithering_mode_property,
 				nv_connector->use_dithering ?
 				DRM_MODE_DITHERING_ON : DRM_MODE_DITHERING_OFF);
-
-		if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS) {
-			if (dev_priv->card_type >= NV_50)
-				connector->polled = DRM_CONNECTOR_POLL_HPD;
-			else
-				connector->polled = DRM_CONNECTOR_POLL_CONNECT;
-		}
 		break;
 	}
 
-	if (pgpio->irq_register) {
+	if (nv_connector->dcb->gpio_tag != 0xff && pgpio->irq_register) {
 		pgpio->irq_register(dev, nv_connector->dcb->gpio_tag,
 				    nouveau_connector_hotplug, connector);
+
+		connector->polled = DRM_CONNECTOR_POLL_HPD;
+	} else {
+		connector->polled = DRM_CONNECTOR_POLL_CONNECT;
 	}
 
 	drm_sysfs_connector_add(connector);
 
-	if (connector->connector_type == DRM_MODE_CONNECTOR_LVDS ||
-	    connector->connector_type == DRM_MODE_CONNECTOR_eDP)
-		nouveau_backlight_init(connector);
-
 	dcb->drm = connector;
 	return dcb->drm;
 
@@ -925,22 +910,13 @@ nouveau_connector_hotplug(void *data, int plugged)
 	struct drm_connector *connector = data;
 	struct drm_device *dev = connector->dev;
 
-	NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un",
-		drm_get_connector_name(connector));
-
-	if (connector->encoder && connector->encoder->crtc &&
-	    connector->encoder->crtc->enabled) {
-		struct nouveau_encoder *nv_encoder = nouveau_encoder(connector->encoder);
-		struct drm_encoder_helper_funcs *helper =
-			connector->encoder->helper_private;
+	NV_DEBUG(dev, "%splugged %s\n", plugged ? "" : "un",
+		 drm_get_connector_name(connector));
 
-		if (nv_encoder->dcb->type == OUTPUT_DP) {
-			if (plugged)
-				helper->dpms(connector->encoder, DRM_MODE_DPMS_ON);
-			else
-				helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
-		}
-	}
+	if (plugged)
+		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+	else
+		drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
 
 	drm_helper_hpd_irq_event(dev);
 }

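In nouveau_connector_mode_valid() the DP branch now derives the available link bandwidth directly from the parsed capabilities, link_nr * link_bw (link_bw in kHz per the new DCB parsing), and scales the mode clock by bytes per pixel (clock * bpp / 8). A worked sketch of that bandwidth arithmetic; the final comparison and the sample numbers here are illustrative:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative DP bandwidth check mirroring the mode_valid math above:
 * link_bw is the per-lane rate in kHz (162000 or 270000), bpp the
 * connector's bits per pixel, pixclk the mode clock in kHz. */
static bool dp_mode_fits(int link_nr, int link_bw, int bpp, int pixclk)
{
	int max_clock = link_nr * link_bw;     /* total link capacity */
	int clock = pixclk * bpp / 8;          /* payload rate in kHz */
	return clock <= max_clock;
}

int main(void)
{
	/* 4 lanes at 2.7 GHz, 24 bpp, ~154 MHz pixel clock */
	printf("%d\n", dp_mode_fits(4, 270000, 24, 154000));
	/* 1 lane at 1.62 GHz cannot carry the same mode */
	printf("%d\n", dp_mode_fits(1, 162000, 24, 154000));
	return 0;
}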
+ 1 - 2
drivers/gpu/drm/nouveau/nouveau_crtc.h

@@ -82,14 +82,13 @@ static inline struct drm_crtc *to_drm_crtc(struct nouveau_crtc *crtc)
 }
 
 int nv50_crtc_create(struct drm_device *dev, int index);
-int nv50_cursor_init(struct nouveau_crtc *);
-void nv50_cursor_fini(struct nouveau_crtc *);
 int nv50_crtc_cursor_set(struct drm_crtc *drm_crtc, struct drm_file *file_priv,
 			 uint32_t buffer_handle, uint32_t width,
 			 uint32_t height);
 int nv50_crtc_cursor_move(struct drm_crtc *drm_crtc, int x, int y);
 
 int nv04_cursor_init(struct nouveau_crtc *);
+int nv50_cursor_init(struct nouveau_crtc *);
 
 struct nouveau_connector *
 nouveau_crtc_connector_get(struct nouveau_crtc *crtc);

+ 6 - 3
drivers/gpu/drm/nouveau/nouveau_display.c

@@ -105,9 +105,12 @@ nouveau_framebuffer_init(struct drm_device *dev,
 		if (dev_priv->chipset == 0x50)
 			nv_fb->r_format |= (tile_flags << 8);
 
-		if (!tile_flags)
-			nv_fb->r_pitch = 0x00100000 | fb->pitch;
-		else {
+		if (!tile_flags) {
+			if (dev_priv->card_type < NV_D0)
+				nv_fb->r_pitch = 0x00100000 | fb->pitch;
+			else
+				nv_fb->r_pitch = 0x01000000 | fb->pitch;
+		} else {
 			u32 mode = nvbo->tile_mode;
 			if (dev_priv->card_type >= NV_C0)
 				mode >>= 4;

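The nouveau_display.c hunk above selects the linear-scanout pitch encoding by card generation: pre-NV_D0 display engines take 0x00100000 | pitch, NV_D0 takes 0x01000000 | pitch, and tiled buffers fall through to the tile-mode path instead. A tiny sketch of that selection (flag values copied from the hunk, the enum is a stand-in):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the driver's card_type enum; only the ordering matters
 * for this illustration. */
enum card { NV_50 = 0x50, NV_C0 = 0xc0, NV_D0 = 0xd0 };

static uint32_t linear_r_pitch(enum card card_type, uint32_t pitch_bytes)
{
	/* linear (non-tiled) scanout: flag differs between the nv50-style
	 * display and the nvd0 display engine */
	if (card_type < NV_D0)
		return 0x00100000u | pitch_bytes;
	return 0x01000000u | pitch_bytes;
}

int main(void)
{
	printf("%08x\n", (unsigned)linear_r_pitch(NV_50, 1920 * 4));
	printf("%08x\n", (unsigned)linear_r_pitch(NV_D0, 1920 * 4));
	return 0;
}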
+ 531 - 419
drivers/gpu/drm/nouveau/nouveau_dp.c

File diff suppressed because it is too large


+ 1 - 1
drivers/gpu/drm/nouveau/nouveau_drv.c

@@ -41,7 +41,7 @@ int nouveau_agpmode = -1;
 module_param_named(agpmode, nouveau_agpmode, int, 0400);
 
 MODULE_PARM_DESC(modeset, "Enable kernel modesetting");
-static int nouveau_modeset = -1; /* kms */
+int nouveau_modeset = -1;
 module_param_named(modeset, nouveau_modeset, int, 0400);
 
 MODULE_PARM_DESC(vbios, "Override default VBIOS location");

+ 91 - 32
drivers/gpu/drm/nouveau/nouveau_drv.h

@@ -414,12 +414,13 @@ struct nouveau_gpio_engine {
 };
 
 struct nouveau_pm_voltage_level {
-	u8 voltage;
-	u8 vid;
+	u32 voltage; /* microvolts */
+	u8  vid;
 };
 
 struct nouveau_pm_voltage {
 	bool supported;
+	u8 version;
 	u8 vid_mask;
 
 	struct nouveau_pm_voltage_level *level;
@@ -428,17 +429,48 @@ struct nouveau_pm_voltage {
 
 struct nouveau_pm_memtiming {
 	int id;
-	u32 reg_100220;
-	u32 reg_100224;
-	u32 reg_100228;
-	u32 reg_10022c;
-	u32 reg_100230;
-	u32 reg_100234;
-	u32 reg_100238;
-	u32 reg_10023c;
-	u32 reg_100240;
+	u32 reg_0; /* 0x10f290 on Fermi, 0x100220 for older */
+	u32 reg_1;
+	u32 reg_2;
+	u32 reg_3;
+	u32 reg_4;
+	u32 reg_5;
+	u32 reg_6;
+	u32 reg_7;
+	u32 reg_8;
+	/* To be written to 0x1002c0 */
+	u8 CL;
+	u8 WR;
 };
 
+struct nouveau_pm_tbl_header{
+	u8 version;
+	u8 header_len;
+	u8 entry_cnt;
+	u8 entry_len;
+};
+
+struct nouveau_pm_tbl_entry{
+	u8 tWR;
+	u8 tUNK_1;
+	u8 tCL;
+	u8 tRP;		/* Byte 3 */
+	u8 empty_4;
+	u8 tRAS;	/* Byte 5 */
+	u8 empty_6;
+	u8 tRFC;	/* Byte 7 */
+	u8 empty_8;
+	u8 tRC;		/* Byte 9 */
+	u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14;
+	u8 empty_15,empty_16,empty_17;
+	u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21;
+};
+
+/* nouveau_mem.c */
+void nv30_mem_timing_entry(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr,
+							struct nouveau_pm_tbl_entry *e, uint8_t magic_number,
+							struct nouveau_pm_memtiming *timing);
+
 #define NOUVEAU_PM_MAX_LEVEL 8
 struct nouveau_pm_level {
 	struct device_attribute dev_attr;
@@ -448,11 +480,19 @@ struct nouveau_pm_level {
 	u32 core;
 	u32 memory;
 	u32 shader;
-	u32 unk05;
-	u32 unk0a;
-
-	u8 voltage;
-	u8 fanspeed;
+	u32 rop;
+	u32 copy;
+	u32 daemon;
+	u32 vdec;
+	u32 unk05;	/* nv50:nva3, roughly.. */
+	u32 unka0;	/* nva3:nvc0 */
+	u32 hub01;	/* nvc0- */
+	u32 hub06;	/* nvc0- */
+	u32 hub07;	/* nvc0- */
+
+	u32 volt_min; /* microvolts */
+	u32 volt_max;
+	u8  fanspeed;
 
 	u16 memscript;
 	struct nouveau_pm_memtiming *timing;
@@ -496,6 +536,11 @@ struct nouveau_pm_engine {
 	void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *,
 			   u32 id, int khz);
 	void (*clock_set)(struct drm_device *, void *);
+
+	int  (*clocks_get)(struct drm_device *, struct nouveau_pm_level *);
+	void *(*clocks_pre)(struct drm_device *, struct nouveau_pm_level *);
+	void (*clocks_set)(struct drm_device *, void *);
+
 	int (*voltage_get)(struct drm_device *);
 	int (*voltage_set)(struct drm_device *, int voltage);
 	int (*fanspeed_get)(struct drm_device *);
@@ -504,7 +549,7 @@ struct nouveau_pm_engine {
 };
 
 struct nouveau_vram_engine {
-	struct nouveau_mm *mm;
+	struct nouveau_mm mm;
 
 	int  (*init)(struct drm_device *);
 	void (*takedown)(struct drm_device *dev);
@@ -623,6 +668,7 @@ enum nouveau_card_type {
 	NV_40      = 0x40,
 	NV_50      = 0x50,
 	NV_C0      = 0xc0,
+	NV_D0      = 0xd0
 };
 
 struct drm_nouveau_private {
@@ -633,8 +679,8 @@ struct drm_nouveau_private {
 	enum nouveau_card_type card_type;
 	/* exact chipset, derived from NV_PMC_BOOT_0 */
 	int chipset;
-	int stepping;
 	int flags;
+	u32 crystal;
 
 	void __iomem *mmio;
 
@@ -721,7 +767,6 @@ struct drm_nouveau_private {
 	uint64_t vram_size;
 	uint64_t vram_sys_base;
 
-	uint64_t fb_phys;
 	uint64_t fb_available_size;
 	uint64_t fb_mappable_pages;
 	uint64_t fb_aper_free;
@@ -784,6 +829,7 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
 }
 
 /* nouveau_drv.c */
+extern int nouveau_modeset;
 extern int nouveau_agpmode;
 extern int nouveau_duallink;
 extern int nouveau_uscript_lvds;
@@ -824,6 +870,8 @@ extern bool nouveau_wait_eq(struct drm_device *, uint64_t timeout,
 			    uint32_t reg, uint32_t mask, uint32_t val);
 extern bool nouveau_wait_ne(struct drm_device *, uint64_t timeout,
 			    uint32_t reg, uint32_t mask, uint32_t val);
+extern bool nouveau_wait_cb(struct drm_device *, u64 timeout,
+			    bool (*cond)(void *), void *);
 extern bool nouveau_wait_for_idle(struct drm_device *);
 extern int  nouveau_card_init(struct drm_device *);
 
@@ -1006,15 +1054,15 @@ static inline int nouveau_acpi_edid(struct drm_device *dev, struct drm_connector
 
 /* nouveau_backlight.c */
 #ifdef CONFIG_DRM_NOUVEAU_BACKLIGHT
-extern int nouveau_backlight_init(struct drm_connector *);
-extern void nouveau_backlight_exit(struct drm_connector *);
+extern int nouveau_backlight_init(struct drm_device *);
+extern void nouveau_backlight_exit(struct drm_device *);
 #else
-static inline int nouveau_backlight_init(struct drm_connector *dev)
+static inline int nouveau_backlight_init(struct drm_device *dev)
 {
 	return 0;
 }
 
-static inline void nouveau_backlight_exit(struct drm_connector *dev) { }
+static inline void nouveau_backlight_exit(struct drm_device *dev) { }
 #endif
 
 /* nouveau_bios.c */
@@ -1022,7 +1070,8 @@ extern int nouveau_bios_init(struct drm_device *);
 extern void nouveau_bios_takedown(struct drm_device *dev);
 extern int nouveau_run_vbios_init(struct drm_device *);
 extern void nouveau_bios_run_init_table(struct drm_device *, uint16_t table,
-					struct dcb_entry *);
+					struct dcb_entry *, int crtc);
+extern void nouveau_bios_init_exec(struct drm_device *, uint16_t table);
 extern struct dcb_gpio_entry *nouveau_bios_gpio_entry(struct drm_device *,
 						      enum dcb_gpio_tag);
 extern struct dcb_connector_table_entry *
@@ -1030,11 +1079,8 @@ nouveau_bios_connector_entry(struct drm_device *, int index);
 extern u32 get_pll_register(struct drm_device *, enum pll_types);
 extern int get_pll_limits(struct drm_device *, uint32_t limit_match,
 			  struct pll_lims *);
-extern int nouveau_bios_run_display_table(struct drm_device *,
-					  struct dcb_entry *,
-					  uint32_t script, int pxclk);
-extern void *nouveau_bios_dp_table(struct drm_device *, struct dcb_entry *,
-				   int *length);
+extern int nouveau_bios_run_display_table(struct drm_device *, u16 id, int clk,
+					  struct dcb_entry *, int crtc);
 extern bool nouveau_bios_fp_mode(struct drm_device *, struct drm_display_mode *);
 extern uint8_t *nouveau_bios_embedded_edid(struct drm_device *);
 extern int nouveau_bios_parse_lvds_table(struct drm_device *, int pxclk,
@@ -1043,6 +1089,7 @@ extern int run_tmds_table(struct drm_device *, struct dcb_entry *,
 			  int head, int pxclk);
 extern int call_lvds_script(struct drm_device *, struct dcb_entry *, int head,
 			    enum LVDS_script, int pxclk);
+bool bios_encoder_match(struct dcb_entry *, u32 hash);
 
 /* nouveau_ttm.c */
 int nouveau_ttm_global_init(struct drm_nouveau_private *);
@@ -1053,7 +1100,9 @@ int nouveau_ttm_mmap(struct file *, struct vm_area_struct *);
 int nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
 		     uint8_t *data, int data_nr);
 bool nouveau_dp_detect(struct drm_encoder *);
-bool nouveau_dp_link_train(struct drm_encoder *);
+bool nouveau_dp_link_train(struct drm_encoder *, u32 datarate);
+void nouveau_dp_tu_update(struct drm_device *, int, int, u32, u32);
+u8 *nouveau_dp_bios_data(struct drm_device *, struct dcb_entry *, u8 **);
 
 /* nv04_fb.c */
 extern int  nv04_fb_init(struct drm_device *);
@@ -1179,8 +1228,8 @@ extern int  nva3_copy_create(struct drm_device *dev);
 /* nvc0_copy.c */
 extern int  nvc0_copy_create(struct drm_device *dev, int engine);
 
-/* nv40_mpeg.c */
-extern int  nv40_mpeg_create(struct drm_device *dev);
+/* nv31_mpeg.c */
+extern int  nv31_mpeg_create(struct drm_device *dev);
 
 /* nv50_mpeg.c */
 extern int  nv50_mpeg_create(struct drm_device *dev);
@@ -1265,6 +1314,11 @@ extern int nv04_display_create(struct drm_device *);
 extern int nv04_display_init(struct drm_device *);
 extern void nv04_display_destroy(struct drm_device *);
 
+/* nvd0_display.c */
+extern int nvd0_display_create(struct drm_device *);
+extern int nvd0_display_init(struct drm_device *);
+extern void nvd0_display_destroy(struct drm_device *);
+
 /* nv04_crtc.c */
 extern int nv04_crtc_create(struct drm_device *, int index);
 
@@ -1374,6 +1428,8 @@ int nv50_gpio_init(struct drm_device *dev);
 void nv50_gpio_fini(struct drm_device *dev);
 int nv50_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
 int nv50_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
+int nvd0_gpio_get(struct drm_device *dev, enum dcb_gpio_tag tag);
+int nvd0_gpio_set(struct drm_device *dev, enum dcb_gpio_tag tag, int state);
 int  nv50_gpio_irq_register(struct drm_device *, enum dcb_gpio_tag,
 			    void (*)(void *, int), void *);
 void nv50_gpio_irq_unregister(struct drm_device *, enum dcb_gpio_tag,
@@ -1448,6 +1504,8 @@ static inline void nv_wr08(struct drm_device *dev, unsigned reg, u8 val)
 	nouveau_wait_eq(dev, 2000000000ULL, (reg), (mask), (val))
 #define nv_wait_ne(dev, reg, mask, val) \
 	nouveau_wait_ne(dev, 2000000000ULL, (reg), (mask), (val))
+#define nv_wait_cb(dev, func, data) \
+	nouveau_wait_cb(dev, 2000000000ULL, (func), (data))
 
 /* PRAMIN access */
 static inline u32 nv_ri32(struct drm_device *dev, unsigned offset)
@@ -1514,6 +1572,7 @@ enum {
 	NOUVEAU_REG_DEBUG_RMVIO          = 0x80,
 	NOUVEAU_REG_DEBUG_VGAATTR        = 0x100,
 	NOUVEAU_REG_DEBUG_EVO            = 0x200,
+	NOUVEAU_REG_DEBUG_AUXCH          = 0x400
 };
 
 #define NV_REG_DEBUG(type, dev, fmt, arg...) do { \

+ 5 - 22
drivers/gpu/drm/nouveau/nouveau_encoder.h

@@ -49,17 +49,17 @@ struct nouveau_encoder {
 
 	union {
 		struct {
-			int mc_unknown;
-			uint32_t unk0;
-			uint32_t unk1;
-			int dpcd_version;
+			u8  dpcd[8];
 			int link_nr;
 			int link_bw;
-			bool enhanced_frame;
+			u32 datarate;
 		} dp;
 	};
 };
 
+struct nouveau_encoder *
+find_encoder(struct drm_connector *connector, int type);
+
 static inline struct nouveau_encoder *nouveau_encoder(struct drm_encoder *enc)
 {
 	struct drm_encoder_slave *slave = to_encoder_slave(enc);
@@ -83,21 +83,4 @@ nouveau_encoder_connector_get(struct nouveau_encoder *encoder);
 int nv50_sor_create(struct drm_connector *, struct dcb_entry *);
 int nv50_dac_create(struct drm_connector *, struct dcb_entry *);
 
-struct bit_displayport_encoder_table {
-	uint32_t match;
-	uint8_t  record_nr;
-	uint8_t  unknown;
-	uint16_t script0;
-	uint16_t script1;
-	uint16_t unknown_table;
-} __attribute__ ((packed));
-
-struct bit_displayport_encoder_table_entry {
-	uint8_t vs_level;
-	uint8_t pre_level;
-	uint8_t reg0;
-	uint8_t reg1;
-	uint8_t reg2;
-} __attribute__ ((packed));
-
 #endif /* __NOUVEAU_ENCODER_H__ */

+ 1 - 1
drivers/gpu/drm/nouveau/nouveau_fence.c

@@ -519,7 +519,7 @@ nouveau_fence_channel_init(struct nouveau_channel *chan)
 	if (USE_SEMA(dev) && dev_priv->chipset < 0x84) {
 		struct ttm_mem_reg *mem = &dev_priv->fence.bo->bo.mem;
 
-		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
 					     mem->start << PAGE_SHIFT,
 					     mem->size, NV_MEM_ACCESS_RW,
 					     NV_MEM_TARGET_VRAM, &obj);

+ 39 - 17
drivers/gpu/drm/nouveau/nouveau_i2c.c

@@ -107,6 +107,13 @@ nv4e_i2c_getsda(void *data)
 	return !!((nv_rd32(dev, i2c->rd) >> 16) & 8);
 }
 
+static const uint32_t nv50_i2c_port[] = {
+	0x00e138, 0x00e150, 0x00e168, 0x00e180,
+	0x00e254, 0x00e274, 0x00e764, 0x00e780,
+	0x00e79c, 0x00e7b8
+};
+#define NV50_I2C_PORTS ARRAY_SIZE(nv50_i2c_port)
+
 static int
 nv50_i2c_getscl(void *data)
 {
@@ -130,28 +137,32 @@ static void
 nv50_i2c_setscl(void *data, int state)
 {
 	struct nouveau_i2c_chan *i2c = data;
-	struct drm_device *dev = i2c->dev;
 
-	nv_wr32(dev, i2c->wr, 4 | (i2c->data ? 2 : 0) | (state ? 1 : 0));
+	nv_wr32(i2c->dev, i2c->wr, 4 | (i2c->data ? 2 : 0) | (state ? 1 : 0));
 }
 
 static void
 nv50_i2c_setsda(void *data, int state)
 {
 	struct nouveau_i2c_chan *i2c = data;
-	struct drm_device *dev = i2c->dev;
 
-	nv_wr32(dev, i2c->wr,
-			(nv_rd32(dev, i2c->rd) & 1) | 4 | (state ? 2 : 0));
+	nv_mask(i2c->dev, i2c->wr, 0x00000006, 4 | (state ? 2 : 0));
 	i2c->data = state;
 }
 
-static const uint32_t nv50_i2c_port[] = {
-	0x00e138, 0x00e150, 0x00e168, 0x00e180,
-	0x00e254, 0x00e274, 0x00e764, 0x00e780,
-	0x00e79c, 0x00e7b8
-};
-#define NV50_I2C_PORTS ARRAY_SIZE(nv50_i2c_port)
+static int
+nvd0_i2c_getscl(void *data)
+{
+	struct nouveau_i2c_chan *i2c = data;
+	return !!(nv_rd32(i2c->dev, i2c->rd) & 0x10);
+}
+
+static int
+nvd0_i2c_getsda(void *data)
+{
+	struct nouveau_i2c_chan *i2c = data;
+	return !!(nv_rd32(i2c->dev, i2c->rd) & 0x20);
+}
 
 int
 nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
@@ -163,7 +174,8 @@ nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
 	if (entry->chan)
 		return -EEXIST;
 
-	if (dev_priv->card_type >= NV_50 && entry->read >= NV50_I2C_PORTS) {
+	if (dev_priv->card_type >= NV_50 &&
+	    dev_priv->card_type <= NV_C0 && entry->read >= NV50_I2C_PORTS) {
 		NV_ERROR(dev, "unknown i2c port %d\n", entry->read);
 		return -EINVAL;
 	}
@@ -192,10 +204,17 @@ nouveau_i2c_init(struct drm_device *dev, struct dcb_i2c_entry *entry, int index)
 	case 5:
 		i2c->bit.setsda = nv50_i2c_setsda;
 		i2c->bit.setscl = nv50_i2c_setscl;
-		i2c->bit.getsda = nv50_i2c_getsda;
-		i2c->bit.getscl = nv50_i2c_getscl;
-		i2c->rd = nv50_i2c_port[entry->read];
-		i2c->wr = i2c->rd;
+		if (dev_priv->card_type < NV_D0) {
+			i2c->bit.getsda = nv50_i2c_getsda;
+			i2c->bit.getscl = nv50_i2c_getscl;
+			i2c->rd = nv50_i2c_port[entry->read];
+			i2c->wr = i2c->rd;
+		} else {
+			i2c->bit.getsda = nvd0_i2c_getsda;
+			i2c->bit.getscl = nvd0_i2c_getscl;
+			i2c->rd = 0x00d014 + (entry->read * 0x20);
+			i2c->wr = i2c->rd;
+		}
 		break;
 	case 6:
 		i2c->rd = entry->read;
@@ -267,7 +286,10 @@ nouveau_i2c_find(struct drm_device *dev, int index)
 			val  = 0xe001;
 		}
 
-		nv_wr32(dev, reg, (nv_rd32(dev, reg) & ~0xf003) | val);
+		/* nfi, but neither auxch or i2c work if it's 1 */
+		nv_mask(dev, reg + 0x0c, 0x00000001, 0x00000000);
+		/* nfi, but switches auxch vs normal i2c */
+		nv_mask(dev, reg + 0x00, 0x0000f003, val);
 	}
 
 	if (!i2c->chan && nouveau_i2c_init(dev, i2c, index))

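The i2c changes above add nvd0-style bit-banged SCL/SDA reads (a single status register per port, bit 4 = SCL, bit 5 = SDA) alongside the existing nv50 helpers, with the nvd0 port register derived as 0x00d014 + port * 0x20. A stubbed sketch of the read side, with the MMIO access replaced by a plain variable (the register layout follows the hunk; everything else is illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Pretend status register for one nvd0-style I2C port; in the driver
 * this would be an MMIO read at 0x00d014 + port * 0x20. */
static uint32_t fake_port_status;

static bool port_getscl(void)
{
	return !!(fake_port_status & 0x10);   /* bit 4: SCL line state */
}

static bool port_getsda(void)
{
	return !!(fake_port_status & 0x20);   /* bit 5: SDA line state */
}

int main(void)
{
	fake_port_status = 0x30;              /* both lines released high */
	printf("scl=%d sda=%d\n", port_getscl(), port_getsda());
	fake_port_status = 0x10;              /* slave pulling SDA low */
	printf("scl=%d sda=%d\n", port_getscl(), port_getsda());
	return 0;
}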
+ 151 - 133
drivers/gpu/drm/nouveau/nouveau_mem.c

@@ -408,8 +408,6 @@ nouveau_mem_vram_init(struct drm_device *dev)
 	if (ret)
 		return ret;
 
-	dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);
-
 	ret = nouveau_ttm_global_init(dev_priv);
 	if (ret)
 		return ret;
@@ -504,35 +502,146 @@ nouveau_mem_gart_init(struct drm_device *dev)
 	return 0;
 }
 
+/* XXX: For now a dummy. More samples required, possibly even a card
+ * Called from nouveau_perf.c */
+void nv30_mem_timing_entry(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr,
+							struct nouveau_pm_tbl_entry *e, uint8_t magic_number,
+							struct nouveau_pm_memtiming *timing) {
+
+	NV_DEBUG(dev,"Timing entry format unknown, please contact nouveau developers");
+}
+
+void nv40_mem_timing_entry(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr,
+							struct nouveau_pm_tbl_entry *e, uint8_t magic_number,
+							struct nouveau_pm_memtiming *timing) {
+
+	timing->reg_0 = (e->tRC << 24 | e->tRFC << 16 | e->tRAS << 8 | e->tRP);
+
+	/* XXX: I don't trust the -1's and +1's... they must come
+	 *      from somewhere! */
+	timing->reg_1 = (e->tWR + 2 + magic_number) << 24 |
+				  1 << 16 |
+				  (e->tUNK_1 + 2 + magic_number) << 8 |
+				  (e->tCL + 2 - magic_number);
+	timing->reg_2 = (magic_number << 24 | e->tUNK_12 << 16 | e->tUNK_11 << 8 | e->tUNK_10);
+	timing->reg_2 |= 0x20200000;
+
+	NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x\n", timing->id,
+		 timing->reg_0, timing->reg_1,timing->reg_2);
+}
+
+void nv50_mem_timing_entry(struct drm_device *dev, struct bit_entry *P, struct nouveau_pm_tbl_header *hdr,
+							struct nouveau_pm_tbl_entry *e, uint8_t magic_number,struct nouveau_pm_memtiming *timing) {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	uint8_t unk18 = 1,
+		unk19 = 1,
+		unk20 = 0,
+		unk21 = 0;
+
+	switch (min(hdr->entry_len, (u8) 22)) {
+	case 22:
+		unk21 = e->tUNK_21;
+	case 21:
+		unk20 = e->tUNK_20;
+	case 20:
+		unk19 = e->tUNK_19;
+	case 19:
+		unk18 = e->tUNK_18;
+		break;
+	}
+
+	timing->reg_0 = (e->tRC << 24 | e->tRFC << 16 | e->tRAS << 8 | e->tRP);
+
+	/* XXX: I don't trust the -1's and +1's... they must come
+	 *      from somewhere! */
+	timing->reg_1 = (e->tWR + unk19 + 1 + magic_number) << 24 |
+				  max(unk18, (u8) 1) << 16 |
+				  (e->tUNK_1 + unk19 + 1 + magic_number) << 8;
+	if (dev_priv->chipset == 0xa8) {
+		timing->reg_1 |= (e->tCL - 1);
+	} else {
+		timing->reg_1 |= (e->tCL + 2 - magic_number);
+	}
+	timing->reg_2 = (e->tUNK_12 << 16 | e->tUNK_11 << 8 | e->tUNK_10);
+
+	timing->reg_5 = (e->tRAS << 24 | e->tRC);
+	timing->reg_5 += max(e->tUNK_10, e->tUNK_11) << 16;
+
+	if (P->version == 1) {
+		timing->reg_2 |= magic_number << 24;
+		timing->reg_3 = (0x14 + e->tCL) << 24 |
+						0x16 << 16 |
+						(e->tCL - 1) << 8 |
+						(e->tCL - 1);
+		timing->reg_4 = (nv_rd32(dev,0x10022c) & 0xffff0000) | e->tUNK_13 << 8  | e->tUNK_13;
+		timing->reg_5 |= (e->tCL + 2) << 8;
+		timing->reg_7 = 0x4000202 | (e->tCL - 1) << 16;
+	} else {
+		timing->reg_2 |= (unk19 - 1) << 24;
+		/* XXX: reg_10022c for recentish cards pretty much unknown*/
+		timing->reg_3 = e->tCL - 1;
+		timing->reg_4 = (unk20 << 24 | unk21 << 16 |
+							e->tUNK_13 << 8  | e->tUNK_13);
+		/* XXX: +6? */
+		timing->reg_5 |= (unk19 + 6) << 8;
+
+		/* XXX: reg_10023c currently unknown
+		 * 10023c seen as 06xxxxxx, 0bxxxxxx or 0fxxxxxx */
+		timing->reg_7 = 0x202;
+	}
+
+	NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", timing->id,
+		 timing->reg_0, timing->reg_1,
+		 timing->reg_2, timing->reg_3);
+	NV_DEBUG(dev, "         230: %08x %08x %08x %08x\n",
+		 timing->reg_4, timing->reg_5,
+		 timing->reg_6, timing->reg_7);
+	NV_DEBUG(dev, "         240: %08x\n", timing->reg_8);
+}
+
+void nvc0_mem_timing_entry(struct drm_device *dev, struct nouveau_pm_tbl_header *hdr,
+							struct nouveau_pm_tbl_entry *e, struct nouveau_pm_memtiming *timing) {
+	timing->reg_0 = (e->tRC << 24 | (e->tRFC & 0x7f) << 17 | e->tRAS << 8 | e->tRP);
+	timing->reg_1 = (nv_rd32(dev,0x10f294) & 0xff000000) | (e->tUNK_11&0x0f) << 20 | (e->tUNK_19 << 7) | (e->tCL & 0x0f);
+	timing->reg_2 = (nv_rd32(dev,0x10f298) & 0xff0000ff) | e->tWR << 16 | e->tUNK_1 << 8;
+	timing->reg_3 = e->tUNK_20 << 9 | e->tUNK_13;
+	timing->reg_4 = (nv_rd32(dev,0x10f2a0) & 0xfff000ff) | e->tUNK_12 << 15;
+	NV_DEBUG(dev, "Entry %d: 290: %08x %08x %08x %08x\n", timing->id,
+		 timing->reg_0, timing->reg_1,
+		 timing->reg_2, timing->reg_3);
+	NV_DEBUG(dev, "         2a0: %08x %08x %08x %08x\n",
+		 timing->reg_4, timing->reg_5,
+		 timing->reg_6, timing->reg_7);
+}
+
+/**
+ * Processes the Memory Timing BIOS table, stores generated
+ * register values
+ * @pre init scripts were run, memtiming regs are initialized
+ */
 void
 nouveau_mem_timing_init(struct drm_device *dev)
 {
-	/* cards < NVC0 only */
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
 	struct nouveau_pm_memtimings *memtimings = &pm->memtimings;
 	struct nvbios *bios = &dev_priv->vbios;
 	struct bit_entry P;
-	u8 tUNK_0, tUNK_1, tUNK_2;
-	u8 tRP;		/* Byte 3 */
-	u8 tRAS;	/* Byte 5 */
-	u8 tRFC;	/* Byte 7 */
-	u8 tRC;		/* Byte 9 */
-	u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14;
-	u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21;
-	u8 magic_number = 0; /* Yeah... sorry*/
-	u8 *mem = NULL, *entry;
-	int i, recordlen, entries;
+	struct nouveau_pm_tbl_header *hdr = NULL;
+	uint8_t magic_number;
+	u8 *entry;
+	int i;
 
 	if (bios->type == NVBIOS_BIT) {
 		if (bit_table(dev, 'P', &P))
 			return;
 
 		if (P.version == 1)
-			mem = ROMPTR(bios, P.data[4]);
+			hdr = (struct nouveau_pm_tbl_header *) ROMPTR(bios, P.data[4]);
 		else
 		if (P.version == 2)
-			mem = ROMPTR(bios, P.data[8]);
+			hdr = (struct nouveau_pm_tbl_header *) ROMPTR(bios, P.data[8]);
 		else {
 			NV_WARN(dev, "unknown mem for BIT P %d\n", P.version);
 		}
@@ -541,150 +650,56 @@ nouveau_mem_timing_init(struct drm_device *dev)
 		return;
 	}
 
-	if (!mem) {
+	if (!hdr) {
 		NV_DEBUG(dev, "memory timing table pointer invalid\n");
 		return;
 	}
 
-	if (mem[0] != 0x10) {
-		NV_WARN(dev, "memory timing table 0x%02x unknown\n", mem[0]);
+	if (hdr->version != 0x10) {
+		NV_WARN(dev, "memory timing table 0x%02x unknown\n", hdr->version);
 		return;
 	}
 
 	/* validate record length */
-	entries   = mem[2];
-	recordlen = mem[3];
-	if (recordlen < 15) {
-		NV_ERROR(dev, "mem timing table length unknown: %d\n", mem[3]);
+	if (hdr->entry_len < 15) {
+		NV_ERROR(dev, "mem timing table length unknown: %d\n", hdr->entry_len);
 		return;
 	}
 
 	/* parse vbios entries into common format */
 	memtimings->timing =
-		kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL);
+		kcalloc(hdr->entry_cnt, sizeof(*memtimings->timing), GFP_KERNEL);
 	if (!memtimings->timing)
 		return;
 
 	/* Get "some number" from the timing reg for NV_40 and NV_50
-	 * Used in calculations later */
-	if (dev_priv->card_type >= NV_40 && dev_priv->chipset < 0x98) {
+	 * Used in calculations later... source unknown */
+	magic_number = 0;
+	if (P.version == 1) {
 		magic_number = (nv_rd32(dev, 0x100228) & 0x0f000000) >> 24;
 	}
 
-	entry = mem + mem[1];
-	for (i = 0; i < entries; i++, entry += recordlen) {
+	entry = (u8*) hdr + hdr->header_len;
+	for (i = 0; i < hdr->entry_cnt; i++, entry += hdr->entry_len) {
 		struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i];
 		if (entry[0] == 0)
 			continue;
 
-		tUNK_18 = 1;
-		tUNK_19 = 1;
-		tUNK_20 = 0;
-		tUNK_21 = 0;
-		switch (min(recordlen, 22)) {
-		case 22:
-			tUNK_21 = entry[21];
-		case 21:
-			tUNK_20 = entry[20];
-		case 20:
-			tUNK_19 = entry[19];
-		case 19:
-			tUNK_18 = entry[18];
-		default:
-			tUNK_0  = entry[0];
-			tUNK_1  = entry[1];
-			tUNK_2  = entry[2];
-			tRP     = entry[3];
-			tRAS    = entry[5];
-			tRFC    = entry[7];
-			tRC     = entry[9];
-			tUNK_10 = entry[10];
-			tUNK_11 = entry[11];
-			tUNK_12 = entry[12];
-			tUNK_13 = entry[13];
-			tUNK_14 = entry[14];
-			break;
-		}
-
-		timing->reg_100220 = (tRC << 24 | tRFC << 16 | tRAS << 8 | tRP);
-
-		/* XXX: I don't trust the -1's and +1's... they must come
-		 *      from somewhere! */
-		timing->reg_100224 = (tUNK_0 + tUNK_19 + 1 + magic_number) << 24 |
-				      max(tUNK_18, (u8) 1) << 16 |
-				      (tUNK_1 + tUNK_19 + 1 + magic_number) << 8;
-		if (dev_priv->chipset == 0xa8) {
-			timing->reg_100224 |= (tUNK_2 - 1);
-		} else {
-			timing->reg_100224 |= (tUNK_2 + 2 - magic_number);
-		}
-
-		timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10);
-		if (dev_priv->chipset >= 0xa3 && dev_priv->chipset < 0xaa)
-			timing->reg_100228 |= (tUNK_19 - 1) << 24;
-		else
-			timing->reg_100228 |= magic_number << 24;
-
-		if (dev_priv->card_type == NV_40) {
-			/* NV40: don't know what the rest of the regs are..
-			 * And don't need to know either */
-			timing->reg_100228 |= 0x20200000;
-		} else if (dev_priv->card_type >= NV_50) {
-			if (dev_priv->chipset < 0x98 ||
-			    (dev_priv->chipset == 0x98 &&
-			     dev_priv->stepping <= 0xa1)) {
-				timing->reg_10022c = (0x14 + tUNK_2) << 24 |
-						     0x16 << 16 |
-						     (tUNK_2 - 1) << 8 |
-						     (tUNK_2 - 1);
-			} else {
-				/* XXX: reg_10022c for recentish cards */
-				timing->reg_10022c = tUNK_2 - 1;
-			}
-
-			timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
-						  tUNK_13 << 8  | tUNK_13);
-
-			timing->reg_100234 = (tRAS << 24 | tRC);
-			timing->reg_100234 += max(tUNK_10, tUNK_11) << 16;
-
-			if (dev_priv->chipset < 0x98 ||
-			    (dev_priv->chipset == 0x98 &&
-			     dev_priv->stepping <= 0xa1)) {
-				timing->reg_100234 |= (tUNK_2 + 2) << 8;
-			} else {
-				/* XXX: +6? */
-				timing->reg_100234 |= (tUNK_19 + 6) << 8;
-			}
-
-			/* XXX; reg_100238
-			 * reg_100238: 0x00?????? */
-			timing->reg_10023c = 0x202;
-			if (dev_priv->chipset < 0x98 ||
-			    (dev_priv->chipset == 0x98 &&
-			     dev_priv->stepping <= 0xa1)) {
-				timing->reg_10023c |= 0x4000000 | (tUNK_2 - 1) << 16;
-			} else {
-				/* XXX: reg_10023c
-				 * currently unknown
-				 * 10023c seen as 06xxxxxx, 0bxxxxxx or 0fxxxxxx */
-			}
-
-			/* XXX: reg_100240? */
-		}
 		timing->id = i;
-
-		NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i,
-			 timing->reg_100220, timing->reg_100224,
-			 timing->reg_100228, timing->reg_10022c);
-		NV_DEBUG(dev, "         230: %08x %08x %08x %08x\n",
-			 timing->reg_100230, timing->reg_100234,
-			 timing->reg_100238, timing->reg_10023c);
-		NV_DEBUG(dev, "         240: %08x\n", timing->reg_100240);
+		timing->WR = entry[0];
+		timing->CL = entry[2];
+
+		if(dev_priv->card_type <= NV_40) {
+			nv40_mem_timing_entry(dev,hdr,(struct nouveau_pm_tbl_entry*) entry,magic_number,&pm->memtimings.timing[i]);
+		} else if(dev_priv->card_type == NV_50){
+			nv50_mem_timing_entry(dev,&P,hdr,(struct nouveau_pm_tbl_entry*) entry,magic_number,&pm->memtimings.timing[i]);
+		} else if(dev_priv->card_type == NV_C0) {
+			nvc0_mem_timing_entry(dev,hdr,(struct nouveau_pm_tbl_entry*) entry,&pm->memtimings.timing[i]);
+		}
 	}
 
-	memtimings->nr_timing = entries;
-	memtimings->supported = (dev_priv->chipset <= 0x98);
+	memtimings->nr_timing = hdr->entry_cnt;
+	memtimings->supported = P.version == 1;
 }
 
 void
@@ -693,7 +708,10 @@ nouveau_mem_timing_fini(struct drm_device *dev)
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_pm_memtimings *mem = &dev_priv->engine.pm.memtimings;
 
-	kfree(mem->timing);
+	if(mem->timing) {
+		kfree(mem->timing);
+		mem->timing = NULL;
+	}
 }
 
 static int

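nouveau_mem_timing_init() now reads the memory-timing table through a small header struct (version, header length, entry count, entry length) and steps through entries with plain pointer arithmetic: entry = (u8 *)hdr + header_len, advancing by entry_len each iteration and skipping entries whose first byte is zero. A self-contained sketch of that walk over an in-memory byte blob (the table contents are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Same shape as the nouveau_pm_tbl_header added in nouveau_drv.h. */
struct tbl_header {
	uint8_t version;
	uint8_t header_len;
	uint8_t entry_cnt;
	uint8_t entry_len;
};

int main(void)
{
	/* Fabricated table: 4-byte header followed by two 3-byte entries. */
	uint8_t blob[] = {
		0x10, 4, 2, 3,          /* version 0x10, hdr 4, cnt 2, len 3 */
		0x0a, 0x0b, 0x0c,       /* entry 0 */
		0x00, 0x0e, 0x0f,       /* entry 1: first byte 0 -> skipped */
	};
	const struct tbl_header *hdr = (const struct tbl_header *)blob;
	const uint8_t *entry = (const uint8_t *)hdr + hdr->header_len;
	int i;

	for (i = 0; i < hdr->entry_cnt; i++, entry += hdr->entry_len) {
		if (entry[0] == 0)      /* the driver skips empty entries */
			continue;
		printf("entry %d: %02x %02x %02x\n",
		       i, entry[0], entry[1], entry[2]);
	}
	return 0;
}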
+ 43 - 46
drivers/gpu/drm/nouveau/nouveau_mm.c

@@ -27,7 +27,7 @@
 #include "nouveau_mm.h"
 
 static inline void
-region_put(struct nouveau_mm *rmm, struct nouveau_mm_node *a)
+region_put(struct nouveau_mm *mm, struct nouveau_mm_node *a)
 {
 	list_del(&a->nl_entry);
 	list_del(&a->fl_entry);
@@ -35,7 +35,7 @@ region_put(struct nouveau_mm *rmm, struct nouveau_mm_node *a)
 }
 
 static struct nouveau_mm_node *
-region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)
+region_split(struct nouveau_mm *mm, struct nouveau_mm_node *a, u32 size)
 {
 	struct nouveau_mm_node *b;
 
@@ -57,33 +57,33 @@ region_split(struct nouveau_mm *rmm, struct nouveau_mm_node *a, u32 size)
 	return b;
 }
 
-#define node(root, dir) ((root)->nl_entry.dir == &rmm->nodes) ? NULL : \
+#define node(root, dir) ((root)->nl_entry.dir == &mm->nodes) ? NULL : \
 	list_entry((root)->nl_entry.dir, struct nouveau_mm_node, nl_entry)
 
 void
-nouveau_mm_put(struct nouveau_mm *rmm, struct nouveau_mm_node *this)
+nouveau_mm_put(struct nouveau_mm *mm, struct nouveau_mm_node *this)
 {
 	struct nouveau_mm_node *prev = node(this, prev);
 	struct nouveau_mm_node *next = node(this, next);
 
-	list_add(&this->fl_entry, &rmm->free);
+	list_add(&this->fl_entry, &mm->free);
 	this->type = 0;
 
 	if (prev && prev->type == 0) {
 		prev->length += this->length;
-		region_put(rmm, this);
+		region_put(mm, this);
 		this = prev;
 	}
 
 	if (next && next->type == 0) {
 		next->offset  = this->offset;
 		next->length += this->length;
-		region_put(rmm, this);
+		region_put(mm, this);
 	}
 }
 
 int
-nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
+nouveau_mm_get(struct nouveau_mm *mm, int type, u32 size, u32 size_nc,
 	       u32 align, struct nouveau_mm_node **pnode)
 {
 	struct nouveau_mm_node *prev, *this, *next;
@@ -92,17 +92,17 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
 	u32 splitoff;
 	u32 s, e;
 
-	list_for_each_entry(this, &rmm->free, fl_entry) {
+	list_for_each_entry(this, &mm->free, fl_entry) {
 		e = this->offset + this->length;
 		s = this->offset;
 
 		prev = node(this, prev);
 		if (prev && prev->type != type)
-			s = roundup(s, rmm->block_size);
+			s = roundup(s, mm->block_size);
 
 		next = node(this, next);
 		if (next && next->type != type)
-			e = rounddown(e, rmm->block_size);
+			e = rounddown(e, mm->block_size);
 
 		s  = (s + align_mask) & ~align_mask;
 		e &= ~align_mask;
@@ -110,10 +110,10 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
 			continue;
 
 		splitoff = s - this->offset;
-		if (splitoff && !region_split(rmm, this, splitoff))
+		if (splitoff && !region_split(mm, this, splitoff))
 			return -ENOMEM;
 
-		this = region_split(rmm, this, min(size, e - s));
+		this = region_split(mm, this, min(size, e - s));
 		if (!this)
 			return -ENOMEM;
 
@@ -127,52 +127,49 @@ nouveau_mm_get(struct nouveau_mm *rmm, int type, u32 size, u32 size_nc,
 }
 
 int
-nouveau_mm_init(struct nouveau_mm **prmm, u32 offset, u32 length, u32 block)
+nouveau_mm_init(struct nouveau_mm *mm, u32 offset, u32 length, u32 block)
 {
-	struct nouveau_mm *rmm;
-	struct nouveau_mm_node *heap;
+	struct nouveau_mm_node *node;
+
+	if (block) {
+		mutex_init(&mm->mutex);
+		INIT_LIST_HEAD(&mm->nodes);
+		INIT_LIST_HEAD(&mm->free);
+		mm->block_size = block;
+		mm->heap_nodes = 0;
+	}
 
-	heap = kzalloc(sizeof(*heap), GFP_KERNEL);
-	if (!heap)
+	node = kzalloc(sizeof(*node), GFP_KERNEL);
+	if (!node)
 		return -ENOMEM;
-	heap->offset = roundup(offset, block);
-	heap->length = rounddown(offset + length, block) - heap->offset;
+	node->offset = roundup(offset, mm->block_size);
+	node->length = rounddown(offset + length, mm->block_size) - node->offset;
 
-	rmm = kzalloc(sizeof(*rmm), GFP_KERNEL);
-	if (!rmm) {
-		kfree(heap);
-		return -ENOMEM;
-	}
-	rmm->block_size = block;
-	mutex_init(&rmm->mutex);
-	INIT_LIST_HEAD(&rmm->nodes);
-	INIT_LIST_HEAD(&rmm->free);
-	list_add(&heap->nl_entry, &rmm->nodes);
-	list_add(&heap->fl_entry, &rmm->free);
-
-	*prmm = rmm;
+	list_add_tail(&node->nl_entry, &mm->nodes);
+	list_add_tail(&node->fl_entry, &mm->free);
+	mm->heap_nodes++;
 	return 0;
 }
 
 int
-nouveau_mm_fini(struct nouveau_mm **prmm)
+nouveau_mm_fini(struct nouveau_mm *mm)
 {
-	struct nouveau_mm *rmm = *prmm;
 	struct nouveau_mm_node *node, *heap =
-		list_first_entry(&rmm->nodes, struct nouveau_mm_node, nl_entry);
-
-	if (!list_is_singular(&rmm->nodes)) {
-		printk(KERN_ERR "nouveau_mm not empty at destroy time!\n");
-		list_for_each_entry(node, &rmm->nodes, nl_entry) {
-			printk(KERN_ERR "0x%02x: 0x%08x 0x%08x\n",
-			       node->type, node->offset, node->length);
+		list_first_entry(&mm->nodes, struct nouveau_mm_node, nl_entry);
+	int nodes = 0;
+
+	list_for_each_entry(node, &mm->nodes, nl_entry) {
+		if (nodes++ == mm->heap_nodes) {
+			printk(KERN_ERR "nouveau_mm in use at destroy time!\n");
+			list_for_each_entry(node, &mm->nodes, nl_entry) {
+				printk(KERN_ERR "0x%02x: 0x%08x 0x%08x\n",
+				       node->type, node->offset, node->length);
+			}
+			WARN_ON(1);
+			return -EBUSY;
 		}
-		WARN_ON(1);
-		return -EBUSY;
 	}
 
 	kfree(heap);
-	kfree(rmm);
-	*prmm = NULL;
 	return 0;
 }

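The reworked nouveau_mm keeps the same placement math: each heap node spans roundup(offset, block) through rounddown(offset + length, block), and candidate ranges inside nouveau_mm_get() are snapped with s = (s + align_mask) & ~align_mask and e &= ~align_mask before the size check. Those helpers in isolation, with sample numbers chosen purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Round x up/down to a multiple of 'to' (any positive 'to'). */
static uint32_t round_up_to(uint32_t x, uint32_t to)
{
	return ((x + to - 1) / to) * to;
}

static uint32_t round_down_to(uint32_t x, uint32_t to)
{
	return (x / to) * to;
}

int main(void)
{
	uint32_t offset = 0x1234, length = 0x10000, block = 0x1000;
	uint32_t start = round_up_to(offset, block);
	uint32_t end   = round_down_to(offset + length, block);
	uint32_t align_mask = 0xfff;            /* example: 4 KiB alignment */
	uint32_t s = 0x1801, e = 0x5fff;

	printf("heap: 0x%x..0x%x (len 0x%x)\n",
	       (unsigned)start, (unsigned)end, (unsigned)(end - start));
	printf("snapped: 0x%x..0x%x\n",
	       (unsigned)((s + align_mask) & ~align_mask),
	       (unsigned)(e & ~align_mask));
	return 0;
}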
+ 3 - 2
drivers/gpu/drm/nouveau/nouveau_mm.h

@@ -42,10 +42,11 @@ struct nouveau_mm {
 	struct mutex mutex;
 
 	u32 block_size;
+	int heap_nodes;
 };
 
-int  nouveau_mm_init(struct nouveau_mm **, u32 offset, u32 length, u32 block);
-int  nouveau_mm_fini(struct nouveau_mm **);
+int  nouveau_mm_init(struct nouveau_mm *, u32 offset, u32 length, u32 block);
+int  nouveau_mm_fini(struct nouveau_mm *);
 int  nouveau_mm_pre(struct nouveau_mm *);
 int  nouveau_mm_get(struct nouveau_mm *, int type, u32 size, u32 size_nc,
 		    u32 align, struct nouveau_mm_node **);

+ 11 - 7
drivers/gpu/drm/nouveau/nouveau_object.c

@@ -693,6 +693,7 @@ nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
 static int
 nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
 {
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
 	struct drm_device *dev = chan->dev;
 	struct nouveau_gpuobj *pgd = NULL;
 	struct nouveau_vm_pgd *vpgd;
@@ -722,6 +723,9 @@ nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
 	nv_wo32(chan->ramin, 0x020c, 0x000000ff);
 
 	/* map display semaphore buffers into channel's vm */
+	if (dev_priv->card_type >= NV_D0)
+		return 0;
+
 	for (i = 0; i < 2; i++) {
 		struct nv50_display_crtc *dispc = &nv50_display(dev)->crtc[i];
 
@@ -746,7 +750,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
 	int ret, i;
 
 	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);
-	if (dev_priv->card_type == NV_C0)
+	if (dev_priv->card_type >= NV_C0)
 		return nvc0_gpuobj_channel_init(chan, vm);
 
 	/* Allocate a chunk of memory for per-channel object storage */
@@ -793,7 +797,7 @@ nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
 			return ret;
 
 		/* dma objects for display sync channel semaphore blocks */
-		for (i = 0; i < 2; i++) {
+		for (i = 0; i < dev->mode_config.num_crtc; i++) {
 			struct nouveau_gpuobj *sem = NULL;
 			struct nv50_display_crtc *dispc =
 				&nv50_display(dev)->crtc[i];
@@ -875,18 +879,18 @@ nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
 
 	NV_DEBUG(dev, "ch%d\n", chan->id);
 
-	if (dev_priv->card_type >= NV_50) {
+	if (dev_priv->card_type >= NV_50 && dev_priv->card_type <= NV_C0) {
 		struct nv50_display *disp = nv50_display(dev);
 
-		for (i = 0; i < 2; i++) {
+		for (i = 0; i < dev->mode_config.num_crtc; i++) {
 			struct nv50_display_crtc *dispc = &disp->crtc[i];
 			nouveau_bo_vma_del(dispc->sem.bo, &chan->dispc_vma[i]);
 		}
-
-		nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
-		nouveau_gpuobj_ref(NULL, &chan->vm_pd);
 	}
 
+	nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
+	nouveau_gpuobj_ref(NULL, &chan->vm_pd);
+
 	if (drm_mm_initialized(&chan->ramin_heap))
 		drm_mm_takedown(&chan->ramin_heap);
 	nouveau_gpuobj_ref(NULL, &chan->ramin);

+ 95 - 23
drivers/gpu/drm/nouveau/nouveau_perf.c

@@ -127,13 +127,57 @@ nouveau_perf_timing(struct drm_device *dev, struct bit_entry *P,
 
 	entry += ramcfg * recordlen;
 	if (entry[1] >= pm->memtimings.nr_timing) {
-		NV_WARN(dev, "timingset %d does not exist\n", entry[1]);
+		if (entry[1] != 0xff)
+			NV_WARN(dev, "timingset %d does not exist\n", entry[1]);
 		return NULL;
 	}
 
 	return &pm->memtimings.timing[entry[1]];
 }
 
+static void
+nouveau_perf_voltage(struct drm_device *dev, struct bit_entry *P,
+		     struct nouveau_pm_level *perflvl)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvbios *bios = &dev_priv->vbios;
+	u8 *vmap;
+	int id;
+
+	id = perflvl->volt_min;
+	perflvl->volt_min = 0;
+
+	/* boards using voltage table version <0x40 store the voltage
+	 * level directly in the perflvl entry as a multiple of 10mV
+	 */
+	if (dev_priv->engine.pm.voltage.version < 0x40) {
+		perflvl->volt_min = id * 10000;
+		perflvl->volt_max = perflvl->volt_min;
+		return;
+	}
+
+	/* on newer ones, the perflvl stores an index into yet another
+	 * vbios table containing a min/max voltage value for the perflvl
+	 */
+	if (P->version != 2 || P->length < 34) {
+		NV_DEBUG(dev, "where's our volt map table ptr? %d %d\n",
+			 P->version, P->length);
+		return;
+	}
+
+	vmap = ROMPTR(bios, P->data[32]);
+	if (!vmap) {
+		NV_DEBUG(dev, "volt map table pointer invalid\n");
+		return;
+	}
+
+	if (id < vmap[3]) {
+		vmap += vmap[1] + (vmap[2] * id);
+		perflvl->volt_min = ROM32(vmap[0]);
+		perflvl->volt_max = ROM32(vmap[4]);
+	}
+}
+
 void
 nouveau_perf_init(struct drm_device *dev)
 {
@@ -141,6 +185,8 @@ nouveau_perf_init(struct drm_device *dev)
 	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
 	struct nvbios *bios = &dev_priv->vbios;
 	struct bit_entry P;
+	struct nouveau_pm_memtimings *memtimings = &pm->memtimings;
+	struct nouveau_pm_tbl_header mt_hdr;
 	u8 version, headerlen, recordlen, entries;
 	u8 *perf, *entry;
 	int vid, i;
@@ -188,6 +234,22 @@ nouveau_perf_init(struct drm_device *dev)
 	}
 
 	entry = perf + headerlen;
+
+	/* For version 0x15, initialize memtiming table */
+	if(version == 0x15) {
+		memtimings->timing =
+				kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL);
+		if(!memtimings) {
+			NV_WARN(dev,"Could not allocate memtiming table\n");
+			return;
+		}
+
+		mt_hdr.entry_cnt = entries;
+		mt_hdr.entry_len = 14;
+		mt_hdr.version = version;
+		mt_hdr.header_len = 4;
+	}
+
 	for (i = 0; i < entries; i++) {
 		struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl];
 
@@ -203,7 +265,8 @@ nouveau_perf_init(struct drm_device *dev)
 		case 0x13:
 		case 0x15:
 			perflvl->fanspeed = entry[55];
-			perflvl->voltage = (recordlen > 56) ? entry[56] : 0;
+			if (recordlen > 56)
+				perflvl->volt_min = entry[56];
 			perflvl->core = ROM32(entry[1]) * 10;
 			perflvl->memory = ROM32(entry[5]) * 20;
 			break;
@@ -211,9 +274,10 @@ nouveau_perf_init(struct drm_device *dev)
 		case 0x23:
 		case 0x24:
 			perflvl->fanspeed = entry[4];
-			perflvl->voltage = entry[5];
-			perflvl->core = ROM16(entry[6]) * 1000;
-
+			perflvl->volt_min = entry[5];
+			perflvl->shader = ROM16(entry[6]) * 1000;
+			perflvl->core = perflvl->shader;
+			perflvl->core += (signed char)entry[8] * 1000;
 			if (dev_priv->chipset == 0x49 ||
 			    dev_priv->chipset == 0x4b)
 				perflvl->memory = ROM16(entry[11]) * 1000;
@@ -223,7 +287,7 @@ nouveau_perf_init(struct drm_device *dev)
 			break;
 		case 0x25:
 			perflvl->fanspeed = entry[4];
-			perflvl->voltage = entry[5];
+			perflvl->volt_min = entry[5];
 			perflvl->core = ROM16(entry[6]) * 1000;
 			perflvl->shader = ROM16(entry[10]) * 1000;
 			perflvl->memory = ROM16(entry[12]) * 1000;
@@ -232,7 +296,7 @@ nouveau_perf_init(struct drm_device *dev)
 			perflvl->memscript = ROM16(entry[2]);
 		case 0x35:
 			perflvl->fanspeed = entry[6];
-			perflvl->voltage = entry[7];
+			perflvl->volt_min = entry[7];
 			perflvl->core = ROM16(entry[8]) * 1000;
 			perflvl->shader = ROM16(entry[10]) * 1000;
 			perflvl->memory = ROM16(entry[12]) * 1000;
@@ -240,30 +304,34 @@ nouveau_perf_init(struct drm_device *dev)
 			perflvl->unk05 = ROM16(entry[16]) * 1000;
 			break;
 		case 0x40:
-#define subent(n) entry[perf[2] + ((n) * perf[3])]
+#define subent(n) (ROM16(entry[perf[2] + ((n) * perf[3])]) & 0xfff) * 1000
 			perflvl->fanspeed = 0; /*XXX*/
-			perflvl->voltage = entry[2];
+			perflvl->volt_min = entry[2];
 			if (dev_priv->card_type == NV_50) {
-				perflvl->core = ROM16(subent(0)) & 0xfff;
-				perflvl->shader = ROM16(subent(1)) & 0xfff;
-				perflvl->memory = ROM16(subent(2)) & 0xfff;
+				perflvl->core   = subent(0);
+				perflvl->shader = subent(1);
+				perflvl->memory = subent(2);
+				perflvl->vdec   = subent(3);
+				perflvl->unka0  = subent(4);
 			} else {
-				perflvl->shader = ROM16(subent(3)) & 0xfff;
+				perflvl->hub06  = subent(0);
+				perflvl->hub01  = subent(1);
+				perflvl->copy   = subent(2);
+				perflvl->shader = subent(3);
+				perflvl->rop    = subent(4);
+				perflvl->memory = subent(5);
+				perflvl->vdec   = subent(6);
+				perflvl->daemon = subent(10);
+				perflvl->hub07  = subent(11);
 				perflvl->core   = perflvl->shader / 2;
-				perflvl->unk0a  = ROM16(subent(4)) & 0xfff;
-				perflvl->memory = ROM16(subent(5)) & 0xfff;
 			}
-
-			perflvl->core *= 1000;
-			perflvl->shader *= 1000;
-			perflvl->memory *= 1000;
-			perflvl->unk0a *= 1000;
 			break;
 		}
 
 		/* make sure vid is valid */
-		if (pm->voltage.supported && perflvl->voltage) {
-			vid = nouveau_volt_vid_lookup(dev, perflvl->voltage);
+		nouveau_perf_voltage(dev, &P, perflvl);
+		if (pm->voltage.supported && perflvl->volt_min) {
+			vid = nouveau_volt_vid_lookup(dev, perflvl->volt_min);
 			if (vid < 0) {
 				NV_DEBUG(dev, "drop perflvl %d, bad vid\n", i);
 				entry += recordlen;
@@ -272,7 +340,11 @@ nouveau_perf_init(struct drm_device *dev)
 		}
 
 		/* get the corresponding memory timings */
-		if (version > 0x15) {
+		if (version == 0x15) {
+			memtimings->timing[i].id = i;
+			nv30_mem_timing_entry(dev,&mt_hdr,(struct nouveau_pm_tbl_entry*) &entry[41],0,&memtimings->timing[i]);
+			perflvl->timing = &memtimings->timing[i];
+		} else if (version > 0x15) {
 			/* last 3 args are for < 0x40, ignored for >= 0x40 */
 			perflvl->timing =
 				nouveau_perf_timing(dev, &P,
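
One detail worth keeping in mind while reading the voltage changes in this file: volt_min/volt_max are now kept in microvolts internally, so the pre-0x40 perflvl byte (a multiple of 10mV) is scaled by 10000 on parse and divided by 1000 when printed as millivolts. A quick standalone check of that arithmetic, using a made-up vbios byte:

#include <stdio.h>

int main(void)
{
	unsigned char entry_byte = 110;              /* hypothetical vbios value, in 10mV units */
	unsigned int volt_min = entry_byte * 10000;  /* stored internally in microvolts */

	/* the info/sysfs path divides by 1000 to report millivolts */
	printf("voltage %umV\n", volt_min / 1000);   /* prints "voltage 1100mV" */
	return 0;
}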

+ 60 - 40
drivers/gpu/drm/nouveau/nouveau_pm.c

@@ -64,18 +64,26 @@ nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 	if (perflvl == pm->cur)
 		return 0;
 
-	if (pm->voltage.supported && pm->voltage_set && perflvl->voltage) {
-		ret = pm->voltage_set(dev, perflvl->voltage);
+	if (pm->voltage.supported && pm->voltage_set && perflvl->volt_min) {
+		ret = pm->voltage_set(dev, perflvl->volt_min);
 		if (ret) {
 			NV_ERROR(dev, "voltage_set %d failed: %d\n",
-				 perflvl->voltage, ret);
+				 perflvl->volt_min, ret);
 		}
 	}
 
-	nouveau_pm_clock_set(dev, perflvl, PLL_CORE, perflvl->core);
-	nouveau_pm_clock_set(dev, perflvl, PLL_SHADER, perflvl->shader);
-	nouveau_pm_clock_set(dev, perflvl, PLL_MEMORY, perflvl->memory);
-	nouveau_pm_clock_set(dev, perflvl, PLL_UNK05, perflvl->unk05);
+	if (pm->clocks_pre) {
+		void *state = pm->clocks_pre(dev, perflvl);
+		if (IS_ERR(state))
+			return PTR_ERR(state);
+		pm->clocks_set(dev, state);
+	} else
+	if (pm->clock_set) {
+		nouveau_pm_clock_set(dev, perflvl, PLL_CORE, perflvl->core);
+		nouveau_pm_clock_set(dev, perflvl, PLL_SHADER, perflvl->shader);
+		nouveau_pm_clock_set(dev, perflvl, PLL_MEMORY, perflvl->memory);
+		nouveau_pm_clock_set(dev, perflvl, PLL_UNK05, perflvl->unk05);
+	}
 
 	pm->cur = perflvl;
 	return 0;
@@ -92,9 +100,6 @@ nouveau_pm_profile_set(struct drm_device *dev, const char *profile)
 	if (nouveau_perflvl_wr != 7777)
 		return -EPERM;
 
-	if (!pm->clock_set)
-		return -EINVAL;
-
 	if (!strncmp(profile, "boot", 4))
 		perflvl = &pm->boot;
 	else {
@@ -123,31 +128,37 @@ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
 	int ret;
 
-	if (!pm->clock_get)
-		return -EINVAL;
-
 	memset(perflvl, 0, sizeof(*perflvl));
 
-	ret = pm->clock_get(dev, PLL_CORE);
-	if (ret > 0)
-		perflvl->core = ret;
+	if (pm->clocks_get) {
+		ret = pm->clocks_get(dev, perflvl);
+		if (ret)
+			return ret;
+	} else
+	if (pm->clock_get) {
+		ret = pm->clock_get(dev, PLL_CORE);
+		if (ret > 0)
+			perflvl->core = ret;
 
-	ret = pm->clock_get(dev, PLL_MEMORY);
-	if (ret > 0)
-		perflvl->memory = ret;
+		ret = pm->clock_get(dev, PLL_MEMORY);
+		if (ret > 0)
+			perflvl->memory = ret;
 
-	ret = pm->clock_get(dev, PLL_SHADER);
-	if (ret > 0)
-		perflvl->shader = ret;
+		ret = pm->clock_get(dev, PLL_SHADER);
+		if (ret > 0)
+			perflvl->shader = ret;
 
-	ret = pm->clock_get(dev, PLL_UNK05);
-	if (ret > 0)
-		perflvl->unk05 = ret;
+		ret = pm->clock_get(dev, PLL_UNK05);
+		if (ret > 0)
+			perflvl->unk05 = ret;
+	}
 
 	if (pm->voltage.supported && pm->voltage_get) {
 		ret = pm->voltage_get(dev);
-		if (ret > 0)
-			perflvl->voltage = ret;
+		if (ret > 0) {
+			perflvl->volt_min = ret;
+			perflvl->volt_max = ret;
+		}
 	}
 
 	return 0;
@@ -156,7 +167,7 @@ nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
 static void
 nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len)
 {
-	char c[16], s[16], v[16], f[16], t[16];
+	char c[16], s[16], v[32], f[16], t[16], m[16];
 
 	c[0] = '\0';
 	if (perflvl->core)
@@ -166,9 +177,19 @@ nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len)
 	if (perflvl->shader)
 		snprintf(s, sizeof(s), " shader %dMHz", perflvl->shader / 1000);
 
+	m[0] = '\0';
+	if (perflvl->memory)
+		snprintf(m, sizeof(m), " memory %dMHz", perflvl->memory / 1000);
+
 	v[0] = '\0';
-	if (perflvl->voltage)
-		snprintf(v, sizeof(v), " voltage %dmV", perflvl->voltage * 10);
+	if (perflvl->volt_min && perflvl->volt_min != perflvl->volt_max) {
+		snprintf(v, sizeof(v), " voltage %dmV-%dmV",
+			 perflvl->volt_min / 1000, perflvl->volt_max / 1000);
+	} else
+	if (perflvl->volt_min) {
+		snprintf(v, sizeof(v), " voltage %dmV",
+			 perflvl->volt_min / 1000);
+	}
 
 	f[0] = '\0';
 	if (perflvl->fanspeed)
@@ -178,8 +199,7 @@ nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len)
 	if (perflvl->timing)
 		snprintf(t, sizeof(t), " timing %d", perflvl->timing->id);
 
-	snprintf(ptr, len, "memory %dMHz%s%s%s%s%s\n", perflvl->memory / 1000,
-		 c, s, v, f, t);
+	snprintf(ptr, len, "%s%s%s%s%s%s\n", c, s, m, t, v, f);
 }
 
 static ssize_t
@@ -190,7 +210,7 @@ nouveau_pm_get_perflvl_info(struct device *d,
 	char *ptr = buf;
 	int len = PAGE_SIZE;
 
-	snprintf(ptr, len, "%d: ", perflvl->id);
+	snprintf(ptr, len, "%d:", perflvl->id);
 	ptr += strlen(buf);
 	len -= strlen(buf);
 
@@ -211,9 +231,9 @@ nouveau_pm_get_perflvl(struct device *d, struct device_attribute *a, char *buf)
 	if (!pm->cur)
 		snprintf(ptr, len, "setting: boot\n");
 	else if (pm->cur == &pm->boot)
-		snprintf(ptr, len, "setting: boot\nc: ");
+		snprintf(ptr, len, "setting: boot\nc:");
 	else
-		snprintf(ptr, len, "setting: static %d\nc: ", pm->cur->id);
+		snprintf(ptr, len, "setting: static %d\nc:", pm->cur->id);
 	ptr += strlen(buf);
 	len -= strlen(buf);
 
@@ -292,7 +312,7 @@ nouveau_sysfs_fini(struct drm_device *dev)
 	}
 }
 
-#ifdef CONFIG_HWMON
+#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
 static ssize_t
 nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
 {
@@ -409,7 +429,7 @@ static const struct attribute_group hwmon_attrgroup = {
 static int
 nouveau_hwmon_init(struct drm_device *dev)
 {
-#ifdef CONFIG_HWMON
+#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
 	struct device *hwmon_dev;
@@ -442,7 +462,7 @@ nouveau_hwmon_init(struct drm_device *dev)
 static void
 nouveau_hwmon_fini(struct drm_device *dev)
 {
-#ifdef CONFIG_HWMON
+#if defined(CONFIG_HWMON) || (defined(MODULE) && defined(CONFIG_HWMON_MODULE))
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
 
@@ -488,7 +508,7 @@ nouveau_pm_init(struct drm_device *dev)
 	NV_INFO(dev, "%d available performance level(s)\n", pm->nr_perflvl);
 	for (i = 0; i < pm->nr_perflvl; i++) {
 		nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info));
-		NV_INFO(dev, "%d: %s", pm->perflvl[i].id, info);
+		NV_INFO(dev, "%d:%s", pm->perflvl[i].id, info);
 	}
 
 	/* determine current ("boot") performance level */
@@ -498,7 +518,7 @@ nouveau_pm_init(struct drm_device *dev)
 		pm->cur = &pm->boot;
 
 		nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info));
-		NV_INFO(dev, "c: %s", info);
+		NV_INFO(dev, "c:%s", info);
 	}
 
 	/* switch performance levels now if requested */
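
The reworked perflvl_set path above prefers a two-phase clocks_pre()/clocks_set() pair: the pre step does all the work that can fail and returns either a state object or an ERR_PTR, and the set step then just commits it. A userspace sketch of the shape of that contract (the ops struct, demo functions and ERR_PTR stand-ins below are illustrative, not the driver's):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* userspace stand-ins for the kernel's ERR_PTR()/IS_ERR()/PTR_ERR() */
#define ERR_PTR(err)  ((void *)(long)(err))
#define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-4095)
#define PTR_ERR(ptr)  ((long)(ptr))

struct perflvl { int core_khz; };

struct pm_ops {
	void *(*clocks_pre)(const struct perflvl *);  /* may fail, returns state */
	void  (*clocks_set)(void *state);             /* commit only, no failure */
};

static void *demo_pre(const struct perflvl *p)
{
	int *state;

	if (p->core_khz <= 0)
		return ERR_PTR(-EINVAL);   /* reject before touching anything */
	state = malloc(sizeof(*state));
	if (!state)
		return ERR_PTR(-ENOMEM);
	*state = p->core_khz;
	return state;
}

static void demo_set(void *state)
{
	printf("committing core clock %dkHz\n", *(int *)state);
	free(state);
}

static int perflvl_set(const struct pm_ops *pm, const struct perflvl *p)
{
	void *state;

	if (!pm->clocks_pre)
		return -ENOSYS;            /* older per-PLL path not shown here */

	state = pm->clocks_pre(p);         /* phase 1: everything that can fail */
	if (IS_ERR(state))
		return (int)PTR_ERR(state);

	pm->clocks_set(state);             /* phase 2: the commit itself */
	return 0;
}

int main(void)
{
	struct pm_ops ops = { demo_pre, demo_set };
	struct perflvl lvl = { .core_khz = 405000 };

	return perflvl_set(&ops, &lvl) ? 1 : 0;
}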

+ 11 - 4
drivers/gpu/drm/nouveau/nouveau_pm.h

@@ -52,6 +52,11 @@ void *nv04_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
 			u32 id, int khz);
 void nv04_pm_clock_set(struct drm_device *, void *);
 
+/* nv40_pm.c */
+int nv40_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
+void *nv40_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
+void nv40_pm_clocks_set(struct drm_device *, void *);
+
 /* nv50_pm.c */
 int nv50_pm_clock_get(struct drm_device *, u32 id);
 void *nv50_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
@@ -59,10 +64,12 @@ void *nv50_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
 void nv50_pm_clock_set(struct drm_device *, void *);
 
 /* nva3_pm.c */
-int nva3_pm_clock_get(struct drm_device *, u32 id);
-void *nva3_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
-			u32 id, int khz);
-void nva3_pm_clock_set(struct drm_device *, void *);
+int nva3_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
+void *nva3_pm_clocks_pre(struct drm_device *, struct nouveau_pm_level *);
+void nva3_pm_clocks_set(struct drm_device *, void *);
+
+/* nvc0_pm.c */
+int nvc0_pm_clocks_get(struct drm_device *, struct nouveau_pm_level *);
 
 /* nouveau_temp.c */
 void nouveau_temp_init(struct drm_device *dev);

+ 7 - 4
drivers/gpu/drm/nouveau/nouveau_reg.h

@@ -826,9 +826,12 @@
 #define NV50_PDISPLAY_SOR_DPMS_STATE_ACTIVE                          0x00030000
 #define NV50_PDISPLAY_SOR_DPMS_STATE_BLANKED                         0x00080000
 #define NV50_PDISPLAY_SOR_DPMS_STATE_WAIT                            0x10000000
-#define NV50_PDISPLAY_SOR_BACKLIGHT                                  0x0061c084
-#define NV50_PDISPLAY_SOR_BACKLIGHT_ENABLE                           0x80000000
-#define NV50_PDISPLAY_SOR_BACKLIGHT_LEVEL                            0x00000fff
+#define NV50_PDISP_SOR_PWM_DIV(i)                     (0x0061c080 + (i) * 0x800)
+#define NV50_PDISP_SOR_PWM_CTL(i)                     (0x0061c084 + (i) * 0x800)
+#define NV50_PDISP_SOR_PWM_CTL_NEW                                   0x80000000
+#define NVA3_PDISP_SOR_PWM_CTL_UNK                                   0x40000000
+#define NV50_PDISP_SOR_PWM_CTL_VAL                                   0x000007ff
+#define NVA3_PDISP_SOR_PWM_CTL_VAL                                   0x00ffffff
 #define NV50_SOR_DP_CTRL(i, l)           (0x0061c10c + (i) * 0x800 + (l) * 0x80)
 #define NV50_SOR_DP_CTRL_ENABLED                                     0x00000001
 #define NV50_SOR_DP_CTRL_ENHANCED_FRAME_ENABLED                      0x00004000
@@ -843,7 +846,7 @@
 #define NV50_SOR_DP_CTRL_TRAINING_PATTERN_2                          0x02000000
 #define NV50_SOR_DP_UNK118(i, l)         (0x0061c118 + (i) * 0x800 + (l) * 0x80)
 #define NV50_SOR_DP_UNK120(i, l)         (0x0061c120 + (i) * 0x800 + (l) * 0x80)
-#define NV50_SOR_DP_UNK128(i, l)         (0x0061c128 + (i) * 0x800 + (l) * 0x80)
+#define NV50_SOR_DP_SCFG(i, l)           (0x0061c128 + (i) * 0x800 + (l) * 0x80)
 #define NV50_SOR_DP_UNK130(i, l)         (0x0061c130 + (i) * 0x800 + (l) * 0x80)
 
 #define NV50_PDISPLAY_USER(i)                        ((i) * 0x1000 + 0x00640000)
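
The renamed SOR PWM registers above are laid out with a 0x800 stride per SOR, which the new macros encode directly. A trivial standalone check of the addresses the macro expands to:

#include <stdio.h>

/* per-SOR stride taken from the hunk above: base + (i) * 0x800 */
#define NV50_PDISP_SOR_PWM_CTL(i)  (0x0061c084 + (i) * 0x800)

int main(void)
{
	printf("SOR0 PWM_CTL 0x%08x\n", NV50_PDISP_SOR_PWM_CTL(0));  /* 0x0061c084 */
	printf("SOR1 PWM_CTL 0x%08x\n", NV50_PDISP_SOR_PWM_CTL(1));  /* 0x0061c884 */
	return 0;
}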

+ 21 - 45
drivers/gpu/drm/nouveau/nouveau_sgdma.c

@@ -12,8 +12,8 @@ struct nouveau_sgdma_be {
 	struct drm_device *dev;
 
 	dma_addr_t *pages;
-	bool *ttm_alloced;
 	unsigned nr_pages;
+	bool unmap_pages;
 
 	u64 offset;
 	bool bound;
@@ -26,43 +26,28 @@ nouveau_sgdma_populate(struct ttm_backend *be, unsigned long num_pages,
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
 	struct drm_device *dev = nvbe->dev;
+	int i;
 
 	NV_DEBUG(nvbe->dev, "num_pages = %ld\n", num_pages);
 
-	if (nvbe->pages)
-		return -EINVAL;
-
-	nvbe->pages = kmalloc(sizeof(dma_addr_t) * num_pages, GFP_KERNEL);
-	if (!nvbe->pages)
-		return -ENOMEM;
+	nvbe->pages = dma_addrs;
+	nvbe->nr_pages = num_pages;
+	nvbe->unmap_pages = true;
 
-	nvbe->ttm_alloced = kmalloc(sizeof(bool) * num_pages, GFP_KERNEL);
-	if (!nvbe->ttm_alloced) {
-		kfree(nvbe->pages);
-		nvbe->pages = NULL;
-		return -ENOMEM;
+	/* this code path isn't called and is incorrect anyways */
+	if (0) { /* dma_addrs[0] != DMA_ERROR_CODE) { */
+		nvbe->unmap_pages = false;
+		return 0;
 	}
 
-	nvbe->nr_pages = 0;
-	while (num_pages--) {
-		/* this code path isn't called and is incorrect anyways */
-		if (0) { /*dma_addrs[nvbe->nr_pages] != DMA_ERROR_CODE)*/
-			nvbe->pages[nvbe->nr_pages] =
-					dma_addrs[nvbe->nr_pages];
-		 	nvbe->ttm_alloced[nvbe->nr_pages] = true;
-		} else {
-			nvbe->pages[nvbe->nr_pages] =
-				pci_map_page(dev->pdev, pages[nvbe->nr_pages], 0,
-				     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
-			if (pci_dma_mapping_error(dev->pdev,
-						  nvbe->pages[nvbe->nr_pages])) {
-				be->func->clear(be);
-				return -EFAULT;
-			}
-			nvbe->ttm_alloced[nvbe->nr_pages] = false;
+	for (i = 0; i < num_pages; i++) {
+		nvbe->pages[i] = pci_map_page(dev->pdev, pages[i], 0,
+					      PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+		if (pci_dma_mapping_error(dev->pdev, nvbe->pages[i])) {
+			nvbe->nr_pages = --i;
+			be->func->clear(be);
+			return -EFAULT;
 		}
-
-		nvbe->nr_pages++;
 	}
 
 	return 0;
@@ -72,25 +57,16 @@ static void
 nouveau_sgdma_clear(struct ttm_backend *be)
 {
 	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)be;
-	struct drm_device *dev;
-
-	if (nvbe && nvbe->pages) {
-		dev = nvbe->dev;
-		NV_DEBUG(dev, "\n");
+	struct drm_device *dev = nvbe->dev;
 
-		if (nvbe->bound)
-			be->func->unbind(be);
+	if (nvbe->bound)
+		be->func->unbind(be);
 
+	if (nvbe->unmap_pages) {
 		while (nvbe->nr_pages--) {
-			if (!nvbe->ttm_alloced[nvbe->nr_pages])
-				pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
+			pci_unmap_page(dev->pdev, nvbe->pages[nvbe->nr_pages],
 				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 		}
-		kfree(nvbe->pages);
-		kfree(nvbe->ttm_alloced);
-		nvbe->pages = NULL;
-		nvbe->ttm_alloced = NULL;
-		nvbe->nr_pages = 0;
 	}
 }
 

+ 163 - 47
drivers/gpu/drm/nouveau/nouveau_state.c

@@ -286,9 +286,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->gpio.get		= nv10_gpio_get;
 		engine->gpio.set		= nv10_gpio_set;
 		engine->gpio.irq_enable		= NULL;
-		engine->pm.clock_get		= nv04_pm_clock_get;
-		engine->pm.clock_pre		= nv04_pm_clock_pre;
-		engine->pm.clock_set		= nv04_pm_clock_set;
+		engine->pm.clocks_get		= nv40_pm_clocks_get;
+		engine->pm.clocks_pre		= nv40_pm_clocks_pre;
+		engine->pm.clocks_set		= nv40_pm_clocks_set;
 		engine->pm.voltage_get		= nouveau_voltage_gpio_get;
 		engine->pm.voltage_set		= nouveau_voltage_gpio_set;
 		engine->pm.temp_get		= nv40_temp_get;
@@ -299,7 +299,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 	case 0x50:
 	case 0x80: /* gotta love NVIDIA's consistency.. */
 	case 0x90:
-	case 0xA0:
+	case 0xa0:
 		engine->instmem.init		= nv50_instmem_init;
 		engine->instmem.takedown	= nv50_instmem_takedown;
 		engine->instmem.suspend		= nv50_instmem_suspend;
@@ -359,9 +359,9 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 			engine->pm.clock_set	= nv50_pm_clock_set;
 			break;
 		default:
-			engine->pm.clock_get	= nva3_pm_clock_get;
-			engine->pm.clock_pre	= nva3_pm_clock_pre;
-			engine->pm.clock_set	= nva3_pm_clock_set;
+			engine->pm.clocks_get	= nva3_pm_clocks_get;
+			engine->pm.clocks_pre	= nva3_pm_clocks_pre;
+			engine->pm.clocks_set	= nva3_pm_clocks_set;
 			break;
 		}
 		engine->pm.voltage_get		= nouveau_voltage_gpio_get;
@@ -376,7 +376,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->vram.put		= nv50_vram_del;
 		engine->vram.flags_valid	= nv50_vram_flags_valid;
 		break;
-	case 0xC0:
+	case 0xc0:
 		engine->instmem.init		= nvc0_instmem_init;
 		engine->instmem.takedown	= nvc0_instmem_takedown;
 		engine->instmem.suspend		= nvc0_instmem_suspend;
@@ -422,12 +422,73 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->vram.put		= nv50_vram_del;
 		engine->vram.flags_valid	= nvc0_vram_flags_valid;
 		engine->pm.temp_get		= nv84_temp_get;
+		engine->pm.clocks_get		= nvc0_pm_clocks_get;
+		engine->pm.voltage_get		= nouveau_voltage_gpio_get;
+		engine->pm.voltage_set		= nouveau_voltage_gpio_set;
+		break;
+	case 0xd0:
+		engine->instmem.init		= nvc0_instmem_init;
+		engine->instmem.takedown	= nvc0_instmem_takedown;
+		engine->instmem.suspend		= nvc0_instmem_suspend;
+		engine->instmem.resume		= nvc0_instmem_resume;
+		engine->instmem.get		= nv50_instmem_get;
+		engine->instmem.put		= nv50_instmem_put;
+		engine->instmem.map		= nv50_instmem_map;
+		engine->instmem.unmap		= nv50_instmem_unmap;
+		engine->instmem.flush		= nv84_instmem_flush;
+		engine->mc.init			= nv50_mc_init;
+		engine->mc.takedown		= nv50_mc_takedown;
+		engine->timer.init		= nv04_timer_init;
+		engine->timer.read		= nv04_timer_read;
+		engine->timer.takedown		= nv04_timer_takedown;
+		engine->fb.init			= nvc0_fb_init;
+		engine->fb.takedown		= nvc0_fb_takedown;
+		engine->fifo.channels		= 128;
+		engine->fifo.init		= nvc0_fifo_init;
+		engine->fifo.takedown		= nvc0_fifo_takedown;
+		engine->fifo.disable		= nvc0_fifo_disable;
+		engine->fifo.enable		= nvc0_fifo_enable;
+		engine->fifo.reassign		= nvc0_fifo_reassign;
+		engine->fifo.channel_id		= nvc0_fifo_channel_id;
+		engine->fifo.create_context	= nvc0_fifo_create_context;
+		engine->fifo.destroy_context	= nvc0_fifo_destroy_context;
+		engine->fifo.load_context	= nvc0_fifo_load_context;
+		engine->fifo.unload_context	= nvc0_fifo_unload_context;
+		engine->display.early_init	= nouveau_stub_init;
+		engine->display.late_takedown	= nouveau_stub_takedown;
+		engine->display.create		= nvd0_display_create;
+		engine->display.init		= nvd0_display_init;
+		engine->display.destroy		= nvd0_display_destroy;
+		engine->gpio.init		= nv50_gpio_init;
+		engine->gpio.takedown		= nouveau_stub_takedown;
+		engine->gpio.get		= nvd0_gpio_get;
+		engine->gpio.set		= nvd0_gpio_set;
+		engine->gpio.irq_register	= nv50_gpio_irq_register;
+		engine->gpio.irq_unregister	= nv50_gpio_irq_unregister;
+		engine->gpio.irq_enable		= nv50_gpio_irq_enable;
+		engine->vram.init		= nvc0_vram_init;
+		engine->vram.takedown		= nv50_vram_fini;
+		engine->vram.get		= nvc0_vram_new;
+		engine->vram.put		= nv50_vram_del;
+		engine->vram.flags_valid	= nvc0_vram_flags_valid;
+		engine->pm.clocks_get		= nvc0_pm_clocks_get;
+		engine->pm.voltage_get		= nouveau_voltage_gpio_get;
+		engine->pm.voltage_set		= nouveau_voltage_gpio_set;
 		break;
 	default:
 		NV_ERROR(dev, "NV%02x unsupported\n", dev_priv->chipset);
 		return 1;
 	}
 
+	/* headless mode */
+	if (nouveau_modeset == 2) {
+		engine->display.early_init = nouveau_stub_init;
+		engine->display.late_takedown = nouveau_stub_takedown;
+		engine->display.create = nouveau_stub_init;
+		engine->display.init = nouveau_stub_init;
+		engine->display.destroy = nouveau_stub_takedown;
+	}
+
 	return 0;
 }
 
@@ -449,21 +510,6 @@ nouveau_vga_set_decode(void *priv, bool state)
 		return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
 }
 
-static int
-nouveau_card_init_channel(struct drm_device *dev)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	int ret;
-
-	ret = nouveau_channel_alloc(dev, &dev_priv->channel, NULL,
-				    NvDmaFB, NvDmaTT);
-	if (ret)
-		return ret;
-
-	mutex_unlock(&dev_priv->channel->mutex);
-	return 0;
-}
-
 static void nouveau_switcheroo_set_state(struct pci_dev *pdev,
 					 enum vga_switcheroo_state state)
 {
@@ -630,8 +676,11 @@ nouveau_card_init(struct drm_device *dev)
 			break;
 		}
 
-		if (dev_priv->card_type == NV_40)
-			nv40_mpeg_create(dev);
+		if (dev_priv->card_type == NV_40 ||
+		    dev_priv->chipset == 0x31 ||
+		    dev_priv->chipset == 0x34 ||
+		    dev_priv->chipset == 0x36)
+			nv31_mpeg_create(dev);
 		else
 		if (dev_priv->card_type == NV_50 &&
 		    (dev_priv->chipset < 0x98 || dev_priv->chipset == 0xa0))
@@ -651,41 +700,69 @@ nouveau_card_init(struct drm_device *dev)
 			goto out_engine;
 	}
 
-	ret = engine->display.create(dev);
+	ret = nouveau_irq_init(dev);
 	if (ret)
 		goto out_fifo;
 
-	ret = drm_vblank_init(dev, nv_two_heads(dev) ? 2 : 1);
-	if (ret)
-		goto out_vblank;
+	/* initialise general modesetting */
+	drm_mode_config_init(dev);
+	drm_mode_create_scaling_mode_property(dev);
+	drm_mode_create_dithering_property(dev);
+	dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
+	dev->mode_config.fb_base = pci_resource_start(dev->pdev, 1);
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+	if (dev_priv->card_type < NV_10) {
+		dev->mode_config.max_width = 2048;
+		dev->mode_config.max_height = 2048;
+	} else
+	if (dev_priv->card_type < NV_50) {
+		dev->mode_config.max_width = 4096;
+		dev->mode_config.max_height = 4096;
+	} else {
+		dev->mode_config.max_width = 8192;
+		dev->mode_config.max_height = 8192;
+	}
 
-	ret = nouveau_irq_init(dev);
+	ret = engine->display.create(dev);
 	if (ret)
-		goto out_vblank;
+		goto out_irq;
 
-	/* what about PVIDEO/PCRTC/PRAMDAC etc? */
+	nouveau_backlight_init(dev);
 
 	if (dev_priv->eng[NVOBJ_ENGINE_GR]) {
 		ret = nouveau_fence_init(dev);
 		if (ret)
-			goto out_irq;
+			goto out_disp;
 
-		ret = nouveau_card_init_channel(dev);
+		ret = nouveau_channel_alloc(dev, &dev_priv->channel, NULL,
+					    NvDmaFB, NvDmaTT);
 		if (ret)
 			goto out_fence;
+
+		mutex_unlock(&dev_priv->channel->mutex);
+	}
+
+	if (dev->mode_config.num_crtc) {
+		ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+		if (ret)
+			goto out_chan;
+
+		nouveau_fbcon_init(dev);
+		drm_kms_helper_poll_init(dev);
 	}
 
-	nouveau_fbcon_init(dev);
-	drm_kms_helper_poll_init(dev);
 	return 0;
 
+out_chan:
+	nouveau_channel_put_unlocked(&dev_priv->channel);
 out_fence:
 	nouveau_fence_fini(dev);
+out_disp:
+	nouveau_backlight_exit(dev);
+	engine->display.destroy(dev);
 out_irq:
 	nouveau_irq_fini(dev);
-out_vblank:
-	drm_vblank_cleanup(dev);
-	engine->display.destroy(dev);
 out_fifo:
 	if (!dev_priv->noaccel)
 		engine->fifo.takedown(dev);
@@ -732,15 +809,20 @@ static void nouveau_card_takedown(struct drm_device *dev)
 	struct nouveau_engine *engine = &dev_priv->engine;
 	int e;
 
-	drm_kms_helper_poll_fini(dev);
-	nouveau_fbcon_fini(dev);
+	if (dev->mode_config.num_crtc) {
+		drm_kms_helper_poll_fini(dev);
+		nouveau_fbcon_fini(dev);
+		drm_vblank_cleanup(dev);
+	}
 
 	if (dev_priv->channel) {
 		nouveau_channel_put_unlocked(&dev_priv->channel);
 		nouveau_fence_fini(dev);
 	}
 
+	nouveau_backlight_exit(dev);
 	engine->display.destroy(dev);
+	drm_mode_config_cleanup(dev);
 
 	if (!dev_priv->noaccel) {
 		engine->fifo.takedown(dev);
@@ -774,7 +856,6 @@ static void nouveau_card_takedown(struct drm_device *dev)
 	engine->vram.takedown(dev);
 
 	nouveau_irq_fini(dev);
-	drm_vblank_cleanup(dev);
 
 	nouveau_pm_fini(dev);
 	nouveau_bios_takedown(dev);
@@ -907,7 +988,7 @@ static int nouveau_remove_conflicting_drivers(struct drm_device *dev)
 int nouveau_load(struct drm_device *dev, unsigned long flags)
 {
 	struct drm_nouveau_private *dev_priv;
-	uint32_t reg0;
+	uint32_t reg0, strap;
 	resource_size_t mmio_start_offs;
 	int ret;
 
@@ -951,13 +1032,11 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
 
 	/* Time to determine the card architecture */
 	reg0 = nv_rd32(dev, NV03_PMC_BOOT_0);
-	dev_priv->stepping = 0; /* XXX: add stepping for pre-NV10? */
 
 	/* We're dealing with >=NV10 */
 	if ((reg0 & 0x0f000000) > 0) {
 		/* Bit 27-20 contain the architecture in hex */
 		dev_priv->chipset = (reg0 & 0xff00000) >> 20;
-		dev_priv->stepping = (reg0 & 0xff);
 	/* NV04 or NV05 */
 	} else if ((reg0 & 0xff00fff0) == 0x20004000) {
 		if (reg0 & 0x00f00000)
@@ -987,6 +1066,9 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
 	case 0xc0:
 		dev_priv->card_type = NV_C0;
 		break;
+	case 0xd0:
+		dev_priv->card_type = NV_D0;
+		break;
 	default:
 		NV_INFO(dev, "Unsupported chipset 0x%08x\n", reg0);
 		ret = -EINVAL;
@@ -996,6 +1078,23 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
 	NV_INFO(dev, "Detected an NV%2x generation card (0x%08x)\n",
 		dev_priv->card_type, reg0);
 
+	/* determine frequency of timing crystal */
+	strap = nv_rd32(dev, 0x101000);
+	if ( dev_priv->chipset < 0x17 ||
+	    (dev_priv->chipset >= 0x20 && dev_priv->chipset <= 0x25))
+		strap &= 0x00000040;
+	else
+		strap &= 0x00400040;
+
+	switch (strap) {
+	case 0x00000000: dev_priv->crystal = 13500; break;
+	case 0x00000040: dev_priv->crystal = 14318; break;
+	case 0x00400000: dev_priv->crystal = 27000; break;
+	case 0x00400040: dev_priv->crystal = 25000; break;
+	}
+
+	NV_DEBUG(dev, "crystal freq: %dKHz\n", dev_priv->crystal);
+
 	/* Determine whether we'll attempt acceleration or not, some
 	 * cards are disabled by default here due to them being known
 	 * non-functional, or never been tested due to lack of hw.
@@ -1030,7 +1129,7 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
 			ioremap(pci_resource_start(dev->pdev, ramin_bar),
 				dev_priv->ramin_size);
 		if (!dev_priv->ramin) {
-			NV_ERROR(dev, "Failed to PRAMIN BAR");
+			NV_ERROR(dev, "Failed to map PRAMIN BAR\n");
 			ret = -ENOMEM;
 			goto err_mmio;
 		}
@@ -1130,7 +1229,7 @@ int nouveau_ioctl_getparam(struct drm_device *dev, void *data,
 		getparam->value = 1;
 		break;
 	case NOUVEAU_GETPARAM_HAS_PAGEFLIP:
-		getparam->value = 1;
+		getparam->value = dev_priv->card_type < NV_D0;
 		break;
 	case NOUVEAU_GETPARAM_GRAPH_UNITS:
 		/* NV40 and NV50 versions are quite different, but register
@@ -1198,6 +1297,23 @@ nouveau_wait_ne(struct drm_device *dev, uint64_t timeout,
 	return false;
 }
 
+/* Wait until cond(data) == true, up until timeout has hit */
+bool
+nouveau_wait_cb(struct drm_device *dev, u64 timeout,
+		bool (*cond)(void *), void *data)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+	u64 start = ptimer->read(dev);
+
+	do {
+		if (cond(data) == true)
+			return true;
+	} while (ptimer->read(dev) - start < timeout);
+
+	return false;
+}
+
 /* Waits for PGRAPH to go completely idle */
 bool nouveau_wait_for_idle(struct drm_device *dev)
 {

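nouveau_wait_cb() added above is a generic poll-until-condition helper driven by the PTIMER. A userspace analogue of how a caller uses it, with clock_gettime() standing in for ptimer->read() and a made-up condition callback:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint64_t read_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
}

/* same shape as nouveau_wait_cb(): spin until cond(data) holds or the
 * timeout elapses, using a free-running timestamp source */
static bool wait_cb(uint64_t timeout, bool (*cond)(void *), void *data)
{
	uint64_t start = read_ns();

	do {
		if (cond(data))
			return true;
	} while (read_ns() - start < timeout);

	return false;
}

static bool flag_set(void *data)
{
	return *(volatile int *)data != 0;
}

int main(void)
{
	int flag = 1;  /* pretend the condition is already met */

	printf("%s\n", wait_cb(2000000ull, flag_set, &flag) ? "ok" : "timeout");
	return 0;
}
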
+ 20 - 20
drivers/gpu/drm/nouveau/nouveau_vm.c

@@ -172,9 +172,9 @@ nouveau_vm_unmap_pgt(struct nouveau_vm *vm, int big, u32 fpde, u32 lpde)
 			vm->map_pgt(vpgd->obj, pde, vpgt->obj);
 		}
 
-		mutex_unlock(&vm->mm->mutex);
+		mutex_unlock(&vm->mm.mutex);
 		nouveau_gpuobj_ref(NULL, &pgt);
-		mutex_lock(&vm->mm->mutex);
+		mutex_lock(&vm->mm.mutex);
 	}
 }
 
@@ -191,18 +191,18 @@ nouveau_vm_map_pgt(struct nouveau_vm *vm, u32 pde, u32 type)
 	pgt_size  = (1 << (vm->pgt_bits + 12)) >> type;
 	pgt_size *= 8;
 
-	mutex_unlock(&vm->mm->mutex);
+	mutex_unlock(&vm->mm.mutex);
 	ret = nouveau_gpuobj_new(vm->dev, NULL, pgt_size, 0x1000,
 				 NVOBJ_FLAG_ZERO_ALLOC, &pgt);
-	mutex_lock(&vm->mm->mutex);
+	mutex_lock(&vm->mm.mutex);
 	if (unlikely(ret))
 		return ret;
 
 	/* someone beat us to filling the PDE while we didn't have the lock */
 	if (unlikely(vpgt->refcount[big]++)) {
-		mutex_unlock(&vm->mm->mutex);
+		mutex_unlock(&vm->mm.mutex);
 		nouveau_gpuobj_ref(NULL, &pgt);
-		mutex_lock(&vm->mm->mutex);
+		mutex_lock(&vm->mm.mutex);
 		return 0;
 	}
 
@@ -223,10 +223,10 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
 	u32 fpde, lpde, pde;
 	int ret;
 
-	mutex_lock(&vm->mm->mutex);
-	ret = nouveau_mm_get(vm->mm, page_shift, msize, 0, align, &vma->node);
+	mutex_lock(&vm->mm.mutex);
+	ret = nouveau_mm_get(&vm->mm, page_shift, msize, 0, align, &vma->node);
 	if (unlikely(ret != 0)) {
-		mutex_unlock(&vm->mm->mutex);
+		mutex_unlock(&vm->mm.mutex);
 		return ret;
 	}
 
@@ -245,13 +245,13 @@ nouveau_vm_get(struct nouveau_vm *vm, u64 size, u32 page_shift,
 		if (ret) {
 			if (pde != fpde)
 				nouveau_vm_unmap_pgt(vm, big, fpde, pde - 1);
-			nouveau_mm_put(vm->mm, vma->node);
-			mutex_unlock(&vm->mm->mutex);
+			nouveau_mm_put(&vm->mm, vma->node);
+			mutex_unlock(&vm->mm.mutex);
 			vma->node = NULL;
 			return ret;
 		}
 	}
-	mutex_unlock(&vm->mm->mutex);
+	mutex_unlock(&vm->mm.mutex);
 
 	vma->vm     = vm;
 	vma->offset = (u64)vma->node->offset << 12;
@@ -270,11 +270,11 @@ nouveau_vm_put(struct nouveau_vma *vma)
 	fpde = (vma->node->offset >> vm->pgt_bits);
 	lpde = (vma->node->offset + vma->node->length - 1) >> vm->pgt_bits;
 
-	mutex_lock(&vm->mm->mutex);
+	mutex_lock(&vm->mm.mutex);
 	nouveau_vm_unmap_pgt(vm, vma->node->type != vm->spg_shift, fpde, lpde);
-	nouveau_mm_put(vm->mm, vma->node);
+	nouveau_mm_put(&vm->mm, vma->node);
 	vma->node = NULL;
-	mutex_unlock(&vm->mm->mutex);
+	mutex_unlock(&vm->mm.mutex);
 }
 
 int
@@ -306,7 +306,7 @@ nouveau_vm_new(struct drm_device *dev, u64 offset, u64 length, u64 mm_offset,
 			block = length;
 
 	} else
-	if (dev_priv->card_type == NV_C0) {
+	if (dev_priv->card_type >= NV_C0) {
 		vm->map_pgt = nvc0_vm_map_pgt;
 		vm->map = nvc0_vm_map;
 		vm->map_sg = nvc0_vm_map_sg;
@@ -360,11 +360,11 @@ nouveau_vm_link(struct nouveau_vm *vm, struct nouveau_gpuobj *pgd)
 
 	nouveau_gpuobj_ref(pgd, &vpgd->obj);
 
-	mutex_lock(&vm->mm->mutex);
+	mutex_lock(&vm->mm.mutex);
 	for (i = vm->fpde; i <= vm->lpde; i++)
 		vm->map_pgt(pgd, i, vm->pgt[i - vm->fpde].obj);
 	list_add(&vpgd->head, &vm->pgd_list);
-	mutex_unlock(&vm->mm->mutex);
+	mutex_unlock(&vm->mm.mutex);
 	return 0;
 }
 
@@ -377,7 +377,7 @@ nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
 	if (!mpgd)
 		return;
 
-	mutex_lock(&vm->mm->mutex);
+	mutex_lock(&vm->mm.mutex);
 	list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) {
 		if (vpgd->obj == mpgd) {
 			pgd = vpgd->obj;
@@ -386,7 +386,7 @@ nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd)
 			break;
 		}
 	}
-	mutex_unlock(&vm->mm->mutex);
+	mutex_unlock(&vm->mm.mutex);
 
 	nouveau_gpuobj_ref(NULL, &pgd);
 }
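
In the nouveau_vm_map_pgt() hunk further up, the mm mutex is dropped around the gpuobj allocation and the PDE refcount is rechecked afterwards, because another thread may have filled the slot while the lock was released. A userspace sketch of that unlock/allocate/relock-and-recheck pattern (a pthread mutex and malloc() stand in for the driver's mutex and nouveau_gpuobj_new()):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* one page-table slot, guarded by the address-space lock */
struct slot {
	pthread_mutex_t *lock;
	int refcount;
	void *obj;
};

/* caller holds s->lock on entry and on return */
static int slot_get(struct slot *s)
{
	void *obj;

	pthread_mutex_unlock(s->lock);     /* allocation may sleep: drop the lock */
	obj = malloc(4096);
	pthread_mutex_lock(s->lock);
	if (!obj)
		return -1;

	if (s->refcount++) {               /* someone beat us to filling the slot */
		pthread_mutex_unlock(s->lock);
		free(obj);                 /* drop our copy outside the lock */
		pthread_mutex_lock(s->lock);
		return 0;
	}

	s->obj = obj;
	return 0;
}

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	struct slot s = { .lock = &lock };

	pthread_mutex_lock(&lock);
	slot_get(&s);
	pthread_mutex_unlock(&lock);
	free(s.obj);
	return 0;
}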

+ 1 - 1
drivers/gpu/drm/nouveau/nouveau_vm.h

@@ -51,7 +51,7 @@ struct nouveau_vma {
 
 struct nouveau_vm {
 	struct drm_device *dev;
-	struct nouveau_mm *mm;
+	struct nouveau_mm mm;
 	int refcount;
 
 	struct list_head pgd_list;

+ 37 - 9
drivers/gpu/drm/nouveau/nouveau_volt.c

@@ -27,7 +27,7 @@
 #include "nouveau_drv.h"
 #include "nouveau_pm.h"
 
-static const enum dcb_gpio_tag vidtag[] = { 0x04, 0x05, 0x06, 0x1a };
+static const enum dcb_gpio_tag vidtag[] = { 0x04, 0x05, 0x06, 0x1a, 0x73 };
 static int nr_vidtag = sizeof(vidtag) / sizeof(vidtag[0]);
 
 int
@@ -170,6 +170,13 @@ nouveau_volt_init(struct drm_device *dev)
 		 */
 		vidshift  = 2;
 		break;
+	case 0x40:
+		headerlen = volt[1];
+		recordlen = volt[2];
+		entries   = volt[3]; /* not a clue what the entries are for.. */
+		vidmask   = volt[11]; /* guess.. */
+		vidshift  = 0;
+		break;
 	default:
 		NV_WARN(dev, "voltage table 0x%02x unknown\n", volt[0]);
 		return;
@@ -197,16 +204,37 @@ nouveau_volt_init(struct drm_device *dev)
 	}
 
 	/* parse vbios entries into common format */
-	voltage->level = kcalloc(entries, sizeof(*voltage->level), GFP_KERNEL);
-	if (!voltage->level)
-		return;
+	voltage->version = volt[0];
+	if (voltage->version < 0x40) {
+		voltage->nr_level = entries;
+		voltage->level =
+			kcalloc(entries, sizeof(*voltage->level), GFP_KERNEL);
+		if (!voltage->level)
+			return;
 
-	entry = volt + headerlen;
-	for (i = 0; i < entries; i++, entry += recordlen) {
-		voltage->level[i].voltage = entry[0];
-		voltage->level[i].vid     = entry[1] >> vidshift;
+		entry = volt + headerlen;
+		for (i = 0; i < entries; i++, entry += recordlen) {
+			voltage->level[i].voltage = entry[0] * 10000;
+			voltage->level[i].vid     = entry[1] >> vidshift;
+		}
+	} else {
+		u32 volt_uv = ROM32(volt[4]);
+		s16 step_uv = ROM16(volt[8]);
+		u8 vid;
+
+		voltage->nr_level = voltage->vid_mask + 1;
+		voltage->level = kcalloc(voltage->nr_level,
+					 sizeof(*voltage->level), GFP_KERNEL);
+		if (!voltage->level)
+			return;
+
+		for (vid = 0; vid <= voltage->vid_mask; vid++) {
+			voltage->level[vid].voltage = volt_uv;
+			voltage->level[vid].vid = vid;
+			volt_uv += step_uv;
+		}
 	}
-	voltage->nr_level  = entries;
+
 	voltage->supported = true;
 }
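
For voltage table version 0x40 the levels are no longer read one entry at a time; they are generated as a linear ramp, voltage[vid] = base + vid * step, across vid_mask + 1 VIDs. A standalone trace of that construction with made-up base/step values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t volt_uv = 825000;   /* hypothetical base, ROM32(volt[4]) in the driver */
	int16_t  step_uv = 6250;     /* hypothetical signed step, ROM16(volt[8]) */
	unsigned vid_mask = 0x3f;    /* 64 VIDs */
	unsigned vid;

	for (vid = 0; vid <= vid_mask; vid += 21)    /* print a few of the levels */
		printf("vid %2u -> %u uV\n", vid, volt_uv + vid * step_uv);
	return 0;
}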
 

+ 0 - 23
drivers/gpu/drm/nouveau/nv04_display.c

@@ -126,27 +126,6 @@ nv04_display_create(struct drm_device *dev)
 
 	nouveau_hw_save_vga_fonts(dev, 1);
 
-	drm_mode_config_init(dev);
-	drm_mode_create_scaling_mode_property(dev);
-	drm_mode_create_dithering_property(dev);
-
-	dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;
-
-	dev->mode_config.min_width = 0;
-	dev->mode_config.min_height = 0;
-	switch (dev_priv->card_type) {
-	case NV_04:
-		dev->mode_config.max_width = 2048;
-		dev->mode_config.max_height = 2048;
-		break;
-	default:
-		dev->mode_config.max_width = 4096;
-		dev->mode_config.max_height = 4096;
-		break;
-	}
-
-	dev->mode_config.fb_base = dev_priv->fb_phys;
-
 	nv04_crtc_create(dev, 0);
 	if (nv_two_heads(dev))
 		nv04_crtc_create(dev, 1);
@@ -235,8 +214,6 @@ nv04_display_destroy(struct drm_device *dev)
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head)
 		crtc->funcs->restore(crtc);
 
-	drm_mode_config_cleanup(dev);
-
 	nouveau_hw_save_vga_fonts(dev, 0);
 }
 

+ 4 - 0
drivers/gpu/drm/nouveau/nv04_pm.c

@@ -68,6 +68,7 @@ void
 nv04_pm_clock_set(struct drm_device *dev, void *pre_state)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
 	struct nv04_pm_state *state = pre_state;
 	u32 reg = state->pll.reg;
 
@@ -85,6 +86,9 @@ nv04_pm_clock_set(struct drm_device *dev, void *pre_state)
 		nv_mask(dev, 0x1002c0, 0, 1 << 8);
 	}
 
+	if (reg == NV_PRAMDAC_NVPLL_COEFF)
+		ptimer->init(dev);
+
 	kfree(state);
 }
 

+ 58 - 26
drivers/gpu/drm/nouveau/nv04_timer.c

@@ -6,43 +6,75 @@
 int
 nv04_timer_init(struct drm_device *dev)
 {
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	u32 m, n, d;
+
 	nv_wr32(dev, NV04_PTIMER_INTR_EN_0, 0x00000000);
 	nv_wr32(dev, NV04_PTIMER_INTR_0, 0xFFFFFFFF);
 
-	/* Just use the pre-existing values when possible for now; these regs
-	 * are not written in nv (driver writer missed a /4 on the address), and
-	 * writing 8 and 3 to the correct regs breaks the timings on the LVDS
-	 * hardware sequencing microcode.
-	 * A correct solution (involving calculations with the GPU PLL) can
-	 * be done when kernel modesetting lands
-	 */
-	if (!nv_rd32(dev, NV04_PTIMER_NUMERATOR) ||
-				!nv_rd32(dev, NV04_PTIMER_DENOMINATOR)) {
-		nv_wr32(dev, NV04_PTIMER_NUMERATOR, 0x00000008);
-		nv_wr32(dev, NV04_PTIMER_DENOMINATOR, 0x00000003);
+	/* aim for 31.25MHz, which gives us nanosecond timestamps */
+	d = 1000000 / 32;
+
+	/* determine base clock for timer source */
+	if (dev_priv->chipset < 0x40) {
+		n = dev_priv->engine.pm.clock_get(dev, PLL_CORE);
+	} else
+	if (dev_priv->chipset == 0x40) {
+		/*XXX: figure this out */
+		n = 0;
+	} else {
+		n = dev_priv->crystal;
+		m = 1;
+		while (n < (d * 2)) {
+			n += (n / m);
+			m++;
+		}
+
+		nv_wr32(dev, 0x009220, m - 1);
+	}
+
+	if (!n) {
+		NV_WARN(dev, "PTIMER: unknown input clock freq\n");
+		if (!nv_rd32(dev, NV04_PTIMER_NUMERATOR) ||
+		    !nv_rd32(dev, NV04_PTIMER_DENOMINATOR)) {
+			nv_wr32(dev, NV04_PTIMER_NUMERATOR, 1);
+			nv_wr32(dev, NV04_PTIMER_DENOMINATOR, 1);
+		}
+		return 0;
+	}
+
+	/* reduce ratio to acceptable values */
+	while (((n % 5) == 0) && ((d % 5) == 0)) {
+		n /= 5;
+		d /= 5;
 	}
 
+	while (((n % 2) == 0) && ((d % 2) == 0)) {
+		n /= 2;
+		d /= 2;
+	}
+
+	while (n > 0xffff || d > 0xffff) {
+		n >>= 1;
+		d >>= 1;
+	}
+
+	nv_wr32(dev, NV04_PTIMER_NUMERATOR, n);
+	nv_wr32(dev, NV04_PTIMER_DENOMINATOR, d);
 	return 0;
 }
 
-uint64_t
+u64
 nv04_timer_read(struct drm_device *dev)
 {
-	uint32_t low;
-	/* From kmmio dumps on nv28 this looks like how the blob does this.
-	 * It reads the high dword twice, before and after.
-	 * The only explanation seems to be that the 64-bit timer counter
-	 * advances between high and low dword reads and may corrupt the
-	 * result. Not confirmed.
-	 */
-	uint32_t high2 = nv_rd32(dev, NV04_PTIMER_TIME_1);
-	uint32_t high1;
+	u32 hi, lo;
+
 	do {
-		high1 = high2;
-		low = nv_rd32(dev, NV04_PTIMER_TIME_0);
-		high2 = nv_rd32(dev, NV04_PTIMER_TIME_1);
-	} while (high1 != high2);
-	return (((uint64_t)high2) << 32) | (uint64_t)low;
+		hi = nv_rd32(dev, NV04_PTIMER_TIME_1);
+		lo = nv_rd32(dev, NV04_PTIMER_TIME_0);
+	} while (hi != nv_rd32(dev, NV04_PTIMER_TIME_1));
+
+	return ((u64)hi << 32 | lo);
 }
 
 void
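
The new nv04_timer_init() derives PTIMER's NUMERATOR/DENOMINATOR from the actual input clock instead of hardcoding 8/3. Tracing the arithmetic for a hypothetical 27000kHz crystal on a chipset that takes the final branch of the hunk above:

#include <stdio.h>

int main(void)
{
	unsigned n = 27000;        /* crystal frequency in kHz (assumed) */
	unsigned d = 1000000 / 32; /* 31250kHz: the 31.25MHz target */
	unsigned m = 1;

	while (n < d * 2) {        /* scale n up until it is at least 2*d */
		n += n / m;
		m++;
	}
	/* 27000 -> 54000 -> 81000; the driver writes m - 1 = 2 to 0x009220 */

	while (n % 5 == 0 && d % 5 == 0) { n /= 5; d /= 5; }    /* strip common 5s */
	while (n % 2 == 0 && d % 2 == 0) { n /= 2; d /= 2; }    /* strip common 2s */
	while (n > 0xffff || d > 0xffff) { n >>= 1; d >>= 1; }  /* fit 16-bit regs */

	printf("NUMERATOR=%u DENOMINATOR=%u\n", n, d);  /* 324 and 125 */
	return 0;
}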

+ 62 - 29
drivers/gpu/drm/nouveau/nv40_mpeg.c → drivers/gpu/drm/nouveau/nv31_mpeg.c

@@ -26,10 +26,32 @@
 #include "nouveau_drv.h"
 #include "nouveau_ramht.h"
 
-struct nv40_mpeg_engine {
+struct nv31_mpeg_engine {
 	struct nouveau_exec_engine base;
+	atomic_t refcount;
 };
 
+
+static int
+nv31_mpeg_context_new(struct nouveau_channel *chan, int engine)
+{
+	struct nv31_mpeg_engine *pmpeg = nv_engine(chan->dev, engine);
+
+	if (!atomic_add_unless(&pmpeg->refcount, 1, 1))
+		return -EBUSY;
+
+	chan->engctx[engine] = (void *)0xdeadcafe;
+	return 0;
+}
+
+static void
+nv31_mpeg_context_del(struct nouveau_channel *chan, int engine)
+{
+	struct nv31_mpeg_engine *pmpeg = nv_engine(chan->dev, engine);
+	atomic_dec(&pmpeg->refcount);
+	chan->engctx[engine] = NULL;
+}
+
 static int
 nv40_mpeg_context_new(struct nouveau_channel *chan, int engine)
 {
@@ -81,7 +103,7 @@ nv40_mpeg_context_del(struct nouveau_channel *chan, int engine)
 }
 
 static int
-nv40_mpeg_object_new(struct nouveau_channel *chan, int engine,
+nv31_mpeg_object_new(struct nouveau_channel *chan, int engine,
 		      u32 handle, u16 class)
 {
 	struct drm_device *dev = chan->dev;
@@ -103,10 +125,10 @@ nv40_mpeg_object_new(struct nouveau_channel *chan, int engine,
 }
 
 static int
-nv40_mpeg_init(struct drm_device *dev, int engine)
+nv31_mpeg_init(struct drm_device *dev, int engine)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nv40_mpeg_engine *pmpeg = nv_engine(dev, engine);
+	struct nv31_mpeg_engine *pmpeg = nv_engine(dev, engine);
 	int i;
 
 	/* VPE init */
@@ -121,7 +143,7 @@ nv40_mpeg_init(struct drm_device *dev, int engine)
 	/* PMPEG init */
 	nv_wr32(dev, 0x00b32c, 0x00000000);
 	nv_wr32(dev, 0x00b314, 0x00000100);
-	nv_wr32(dev, 0x00b220, 0x00000044);
+	nv_wr32(dev, 0x00b220, nv44_graph_class(dev) ? 0x00000044 : 0x00000031);
 	nv_wr32(dev, 0x00b300, 0x02001ec1);
 	nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
 
@@ -137,7 +159,7 @@ nv40_mpeg_init(struct drm_device *dev, int engine)
 }
 
 static int
-nv40_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
+nv31_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
 {
 	/*XXX: context save? */
 	nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
@@ -146,7 +168,7 @@ nv40_mpeg_fini(struct drm_device *dev, int engine, bool suspend)
 }
 
 static int
-nv40_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
+nv31_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
 {
 	struct drm_device *dev = chan->dev;
 	u32 inst = data << 4;
@@ -184,13 +206,17 @@ nv40_mpeg_mthd_dma(struct nouveau_channel *chan, u32 class, u32 mthd, u32 data)
 }
 
 static int
-nv40_mpeg_isr_chid(struct drm_device *dev, u32 inst)
+nv31_mpeg_isr_chid(struct drm_device *dev, u32 inst)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *ctx;
 	unsigned long flags;
 	int i;
 
+	/* hardcode drm channel id on nv3x, so swmthd lookup works */
+	if (dev_priv->card_type < NV_40)
+		return 0;
+
 	spin_lock_irqsave(&dev_priv->channels.lock, flags);
 	for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
 		if (!dev_priv->channels.ptr[i])
@@ -205,7 +231,7 @@ nv40_mpeg_isr_chid(struct drm_device *dev, u32 inst)
 }
 
 static void
-nv40_vpe_set_tile_region(struct drm_device *dev, int i)
+nv31_vpe_set_tile_region(struct drm_device *dev, int i)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
@@ -216,10 +242,10 @@ nv40_vpe_set_tile_region(struct drm_device *dev, int i)
 }
 
 static void
-nv40_mpeg_isr(struct drm_device *dev)
+nv31_mpeg_isr(struct drm_device *dev)
 {
 	u32 inst = (nv_rd32(dev, 0x00b318) & 0x000fffff) << 4;
-	u32 chid = nv40_mpeg_isr_chid(dev, inst);
+	u32 chid = nv31_mpeg_isr_chid(dev, inst);
 	u32 stat = nv_rd32(dev, 0x00b100);
 	u32 type = nv_rd32(dev, 0x00b230);
 	u32 mthd = nv_rd32(dev, 0x00b234);
@@ -249,10 +275,10 @@ nv40_mpeg_isr(struct drm_device *dev)
 }
 
 static void
-nv40_vpe_isr(struct drm_device *dev)
+nv31_vpe_isr(struct drm_device *dev)
 {
 	if (nv_rd32(dev, 0x00b100))
-		nv40_mpeg_isr(dev);
+		nv31_mpeg_isr(dev);
 
 	if (nv_rd32(dev, 0x00b800)) {
 		u32 stat = nv_rd32(dev, 0x00b800);
@@ -262,9 +288,9 @@ nv40_vpe_isr(struct drm_device *dev)
 }
 
 static void
-nv40_mpeg_destroy(struct drm_device *dev, int engine)
+nv31_mpeg_destroy(struct drm_device *dev, int engine)
 {
-	struct nv40_mpeg_engine *pmpeg = nv_engine(dev, engine);
+	struct nv31_mpeg_engine *pmpeg = nv_engine(dev, engine);
 
 	nouveau_irq_unregister(dev, 0);
 
@@ -273,34 +299,41 @@ nv40_mpeg_destroy(struct drm_device *dev, int engine)
 }
 
 int
-nv40_mpeg_create(struct drm_device *dev)
+nv31_mpeg_create(struct drm_device *dev)
 {
-	struct nv40_mpeg_engine *pmpeg;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nv31_mpeg_engine *pmpeg;
 
 	pmpeg = kzalloc(sizeof(*pmpeg), GFP_KERNEL);
 	if (!pmpeg)
 		return -ENOMEM;
-
-	pmpeg->base.destroy = nv40_mpeg_destroy;
-	pmpeg->base.init = nv40_mpeg_init;
-	pmpeg->base.fini = nv40_mpeg_fini;
-	pmpeg->base.context_new = nv40_mpeg_context_new;
-	pmpeg->base.context_del = nv40_mpeg_context_del;
-	pmpeg->base.object_new = nv40_mpeg_object_new;
+	atomic_set(&pmpeg->refcount, 0);
+
+	pmpeg->base.destroy = nv31_mpeg_destroy;
+	pmpeg->base.init = nv31_mpeg_init;
+	pmpeg->base.fini = nv31_mpeg_fini;
+	if (dev_priv->card_type < NV_40) {
+		pmpeg->base.context_new = nv31_mpeg_context_new;
+		pmpeg->base.context_del = nv31_mpeg_context_del;
+	} else {
+		pmpeg->base.context_new = nv40_mpeg_context_new;
+		pmpeg->base.context_del = nv40_mpeg_context_del;
+	}
+	pmpeg->base.object_new = nv31_mpeg_object_new;
 
 	/* ISR vector, PMC_ENABLE bit,  and TILE regs are shared between
 	 * all VPE engines, for this driver's purposes the PMPEG engine
 	 * will be treated as the "master" and handle the global VPE
 	 * bits too
 	 */
-	pmpeg->base.set_tile_region = nv40_vpe_set_tile_region;
-	nouveau_irq_register(dev, 0, nv40_vpe_isr);
+	pmpeg->base.set_tile_region = nv31_vpe_set_tile_region;
+	nouveau_irq_register(dev, 0, nv31_vpe_isr);
 
 	NVOBJ_ENGINE_ADD(dev, MPEG, &pmpeg->base);
 	NVOBJ_CLASS(dev, 0x3174, MPEG);
-	NVOBJ_MTHD (dev, 0x3174, 0x0190, nv40_mpeg_mthd_dma);
-	NVOBJ_MTHD (dev, 0x3174, 0x01a0, nv40_mpeg_mthd_dma);
-	NVOBJ_MTHD (dev, 0x3174, 0x01b0, nv40_mpeg_mthd_dma);
+	NVOBJ_MTHD (dev, 0x3174, 0x0190, nv31_mpeg_mthd_dma);
+	NVOBJ_MTHD (dev, 0x3174, 0x01a0, nv31_mpeg_mthd_dma);
+	NVOBJ_MTHD (dev, 0x3174, 0x01b0, nv31_mpeg_mthd_dma);
 
 #if 0
 	NVOBJ_ENGINE_ADD(dev, ME, &pme->base);

Some files were not shown because too many files changed in this diff
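
The new nv31_mpeg context hooks allow only one channel to own the engine at a time, which is why context_new gates on atomic_add_unless(&pmpeg->refcount, 1, 1). Since the count only ever moves between 0 and 1, a compare-and-swap is an equivalent way to sketch that single-slot gate in userspace:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* add 1 unless the count is already 1; with a 0/1 count this is simply
 * "claim the slot if nobody holds it" */
static bool grab_context(atomic_int *refcount)
{
	int expected = 0;
	return atomic_compare_exchange_strong(refcount, &expected, 1);
}

static void put_context(atomic_int *refcount)
{
	atomic_fetch_sub(refcount, 1);
}

int main(void)
{
	atomic_int refcount = 0;

	printf("first open:  %s\n", grab_context(&refcount) ? "ok" : "-EBUSY");
	printf("second open: %s\n", grab_context(&refcount) ? "ok" : "-EBUSY");
	put_context(&refcount);
	return 0;
}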