
Merge branch 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (45 commits)
  drm/nv04: Fix set_operation software method.
  drm/nouveau: initialise DMA tracking parameters earlier
  drm/nouveau: use dma.max rather than pushbuf size for checking GET validity
  drm/nv04: differentiate between nv04/nv05
  drm/nouveau: Fix null deref in nouveau_fence_emit due to deleted fence
  drm/nv50: prevent a possible ctxprog hang
  drm/nouveau: have ttm's fault handler called directly
  drm/nv50: restore correct cache1 get/put address on fifoctx load
  drm/nouveau: create function for "dealing" with gpu lockup
  drm/nouveau: remove unused nouveau_channel_idle() function
  drm/nouveau: fix handling of fbcon colours in 8bpp
  drm/nv04: Context switching fixes.
  drm/nouveau: Use the software object for fencing.
  drm/nouveau: Allocate a per-channel instance of NV_SW.
  drm/nv50: make the blocksize depend on vram size
  drm/nouveau: better alignment of bo sizes and use roundup instead of ALIGN
  drm/nouveau: Don't skip card take down on nv0x.
  drm/nouveau: Implement nv42-nv43 TV load detection.
  drm/nouveau: Clean up the nv17-nv4x load detection code a bit.
  drm/nv50: fix fillrect color
  ...
Linus Torvalds, 15 years ago
commit c07d7237a6
65 files changed, 2411 insertions(+), 973 deletions(-)
  1. drivers/gpu/drm/drm_crtc.c (+1 -0)
  2. drivers/gpu/drm/drm_crtc_helper.c (+24 -2)
  3. drivers/gpu/drm/drm_fb_helper.c (+4 -5)
  4. drivers/gpu/drm/drm_irq.c (+4 -1)
  5. drivers/gpu/drm/nouveau/Kconfig (+2 -3)
  6. drivers/gpu/drm/nouveau/nouveau_bo.c (+166 -77)
  7. drivers/gpu/drm/nouveau/nouveau_channel.c (+5 -42)
  8. drivers/gpu/drm/nouveau/nouveau_dma.c (+27 -7)
  9. drivers/gpu/drm/nouveau/nouveau_dma.h (+6 -4)
  10. drivers/gpu/drm/nouveau/nouveau_drv.h (+57 -15)
  11. drivers/gpu/drm/nouveau/nouveau_fbcon.c (+13 -6)
  12. drivers/gpu/drm/nouveau/nouveau_fbcon.h (+1 -0)
  13. drivers/gpu/drm/nouveau/nouveau_fence.c (+1 -1)
  14. drivers/gpu/drm/nouveau/nouveau_gem.c (+13 -20)
  15. drivers/gpu/drm/nouveau/nouveau_irq.c (+1 -0)
  16. drivers/gpu/drm/nouveau/nouveau_mem.c (+87 -0)
  17. drivers/gpu/drm/nouveau/nouveau_object.c (+1 -1)
  18. drivers/gpu/drm/nouveau/nouveau_reg.h (+8 -8)
  19. drivers/gpu/drm/nouveau/nouveau_state.c (+24 -3)
  20. drivers/gpu/drm/nouveau/nouveau_ttm.c (+1 -29)
  21. drivers/gpu/drm/nouveau/nv04_dac.c (+19 -16)
  22. drivers/gpu/drm/nouveau/nv04_fbcon.c (+20 -21)
  23. drivers/gpu/drm/nouveau/nv04_fifo.c (+34 -0)
  24. drivers/gpu/drm/nouveau/nv04_graph.c (+82 -77)
  25. drivers/gpu/drm/nouveau/nv10_fb.c (+26 -6)
  26. drivers/gpu/drm/nouveau/nv10_graph.c (+17 -11)
  27. drivers/gpu/drm/nouveau/nv17_tv.c (+105 -10)
  28. drivers/gpu/drm/nouveau/nv20_graph.c (+28 -33)
  29. drivers/gpu/drm/nouveau/nv40_fb.c (+33 -20)
  30. drivers/gpu/drm/nouveau/nv40_graph.c (+47 -69)
  31. drivers/gpu/drm/nouveau/nv50_display.c (+17 -0)
  32. drivers/gpu/drm/nouveau/nv50_fbcon.c (+10 -13)
  33. drivers/gpu/drm/nouveau/nv50_fifo.c (+2 -4)
  34. drivers/gpu/drm/radeon/Makefile (+5 -0)
  35. drivers/gpu/drm/radeon/ObjectID.h (+433 -368)
  36. drivers/gpu/drm/radeon/atombios_dp.c (+4 -2)
  37. drivers/gpu/drm/radeon/mkregtable.c (+3 -1)
  38. drivers/gpu/drm/radeon/r100.c (+14 -9)
  39. drivers/gpu/drm/radeon/r300.c (+16 -1)
  40. drivers/gpu/drm/radeon/r420.c (+40 -1)
  41. drivers/gpu/drm/radeon/r520.c (+1 -0)
  42. drivers/gpu/drm/radeon/r600.c (+13 -8)
  43. drivers/gpu/drm/radeon/r600_blit_kms.c (+2 -2)
  44. drivers/gpu/drm/radeon/radeon.h (+6 -3)
  45. drivers/gpu/drm/radeon/radeon_agp.c (+2 -4)
  46. drivers/gpu/drm/radeon/radeon_asic.h (+0 -12)
  47. drivers/gpu/drm/radeon/radeon_atombios.c (+40 -1)
  48. drivers/gpu/drm/radeon/radeon_combios.c (+14 -0)
  49. drivers/gpu/drm/radeon/radeon_connectors.c (+17 -6)
  50. drivers/gpu/drm/radeon/radeon_display.c (+5 -2)
  51. drivers/gpu/drm/radeon/radeon_encoders.c (+8 -6)
  52. drivers/gpu/drm/radeon/radeon_gem.c (+0 -2)
  53. drivers/gpu/drm/radeon/radeon_irq_kms.c (+8 -2)
  54. drivers/gpu/drm/radeon/radeon_legacy_tv.c (+7 -7)
  55. drivers/gpu/drm/radeon/radeon_mode.h (+0 -26)
  56. drivers/gpu/drm/radeon/radeon_object.c (+3 -2)
  57. drivers/gpu/drm/radeon/reg_srcs/r420 (+795 -0)
  58. drivers/gpu/drm/radeon/reg_srcs/rs600 (+67 -1)
  59. drivers/gpu/drm/radeon/reg_srcs/rv515 (+6 -0)
  60. drivers/gpu/drm/radeon/rs400.c (+2 -0)
  61. drivers/gpu/drm/radeon/rs600.c (+9 -1)
  62. drivers/gpu/drm/radeon/rs690.c (+2 -0)
  63. drivers/gpu/drm/radeon/rv515.c (+1 -0)
  64. drivers/gpu/drm/radeon/rv770.c (+1 -2)
  65. include/drm/drm_mode.h (+1 -0)

+ 1 - 0
drivers/gpu/drm/drm_crtc.c

@@ -158,6 +158,7 @@ static struct drm_conn_prop_enum_list drm_connector_enum_list[] =
 	{ DRM_MODE_CONNECTOR_HDMIA, "HDMI Type A", 0 },
 	{ DRM_MODE_CONNECTOR_HDMIB, "HDMI Type B", 0 },
 	{ DRM_MODE_CONNECTOR_TV, "TV", 0 },
+	{ DRM_MODE_CONNECTOR_eDP, "Embedded DisplayPort", 0 },
 };
 
 static struct drm_prop_enum_list drm_encoder_enum_list[] =

+ 24 - 2
drivers/gpu/drm/drm_crtc_helper.c

@@ -216,7 +216,7 @@ bool drm_helper_crtc_in_use(struct drm_crtc *crtc)
 EXPORT_SYMBOL(drm_helper_crtc_in_use);
 
 /**
- * drm_disable_unused_functions - disable unused objects
+ * drm_helper_disable_unused_functions - disable unused objects
  * @dev: DRM device
  *
  * LOCKING:
@@ -1032,7 +1032,7 @@ bool drm_helper_initial_config(struct drm_device *dev)
 	/*
 	 * we shouldn't end up with no modes here.
 	 */
-	WARN(!count, "No connectors reported connected with modes\n");
+	printk(KERN_INFO "No connectors reported conncted with modes\n");
 
 	drm_setup_crtcs(dev);
 
@@ -1162,6 +1162,9 @@ EXPORT_SYMBOL(drm_helper_mode_fill_fb_struct);
 int drm_helper_resume_force_mode(struct drm_device *dev)
 {
 	struct drm_crtc *crtc;
+	struct drm_encoder *encoder;
+	struct drm_encoder_helper_funcs *encoder_funcs;
+	struct drm_crtc_helper_funcs *crtc_funcs;
 	int ret;
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
@@ -1174,6 +1177,25 @@ int drm_helper_resume_force_mode(struct drm_device *dev)
 
 		if (ret == false)
 			DRM_ERROR("failed to set mode on crtc %p\n", crtc);
+
+		/* Turn off outputs that were already powered off */
+		if (drm_helper_choose_crtc_dpms(crtc)) {
+			list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+
+				if(encoder->crtc != crtc)
+					continue;
+
+				encoder_funcs = encoder->helper_private;
+				if (encoder_funcs->dpms)
+					(*encoder_funcs->dpms) (encoder,
+								drm_helper_choose_encoder_dpms(encoder));
+
+				crtc_funcs = crtc->helper_private;
+				if (crtc_funcs->dpms)
+					(*crtc_funcs->dpms) (crtc,
+							     drm_helper_choose_crtc_dpms(crtc));
+			}
+		}
 	}
 	/* disable the unused connectors while restoring the modesetting */
 	drm_helper_disable_unused_functions(dev);

+ 4 - 5
drivers/gpu/drm/drm_fb_helper.c

@@ -606,11 +606,10 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
 		return -EINVAL;
 
 	/* Need to resize the fb object !!! */
-	if (var->xres > fb->width || var->yres > fb->height) {
-		DRM_ERROR("Requested width/height is greater than current fb "
-			   "object %dx%d > %dx%d\n", var->xres, var->yres,
-			   fb->width, fb->height);
-		DRM_ERROR("Need resizing code.\n");
+	if (var->bits_per_pixel > fb->bits_per_pixel || var->xres > fb->width || var->yres > fb->height) {
+		DRM_DEBUG("fb userspace requested width/height/bpp is greater than current fb "
+			  "object %dx%d-%d > %dx%d-%d\n", var->xres, var->yres, var->bits_per_pixel,
+			  fb->width, fb->height, fb->bits_per_pixel);
 		return -EINVAL;
 	}
 

+ 4 - 1
drivers/gpu/drm/drm_irq.c

@@ -115,6 +115,7 @@ void drm_vblank_cleanup(struct drm_device *dev)
 
 	dev->num_crtcs = 0;
 }
+EXPORT_SYMBOL(drm_vblank_cleanup);
 
 int drm_vblank_init(struct drm_device *dev, int num_crtcs)
 {
@@ -163,7 +164,6 @@ int drm_vblank_init(struct drm_device *dev, int num_crtcs)
 	}
 
 	dev->vblank_disable_allowed = 0;
-
 	return 0;
 
 err:
@@ -493,6 +493,9 @@ EXPORT_SYMBOL(drm_vblank_off);
  */
 void drm_vblank_pre_modeset(struct drm_device *dev, int crtc)
 {
+	/* vblank is not initialized (IRQ not installed ?) */
+	if (!dev->num_crtcs)
+		return;
 	/*
 	 * To avoid all the problems that might happen if interrupts
 	 * were enabled/disabled around or between these calls, we just

+ 2 - 3
drivers/gpu/drm/nouveau/Kconfig

@@ -30,12 +30,11 @@ config DRM_NOUVEAU_DEBUG
 	  via debugfs.
 
 menu "I2C encoder or helper chips"
-     depends on DRM && I2C
+     depends on DRM && DRM_KMS_HELPER && I2C
 
 config DRM_I2C_CH7006
 	tristate "Chrontel ch7006 TV encoder"
-	depends on DRM_NOUVEAU
-	default m
+	default m if DRM_NOUVEAU
 	help
 	  Support for Chrontel ch7006 and similar TV encoders, found
 	  on some nVidia video cards.

+ 166 - 77
drivers/gpu/drm/nouveau/nouveau_bo.c

@@ -33,10 +33,13 @@
 #include "nouveau_drv.h"
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_dma.h"
 
 
+#include <linux/log2.h>
+
 static void
 static void
 nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 {
 {
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+	struct drm_device *dev = dev_priv->dev;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 
 
 	ttm_bo_kunmap(&nvbo->kmap);
 	ttm_bo_kunmap(&nvbo->kmap);
@@ -44,12 +47,87 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 	if (unlikely(nvbo->gem))
 	if (unlikely(nvbo->gem))
 		DRM_ERROR("bo %p still attached to GEM object\n", bo);
 		DRM_ERROR("bo %p still attached to GEM object\n", bo);
 
 
+	if (nvbo->tile)
+		nv10_mem_expire_tiling(dev, nvbo->tile, NULL);
+
 	spin_lock(&dev_priv->ttm.bo_list_lock);
 	spin_lock(&dev_priv->ttm.bo_list_lock);
 	list_del(&nvbo->head);
 	list_del(&nvbo->head);
 	spin_unlock(&dev_priv->ttm.bo_list_lock);
 	spin_unlock(&dev_priv->ttm.bo_list_lock);
 	kfree(nvbo);
 	kfree(nvbo);
 }
 }
 
 
+static void
+nouveau_bo_fixup_align(struct drm_device *dev,
+		       uint32_t tile_mode, uint32_t tile_flags,
+		       int *align, int *size)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	/*
+	 * Some of the tile_flags have a periodic structure of N*4096 bytes,
+	 * align to to that as well as the page size. Overallocate memory to
+	 * avoid corruption of other buffer objects.
+	 */
+	if (dev_priv->card_type == NV_50) {
+		uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15;
+		int i;
+
+		switch (tile_flags) {
+		case 0x1800:
+		case 0x2800:
+		case 0x4800:
+		case 0x7a00:
+			*size = roundup(*size, block_size);
+			if (is_power_of_2(block_size)) {
+				*size += 3 * block_size;
+				for (i = 1; i < 10; i++) {
+					*align = 12 * i * block_size;
+					if (!(*align % 65536))
+						break;
+				}
+			} else {
+				*size += 6 * block_size;
+				for (i = 1; i < 10; i++) {
+					*align = 8 * i * block_size;
+					if (!(*align % 65536))
+						break;
+				}
+			}
+			break;
+		default:
+			break;
+		}
+
+	} else {
+		if (tile_mode) {
+			if (dev_priv->chipset >= 0x40) {
+				*align = 65536;
+				*size = roundup(*size, 64 * tile_mode);
+
+			} else if (dev_priv->chipset >= 0x30) {
+				*align = 32768;
+				*size = roundup(*size, 64 * tile_mode);
+
+			} else if (dev_priv->chipset >= 0x20) {
+				*align = 16384;
+				*size = roundup(*size, 64 * tile_mode);
+
+			} else if (dev_priv->chipset >= 0x10) {
+				*align = 16384;
+				*size = roundup(*size, 32 * tile_mode);
+			}
+		}
+	}
+
+	/* ALIGN works only on powers of two. */
+	*size = roundup(*size, PAGE_SIZE);
+
+	if (dev_priv->card_type == NV_50) {
+		*size = roundup(*size, 65536);
+		*align = max(65536, *align);
+	}
+}
+
 int
 int
 nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
 nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
 	       int size, int align, uint32_t flags, uint32_t tile_mode,
 	       int size, int align, uint32_t flags, uint32_t tile_mode,
@@ -58,7 +136,7 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
 {
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_bo *nvbo;
 	struct nouveau_bo *nvbo;
-	int ret, n = 0;
+	int ret = 0;
 
 
 	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
 	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
 	if (!nvbo)
 	if (!nvbo)
@@ -70,59 +148,14 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
 	nvbo->tile_mode = tile_mode;
 	nvbo->tile_mode = tile_mode;
 	nvbo->tile_flags = tile_flags;
 	nvbo->tile_flags = tile_flags;
 
 
-	/*
-	 * Some of the tile_flags have a periodic structure of N*4096 bytes,
-	 * align to to that as well as the page size. Overallocate memory to
-	 * avoid corruption of other buffer objects.
-	 */
-	switch (tile_flags) {
-	case 0x1800:
-	case 0x2800:
-	case 0x4800:
-	case 0x7a00:
-		if (dev_priv->chipset >= 0xA0) {
-			/* This is based on high end cards with 448 bits
-			 * memory bus, could be different elsewhere.*/
-			size += 6 * 28672;
-			/* 8 * 28672 is the actual alignment requirement,
-			 * but we must also align to page size. */
-			align = 2 * 8 * 28672;
-		} else if (dev_priv->chipset >= 0x90) {
-			size += 3 * 16384;
-			align = 12 * 16834;
-		} else {
-			size += 3 * 8192;
-			/* 12 * 8192 is the actual alignment requirement,
-			 * but we must also align to page size. */
-			align = 2 * 12 * 8192;
-		}
-		break;
-	default:
-		break;
-	}
-
+	nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
 	align >>= PAGE_SHIFT;
 	align >>= PAGE_SHIFT;
 
 
-	size = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1);
-	if (dev_priv->card_type == NV_50) {
-		size = (size + 65535) & ~65535;
-		if (align < (65536 / PAGE_SIZE))
-			align = (65536 / PAGE_SIZE);
-	}
-
-	if (flags & TTM_PL_FLAG_VRAM)
-		nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
-	if (flags & TTM_PL_FLAG_TT)
-		nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
 	nvbo->placement.fpfn = 0;
 	nvbo->placement.fpfn = 0;
 	nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
 	nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
-	nvbo->placement.placement = nvbo->placements;
-	nvbo->placement.busy_placement = nvbo->placements;
-	nvbo->placement.num_placement = n;
-	nvbo->placement.num_busy_placement = n;
+	nouveau_bo_placement_set(nvbo, flags);
 
 
 	nvbo->channel = chan;
 	nvbo->channel = chan;
-	nouveau_bo_placement_set(nvbo, flags);
 	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
 	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
 			  ttm_bo_type_device, &nvbo->placement, align, 0,
 			  ttm_bo_type_device, &nvbo->placement, align, 0,
 			  false, NULL, size, nouveau_bo_del_ttm);
 			  false, NULL, size, nouveau_bo_del_ttm);
@@ -421,6 +454,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
 /* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 /* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
  * TTM_PL_{VRAM,TT} directly.
  * TTM_PL_{VRAM,TT} directly.
  */
  */
+
 static int
 static int
 nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
 nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
 			      struct nouveau_bo *nvbo, bool evict, bool no_wait,
 			      struct nouveau_bo *nvbo, bool evict, bool no_wait,
@@ -455,11 +489,12 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
 }
 }
 
 
 static int
 static int
-nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, int no_wait,
-		     struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
+		     int no_wait, struct ttm_mem_reg *new_mem)
 {
 {
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+	struct ttm_mem_reg *old_mem = &bo->mem;
 	struct nouveau_channel *chan;
 	struct nouveau_channel *chan;
 	uint64_t src_offset, dst_offset;
 	uint64_t src_offset, dst_offset;
 	uint32_t page_count;
 	uint32_t page_count;
@@ -547,7 +582,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 
 
 	placement.fpfn = placement.lpfn = 0;
 	placement.fpfn = placement.lpfn = 0;
 	placement.num_placement = placement.num_busy_placement = 1;
 	placement.num_placement = placement.num_busy_placement = 1;
-	placement.placement = &placement_memtype;
+	placement.placement = placement.busy_placement = &placement_memtype;
 
 
 	tmp_mem = *new_mem;
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
 	tmp_mem.mm_node = NULL;
@@ -559,7 +594,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
 	if (ret)
 	if (ret)
 		goto out;
 		goto out;
 
 
-	ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, &tmp_mem);
+	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
 	if (ret)
 	if (ret)
 		goto out;
 		goto out;
 
 
@@ -585,7 +620,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
 
 
 	placement.fpfn = placement.lpfn = 0;
 	placement.fpfn = placement.lpfn = 0;
 	placement.num_placement = placement.num_busy_placement = 1;
 	placement.num_placement = placement.num_busy_placement = 1;
-	placement.placement = &placement_memtype;
+	placement.placement = placement.busy_placement = &placement_memtype;
 
 
 	tmp_mem = *new_mem;
 	tmp_mem = *new_mem;
 	tmp_mem.mm_node = NULL;
 	tmp_mem.mm_node = NULL;
@@ -597,7 +632,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
 	if (ret)
 	if (ret)
 		goto out;
 		goto out;
 
 
-	ret = nouveau_bo_move_m2mf(bo, true, no_wait, &bo->mem, new_mem);
+	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
 	if (ret)
 	if (ret)
 		goto out;
 		goto out;
 
 
@@ -612,52 +647,106 @@ out:
 }
 }
 
 
 static int
 static int
-nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
-		bool no_wait, struct ttm_mem_reg *new_mem)
+nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
+		   struct nouveau_tile_reg **new_tile)
 {
 {
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
-	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct drm_device *dev = dev_priv->dev;
 	struct drm_device *dev = dev_priv->dev;
-	struct ttm_mem_reg *old_mem = &bo->mem;
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	uint64_t offset;
 	int ret;
 	int ret;
 
 
-	if (dev_priv->card_type == NV_50 && new_mem->mem_type == TTM_PL_VRAM &&
-	    !nvbo->no_vm) {
-		uint64_t offset = new_mem->mm_node->start << PAGE_SHIFT;
+	if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
+		/* Nothing to do. */
+		*new_tile = NULL;
+		return 0;
+	}
+
+	offset = new_mem->mm_node->start << PAGE_SHIFT;
 
 
+	if (dev_priv->card_type == NV_50) {
 		ret = nv50_mem_vm_bind_linear(dev,
 		ret = nv50_mem_vm_bind_linear(dev,
 					      offset + dev_priv->vm_vram_base,
 					      offset + dev_priv->vm_vram_base,
 					      new_mem->size, nvbo->tile_flags,
 					      new_mem->size, nvbo->tile_flags,
 					      offset);
 					      offset);
 		if (ret)
 		if (ret)
 			return ret;
 			return ret;
+
+	} else if (dev_priv->card_type >= NV_10) {
+		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
+						nvbo->tile_mode);
 	}
 	}
 
 
+	return 0;
+}
+
+static void
+nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
+		      struct nouveau_tile_reg *new_tile,
+		      struct nouveau_tile_reg **old_tile)
+{
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+	struct drm_device *dev = dev_priv->dev;
+
+	if (dev_priv->card_type >= NV_10 &&
+	    dev_priv->card_type < NV_50) {
+		if (*old_tile)
+			nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);
+
+		*old_tile = new_tile;
+	}
+}
+
+static int
+nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
+		bool no_wait, struct ttm_mem_reg *new_mem)
+{
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	struct ttm_mem_reg *old_mem = &bo->mem;
+	struct nouveau_tile_reg *new_tile = NULL;
+	int ret = 0;
+
+	ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
+	if (ret)
+		return ret;
+
+	/* Software copy if the card isn't up and running yet. */
 	if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
 	if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
-	    !dev_priv->channel)
-		return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+	    !dev_priv->channel) {
+		ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+		goto out;
+	}
 
 
+	/* Fake bo copy. */
 	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
 	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
 		BUG_ON(bo->mem.mm_node != NULL);
 		BUG_ON(bo->mem.mm_node != NULL);
 		bo->mem = *new_mem;
 		bo->mem = *new_mem;
 		new_mem->mm_node = NULL;
 		new_mem->mm_node = NULL;
-		return 0;
+		goto out;
 	}
 	}
 
 
-	if (new_mem->mem_type == TTM_PL_SYSTEM) {
-		if (old_mem->mem_type == TTM_PL_SYSTEM)
-			return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
-		if (nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem))
-			return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
-	} else if (old_mem->mem_type == TTM_PL_SYSTEM) {
-		if (nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem))
-			return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
-	} else {
-		if (nouveau_bo_move_m2mf(bo, evict, no_wait, old_mem, new_mem))
-			return ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
-	}
+	/* Hardware assisted copy. */
+	if (new_mem->mem_type == TTM_PL_SYSTEM)
+		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
+	else if (old_mem->mem_type == TTM_PL_SYSTEM)
+		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
+	else
+		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
 
 
-	return 0;
+	if (!ret)
+		goto out;
+
+	/* Fallback to software copy. */
+	ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
+
+out:
+	if (ret)
+		nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
+	else
+		nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
+
+	return ret;
 }
 }
 
 
 static int
 static int

+ 5 - 42
drivers/gpu/drm/nouveau/nouveau_channel.c

@@ -158,6 +158,8 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 		return ret;
 		return ret;
 	}
 	}
 
 
+	nouveau_dma_pre_init(chan);
+
 	/* Locate channel's user control regs */
 	/* Locate channel's user control regs */
 	if (dev_priv->card_type < NV_40)
 	if (dev_priv->card_type < NV_40)
 		user = NV03_USER(channel);
 		user = NV03_USER(channel);
@@ -235,47 +237,6 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,
 	return 0;
 	return 0;
 }
 }
 
 
-int
-nouveau_channel_idle(struct nouveau_channel *chan)
-{
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_engine *engine = &dev_priv->engine;
-	uint32_t caches;
-	int idle;
-
-	if (!chan) {
-		NV_ERROR(dev, "no channel...\n");
-		return 1;
-	}
-
-	caches = nv_rd32(dev, NV03_PFIFO_CACHES);
-	nv_wr32(dev, NV03_PFIFO_CACHES, caches & ~1);
-
-	if (engine->fifo.channel_id(dev) != chan->id) {
-		struct nouveau_gpuobj *ramfc =
-			chan->ramfc ? chan->ramfc->gpuobj : NULL;
-
-		if (!ramfc) {
-			NV_ERROR(dev, "No RAMFC for channel %d\n", chan->id);
-			return 1;
-		}
-
-		engine->instmem.prepare_access(dev, false);
-		if (nv_ro32(dev, ramfc, 0) != nv_ro32(dev, ramfc, 1))
-			idle = 0;
-		else
-			idle = 1;
-		engine->instmem.finish_access(dev);
-	} else {
-		idle = (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_GET) ==
-			nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT));
-	}
-
-	nv_wr32(dev, NV03_PFIFO_CACHES, caches);
-	return idle;
-}
-
 /* stops a fifo */
 /* stops a fifo */
 void
 void
 nouveau_channel_free(struct nouveau_channel *chan)
 nouveau_channel_free(struct nouveau_channel *chan)
@@ -414,7 +375,9 @@ nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data,
 		init->subchan[0].grclass = 0x0039;
 		init->subchan[0].grclass = 0x0039;
 	else
 	else
 		init->subchan[0].grclass = 0x5039;
 		init->subchan[0].grclass = 0x5039;
-	init->nr_subchan = 1;
+	init->subchan[1].handle = NvSw;
+	init->subchan[1].grclass = NV_SW;
+	init->nr_subchan = 2;
 
 
 	/* Named memory object area */
 	/* Named memory object area */
 	ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,
 	ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem,

+ 27 - 7
drivers/gpu/drm/nouveau/nouveau_dma.c

@@ -29,12 +29,22 @@
 #include "nouveau_drv.h"
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
 #include "nouveau_dma.h"
 
 
+void
+nouveau_dma_pre_init(struct nouveau_channel *chan)
+{
+	chan->dma.max  = (chan->pushbuf_bo->bo.mem.size >> 2) - 2;
+	chan->dma.put  = 0;
+	chan->dma.cur  = chan->dma.put;
+	chan->dma.free = chan->dma.max - chan->dma.cur;
+}
+
 int
 int
 nouveau_dma_init(struct nouveau_channel *chan)
 nouveau_dma_init(struct nouveau_channel *chan)
 {
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *m2mf = NULL;
 	struct nouveau_gpuobj *m2mf = NULL;
+	struct nouveau_gpuobj *nvsw = NULL;
 	int ret, i;
 	int ret, i;
 
 
 	/* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
 	/* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
@@ -47,6 +57,15 @@ nouveau_dma_init(struct nouveau_channel *chan)
 	if (ret)
 	if (ret)
 		return ret;
 		return ret;
 
 
+	/* Create an NV_SW object for various sync purposes */
+	ret = nouveau_gpuobj_sw_new(chan, NV_SW, &nvsw);
+	if (ret)
+		return ret;
+
+	ret = nouveau_gpuobj_ref_add(dev, chan, NvSw, nvsw, NULL);
+	if (ret)
+		return ret;
+
 	/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
 	/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
 	ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy);
 	ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy);
 	if (ret)
 	if (ret)
@@ -64,12 +83,6 @@ nouveau_dma_init(struct nouveau_channel *chan)
 			return ret;
 			return ret;
 	}
 	}
 
 
-	/* Initialise DMA vars */
-	chan->dma.max  = (chan->pushbuf_bo->bo.mem.size >> 2) - 2;
-	chan->dma.put  = 0;
-	chan->dma.cur  = chan->dma.put;
-	chan->dma.free = chan->dma.max - chan->dma.cur;
-
 	/* Insert NOPS for NOUVEAU_DMA_SKIPS */
 	/* Insert NOPS for NOUVEAU_DMA_SKIPS */
 	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
 	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
 	if (ret)
 	if (ret)
@@ -87,6 +100,13 @@ nouveau_dma_init(struct nouveau_channel *chan)
 	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
 	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
 	OUT_RING(chan, NvNotify0);
 	OUT_RING(chan, NvNotify0);
 
 
+	/* Initialise NV_SW */
+	ret = RING_SPACE(chan, 2);
+	if (ret)
+		return ret;
+	BEGIN_RING(chan, NvSubSw, 0, 1);
+	OUT_RING(chan, NvSw);
+
 	/* Sit back and pray the channel works.. */
 	/* Sit back and pray the channel works.. */
 	FIRE_RING(chan);
 	FIRE_RING(chan);
 
 
@@ -113,7 +133,7 @@ READ_GET(struct nouveau_channel *chan, uint32_t *get)
 
 
 	val = nvchan_rd32(chan, chan->user_get);
 	val = nvchan_rd32(chan, chan->user_get);
 	if (val < chan->pushbuf_base ||
 	if (val < chan->pushbuf_base ||
-	    val >= chan->pushbuf_base + chan->pushbuf_bo->bo.mem.size) {
+	    val > chan->pushbuf_base + (chan->dma.max << 2)) {
 		/* meaningless to dma_wait() except to know whether the
 		/* meaningless to dma_wait() except to know whether the
 		 * GPU has stalled or not
 		 * GPU has stalled or not
 		 */
 		 */

+ 6 - 4
drivers/gpu/drm/nouveau/nouveau_dma.h

@@ -46,10 +46,11 @@
 /* Hardcoded object assignments to subchannels (subchannel id). */
 /* Hardcoded object assignments to subchannels (subchannel id). */
 enum {
 enum {
 	NvSubM2MF	= 0,
 	NvSubM2MF	= 0,
-	NvSub2D		= 1,
-	NvSubCtxSurf2D  = 1,
-	NvSubGdiRect    = 2,
-	NvSubImageBlit  = 3
+	NvSubSw		= 1,
+	NvSub2D		= 2,
+	NvSubCtxSurf2D  = 2,
+	NvSubGdiRect    = 3,
+	NvSubImageBlit  = 4
 };
 };
 
 
 /* Object handles. */
 /* Object handles. */
@@ -67,6 +68,7 @@ enum {
 	NvClipRect	= 0x8000000b,
 	NvClipRect	= 0x8000000b,
 	NvGdiRect	= 0x8000000c,
 	NvGdiRect	= 0x8000000c,
 	NvImageBlit	= 0x8000000d,
 	NvImageBlit	= 0x8000000d,
+	NvSw		= 0x8000000e,
 
 
 	/* G80+ display objects */
 	/* G80+ display objects */
 	NvEvoVRAM	= 0x01000000,
 	NvEvoVRAM	= 0x01000000,

+ 57 - 15
drivers/gpu/drm/nouveau/nouveau_drv.h

@@ -59,11 +59,19 @@ struct nouveau_grctx;
 #define MAX_NUM_DCB_ENTRIES 16
 #define MAX_NUM_DCB_ENTRIES 16
 
 
 #define NOUVEAU_MAX_CHANNEL_NR 128
 #define NOUVEAU_MAX_CHANNEL_NR 128
+#define NOUVEAU_MAX_TILE_NR 15
 
 
 #define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL)
 #define NV50_VM_MAX_VRAM (2*1024*1024*1024ULL)
 #define NV50_VM_BLOCK    (512*1024*1024ULL)
 #define NV50_VM_BLOCK    (512*1024*1024ULL)
 #define NV50_VM_VRAM_NR  (NV50_VM_MAX_VRAM / NV50_VM_BLOCK)
 #define NV50_VM_VRAM_NR  (NV50_VM_MAX_VRAM / NV50_VM_BLOCK)
 
 
+struct nouveau_tile_reg {
+	struct nouveau_fence *fence;
+	uint32_t addr;
+	uint32_t size;
+	bool used;
+};
+
 struct nouveau_bo {
 struct nouveau_bo {
 	struct ttm_buffer_object bo;
 	struct ttm_buffer_object bo;
 	struct ttm_placement placement;
 	struct ttm_placement placement;
@@ -83,6 +91,7 @@ struct nouveau_bo {
 
 
 	uint32_t tile_mode;
 	uint32_t tile_mode;
 	uint32_t tile_flags;
 	uint32_t tile_flags;
+	struct nouveau_tile_reg *tile;
 
 
 	struct drm_gem_object *gem;
 	struct drm_gem_object *gem;
 	struct drm_file *cpu_filp;
 	struct drm_file *cpu_filp;
@@ -277,8 +286,13 @@ struct nouveau_timer_engine {
 };
 };
 
 
 struct nouveau_fb_engine {
 struct nouveau_fb_engine {
+	int num_tiles;
+
 	int  (*init)(struct drm_device *dev);
 	int  (*init)(struct drm_device *dev);
 	void (*takedown)(struct drm_device *dev);
 	void (*takedown)(struct drm_device *dev);
+
+	void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
+				 uint32_t size, uint32_t pitch);
 };
 };
 
 
 struct nouveau_fifo_engine {
 struct nouveau_fifo_engine {
@@ -292,6 +306,8 @@ struct nouveau_fifo_engine {
 	void (*disable)(struct drm_device *);
 	void (*disable)(struct drm_device *);
 	void (*enable)(struct drm_device *);
 	void (*enable)(struct drm_device *);
 	bool (*reassign)(struct drm_device *, bool enable);
 	bool (*reassign)(struct drm_device *, bool enable);
+	bool (*cache_flush)(struct drm_device *dev);
+	bool (*cache_pull)(struct drm_device *dev, bool enable);
 
 
 	int  (*channel_id)(struct drm_device *);
 	int  (*channel_id)(struct drm_device *);
 
 
@@ -330,6 +346,9 @@ struct nouveau_pgraph_engine {
 	void (*destroy_context)(struct nouveau_channel *);
 	void (*destroy_context)(struct nouveau_channel *);
 	int  (*load_context)(struct nouveau_channel *);
 	int  (*load_context)(struct nouveau_channel *);
 	int  (*unload_context)(struct drm_device *);
 	int  (*unload_context)(struct drm_device *);
+
+	void (*set_region_tiling)(struct drm_device *dev, int i, uint32_t addr,
+				  uint32_t size, uint32_t pitch);
 };
 };
 
 
 struct nouveau_engine {
 struct nouveau_engine {
@@ -548,6 +567,12 @@ struct drm_nouveau_private {
 		unsigned long sg_handle;
 		unsigned long sg_handle;
 	} gart_info;
 	} gart_info;
 
 
+	/* nv10-nv40 tiling regions */
+	struct {
+		struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR];
+		spinlock_t lock;
+	} tile;
+
 	/* G8x/G9x virtual address space */
 	/* G8x/G9x virtual address space */
 	uint64_t vm_gart_base;
 	uint64_t vm_gart_base;
 	uint64_t vm_gart_size;
 	uint64_t vm_gart_size;
@@ -685,6 +710,13 @@ extern void nouveau_mem_release(struct drm_file *, struct mem_block *heap);
 extern int  nouveau_mem_init(struct drm_device *);
 extern int  nouveau_mem_init(struct drm_device *);
 extern int  nouveau_mem_init_agp(struct drm_device *);
 extern int  nouveau_mem_init_agp(struct drm_device *);
 extern void nouveau_mem_close(struct drm_device *);
 extern void nouveau_mem_close(struct drm_device *);
+extern struct nouveau_tile_reg *nv10_mem_set_tiling(struct drm_device *dev,
+						    uint32_t addr,
+						    uint32_t size,
+						    uint32_t pitch);
+extern void nv10_mem_expire_tiling(struct drm_device *dev,
+				   struct nouveau_tile_reg *tile,
+				   struct nouveau_fence *fence);
 extern int  nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt,
 extern int  nv50_mem_vm_bind_linear(struct drm_device *, uint64_t virt,
 				    uint32_t size, uint32_t flags,
 				    uint32_t size, uint32_t flags,
 				    uint64_t phys);
 				    uint64_t phys);
@@ -713,7 +745,6 @@ extern int  nouveau_channel_alloc(struct drm_device *dev,
 				  struct drm_file *file_priv,
 				  struct drm_file *file_priv,
 				  uint32_t fb_ctxdma, uint32_t tt_ctxdma);
 				  uint32_t fb_ctxdma, uint32_t tt_ctxdma);
 extern void nouveau_channel_free(struct nouveau_channel *);
 extern void nouveau_channel_free(struct nouveau_channel *);
-extern int  nouveau_channel_idle(struct nouveau_channel *chan);
 
 
 /* nouveau_object.c */
 /* nouveau_object.c */
 extern int  nouveau_gpuobj_early_init(struct drm_device *);
 extern int  nouveau_gpuobj_early_init(struct drm_device *);
@@ -756,6 +787,8 @@ extern int nouveau_gpuobj_gart_dma_new(struct nouveau_channel *,
 				       uint32_t *o_ret);
 				       uint32_t *o_ret);
 extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class,
 extern int nouveau_gpuobj_gr_new(struct nouveau_channel *, int class,
 				 struct nouveau_gpuobj **);
 				 struct nouveau_gpuobj **);
+extern int nouveau_gpuobj_sw_new(struct nouveau_channel *, int class,
+				 struct nouveau_gpuobj **);
 extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
 extern int nouveau_ioctl_grobj_alloc(struct drm_device *, void *data,
 				     struct drm_file *);
 				     struct drm_file *);
 extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
 extern int nouveau_ioctl_gpuobj_free(struct drm_device *, void *data,
@@ -804,6 +837,7 @@ nouveau_debugfs_channel_fini(struct nouveau_channel *chan)
 #endif
 #endif
 
 
 /* nouveau_dma.c */
 /* nouveau_dma.c */
+extern void nouveau_dma_pre_init(struct nouveau_channel *);
 extern int  nouveau_dma_init(struct nouveau_channel *);
 extern int  nouveau_dma_init(struct nouveau_channel *);
 extern int  nouveau_dma_wait(struct nouveau_channel *, int size);
 extern int  nouveau_dma_wait(struct nouveau_channel *, int size);
 
 
@@ -879,16 +913,22 @@ extern void nv04_fb_takedown(struct drm_device *);
 /* nv10_fb.c */
 /* nv10_fb.c */
 extern int  nv10_fb_init(struct drm_device *);
 extern int  nv10_fb_init(struct drm_device *);
 extern void nv10_fb_takedown(struct drm_device *);
 extern void nv10_fb_takedown(struct drm_device *);
+extern void nv10_fb_set_region_tiling(struct drm_device *, int, uint32_t,
+				      uint32_t, uint32_t);
 
 
 /* nv40_fb.c */
 /* nv40_fb.c */
 extern int  nv40_fb_init(struct drm_device *);
 extern int  nv40_fb_init(struct drm_device *);
 extern void nv40_fb_takedown(struct drm_device *);
 extern void nv40_fb_takedown(struct drm_device *);
+extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
+				      uint32_t, uint32_t);
 
 
 /* nv04_fifo.c */
 /* nv04_fifo.c */
 extern int  nv04_fifo_init(struct drm_device *);
 extern int  nv04_fifo_init(struct drm_device *);
 extern void nv04_fifo_disable(struct drm_device *);
 extern void nv04_fifo_disable(struct drm_device *);
 extern void nv04_fifo_enable(struct drm_device *);
 extern void nv04_fifo_enable(struct drm_device *);
 extern bool nv04_fifo_reassign(struct drm_device *, bool);
 extern bool nv04_fifo_reassign(struct drm_device *, bool);
+extern bool nv04_fifo_cache_flush(struct drm_device *);
+extern bool nv04_fifo_cache_pull(struct drm_device *, bool);
 extern int  nv04_fifo_channel_id(struct drm_device *);
 extern int  nv04_fifo_channel_id(struct drm_device *);
 extern int  nv04_fifo_create_context(struct nouveau_channel *);
 extern int  nv04_fifo_create_context(struct nouveau_channel *);
 extern void nv04_fifo_destroy_context(struct nouveau_channel *);
 extern void nv04_fifo_destroy_context(struct nouveau_channel *);
@@ -941,6 +981,8 @@ extern void nv10_graph_destroy_context(struct nouveau_channel *);
 extern int  nv10_graph_load_context(struct nouveau_channel *);
 extern int  nv10_graph_load_context(struct nouveau_channel *);
 extern int  nv10_graph_unload_context(struct drm_device *);
 extern int  nv10_graph_unload_context(struct drm_device *);
 extern void nv10_graph_context_switch(struct drm_device *);
 extern void nv10_graph_context_switch(struct drm_device *);
+extern void nv10_graph_set_region_tiling(struct drm_device *, int, uint32_t,
+					 uint32_t, uint32_t);
 
 
 /* nv20_graph.c */
 /* nv20_graph.c */
 extern struct nouveau_pgraph_object_class nv20_graph_grclass[];
 extern struct nouveau_pgraph_object_class nv20_graph_grclass[];
@@ -952,6 +994,8 @@ extern int  nv20_graph_unload_context(struct drm_device *);
 extern int  nv20_graph_init(struct drm_device *);
 extern int  nv20_graph_init(struct drm_device *);
 extern void nv20_graph_takedown(struct drm_device *);
 extern void nv20_graph_takedown(struct drm_device *);
 extern int  nv30_graph_init(struct drm_device *);
 extern int  nv30_graph_init(struct drm_device *);
+extern void nv20_graph_set_region_tiling(struct drm_device *, int, uint32_t,
+					 uint32_t, uint32_t);
 
 
 /* nv40_graph.c */
 /* nv40_graph.c */
 extern struct nouveau_pgraph_object_class nv40_graph_grclass[];
 extern struct nouveau_pgraph_object_class nv40_graph_grclass[];
@@ -963,6 +1007,8 @@ extern void nv40_graph_destroy_context(struct nouveau_channel *);
 extern int  nv40_graph_load_context(struct nouveau_channel *);
 extern int  nv40_graph_load_context(struct nouveau_channel *);
 extern int  nv40_graph_unload_context(struct drm_device *);
 extern int  nv40_graph_unload_context(struct drm_device *);
 extern void nv40_grctx_init(struct nouveau_grctx *);
 extern void nv40_grctx_init(struct nouveau_grctx *);
+extern void nv40_graph_set_region_tiling(struct drm_device *, int, uint32_t,
+					 uint32_t, uint32_t);
 
 
 /* nv50_graph.c */
 /* nv50_graph.c */
 extern struct nouveau_pgraph_object_class nv50_graph_grclass[];
 extern struct nouveau_pgraph_object_class nv50_graph_grclass[];
@@ -1030,8 +1076,7 @@ extern long nouveau_compat_ioctl(struct file *file, unsigned int cmd,
 
 
 /* nv04_dac.c */
 /* nv04_dac.c */
 extern int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry);
 extern int nv04_dac_create(struct drm_device *dev, struct dcb_entry *entry);
-extern enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder,
-						 struct drm_connector *connector);
+extern uint32_t nv17_dac_sample_load(struct drm_encoder *encoder);
 extern int nv04_dac_output_offset(struct drm_encoder *encoder);
 extern int nv04_dac_output_offset(struct drm_encoder *encoder);
 extern void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable);
 extern void nv04_dac_update_dacclk(struct drm_encoder *encoder, bool enable);
 
 
@@ -1049,9 +1094,6 @@ extern int nv04_tv_create(struct drm_device *dev, struct dcb_entry *entry);
 
 
 /* nv17_tv.c */
 /* nv17_tv.c */
 extern int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry);
 extern int nv17_tv_create(struct drm_device *dev, struct dcb_entry *entry);
-extern enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
-						struct drm_connector *connector,
-						uint32_t pin_mask);
 
 
 /* nv04_display.c */
 /* nv04_display.c */
 extern int nv04_display_create(struct drm_device *);
 extern int nv04_display_create(struct drm_device *);
@@ -1290,14 +1332,14 @@ nv_two_reg_pll(struct drm_device *dev)
 	return false;
 	return false;
 }
 }
 
 
-#define NV50_NVSW                                                    0x0000506e
-#define NV50_NVSW_DMA_SEMAPHORE                                      0x00000060
-#define NV50_NVSW_SEMAPHORE_OFFSET                                   0x00000064
-#define NV50_NVSW_SEMAPHORE_ACQUIRE                                  0x00000068
-#define NV50_NVSW_SEMAPHORE_RELEASE                                  0x0000006c
-#define NV50_NVSW_DMA_VBLSEM                                         0x0000018c
-#define NV50_NVSW_VBLSEM_OFFSET                                      0x00000400
-#define NV50_NVSW_VBLSEM_RELEASE_VALUE                               0x00000404
-#define NV50_NVSW_VBLSEM_RELEASE                                     0x00000408
+#define NV_SW                                                        0x0000506e
+#define NV_SW_DMA_SEMAPHORE                                          0x00000060
+#define NV_SW_SEMAPHORE_OFFSET                                       0x00000064
+#define NV_SW_SEMAPHORE_ACQUIRE                                      0x00000068
+#define NV_SW_SEMAPHORE_RELEASE                                      0x0000006c
+#define NV_SW_DMA_VBLSEM                                             0x0000018c
+#define NV_SW_VBLSEM_OFFSET                                          0x00000400
+#define NV_SW_VBLSEM_RELEASE_VALUE                                   0x00000404
+#define NV_SW_VBLSEM_RELEASE                                         0x00000408
 
 
 #endif /* __NOUVEAU_DRV_H__ */
 #endif /* __NOUVEAU_DRV_H__ */

+ 13 - 6
drivers/gpu/drm/nouveau/nouveau_fbcon.c

@@ -64,8 +64,7 @@ nouveau_fbcon_sync(struct fb_info *info)
 		return 0;
 		return 0;
 
 
 	if (RING_SPACE(chan, 4)) {
 	if (RING_SPACE(chan, 4)) {
-		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
-		info->flags |= FBINFO_HWACCEL_DISABLED;
+		nouveau_fbcon_gpu_lockup(info);
 		return 0;
 		return 0;
 	}
 	}
 
 
@@ -86,8 +85,7 @@ nouveau_fbcon_sync(struct fb_info *info)
 	}
 	}
 
 
 	if (ret) {
 	if (ret) {
-		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
-		info->flags |= FBINFO_HWACCEL_DISABLED;
+		nouveau_fbcon_gpu_lockup(info);
 		return 0;
 		return 0;
 	}
 	}
 
 
@@ -212,11 +210,11 @@ nouveau_fbcon_create(struct drm_device *dev, uint32_t fb_width,
 
 
 	mode_cmd.bpp = surface_bpp;
 	mode_cmd.bpp = surface_bpp;
 	mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
 	mode_cmd.pitch = mode_cmd.width * (mode_cmd.bpp >> 3);
-	mode_cmd.pitch = ALIGN(mode_cmd.pitch, 256);
+	mode_cmd.pitch = roundup(mode_cmd.pitch, 256);
 	mode_cmd.depth = surface_depth;
 	mode_cmd.depth = surface_depth;
 
 
 	size = mode_cmd.pitch * mode_cmd.height;
 	size = mode_cmd.pitch * mode_cmd.height;
-	size = ALIGN(size, PAGE_SIZE);
+	size = roundup(size, PAGE_SIZE);
 
 
 	ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM,
 	ret = nouveau_gem_new(dev, dev_priv->channel, size, 0, TTM_PL_FLAG_VRAM,
 			      0, 0x0000, false, true, &nvbo);
 			      0, 0x0000, false, true, &nvbo);
@@ -380,3 +378,12 @@ nouveau_fbcon_remove(struct drm_device *dev, struct drm_framebuffer *fb)
 
 
 	return 0;
 	return 0;
 }
 }
+
+void nouveau_fbcon_gpu_lockup(struct fb_info *info)
+{
+	struct nouveau_fbcon_par *par = info->par;
+	struct drm_device *dev = par->dev;
+
+	NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+	info->flags |= FBINFO_HWACCEL_DISABLED;
+}

+ 1 - 0
drivers/gpu/drm/nouveau/nouveau_fbcon.h

@@ -43,5 +43,6 @@ void nouveau_fbcon_zfill(struct drm_device *dev);
 int nv04_fbcon_accel_init(struct fb_info *info);
 int nv50_fbcon_accel_init(struct fb_info *info);
 
+void nouveau_fbcon_gpu_lockup(struct fb_info *info);
 #endif /* __NV50_FBCON_H__ */
 

+ 1 - 1
drivers/gpu/drm/nouveau/nouveau_fence.c

@@ -142,7 +142,7 @@ nouveau_fence_emit(struct nouveau_fence *fence)
 	list_add_tail(&fence->entry, &chan->fence.pending);
 	spin_unlock_irqrestore(&chan->fence.lock, flags);
 
-	BEGIN_RING(chan, NvSubM2MF, USE_REFCNT ? 0x0050 : 0x0150, 1);
+	BEGIN_RING(chan, NvSubSw, USE_REFCNT ? 0x0050 : 0x0150, 1);
 	OUT_RING(chan, fence->sequence);
 	FIRE_RING(chan);
 

+ 13 - 20
drivers/gpu/drm/nouveau/nouveau_gem.c

@@ -220,7 +220,6 @@ nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
 }
 }
 
 
 struct validate_op {
 struct validate_op {
-	struct nouveau_fence *fence;
 	struct list_head vram_list;
 	struct list_head vram_list;
 	struct list_head gart_list;
 	struct list_head gart_list;
 	struct list_head both_list;
 	struct list_head both_list;
@@ -252,17 +251,11 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
 }
 }
 
 
 static void
 static void
-validate_fini(struct validate_op *op, bool success)
+validate_fini(struct validate_op *op, struct nouveau_fence* fence)
 {
 {
-	struct nouveau_fence *fence = op->fence;
-
-	if (unlikely(!success))
-		op->fence = NULL;
-
-	validate_fini_list(&op->vram_list, op->fence);
-	validate_fini_list(&op->gart_list, op->fence);
-	validate_fini_list(&op->both_list, op->fence);
-	nouveau_fence_unref((void *)&fence);
+	validate_fini_list(&op->vram_list, fence);
+	validate_fini_list(&op->gart_list, fence);
+	validate_fini_list(&op->both_list, fence);
 }
 }
 
 
 static int
 static int
@@ -420,10 +413,6 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 	INIT_LIST_HEAD(&op->gart_list);
 	INIT_LIST_HEAD(&op->gart_list);
 	INIT_LIST_HEAD(&op->both_list);
 	INIT_LIST_HEAD(&op->both_list);
 
 
-	ret = nouveau_fence_new(chan, &op->fence, false);
-	if (ret)
-		return ret;
-
 	if (nr_buffers == 0)
 	if (nr_buffers == 0)
 		return 0;
 		return 0;
 
 
@@ -541,6 +530,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 	struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
 	struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
 	struct nouveau_channel *chan;
 	struct nouveau_channel *chan;
 	struct validate_op op;
 	struct validate_op op;
+	struct nouveau_fence* fence = 0;
 	uint32_t *pushbuf = NULL;
 	uint32_t *pushbuf = NULL;
 	int ret = 0, do_reloc = 0, i;
 	int ret = 0, do_reloc = 0, i;
 
 
@@ -597,7 +587,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 
 
 	OUT_RINGp(chan, pushbuf, req->nr_dwords);
 	OUT_RINGp(chan, pushbuf, req->nr_dwords);
 
 
-	ret = nouveau_fence_emit(op.fence);
+	ret = nouveau_fence_new(chan, &fence, true);
 	if (ret) {
 	if (ret) {
 		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
 		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
 		WIND_RING(chan);
 		WIND_RING(chan);
@@ -605,7 +595,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 	}
 	}
 
 
 	if (nouveau_gem_pushbuf_sync(chan)) {
 	if (nouveau_gem_pushbuf_sync(chan)) {
-		ret = nouveau_fence_wait(op.fence, NULL, false, false);
+		ret = nouveau_fence_wait(fence, NULL, false, false);
 		if (ret) {
 		if (ret) {
 			for (i = 0; i < req->nr_dwords; i++)
 			for (i = 0; i < req->nr_dwords; i++)
 				NV_ERROR(dev, "0x%08x\n", pushbuf[i]);
 				NV_ERROR(dev, "0x%08x\n", pushbuf[i]);
@@ -614,7 +604,8 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 	}
 	}
 
 
 out:
 out:
-	validate_fini(&op, ret == 0);
+	validate_fini(&op, fence);
+	nouveau_fence_unref((void**)&fence);
 	mutex_unlock(&dev->struct_mutex);
 	mutex_unlock(&dev->struct_mutex);
 	kfree(pushbuf);
 	kfree(pushbuf);
 	kfree(bo);
 	kfree(bo);
@@ -634,6 +625,7 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
 	struct drm_gem_object *gem;
 	struct drm_gem_object *gem;
 	struct nouveau_bo *pbbo;
 	struct nouveau_bo *pbbo;
 	struct validate_op op;
 	struct validate_op op;
+	struct nouveau_fence* fence = 0;
 	int i, ret = 0, do_reloc = 0;
 	int i, ret = 0, do_reloc = 0;
 
 
 	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
 	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
@@ -772,7 +764,7 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
 			OUT_RING(chan, 0);
 			OUT_RING(chan, 0);
 	}
 	}
 
 
-	ret = nouveau_fence_emit(op.fence);
+	ret = nouveau_fence_new(chan, &fence, true);
 	if (ret) {
 	if (ret) {
 		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
 		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
 		WIND_RING(chan);
 		WIND_RING(chan);
@@ -780,7 +772,8 @@ nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
 	}
 	}
 
 
 out:
 out:
-	validate_fini(&op, ret == 0);
+	validate_fini(&op, fence);
+	nouveau_fence_unref((void**)&fence);
 	mutex_unlock(&dev->struct_mutex);
 	mutex_unlock(&dev->struct_mutex);
 	kfree(bo);
 	kfree(bo);
 
 

+ 1 - 0
drivers/gpu/drm/nouveau/nouveau_irq.c

@@ -635,6 +635,7 @@ nv50_pgraph_irq_handler(struct drm_device *dev)
 
 		if ((nv_rd32(dev, 0x400500) & isb) != isb)
 			nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | isb);
+		nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
 	}
 
 	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);

+ 87 - 0
drivers/gpu/drm/nouveau/nouveau_mem.c

@@ -191,6 +191,92 @@ void nouveau_mem_release(struct drm_file *file_priv, struct mem_block *heap)
 	}
 	}
 }
 }
 
 
+/*
+ * NV10-NV40 tiling helpers
+ */
+
+static void
+nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
+			   uint32_t size, uint32_t pitch)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
+	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
+	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+
+	tile->addr = addr;
+	tile->size = size;
+	tile->used = !!pitch;
+	nouveau_fence_unref((void **)&tile->fence);
+
+	if (!pfifo->cache_flush(dev))
+		return;
+
+	pfifo->reassign(dev, false);
+	pfifo->cache_flush(dev);
+	pfifo->cache_pull(dev, false);
+
+	nouveau_wait_for_idle(dev);
+
+	pgraph->set_region_tiling(dev, i, addr, size, pitch);
+	pfb->set_region_tiling(dev, i, addr, size, pitch);
+
+	pfifo->cache_pull(dev, true);
+	pfifo->reassign(dev, true);
+}
+
+struct nouveau_tile_reg *
+nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
+		    uint32_t pitch)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+	struct nouveau_tile_reg *tile = dev_priv->tile.reg, *found = NULL;
+	int i;
+
+	spin_lock(&dev_priv->tile.lock);
+
+	for (i = 0; i < pfb->num_tiles; i++) {
+		if (tile[i].used)
+			/* Tile region in use. */
+			continue;
+
+		if (tile[i].fence &&
+		    !nouveau_fence_signalled(tile[i].fence, NULL))
+			/* Pending tile region. */
+			continue;
+
+		if (max(tile[i].addr, addr) <
+		    min(tile[i].addr + tile[i].size, addr + size))
+			/* Kill an intersecting tile region. */
+			nv10_mem_set_region_tiling(dev, i, 0, 0, 0);
+
+		if (pitch && !found) {
+			/* Free tile region. */
+			nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
+			found = &tile[i];
+		}
+	}
+
+	spin_unlock(&dev_priv->tile.lock);
+
+	return found;
+}
+
+void
+nv10_mem_expire_tiling(struct drm_device *dev, struct nouveau_tile_reg *tile,
+		       struct nouveau_fence *fence)
+{
+	if (fence) {
+		/* Mark it as pending. */
+		tile->fence = fence;
+		nouveau_fence_ref(fence);
+	}
+
+	tile->used = false;
+}
+
 /*
 /*
  * NV50 VM helpers
  * NV50 VM helpers
  */
  */
@@ -513,6 +599,7 @@ nouveau_mem_init(struct drm_device *dev)
 
 
 	INIT_LIST_HEAD(&dev_priv->ttm.bo_list);
 	INIT_LIST_HEAD(&dev_priv->ttm.bo_list);
 	spin_lock_init(&dev_priv->ttm.bo_list_lock);
 	spin_lock_init(&dev_priv->ttm.bo_list_lock);
+	spin_lock_init(&dev_priv->tile.lock);
 
 
 	dev_priv->fb_available_size = nouveau_mem_fb_amount(dev);
 	dev_priv->fb_available_size = nouveau_mem_fb_amount(dev);
 
 

+ 1 - 1
drivers/gpu/drm/nouveau/nouveau_object.c

@@ -881,7 +881,7 @@ nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
 	return 0;
 }
 
-static int
+int
 nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
 		      struct nouveau_gpuobj **gpuobj_ret)
 {

+ 8 - 8
drivers/gpu/drm/nouveau/nouveau_reg.h

@@ -349,19 +349,19 @@
 #define NV04_PGRAPH_BLEND                                  0x00400824
 #define NV04_PGRAPH_BLEND                                  0x00400824
 #define NV04_PGRAPH_STORED_FMT                             0x00400830
 #define NV04_PGRAPH_STORED_FMT                             0x00400830
 #define NV04_PGRAPH_PATT_COLORRAM                          0x00400900
 #define NV04_PGRAPH_PATT_COLORRAM                          0x00400900
-#define NV40_PGRAPH_TILE0(i)                               (0x00400900 + (i*16))
-#define NV40_PGRAPH_TLIMIT0(i)                             (0x00400904 + (i*16))
-#define NV40_PGRAPH_TSIZE0(i)                              (0x00400908 + (i*16))
-#define NV40_PGRAPH_TSTATUS0(i)                            (0x0040090C + (i*16))
+#define NV20_PGRAPH_TILE(i)                                (0x00400900 + (i*16))
+#define NV20_PGRAPH_TLIMIT(i)                              (0x00400904 + (i*16))
+#define NV20_PGRAPH_TSIZE(i)                               (0x00400908 + (i*16))
+#define NV20_PGRAPH_TSTATUS(i)                             (0x0040090C + (i*16))
 #define NV10_PGRAPH_TILE(i)                                (0x00400B00 + (i*16))
 #define NV10_PGRAPH_TILE(i)                                (0x00400B00 + (i*16))
 #define NV10_PGRAPH_TLIMIT(i)                              (0x00400B04 + (i*16))
 #define NV10_PGRAPH_TLIMIT(i)                              (0x00400B04 + (i*16))
 #define NV10_PGRAPH_TSIZE(i)                               (0x00400B08 + (i*16))
 #define NV10_PGRAPH_TSIZE(i)                               (0x00400B08 + (i*16))
 #define NV10_PGRAPH_TSTATUS(i)                             (0x00400B0C + (i*16))
 #define NV10_PGRAPH_TSTATUS(i)                             (0x00400B0C + (i*16))
 #define NV04_PGRAPH_U_RAM                                  0x00400D00
 #define NV04_PGRAPH_U_RAM                                  0x00400D00
-#define NV47_PGRAPH_TILE0(i)                               (0x00400D00 + (i*16))
-#define NV47_PGRAPH_TLIMIT0(i)                             (0x00400D04 + (i*16))
-#define NV47_PGRAPH_TSIZE0(i)                              (0x00400D08 + (i*16))
-#define NV47_PGRAPH_TSTATUS0(i)                            (0x00400D0C + (i*16))
+#define NV47_PGRAPH_TILE(i)                                (0x00400D00 + (i*16))
+#define NV47_PGRAPH_TLIMIT(i)                              (0x00400D04 + (i*16))
+#define NV47_PGRAPH_TSIZE(i)                               (0x00400D08 + (i*16))
+#define NV47_PGRAPH_TSTATUS(i)                             (0x00400D0C + (i*16))
 #define NV04_PGRAPH_V_RAM                                  0x00400D40
 #define NV04_PGRAPH_W_RAM                                  0x00400D80
 #define NV10_PGRAPH_COMBINER0_IN_ALPHA                     0x00400E40

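The renamed macros above all index 16-byte per-region register blocks. A quick user-space sketch (purely illustrative, not part of the patch) shows how NV20_PGRAPH_TILE(i) and friends expand for the first few regions:

#include <stdio.h>

#define NV20_PGRAPH_TILE(i)    (0x00400900 + (i*16))
#define NV20_PGRAPH_TLIMIT(i)  (0x00400904 + (i*16))
#define NV20_PGRAPH_TSIZE(i)   (0x00400908 + (i*16))
#define NV20_PGRAPH_TSTATUS(i) (0x0040090C + (i*16))

int main(void)
{
	/* Each tiling region occupies one 16-byte block of PGRAPH space. */
	for (int i = 0; i < 4; i++)
		printf("region %d: TILE=0x%08x TLIMIT=0x%08x TSIZE=0x%08x TSTATUS=0x%08x\n",
		       i, NV20_PGRAPH_TILE(i), NV20_PGRAPH_TLIMIT(i),
		       NV20_PGRAPH_TSIZE(i), NV20_PGRAPH_TSTATUS(i));
	return 0;
}
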
+ 24 - 3
drivers/gpu/drm/nouveau/nouveau_state.c

@@ -76,6 +76,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->fifo.disable		= nv04_fifo_disable;
 		engine->fifo.enable		= nv04_fifo_enable;
 		engine->fifo.reassign		= nv04_fifo_reassign;
+		engine->fifo.cache_flush	= nv04_fifo_cache_flush;
+		engine->fifo.cache_pull		= nv04_fifo_cache_pull;
 		engine->fifo.channel_id		= nv04_fifo_channel_id;
 		engine->fifo.create_context	= nv04_fifo_create_context;
 		engine->fifo.destroy_context	= nv04_fifo_destroy_context;
@@ -100,6 +102,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->timer.takedown		= nv04_timer_takedown;
 		engine->fb.init			= nv10_fb_init;
 		engine->fb.takedown		= nv10_fb_takedown;
+		engine->fb.set_region_tiling	= nv10_fb_set_region_tiling;
 		engine->graph.grclass		= nv10_graph_grclass;
 		engine->graph.init		= nv10_graph_init;
 		engine->graph.takedown		= nv10_graph_takedown;
@@ -109,12 +112,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->graph.fifo_access	= nv04_graph_fifo_access;
 		engine->graph.load_context	= nv10_graph_load_context;
 		engine->graph.unload_context	= nv10_graph_unload_context;
+		engine->graph.set_region_tiling	= nv10_graph_set_region_tiling;
 		engine->fifo.channels		= 32;
 		engine->fifo.init		= nv10_fifo_init;
 		engine->fifo.takedown		= nouveau_stub_takedown;
 		engine->fifo.disable		= nv04_fifo_disable;
 		engine->fifo.enable		= nv04_fifo_enable;
 		engine->fifo.reassign		= nv04_fifo_reassign;
+		engine->fifo.cache_flush	= nv04_fifo_cache_flush;
+		engine->fifo.cache_pull		= nv04_fifo_cache_pull;
 		engine->fifo.channel_id		= nv10_fifo_channel_id;
 		engine->fifo.create_context	= nv10_fifo_create_context;
 		engine->fifo.destroy_context	= nv10_fifo_destroy_context;
@@ -139,6 +145,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->timer.takedown		= nv04_timer_takedown;
 		engine->fb.init			= nv10_fb_init;
 		engine->fb.takedown		= nv10_fb_takedown;
+		engine->fb.set_region_tiling	= nv10_fb_set_region_tiling;
 		engine->graph.grclass		= nv20_graph_grclass;
 		engine->graph.init		= nv20_graph_init;
 		engine->graph.takedown		= nv20_graph_takedown;
@@ -148,12 +155,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->graph.fifo_access	= nv04_graph_fifo_access;
 		engine->graph.load_context	= nv20_graph_load_context;
 		engine->graph.unload_context	= nv20_graph_unload_context;
+		engine->graph.set_region_tiling	= nv20_graph_set_region_tiling;
 		engine->fifo.channels		= 32;
 		engine->fifo.init		= nv10_fifo_init;
 		engine->fifo.takedown		= nouveau_stub_takedown;
 		engine->fifo.disable		= nv04_fifo_disable;
 		engine->fifo.enable		= nv04_fifo_enable;
 		engine->fifo.reassign		= nv04_fifo_reassign;
+		engine->fifo.cache_flush	= nv04_fifo_cache_flush;
+		engine->fifo.cache_pull		= nv04_fifo_cache_pull;
 		engine->fifo.channel_id		= nv10_fifo_channel_id;
 		engine->fifo.create_context	= nv10_fifo_create_context;
 		engine->fifo.destroy_context	= nv10_fifo_destroy_context;
@@ -178,6 +188,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->timer.takedown		= nv04_timer_takedown;
 		engine->fb.init			= nv10_fb_init;
 		engine->fb.takedown		= nv10_fb_takedown;
+		engine->fb.set_region_tiling	= nv10_fb_set_region_tiling;
 		engine->graph.grclass		= nv30_graph_grclass;
 		engine->graph.init		= nv30_graph_init;
 		engine->graph.takedown		= nv20_graph_takedown;
@@ -187,12 +198,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->graph.destroy_context	= nv20_graph_destroy_context;
 		engine->graph.load_context	= nv20_graph_load_context;
 		engine->graph.unload_context	= nv20_graph_unload_context;
+		engine->graph.set_region_tiling	= nv20_graph_set_region_tiling;
 		engine->fifo.channels		= 32;
 		engine->fifo.init		= nv10_fifo_init;
 		engine->fifo.takedown		= nouveau_stub_takedown;
 		engine->fifo.disable		= nv04_fifo_disable;
 		engine->fifo.enable		= nv04_fifo_enable;
 		engine->fifo.reassign		= nv04_fifo_reassign;
+		engine->fifo.cache_flush	= nv04_fifo_cache_flush;
+		engine->fifo.cache_pull		= nv04_fifo_cache_pull;
 		engine->fifo.channel_id		= nv10_fifo_channel_id;
 		engine->fifo.create_context	= nv10_fifo_create_context;
 		engine->fifo.destroy_context	= nv10_fifo_destroy_context;
@@ -218,6 +232,7 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->timer.takedown		= nv04_timer_takedown;
 		engine->fb.init			= nv40_fb_init;
 		engine->fb.takedown		= nv40_fb_takedown;
+		engine->fb.set_region_tiling	= nv40_fb_set_region_tiling;
 		engine->graph.grclass		= nv40_graph_grclass;
 		engine->graph.init		= nv40_graph_init;
 		engine->graph.takedown		= nv40_graph_takedown;
@@ -227,12 +242,15 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
 		engine->graph.destroy_context	= nv40_graph_destroy_context;
 		engine->graph.load_context	= nv40_graph_load_context;
 		engine->graph.unload_context	= nv40_graph_unload_context;
+		engine->graph.set_region_tiling	= nv40_graph_set_region_tiling;
 		engine->fifo.channels		= 32;
 		engine->fifo.init		= nv40_fifo_init;
 		engine->fifo.takedown		= nouveau_stub_takedown;
 		engine->fifo.disable		= nv04_fifo_disable;
 		engine->fifo.enable		= nv04_fifo_enable;
 		engine->fifo.reassign		= nv04_fifo_reassign;
+		engine->fifo.cache_flush	= nv04_fifo_cache_flush;
+		engine->fifo.cache_pull		= nv04_fifo_cache_pull;
 		engine->fifo.channel_id		= nv10_fifo_channel_id;
 		engine->fifo.create_context	= nv40_fifo_create_context;
 		engine->fifo.destroy_context	= nv40_fifo_destroy_context;
@@ -624,7 +642,10 @@ int nouveau_load(struct drm_device *dev, unsigned long flags)
 		dev_priv->chipset = (reg0 & 0xff00000) >> 20;
 	/* NV04 or NV05 */
 	} else if ((reg0 & 0xff00fff0) == 0x20004000) {
-		dev_priv->chipset = 0x04;
+		if (reg0 & 0x00f00000)
+			dev_priv->chipset = 0x05;
+		else
+			dev_priv->chipset = 0x04;
 	} else
 		dev_priv->chipset = 0xff;

@@ -704,8 +725,8 @@ static void nouveau_close(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;

-	/* In the case of an error dev_priv may not be be allocated yet */
-	if (dev_priv && dev_priv->card_type)
+	/* In the case of an error dev_priv may not be allocated yet */
+	if (dev_priv)
 		nouveau_card_takedown(dev);
 }


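As a stand-alone illustration of the NV04/NV05 decision in the hunk above: on these cards PMC boot register value reg0 matches 0x20004000 after masking, and the 0x00f00000 nibble distinguishes NV05 from NV04. This user-space sketch is illustrative only and mirrors the logic of the patch:

#include <stdint.h>
#include <stdio.h>

/* Decode the chipset from the masked boot register, as in the hunk above. */
static int chipset_from_boot0(uint32_t reg0)
{
	if ((reg0 & 0xff00fff0) == 0x20004000)
		return (reg0 & 0x00f00000) ? 0x05 : 0x04;
	return 0xff; /* unknown */
}

int main(void)
{
	printf("0x%02x\n", chipset_from_boot0(0x20004000)); /* -> 0x04 */
	printf("0x%02x\n", chipset_from_boot0(0x20104000)); /* -> 0x05 */
	return 0;
}
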
+ 1 - 29
drivers/gpu/drm/nouveau/nouveau_ttm.c

@@ -28,45 +28,17 @@

 #include "nouveau_drv.h"

-static struct vm_operations_struct nouveau_ttm_vm_ops;
-static const struct vm_operations_struct *ttm_vm_ops;
-
-static int
-nouveau_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
-	struct ttm_buffer_object *bo = vma->vm_private_data;
-	int ret;
-
-	if (unlikely(bo == NULL))
-		return VM_FAULT_NOPAGE;
-
-	ret = ttm_vm_ops->fault(vma, vmf);
-	return ret;
-}
-
 int
 nouveau_ttm_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	struct drm_file *file_priv = filp->private_data;
 	struct drm_nouveau_private *dev_priv =
 		file_priv->minor->dev->dev_private;
-	int ret;

 	if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
 		return drm_mmap(filp, vma);

-	ret = ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev);
-	if (unlikely(ret != 0))
-		return ret;
-
-	if (unlikely(ttm_vm_ops == NULL)) {
-		ttm_vm_ops = vma->vm_ops;
-		nouveau_ttm_vm_ops = *ttm_vm_ops;
-		nouveau_ttm_vm_ops.fault = &nouveau_ttm_fault;
-	}
-
-	vma->vm_ops = &nouveau_ttm_vm_ops;
-	return 0;
+	return ttm_bo_mmap(filp, vma, &dev_priv->ttm.bdev);
 }

 static int

+ 19 - 16
drivers/gpu/drm/nouveau/nv04_dac.c

@@ -212,16 +212,15 @@ out:
 	return connector_status_disconnected;
 }

-enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder,
-					  struct drm_connector *connector)
+uint32_t nv17_dac_sample_load(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
-	uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
+	uint32_t sample, testval, regoffset = nv04_dac_output_offset(encoder);
 	uint32_t saved_powerctrl_2 = 0, saved_powerctrl_4 = 0, saved_routput,
 		saved_rtest_ctrl, saved_gpio0, saved_gpio1, temp, routput;
-	int head, present = 0;
+	int head;

 #define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
 	if (dcb->type == OUTPUT_TV) {
@@ -287,13 +286,7 @@ enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder,
 		      temp | NV_PRAMDAC_TEST_CONTROL_TP_INS_EN_ASSERTED);
 	msleep(5);

-	temp = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
-
-	if (dcb->type == OUTPUT_TV)
-		present = (nv17_tv_detect(encoder, connector, temp)
-			   == connector_status_connected);
-	else
-		present = temp & NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI;
+	sample = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);

 	temp = NVReadRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL);
 	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TEST_CONTROL,
@@ -310,15 +303,25 @@ enum drm_connector_status nv17_dac_detect(struct drm_encoder *encoder,
 	nv17_gpio_set(dev, DCB_GPIO_TVDAC1, saved_gpio1);
 	nv17_gpio_set(dev, DCB_GPIO_TVDAC0, saved_gpio0);

-	if (present) {
-		NV_INFO(dev, "Load detected on output %c\n", '@' + ffs(dcb->or));
+	return sample;
+}
+
+static enum drm_connector_status
+nv17_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb;
+	uint32_t sample = nv17_dac_sample_load(encoder);
+
+	if (sample & NV_PRAMDAC_TEST_CONTROL_SENSEB_ALLHI) {
+		NV_INFO(dev, "Load detected on output %c\n",
+			'@' + ffs(dcb->or));
 		return connector_status_connected;
+	} else {
+		return connector_status_disconnected;
 	}
-
-	return connector_status_disconnected;
 }

-
 static bool nv04_dac_mode_fixup(struct drm_encoder *encoder,
 				struct drm_display_mode *mode,
 				struct drm_display_mode *adjusted_mode)

+ 20 - 21
drivers/gpu/drm/nouveau/nv04_fbcon.c

@@ -39,8 +39,7 @@ nv04_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 		return;

 	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 4)) {
-		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
-		info->flags |= FBINFO_HWACCEL_DISABLED;
+		nouveau_fbcon_gpu_lockup(info);
 	}

 	if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -62,14 +61,12 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 	struct drm_device *dev = par->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = dev_priv->channel;
-	uint32_t color = ((uint32_t *) info->pseudo_palette)[rect->color];

 	if (info->state != FBINFO_STATE_RUNNING)
 		return;

 	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 7)) {
-		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
-		info->flags |= FBINFO_HWACCEL_DISABLED;
+		nouveau_fbcon_gpu_lockup(info);
 	}

 	if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -80,7 +77,11 @@ nv04_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 	BEGIN_RING(chan, NvSubGdiRect, 0x02fc, 1);
 	OUT_RING(chan, (rect->rop != ROP_COPY) ? 1 : 3);
 	BEGIN_RING(chan, NvSubGdiRect, 0x03fc, 1);
-	OUT_RING(chan, color);
+	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+	    info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+		OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
+	else
+		OUT_RING(chan, rect->color);
 	BEGIN_RING(chan, NvSubGdiRect, 0x0400, 2);
 	OUT_RING(chan, (rect->dx << 16) | rect->dy);
 	OUT_RING(chan, (rect->width << 16) | rect->height);
@@ -109,8 +110,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 	}

 	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 8)) {
-		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
-		info->flags |= FBINFO_HWACCEL_DISABLED;
+		nouveau_fbcon_gpu_lockup(info);
 	}

 	if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -144,8 +144,7 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 		int iter_len = dsize > 128 ? 128 : dsize;

 		if (RING_SPACE(chan, iter_len + 1)) {
-			NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
-			info->flags |= FBINFO_HWACCEL_DISABLED;
+			nouveau_fbcon_gpu_lockup(info);
 			cfb_imageblit(info, image);
 			return;
 		}
@@ -184,6 +183,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
 	struct drm_device *dev = par->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_channel *chan = dev_priv->channel;
+	const int sub = NvSubCtxSurf2D;
 	int surface_fmt, pattern_fmt, rect_fmt;
 	int ret;

@@ -242,30 +242,29 @@ nv04_fbcon_accel_init(struct fb_info *info)
 		return ret;

 	if (RING_SPACE(chan, 49)) {
-		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
-		info->flags |= FBINFO_HWACCEL_DISABLED;
+		nouveau_fbcon_gpu_lockup(info);
 		return 0;
 	}

-	BEGIN_RING(chan, 1, 0x0000, 1);
+	BEGIN_RING(chan, sub, 0x0000, 1);
 	OUT_RING(chan, NvCtxSurf2D);
-	BEGIN_RING(chan, 1, 0x0184, 2);
+	BEGIN_RING(chan, sub, 0x0184, 2);
 	OUT_RING(chan, NvDmaFB);
 	OUT_RING(chan, NvDmaFB);
-	BEGIN_RING(chan, 1, 0x0300, 4);
+	BEGIN_RING(chan, sub, 0x0300, 4);
 	OUT_RING(chan, surface_fmt);
 	OUT_RING(chan, info->fix.line_length | (info->fix.line_length << 16));
 	OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);
 	OUT_RING(chan, info->fix.smem_start - dev->mode_config.fb_base);

-	BEGIN_RING(chan, 1, 0x0000, 1);
+	BEGIN_RING(chan, sub, 0x0000, 1);
 	OUT_RING(chan, NvRop);
-	BEGIN_RING(chan, 1, 0x0300, 1);
+	BEGIN_RING(chan, sub, 0x0300, 1);
 	OUT_RING(chan, 0x55);

-	BEGIN_RING(chan, 1, 0x0000, 1);
+	BEGIN_RING(chan, sub, 0x0000, 1);
 	OUT_RING(chan, NvImagePatt);
-	BEGIN_RING(chan, 1, 0x0300, 8);
+	BEGIN_RING(chan, sub, 0x0300, 8);
 	OUT_RING(chan, pattern_fmt);
 #ifdef __BIG_ENDIAN
 	OUT_RING(chan, 2);
@@ -279,9 +278,9 @@ nv04_fbcon_accel_init(struct fb_info *info)
 	OUT_RING(chan, ~0);
 	OUT_RING(chan, ~0);

-	BEGIN_RING(chan, 1, 0x0000, 1);
+	BEGIN_RING(chan, sub, 0x0000, 1);
 	OUT_RING(chan, NvClipRect);
-	BEGIN_RING(chan, 1, 0x0300, 2);
+	BEGIN_RING(chan, sub, 0x0300, 2);
 	OUT_RING(chan, 0);
 	OUT_RING(chan, (info->var.yres_virtual << 16) | info->var.xres_virtual);


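The fillrect colour fix above picks the raw index for 8bpp pseudocolor visuals and the pseudo-palette entry for truecolor/directcolor. A small stand-alone sketch of that selection (the enum and helper names here are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Mirrors the colour selection added above: truecolor/directcolor visuals
 * need the index translated through pseudo_palette, 8bpp pseudocolor uses
 * the index itself. */
enum visual { VISUAL_TRUECOLOR, VISUAL_DIRECTCOLOR, VISUAL_PSEUDOCOLOR };

static uint32_t fbcon_fill_colour(enum visual v, const uint32_t *pseudo_palette,
				  uint32_t index)
{
	if (v == VISUAL_TRUECOLOR || v == VISUAL_DIRECTCOLOR)
		return pseudo_palette[index];
	return index;
}

int main(void)
{
	uint32_t palette[16] = { [7] = 0x00aaaaaa };

	printf("0x%08x\n", fbcon_fill_colour(VISUAL_TRUECOLOR, palette, 7));   /* 0x00aaaaaa */
	printf("0x%08x\n", fbcon_fill_colour(VISUAL_PSEUDOCOLOR, palette, 7)); /* 0x00000007 */
	return 0;
}
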
+ 34 - 0
drivers/gpu/drm/nouveau/nv04_fifo.c

@@ -71,6 +71,40 @@ nv04_fifo_reassign(struct drm_device *dev, bool enable)
 	return (reassign == 1);
 }

+bool
+nv04_fifo_cache_flush(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
+	uint64_t start = ptimer->read(dev);
+
+	do {
+		if (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) ==
+		    nv_rd32(dev, NV03_PFIFO_CACHE1_PUT))
+			return true;
+
+	} while (ptimer->read(dev) - start < 100000000);
+
+	NV_ERROR(dev, "Timeout flushing the PFIFO cache.\n");
+
+	return false;
+}
+
+bool
+nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
+{
+	uint32_t pull = nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0);
+
+	if (enable) {
+		nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull | 1);
+	} else {
+		nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, pull & ~1);
+		nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
+	}
+
+	return !!(pull & 1);
+}
+
 int
 nv04_fifo_channel_id(struct drm_device *dev)
 {

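The new nv04_fifo_cache_flush() above simply polls until the PFIFO GET pointer catches up with PUT, bounded by the nanosecond timer. A stand-alone sketch of that polling pattern, with the registers and PTIMER simulated (everything here is a stand-in for the real nv_rd32/ptimer calls):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Simulated NV03_PFIFO_CACHE1_GET/PUT and PTIMER, for illustration only. */
static uint32_t fake_get, fake_put = 64;
static uint64_t fake_time_ns;

static uint32_t read_get(void)      { return fake_get += 16; } /* cache drains */
static uint32_t read_put(void)      { return fake_put; }
static uint64_t read_timer_ns(void) { return fake_time_ns += 1000; }

static bool cache_flush(void)
{
	uint64_t start = read_timer_ns();

	do {
		/* Drained once the GET pointer catches up with PUT. */
		if (read_get() == read_put())
			return true;
	} while (read_timer_ns() - start < 100000000ULL); /* ~100 ms budget */

	return false; /* timed out */
}

int main(void)
{
	printf("flushed: %s\n", cache_flush() ? "yes" : "no");
	return 0;
}
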
+ 82 - 77
drivers/gpu/drm/nouveau/nv04_graph.c

@@ -28,6 +28,10 @@
 #include "nouveau_drv.h"

 static uint32_t nv04_graph_ctx_regs[] = {
+	0x0040053c,
+	0x00400544,
+	0x00400540,
+	0x00400548,
 	NV04_PGRAPH_CTX_SWITCH1,
 	NV04_PGRAPH_CTX_SWITCH2,
 	NV04_PGRAPH_CTX_SWITCH3,
@@ -102,69 +106,69 @@ static uint32_t nv04_graph_ctx_regs[] = {
 	NV04_PGRAPH_PATT_COLOR0,
 	NV04_PGRAPH_PATT_COLOR1,
 	NV04_PGRAPH_PATT_COLORRAM+0x00,
-	NV04_PGRAPH_PATT_COLORRAM+0x01,
-	NV04_PGRAPH_PATT_COLORRAM+0x02,
-	NV04_PGRAPH_PATT_COLORRAM+0x03,
 	NV04_PGRAPH_PATT_COLORRAM+0x04,
-	NV04_PGRAPH_PATT_COLORRAM+0x05,
-	NV04_PGRAPH_PATT_COLORRAM+0x06,
-	NV04_PGRAPH_PATT_COLORRAM+0x07,
 	NV04_PGRAPH_PATT_COLORRAM+0x08,
-	NV04_PGRAPH_PATT_COLORRAM+0x09,
-	NV04_PGRAPH_PATT_COLORRAM+0x0A,
-	NV04_PGRAPH_PATT_COLORRAM+0x0B,
-	NV04_PGRAPH_PATT_COLORRAM+0x0C,
-	NV04_PGRAPH_PATT_COLORRAM+0x0D,
-	NV04_PGRAPH_PATT_COLORRAM+0x0E,
-	NV04_PGRAPH_PATT_COLORRAM+0x0F,
+	NV04_PGRAPH_PATT_COLORRAM+0x0c,
 	NV04_PGRAPH_PATT_COLORRAM+0x10,
-	NV04_PGRAPH_PATT_COLORRAM+0x11,
-	NV04_PGRAPH_PATT_COLORRAM+0x12,
-	NV04_PGRAPH_PATT_COLORRAM+0x13,
 	NV04_PGRAPH_PATT_COLORRAM+0x14,
-	NV04_PGRAPH_PATT_COLORRAM+0x15,
-	NV04_PGRAPH_PATT_COLORRAM+0x16,
-	NV04_PGRAPH_PATT_COLORRAM+0x17,
 	NV04_PGRAPH_PATT_COLORRAM+0x18,
-	NV04_PGRAPH_PATT_COLORRAM+0x19,
-	NV04_PGRAPH_PATT_COLORRAM+0x1A,
-	NV04_PGRAPH_PATT_COLORRAM+0x1B,
-	NV04_PGRAPH_PATT_COLORRAM+0x1C,
-	NV04_PGRAPH_PATT_COLORRAM+0x1D,
-	NV04_PGRAPH_PATT_COLORRAM+0x1E,
-	NV04_PGRAPH_PATT_COLORRAM+0x1F,
+	NV04_PGRAPH_PATT_COLORRAM+0x1c,
 	NV04_PGRAPH_PATT_COLORRAM+0x20,
-	NV04_PGRAPH_PATT_COLORRAM+0x21,
-	NV04_PGRAPH_PATT_COLORRAM+0x22,
-	NV04_PGRAPH_PATT_COLORRAM+0x23,
 	NV04_PGRAPH_PATT_COLORRAM+0x24,
-	NV04_PGRAPH_PATT_COLORRAM+0x25,
-	NV04_PGRAPH_PATT_COLORRAM+0x26,
-	NV04_PGRAPH_PATT_COLORRAM+0x27,
 	NV04_PGRAPH_PATT_COLORRAM+0x28,
-	NV04_PGRAPH_PATT_COLORRAM+0x29,
-	NV04_PGRAPH_PATT_COLORRAM+0x2A,
-	NV04_PGRAPH_PATT_COLORRAM+0x2B,
-	NV04_PGRAPH_PATT_COLORRAM+0x2C,
-	NV04_PGRAPH_PATT_COLORRAM+0x2D,
-	NV04_PGRAPH_PATT_COLORRAM+0x2E,
-	NV04_PGRAPH_PATT_COLORRAM+0x2F,
+	NV04_PGRAPH_PATT_COLORRAM+0x2c,
 	NV04_PGRAPH_PATT_COLORRAM+0x30,
-	NV04_PGRAPH_PATT_COLORRAM+0x31,
-	NV04_PGRAPH_PATT_COLORRAM+0x32,
-	NV04_PGRAPH_PATT_COLORRAM+0x33,
 	NV04_PGRAPH_PATT_COLORRAM+0x34,
-	NV04_PGRAPH_PATT_COLORRAM+0x35,
-	NV04_PGRAPH_PATT_COLORRAM+0x36,
-	NV04_PGRAPH_PATT_COLORRAM+0x37,
 	NV04_PGRAPH_PATT_COLORRAM+0x38,
-	NV04_PGRAPH_PATT_COLORRAM+0x39,
-	NV04_PGRAPH_PATT_COLORRAM+0x3A,
-	NV04_PGRAPH_PATT_COLORRAM+0x3B,
-	NV04_PGRAPH_PATT_COLORRAM+0x3C,
-	NV04_PGRAPH_PATT_COLORRAM+0x3D,
-	NV04_PGRAPH_PATT_COLORRAM+0x3E,
-	NV04_PGRAPH_PATT_COLORRAM+0x3F,
+	NV04_PGRAPH_PATT_COLORRAM+0x3c,
+	NV04_PGRAPH_PATT_COLORRAM+0x40,
+	NV04_PGRAPH_PATT_COLORRAM+0x44,
+	NV04_PGRAPH_PATT_COLORRAM+0x48,
+	NV04_PGRAPH_PATT_COLORRAM+0x4c,
+	NV04_PGRAPH_PATT_COLORRAM+0x50,
+	NV04_PGRAPH_PATT_COLORRAM+0x54,
+	NV04_PGRAPH_PATT_COLORRAM+0x58,
+	NV04_PGRAPH_PATT_COLORRAM+0x5c,
+	NV04_PGRAPH_PATT_COLORRAM+0x60,
+	NV04_PGRAPH_PATT_COLORRAM+0x64,
+	NV04_PGRAPH_PATT_COLORRAM+0x68,
+	NV04_PGRAPH_PATT_COLORRAM+0x6c,
+	NV04_PGRAPH_PATT_COLORRAM+0x70,
+	NV04_PGRAPH_PATT_COLORRAM+0x74,
+	NV04_PGRAPH_PATT_COLORRAM+0x78,
+	NV04_PGRAPH_PATT_COLORRAM+0x7c,
+	NV04_PGRAPH_PATT_COLORRAM+0x80,
+	NV04_PGRAPH_PATT_COLORRAM+0x84,
+	NV04_PGRAPH_PATT_COLORRAM+0x88,
+	NV04_PGRAPH_PATT_COLORRAM+0x8c,
+	NV04_PGRAPH_PATT_COLORRAM+0x90,
+	NV04_PGRAPH_PATT_COLORRAM+0x94,
+	NV04_PGRAPH_PATT_COLORRAM+0x98,
+	NV04_PGRAPH_PATT_COLORRAM+0x9c,
+	NV04_PGRAPH_PATT_COLORRAM+0xa0,
+	NV04_PGRAPH_PATT_COLORRAM+0xa4,
+	NV04_PGRAPH_PATT_COLORRAM+0xa8,
+	NV04_PGRAPH_PATT_COLORRAM+0xac,
+	NV04_PGRAPH_PATT_COLORRAM+0xb0,
+	NV04_PGRAPH_PATT_COLORRAM+0xb4,
+	NV04_PGRAPH_PATT_COLORRAM+0xb8,
+	NV04_PGRAPH_PATT_COLORRAM+0xbc,
+	NV04_PGRAPH_PATT_COLORRAM+0xc0,
+	NV04_PGRAPH_PATT_COLORRAM+0xc4,
+	NV04_PGRAPH_PATT_COLORRAM+0xc8,
+	NV04_PGRAPH_PATT_COLORRAM+0xcc,
+	NV04_PGRAPH_PATT_COLORRAM+0xd0,
+	NV04_PGRAPH_PATT_COLORRAM+0xd4,
+	NV04_PGRAPH_PATT_COLORRAM+0xd8,
+	NV04_PGRAPH_PATT_COLORRAM+0xdc,
+	NV04_PGRAPH_PATT_COLORRAM+0xe0,
+	NV04_PGRAPH_PATT_COLORRAM+0xe4,
+	NV04_PGRAPH_PATT_COLORRAM+0xe8,
+	NV04_PGRAPH_PATT_COLORRAM+0xec,
+	NV04_PGRAPH_PATT_COLORRAM+0xf0,
+	NV04_PGRAPH_PATT_COLORRAM+0xf4,
+	NV04_PGRAPH_PATT_COLORRAM+0xf8,
+	NV04_PGRAPH_PATT_COLORRAM+0xfc,
 	NV04_PGRAPH_PATTERN,
 	0x0040080c,
 	NV04_PGRAPH_PATTERN_SHAPE,
@@ -247,14 +251,6 @@ static uint32_t nv04_graph_ctx_regs[] = {
 	0x004004f8,
 	0x0040047c,
 	0x004004fc,
-	0x0040053c,
-	0x00400544,
-	0x00400540,
-	0x00400548,
-	0x00400560,
-	0x00400568,
-	0x00400564,
-	0x0040056c,
 	0x00400534,
 	0x00400538,
 	0x00400514,
@@ -341,9 +337,8 @@ static uint32_t nv04_graph_ctx_regs[] = {
 	0x00400500,
 	0x00400504,
 	NV04_PGRAPH_VALID1,
-	NV04_PGRAPH_VALID2
-
-
+	NV04_PGRAPH_VALID2,
+	NV04_PGRAPH_DEBUG_3
 };

 struct graph_state {
@@ -388,6 +383,18 @@ nv04_graph_context_switch(struct drm_device *dev)
 	pgraph->fifo_access(dev, true);
 }

+static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(nv04_graph_ctx_regs); i++) {
+		if (nv04_graph_ctx_regs[i] == reg)
+			return &ctx->nv04[i];
+	}
+
+	return NULL;
+}
+
 int nv04_graph_create_context(struct nouveau_channel *chan)
 {
 	struct graph_state *pgraph_ctx;
@@ -398,15 +405,8 @@ int nv04_graph_create_context(struct nouveau_channel *chan)
 	if (pgraph_ctx == NULL)
 		return -ENOMEM;

-	/* dev_priv->fifos[channel].pgraph_ctx_user = channel << 24; */
-	pgraph_ctx->nv04[0] = 0x0001ffff;
-	/* is it really needed ??? */
-#if 0
-	dev_priv->fifos[channel].pgraph_ctx[1] =
-					nv_rd32(dev, NV_PGRAPH_DEBUG_4);
-	dev_priv->fifos[channel].pgraph_ctx[2] =
-					nv_rd32(dev, 0x004006b0);
-#endif
+	*ctx_reg(pgraph_ctx, NV04_PGRAPH_DEBUG_3) = 0xfad4ff31;
+
 	return 0;
 }

@@ -429,9 +429,13 @@ int nv04_graph_load_context(struct nouveau_channel *chan)
 		nv_wr32(dev, nv04_graph_ctx_regs[i], pgraph_ctx->nv04[i]);

 	nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL, 0x10010100);
-	nv_wr32(dev, NV04_PGRAPH_CTX_USER, chan->id << 24);
+
+	tmp  = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
+	nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp | chan->id << 24);
+
 	tmp = nv_rd32(dev, NV04_PGRAPH_FFINTFC_ST2);
 	nv_wr32(dev, NV04_PGRAPH_FFINTFC_ST2, tmp & 0x000fffff);
+
 	return 0;
 }

@@ -494,7 +498,7 @@ int nv04_graph_init(struct drm_device *dev)
 	nv_wr32(dev, NV04_PGRAPH_STATE        , 0xFFFFFFFF);
 	nv_wr32(dev, NV04_PGRAPH_CTX_CONTROL  , 0x10000100);
 	tmp  = nv_rd32(dev, NV04_PGRAPH_CTX_USER) & 0x00ffffff;
-	tmp |= dev_priv->engine.fifo.channels << 24;
+	tmp |= (dev_priv->engine.fifo.channels - 1) << 24;
 	nv_wr32(dev, NV04_PGRAPH_CTX_USER, tmp);

 	/* These don't belong here, they're part of a per-channel context */
@@ -533,7 +537,7 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
 			      int mthd, uint32_t data)
 {
 	struct drm_device *dev = chan->dev;
-	uint32_t instance = nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff;
+	uint32_t instance = (nv_rd32(dev, NV04_PGRAPH_CTX_SWITCH4) & 0xffff) << 4;
 	int subc = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 13) & 0x7;
 	uint32_t tmp;

@@ -547,7 +551,7 @@ nv04_graph_mthd_set_operation(struct nouveau_channel *chan, int grclass,
 	return 0;
 }

-static struct nouveau_pgraph_object_method nv04_graph_mthds_m2mf[] = {
+static struct nouveau_pgraph_object_method nv04_graph_mthds_sw[] = {
 	{ 0x0150, nv04_graph_mthd_set_ref },
 	{}
 };
@@ -558,7 +562,7 @@ static struct nouveau_pgraph_object_method nv04_graph_mthds_set_operation[] = {
 };

 struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
-	{ 0x0039, false, nv04_graph_mthds_m2mf },
+	{ 0x0039, false, NULL },
 	{ 0x004a, false, nv04_graph_mthds_set_operation }, /* gdirect */
 	{ 0x005f, false, nv04_graph_mthds_set_operation }, /* imageblit */
 	{ 0x0061, false, nv04_graph_mthds_set_operation }, /* ifc */
@@ -574,6 +578,7 @@ struct nouveau_pgraph_object_class nv04_graph_grclass[] = {
 	{ 0x0053, false, NULL }, /* surf3d */
 	{ 0x0054, false, NULL }, /* tex_tri */
 	{ 0x0055, false, NULL }, /* multitex_tri */
+	{ 0x506e, true, nv04_graph_mthds_sw },
 	{}
 };


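The ctx_reg() helper added above maps a register offset to its slot in the saved per-channel context by a linear scan of the register list. A minimal user-space sketch of the same lookup (the table contents and the value written are illustrative stand-ins, not the real context list):

#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Hypothetical, shortened register list; the saved context array is parallel to it. */
static const uint32_t ctx_regs[] = { 0x0040053c, 0x00400544, 0x00400724 };

struct graph_state { uint32_t nv04[ARRAY_SIZE(ctx_regs)]; };

static uint32_t *ctx_reg(struct graph_state *ctx, uint32_t reg)
{
	for (size_t i = 0; i < ARRAY_SIZE(ctx_regs); i++)
		if (ctx_regs[i] == reg)
			return &ctx->nv04[i];
	return NULL;
}

int main(void)
{
	struct graph_state ctx = { { 0 } };

	/* Seed one register's saved value, the way create_context seeds DEBUG_3. */
	*ctx_reg(&ctx, 0x00400544) = 0xfad4ff31;
	printf("slot value: 0x%08x\n", ctx.nv04[1]);
	return 0;
}
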
+ 26 - 6
drivers/gpu/drm/nouveau/nv10_fb.c

@@ -3,17 +3,37 @@
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"

+void
+nv10_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
+			  uint32_t size, uint32_t pitch)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t limit = max(1u, addr + size) - 1;
+
+	if (pitch) {
+		if (dev_priv->card_type >= NV_20)
+			addr |= 1;
+		else
+			addr |= 1 << 31;
+	}
+
+	nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
+	nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
+	nv_wr32(dev, NV10_PFB_TILE(i), addr);
+}
+
 int
 nv10_fb_init(struct drm_device *dev)
 {
-	uint32_t fb_bar_size;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
 	int i;

-	fb_bar_size = drm_get_resource_len(dev, 0) - 1;
-	for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
-		nv_wr32(dev, NV10_PFB_TILE(i), 0);
-		nv_wr32(dev, NV10_PFB_TLIMIT(i), fb_bar_size);
-	}
+	pfb->num_tiles = NV10_PFB_TILE__SIZE;
+
+	/* Turn all the tiling regions off. */
+	for (i = 0; i < pfb->num_tiles; i++)
+		pfb->set_region_tiling(dev, i, 0, 0, 0);

 	return 0;
 }

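The set_region_tiling() helpers above all share the same limit/enable arithmetic: the limit register holds the address of the last byte in the region, and max(1u, addr + size) - 1 keeps the disabled case (addr = size = 0) from underflowing to 0xffffffff. A quick sketch with made-up values:

#include <stdint.h>
#include <stdio.h>

#define max(a, b) ((a) > (b) ? (a) : (b))

int main(void)
{
	uint32_t addr = 0x00100000, size = 0x00080000, pitch = 0x1000;
	uint32_t limit = max(1u, addr + size) - 1;

	if (pitch)
		addr |= 1u << 31;	/* NV1x enable bit; NV2x and later use bit 0 */

	printf("TILE=0x%08x TLIMIT=0x%08x TSIZE=0x%08x\n", addr, limit, pitch);

	/* Disabled region: addr = size = pitch = 0 -> limit stays 0. */
	printf("disabled TLIMIT=0x%08x\n", max(1u, 0u + 0u) - 1);
	return 0;
}
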
+ 17 - 11
drivers/gpu/drm/nouveau/nv10_graph.c

@@ -807,6 +807,20 @@ void nv10_graph_destroy_context(struct nouveau_channel *chan)
 	chan->pgraph_ctx = NULL;
 }

+void
+nv10_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
+			     uint32_t size, uint32_t pitch)
+{
+	uint32_t limit = max(1u, addr + size) - 1;
+
+	if (pitch)
+		addr |= 1 << 31;
+
+	nv_wr32(dev, NV10_PGRAPH_TLIMIT(i), limit);
+	nv_wr32(dev, NV10_PGRAPH_TSIZE(i), pitch);
+	nv_wr32(dev, NV10_PGRAPH_TILE(i), addr);
+}
+
 int nv10_graph_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -838,17 +852,9 @@ int nv10_graph_init(struct drm_device *dev)
 	} else
 		nv_wr32(dev, NV10_PGRAPH_DEBUG_4, 0x00000000);

-	/* copy tile info from PFB */
-	for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
-		nv_wr32(dev, NV10_PGRAPH_TILE(i),
-					nv_rd32(dev, NV10_PFB_TILE(i)));
-		nv_wr32(dev, NV10_PGRAPH_TLIMIT(i),
-					nv_rd32(dev, NV10_PFB_TLIMIT(i)));
-		nv_wr32(dev, NV10_PGRAPH_TSIZE(i),
-					nv_rd32(dev, NV10_PFB_TSIZE(i)));
-		nv_wr32(dev, NV10_PGRAPH_TSTATUS(i),
-					nv_rd32(dev, NV10_PFB_TSTATUS(i)));
-	}
+	/* Turn all the tiling regions off. */
+	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
+		nv10_graph_set_region_tiling(dev, i, 0, 0, 0);

 	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH1, 0x00000000);
 	nv_wr32(dev, NV10_PGRAPH_CTX_SWITCH2, 0x00000000);

+ 105 - 10
drivers/gpu/drm/nouveau/nv17_tv.c

@@ -33,13 +33,103 @@
 #include "nouveau_hw.h"
 #include "nv17_tv.h"

-enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
-					 struct drm_connector *connector,
-					 uint32_t pin_mask)
+static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder)
 {
+	struct drm_device *dev = encoder->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t testval, regoffset = nv04_dac_output_offset(encoder);
+	uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end,
+		fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c;
+	uint32_t sample = 0;
+	int head;
+
+#define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20)
+	testval = RGB_TEST_DATA(0x82, 0xeb, 0x82);
+	if (dev_priv->vbios->tvdactestval)
+		testval = dev_priv->vbios->tvdactestval;
+
+	dacclk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset);
+	head = (dacclk & 0x100) >> 8;
+
+	/* Save the previous state. */
+	gpio1 = nv17_gpio_get(dev, DCB_GPIO_TVDAC1);
+	gpio0 = nv17_gpio_get(dev, DCB_GPIO_TVDAC0);
+	fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL);
+	fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START);
+	fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END);
+	fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
+	test_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset);
+	ctv_1c = NVReadRAMDAC(dev, head, 0x680c1c);
+	ctv_14 = NVReadRAMDAC(dev, head, 0x680c14);
+	ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c);
+
+	/* Prepare the DAC for load detection.  */
+	nv17_gpio_set(dev, DCB_GPIO_TVDAC1, true);
+	nv17_gpio_set(dev, DCB_GPIO_TVDAC0, true);
+
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, 1183);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL,
+		      NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS |
+		      NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12 |
+		      NV_PRAMDAC_FP_TG_CONTROL_READ_PROG |
+		      NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS |
+		      NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS);
+
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, 0);
+
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset,
+		      (dacclk & ~0xff) | 0x22);
+	msleep(1);
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset,
+		      (dacclk & ~0xff) | 0x21);
+
+	NVWriteRAMDAC(dev, head, 0x680c1c, 1 << 20);
+	NVWriteRAMDAC(dev, head, 0x680c14, 4 << 16);
+
+	/* Sample pin 0x4 (usually S-video luma). */
+	NVWriteRAMDAC(dev, head, 0x680c6c, testval >> 10 & 0x3ff);
+	msleep(20);
+	sample |= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset)
+		& 0x4 << 28;
+
+	/* Sample the remaining pins. */
+	NVWriteRAMDAC(dev, head, 0x680c6c, testval & 0x3ff);
+	msleep(20);
+	sample |= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset)
+		& 0xa << 28;
+
+	/* Restore the previous state. */
+	NVWriteRAMDAC(dev, head, 0x680c1c, ctv_1c);
+	NVWriteRAMDAC(dev, head, 0x680c14, ctv_14);
+	NVWriteRAMDAC(dev, head, 0x680c6c, ctv_6c);
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, dacclk);
+	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, test_ctrl);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, fp_control);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start);
+	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal);
+	nv17_gpio_set(dev, DCB_GPIO_TVDAC1, gpio1);
+	nv17_gpio_set(dev, DCB_GPIO_TVDAC0, gpio0);
+
+	return sample;
+}
+
+static enum drm_connector_status
+nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector)
+{
+	struct drm_device *dev = encoder->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct drm_mode_config *conf = &dev->mode_config;
 	struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder);
+	struct dcb_entry *dcb = tv_enc->base.dcb;
-	tv_enc->pin_mask = pin_mask >> 28 & 0xe;
+	if (dev_priv->chipset == 0x42 ||
+	    dev_priv->chipset == 0x43)
+		tv_enc->pin_mask = nv42_tv_sample_load(encoder) >> 28 & 0xe;
+	else
+		tv_enc->pin_mask = nv17_dac_sample_load(encoder) >> 28 & 0xe;

 	switch (tv_enc->pin_mask) {
 	case 0x2:
@@ -50,7 +140,7 @@ enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
 		tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SVIDEO;
 		break;
 	case 0xe:
-		if (nouveau_encoder(encoder)->dcb->tvconf.has_component_output)
+		if (dcb->tvconf.has_component_output)
 			tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Component;
 		else
 			tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SCART;
@@ -61,11 +151,16 @@ enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder,
 	}

 	drm_connector_property_set_value(connector,
-			encoder->dev->mode_config.tv_subconnector_property,
-							tv_enc->subconnector);
+					 conf->tv_subconnector_property,
+					 tv_enc->subconnector);
-	return tv_enc->subconnector ? connector_status_connected :
-					connector_status_disconnected;
+	if (tv_enc->subconnector) {
+		NV_INFO(dev, "Load detected on output %c\n",
+			'@' + ffs(dcb->or));
+		return connector_status_connected;
+	} else {
+		return connector_status_disconnected;
+	}
 }

 static const struct {
@@ -633,7 +728,7 @@ static struct drm_encoder_helper_funcs nv17_tv_helper_funcs = {
 	.prepare = nv17_tv_prepare,
 	.commit = nv17_tv_commit,
 	.mode_set = nv17_tv_mode_set,
-	.detect = nv17_dac_detect,
+	.detect = nv17_tv_detect,
 };

 static struct drm_encoder_slave_funcs nv17_tv_slave_funcs = {

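The detect path above keeps only bits 1-3 of the sampled load word (sample >> 28 & 0xe) and maps the result to a TV subconnector. A stand-alone sketch of that decode; the 0x2 and 0xc mappings shown here are only examples, while the 0xe case matches the hunk above:

#include <stdint.h>
#include <stdio.h>

/* Bits 28-31 of the load sample report per-pin load; pins 1-3 (mask 0xe)
 * drive the composite/S-video/component decision. */
static const char *subconnector(uint32_t sample)
{
	switch (sample >> 28 & 0xe) {
	case 0x2: return "composite";            /* example mapping */
	case 0xc: return "S-video";              /* example mapping */
	case 0xe: return "component or SCART";   /* as in the switch above */
	default:  return "disconnected";
	}
}

int main(void)
{
	printf("%s\n", subconnector(0xe0000000)); /* all three pins loaded */
	printf("%s\n", subconnector(0x00000000)); /* nothing attached */
	return 0;
}
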
+ 28 - 33
drivers/gpu/drm/nouveau/nv20_graph.c

@@ -514,6 +514,27 @@ nv20_graph_rdi(struct drm_device *dev)
 	nouveau_wait_for_idle(dev);
 }

+void
+nv20_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
+			     uint32_t size, uint32_t pitch)
+{
+	uint32_t limit = max(1u, addr + size) - 1;
+
+	if (pitch)
+		addr |= 1;
+
+	nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
+	nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
+	nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+
+	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + 4 * i);
+	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, limit);
+	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + 4 * i);
+	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, pitch);
+	nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + 4 * i);
+	nv_wr32(dev, NV10_PGRAPH_RDI_DATA, addr);
+}
+
 int
 nv20_graph_init(struct drm_device *dev)
 {
@@ -572,27 +593,10 @@ nv20_graph_init(struct drm_device *dev)
 		nv_wr32(dev, NV10_PGRAPH_RDI_DATA , 0x00000030);
 	}

-	/* copy tile info from PFB */
-	for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
-		nv_wr32(dev, 0x00400904 + i * 0x10,
-					nv_rd32(dev, NV10_PFB_TLIMIT(i)));
-			/* which is NV40_PGRAPH_TLIMIT0(i) ?? */
-		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0030 + i * 4);
-		nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
-					nv_rd32(dev, NV10_PFB_TLIMIT(i)));
-		nv_wr32(dev, 0x00400908 + i * 0x10,
-					nv_rd32(dev, NV10_PFB_TSIZE(i)));
-			/* which is NV40_PGRAPH_TSIZE0(i) ?? */
-		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0050 + i * 4);
-		nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
-					nv_rd32(dev, NV10_PFB_TSIZE(i)));
-		nv_wr32(dev, 0x00400900 + i * 0x10,
-					nv_rd32(dev, NV10_PFB_TILE(i)));
-			/* which is NV40_PGRAPH_TILE0(i) ?? */
-		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0010 + i * 4);
-		nv_wr32(dev, NV10_PGRAPH_RDI_DATA,
-					nv_rd32(dev, NV10_PFB_TILE(i)));
-	}
+	/* Turn all the tiling regions off. */
+	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
+		nv20_graph_set_region_tiling(dev, i, 0, 0, 0);
+
 	for (i = 0; i < 8; i++) {
 		nv_wr32(dev, 0x400980 + i * 4, nv_rd32(dev, 0x100300 + i * 4));
 		nv_wr32(dev, NV10_PGRAPH_RDI_INDEX, 0x00EA0090 + i * 4);
@@ -704,18 +708,9 @@ nv30_graph_init(struct drm_device *dev)

 	nv_wr32(dev, 0x4000c0, 0x00000016);

-	/* copy tile info from PFB */
-	for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
-		nv_wr32(dev, 0x00400904 + i * 0x10,
-					nv_rd32(dev, NV10_PFB_TLIMIT(i)));
-			/* which is NV40_PGRAPH_TLIMIT0(i) ?? */
-		nv_wr32(dev, 0x00400908 + i * 0x10,
-					nv_rd32(dev, NV10_PFB_TSIZE(i)));
-			/* which is NV40_PGRAPH_TSIZE0(i) ?? */
-		nv_wr32(dev, 0x00400900 + i * 0x10,
-					nv_rd32(dev, NV10_PFB_TILE(i)));
-			/* which is NV40_PGRAPH_TILE0(i) ?? */
-	}
+	/* Turn all the tiling regions off. */
+	for (i = 0; i < NV10_PFB_TILE__SIZE; i++)
+		nv20_graph_set_region_tiling(dev, i, 0, 0, 0);

 	nv_wr32(dev, NV10_PGRAPH_CTX_CONTROL, 0x10000100);
 	nv_wr32(dev, NV10_PGRAPH_STATE      , 0xFFFFFFFF);

+ 33 - 20
drivers/gpu/drm/nouveau/nv40_fb.c

@@ -3,12 +3,37 @@
 #include "nouveau_drv.h"
 #include "nouveau_drm.h"

+void
+nv40_fb_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
+			  uint32_t size, uint32_t pitch)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t limit = max(1u, addr + size) - 1;
+
+	if (pitch)
+		addr |= 1;
+
+	switch (dev_priv->chipset) {
+	case 0x40:
+		nv_wr32(dev, NV10_PFB_TLIMIT(i), limit);
+		nv_wr32(dev, NV10_PFB_TSIZE(i), pitch);
+		nv_wr32(dev, NV10_PFB_TILE(i), addr);
+		break;
+
+	default:
+		nv_wr32(dev, NV40_PFB_TLIMIT(i), limit);
+		nv_wr32(dev, NV40_PFB_TSIZE(i), pitch);
+		nv_wr32(dev, NV40_PFB_TILE(i), addr);
+		break;
+	}
+}
+
 int
 nv40_fb_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t fb_bar_size, tmp;
-	int num_tiles;
+	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+	uint32_t tmp;
 	int i;

 	/* This is strictly a NV4x register (don't know about NV5x). */
@@ -23,35 +48,23 @@ nv40_fb_init(struct drm_device *dev)
 	case 0x45:
 		tmp = nv_rd32(dev, NV10_PFB_CLOSE_PAGE2);
 		nv_wr32(dev, NV10_PFB_CLOSE_PAGE2, tmp & ~(1 << 15));
-		num_tiles = NV10_PFB_TILE__SIZE;
+		pfb->num_tiles = NV10_PFB_TILE__SIZE;
 		break;
 	case 0x46: /* G72 */
 	case 0x47: /* G70 */
 	case 0x49: /* G71 */
 	case 0x4b: /* G73 */
 	case 0x4c: /* C51 (G7X version) */
-		num_tiles = NV40_PFB_TILE__SIZE_1;
+		pfb->num_tiles = NV40_PFB_TILE__SIZE_1;
 		break;
 	default:
-		num_tiles = NV40_PFB_TILE__SIZE_0;
+		pfb->num_tiles = NV40_PFB_TILE__SIZE_0;
 		break;
 	}

-	fb_bar_size = drm_get_resource_len(dev, 0) - 1;
-	switch (dev_priv->chipset) {
-	case 0x40:
-		for (i = 0; i < num_tiles; i++) {
-			nv_wr32(dev, NV10_PFB_TILE(i), 0);
-			nv_wr32(dev, NV10_PFB_TLIMIT(i), fb_bar_size);
-		}
-		break;
-	default:
-		for (i = 0; i < num_tiles; i++) {
-			nv_wr32(dev, NV40_PFB_TILE(i), 0);
-			nv_wr32(dev, NV40_PFB_TLIMIT(i), fb_bar_size);
-		}
-		break;
-	}
+	/* Turn all the tiling regions off. */
+	for (i = 0; i < pfb->num_tiles; i++)
+		pfb->set_region_tiling(dev, i, 0, 0, 0);

 	return 0;
 }

+ 47 - 69
drivers/gpu/drm/nouveau/nv40_graph.c

@@ -181,6 +181,48 @@ nv40_graph_unload_context(struct drm_device *dev)
 	return ret;
 }

+void
+nv40_graph_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
+			     uint32_t size, uint32_t pitch)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	uint32_t limit = max(1u, addr + size) - 1;
+
+	if (pitch)
+		addr |= 1;
+
+	switch (dev_priv->chipset) {
+	case 0x44:
+	case 0x4a:
+	case 0x4e:
+		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
+		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
+		nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+		break;
+
+	case 0x46:
+	case 0x47:
+	case 0x49:
+	case 0x4b:
+		nv_wr32(dev, NV47_PGRAPH_TSIZE(i), pitch);
+		nv_wr32(dev, NV47_PGRAPH_TLIMIT(i), limit);
+		nv_wr32(dev, NV47_PGRAPH_TILE(i), addr);
+		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
+		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
+		nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
+		break;
+
+	default:
+		nv_wr32(dev, NV20_PGRAPH_TSIZE(i), pitch);
+		nv_wr32(dev, NV20_PGRAPH_TLIMIT(i), limit);
+		nv_wr32(dev, NV20_PGRAPH_TILE(i), addr);
+		nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), pitch);
+		nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), limit);
+		nv_wr32(dev, NV40_PGRAPH_TILE1(i), addr);
+		break;
+	}
+}
+
 /*
  * G70		0x47
  * G71		0x49
@@ -195,7 +237,8 @@ nv40_graph_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv =
 		(struct drm_nouveau_private *)dev->dev_private;
-	uint32_t vramsz, tmp;
+	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
+	uint32_t vramsz;
 	int i, j;

 	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) &
@@ -292,74 +335,9 @@ nv40_graph_init(struct drm_device *dev)
 	nv_wr32(dev, 0x400b38, 0x2ffff800);
 	nv_wr32(dev, 0x400b3c, 0x00006000);

-	/* copy tile info from PFB */
-	switch (dev_priv->chipset) {
-	case 0x40: /* vanilla NV40 */
-		for (i = 0; i < NV10_PFB_TILE__SIZE; i++) {
-			tmp = nv_rd32(dev, NV10_PFB_TILE(i));
-			nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
-			nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
-			tmp = nv_rd32(dev, NV10_PFB_TLIMIT(i));
-			nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
-			nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
-			tmp = nv_rd32(dev, NV10_PFB_TSIZE(i));
-			nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
-			nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
-			tmp = nv_rd32(dev, NV10_PFB_TSTATUS(i));
-			nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
-			nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
-		}
-		break;
-	case 0x44:
-	case 0x4a:
-	case 0x4e: /* NV44-based cores don't have 0x406900? */
-		for (i = 0; i < NV40_PFB_TILE__SIZE_0; i++) {
-			tmp = nv_rd32(dev, NV40_PFB_TILE(i));
-			nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
-			tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
-			nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
-			tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
-			nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
-			tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
-			nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
-		}
-		break;
-	case 0x46:
-	case 0x47:
-	case 0x49:
-	case 0x4b: /* G7X-based cores */
-		for (i = 0; i < NV40_PFB_TILE__SIZE_1; i++) {
-			tmp = nv_rd32(dev, NV40_PFB_TILE(i));
-			nv_wr32(dev, NV47_PGRAPH_TILE0(i), tmp);
-			nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
-			tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
-			nv_wr32(dev, NV47_PGRAPH_TLIMIT0(i), tmp);
-			nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
-			tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
-			nv_wr32(dev, NV47_PGRAPH_TSIZE0(i), tmp);
-			nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
-			tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
-			nv_wr32(dev, NV47_PGRAPH_TSTATUS0(i), tmp);
-			nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
-		}
-		break;
-	default: /* everything else */
-		for (i = 0; i < NV40_PFB_TILE__SIZE_0; i++) {
-			tmp = nv_rd32(dev, NV40_PFB_TILE(i));
-			nv_wr32(dev, NV40_PGRAPH_TILE0(i), tmp);
-			nv_wr32(dev, NV40_PGRAPH_TILE1(i), tmp);
-			tmp = nv_rd32(dev, NV40_PFB_TLIMIT(i));
-			nv_wr32(dev, NV40_PGRAPH_TLIMIT0(i), tmp);
-			nv_wr32(dev, NV40_PGRAPH_TLIMIT1(i), tmp);
-			tmp = nv_rd32(dev, NV40_PFB_TSIZE(i));
-			nv_wr32(dev, NV40_PGRAPH_TSIZE0(i), tmp);
-			nv_wr32(dev, NV40_PGRAPH_TSIZE1(i), tmp);
-			tmp = nv_rd32(dev, NV40_PFB_TSTATUS(i));
-			nv_wr32(dev, NV40_PGRAPH_TSTATUS0(i), tmp);
-			nv_wr32(dev, NV40_PGRAPH_TSTATUS1(i), tmp);
-		}
-		break;
-	}
+	/* Turn all the tiling regions off. */
+	for (i = 0; i < pfb->num_tiles; i++)
+		nv40_graph_set_region_tiling(dev, i, 0, 0, 0);

 	/* begin RAM config */
 	vramsz = drm_get_resource_len(dev, 0) - 1;

+ 17 - 0
drivers/gpu/drm/nouveau/nv50_display.c

@@ -690,9 +690,21 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
 			   int pxclk)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_connector *nv_connector = NULL;
+	struct drm_encoder *encoder;
 	struct nvbios *bios = &dev_priv->VBIOS;
 	uint32_t mc, script = 0, or;

+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
+
+		if (nv_encoder->dcb != dcbent)
+			continue;
+
+		nv_connector = nouveau_encoder_connector_get(nv_encoder);
+		break;
+	}
+
 	or = ffs(dcbent->or) - 1;
 	mc = nv50_display_mode_ctrl(dev, dcbent->type != OUTPUT_ANALOG, or);
 	switch (dcbent->type) {
@@ -711,6 +723,11 @@ nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
 			} else
 			if (bios->fp.strapless_is_24bit & 1)
 				script |= 0x0200;
+
+			if (nv_connector && nv_connector->edid &&
+			    (nv_connector->edid->revision >= 4) &&
+			    (nv_connector->edid->input & 0x70) >= 0x20)
+				script |= 0x0200;
 		}

 		if (nouveau_uscript_lvds >= 0) {

+ 10 - 13
drivers/gpu/drm/nouveau/nv50_fbcon.c

@@ -16,9 +16,7 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)

 	if (!(info->flags & FBINFO_HWACCEL_DISABLED) &&
 	     RING_SPACE(chan, rect->rop == ROP_COPY ? 7 : 11)) {
-		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
-
-		info->flags |= FBINFO_HWACCEL_DISABLED;
+		nouveau_fbcon_gpu_lockup(info);
 	}

 	if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -31,7 +29,11 @@ nv50_fbcon_fillrect(struct fb_info *info, const struct fb_fillrect *rect)
 		OUT_RING(chan, 1);
 	}
 	BEGIN_RING(chan, NvSub2D, 0x0588, 1);
-	OUT_RING(chan, rect->color);
+	if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
+	    info->fix.visual == FB_VISUAL_DIRECTCOLOR)
+		OUT_RING(chan, ((uint32_t *)info->pseudo_palette)[rect->color]);
+	else
+		OUT_RING(chan, rect->color);
 	BEGIN_RING(chan, NvSub2D, 0x0600, 4);
 	OUT_RING(chan, rect->dx);
 	OUT_RING(chan, rect->dy);
@@ -56,9 +58,7 @@ nv50_fbcon_copyarea(struct fb_info *info, const struct fb_copyarea *region)
 		return;

 	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 12)) {
-		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
-
-		info->flags |= FBINFO_HWACCEL_DISABLED;
+		nouveau_fbcon_gpu_lockup(info);
 	}

 	if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -101,8 +101,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 	}

 	if (!(info->flags & FBINFO_HWACCEL_DISABLED) && RING_SPACE(chan, 11)) {
-		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
-		info->flags |= FBINFO_HWACCEL_DISABLED;
+		nouveau_fbcon_gpu_lockup(info);
 	}
 
 	if (info->flags & FBINFO_HWACCEL_DISABLED) {
@@ -135,9 +134,7 @@ nv50_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
 		int push = dwords > 2047 ? 2047 : dwords;
 
 		if (RING_SPACE(chan, push + 1)) {
-			NV_ERROR(dev,
-				 "GPU lockup - switching to software fbcon\n");
-			info->flags |= FBINFO_HWACCEL_DISABLED;
+			nouveau_fbcon_gpu_lockup(info);
 			cfb_imageblit(info, image);
 			return;
 		}
@@ -199,7 +196,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
 
 	ret = RING_SPACE(chan, 59);
 	if (ret) {
-		NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
+		nouveau_fbcon_gpu_lockup(info);
 		return ret;
 	}
 
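nouveau_fbcon_gpu_lockup() is the helper added by the "create function for dealing with gpu lockup" commit (see nouveau_fbcon.c/.h in the file list); judging from the inline code it replaces in these hunks it just logs the lockup and drops fbcon back to software rendering. A plausible sketch, not the verbatim patch (the info->par layout is assumed from how the rest of nv50_fbcon.c reaches its drm_device):

void
nouveau_fbcon_gpu_lockup(struct fb_info *info)
{
	struct nouveau_fbcon_par *par = info->par;	/* type and field assumed */
	struct drm_device *dev = par->dev;

	NV_ERROR(dev, "GPU lockup - switching to software fbcon\n");
	info->flags |= FBINFO_HWACCEL_DISABLED;
}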

+ 2 - 4
drivers/gpu/drm/nouveau/nv50_fifo.c

@@ -384,8 +384,8 @@ nv50_fifo_load_context(struct nouveau_channel *chan)
 		nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr),
 			nv_ro32(dev, cache, (ptr * 2) + 1));
 	}
-	nv_wr32(dev, 0x3210, cnt << 2);
-	nv_wr32(dev, 0x3270, 0);
+	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, cnt << 2);
+	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
 
 	/* guessing that all the 0x34xx regs aren't on NV50 */
 	if (!IS_G80) {
@@ -398,8 +398,6 @@ nv50_fifo_load_context(struct nouveau_channel *chan)
 
 	dev_priv->engine.instmem.finish_access(dev);
 
-	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
-	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
 	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
 	return 0;
 }

+ 5 - 0
drivers/gpu/drm/radeon/Makefile

@@ -24,6 +24,9 @@ $(obj)/rv515_reg_safe.h: $(src)/reg_srcs/rv515 $(obj)/mkregtable
 $(obj)/r300_reg_safe.h: $(src)/reg_srcs/r300 $(obj)/mkregtable
 	$(call if_changed,mkregtable)
 
+$(obj)/r420_reg_safe.h: $(src)/reg_srcs/r420 $(obj)/mkregtable
+	$(call if_changed,mkregtable)
+
 $(obj)/rs600_reg_safe.h: $(src)/reg_srcs/rs600 $(obj)/mkregtable
 	$(call if_changed,mkregtable)
 
@@ -35,6 +38,8 @@ $(obj)/rv515.o: $(obj)/rv515_reg_safe.h
 
 $(obj)/r300.o: $(obj)/r300_reg_safe.h
 
+$(obj)/r420.o: $(obj)/r420_reg_safe.h
+
 $(obj)/rs600.o: $(obj)/rs600_reg_safe.h
 
 radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \

+ 433 - 368
drivers/gpu/drm/radeon/ObjectID.h

@@ -1,5 +1,5 @@
 /*
-* Copyright 2006-2007 Advanced Micro Devices, Inc.
+* Copyright 2006-2007 Advanced Micro Devices, Inc.  
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
@@ -41,14 +41,14 @@
 /****************************************************/
 /* Encoder Object ID Definition                     */
 /****************************************************/
-#define ENCODER_OBJECT_ID_NONE                    0x00
+#define ENCODER_OBJECT_ID_NONE                    0x00 
 
 /* Radeon Class Display Hardware */
 #define ENCODER_OBJECT_ID_INTERNAL_LVDS           0x01
 #define ENCODER_OBJECT_ID_INTERNAL_TMDS1          0x02
 #define ENCODER_OBJECT_ID_INTERNAL_TMDS2          0x03
 #define ENCODER_OBJECT_ID_INTERNAL_DAC1           0x04
-#define ENCODER_OBJECT_ID_INTERNAL_DAC2           0x05	/* TV/CV DAC */
+#define ENCODER_OBJECT_ID_INTERNAL_DAC2           0x05     /* TV/CV DAC */
 #define ENCODER_OBJECT_ID_INTERNAL_SDVOA          0x06
 #define ENCODER_OBJECT_ID_INTERNAL_SDVOB          0x07
 
@@ -56,11 +56,11 @@
 #define ENCODER_OBJECT_ID_SI170B                  0x08
 #define ENCODER_OBJECT_ID_CH7303                  0x09
 #define ENCODER_OBJECT_ID_CH7301                  0x0A
-#define ENCODER_OBJECT_ID_INTERNAL_DVO1           0x0B	/* This belongs to Radeon Class Display Hardware */
+#define ENCODER_OBJECT_ID_INTERNAL_DVO1           0x0B    /* This belongs to Radeon Class Display Hardware */
 #define ENCODER_OBJECT_ID_EXTERNAL_SDVOA          0x0C
 #define ENCODER_OBJECT_ID_EXTERNAL_SDVOB          0x0D
 #define ENCODER_OBJECT_ID_TITFP513                0x0E
-#define ENCODER_OBJECT_ID_INTERNAL_LVTM1          0x0F	/* not used for Radeon */
+#define ENCODER_OBJECT_ID_INTERNAL_LVTM1          0x0F    /* not used for Radeon */
 #define ENCODER_OBJECT_ID_VT1623                  0x10
 #define ENCODER_OBJECT_ID_HDMI_SI1930             0x11
 #define ENCODER_OBJECT_ID_HDMI_INTERNAL           0x12
@@ -68,9 +68,9 @@
 #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1   0x13
 #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1    0x14
 #define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1    0x15
-#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2    0x16	/* Shared with CV/TV and CRT */
-#define ENCODER_OBJECT_ID_SI178                   0X17	/* External TMDS (dual link, no HDCP.) */
-#define ENCODER_OBJECT_ID_MVPU_FPGA               0x18	/* MVPU FPGA chip */
+#define ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2    0x16  /* Shared with CV/TV and CRT */
+#define ENCODER_OBJECT_ID_SI178                   0X17  /* External TMDS (dual link, no HDCP.) */
+#define ENCODER_OBJECT_ID_MVPU_FPGA               0x18  /* MVPU FPGA chip */
 #define ENCODER_OBJECT_ID_INTERNAL_DDI            0x19
 #define ENCODER_OBJECT_ID_VT1625                  0x1A
 #define ENCODER_OBJECT_ID_HDMI_SI1932             0x1B
@@ -86,7 +86,7 @@
 /****************************************************/
 /* Connector Object ID Definition                   */
 /****************************************************/
-#define CONNECTOR_OBJECT_ID_NONE                  0x00
+#define CONNECTOR_OBJECT_ID_NONE                  0x00 
 #define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I     0x01
 #define CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I       0x02
 #define CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D     0x03
@@ -96,7 +96,7 @@
 #define CONNECTOR_OBJECT_ID_SVIDEO                0x07
 #define CONNECTOR_OBJECT_ID_YPbPr                 0x08
 #define CONNECTOR_OBJECT_ID_D_CONNECTOR           0x09
-#define CONNECTOR_OBJECT_ID_9PIN_DIN              0x0A	/* Supports both CV & TV */
+#define CONNECTOR_OBJECT_ID_9PIN_DIN              0x0A  /* Supports both CV & TV */
 #define CONNECTOR_OBJECT_ID_SCART                 0x0B
 #define CONNECTOR_OBJECT_ID_HDMI_TYPE_A           0x0C
 #define CONNECTOR_OBJECT_ID_HDMI_TYPE_B           0x0D
@@ -106,6 +106,8 @@
 #define CONNECTOR_OBJECT_ID_CROSSFIRE             0x11
 #define CONNECTOR_OBJECT_ID_HARDCODE_DVI          0x12
 #define CONNECTOR_OBJECT_ID_DISPLAYPORT           0x13
+#define CONNECTOR_OBJECT_ID_eDP                   0x14
+#define CONNECTOR_OBJECT_ID_MXM                   0x15
 
 /* deleted */
 
@@ -115,6 +117,14 @@
 #define ROUTER_OBJECT_ID_NONE											0x00
 #define ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL				0x01
 
+/****************************************************/
+/* Generic Object ID Definition                     */
+/****************************************************/
+#define GENERIC_OBJECT_ID_NONE                    0x00
+#define GENERIC_OBJECT_ID_GLSYNC                  0x01
+#define GENERIC_OBJECT_ID_PX2_NON_DRIVABLE        0x02
+#define GENERIC_OBJECT_ID_MXM_OPM                 0x03
+
 /****************************************************/
 /* Graphics Object ENUM ID Definition               */
 /****************************************************/
@@ -124,6 +134,7 @@
 #define GRAPH_OBJECT_ENUM_ID4                     0x04
 #define GRAPH_OBJECT_ENUM_ID5                     0x05
 #define GRAPH_OBJECT_ENUM_ID6                     0x06
+#define GRAPH_OBJECT_ENUM_ID7                     0x07
 
 /****************************************************/
 /* Graphics Object ID Bit definition                */
@@ -133,35 +144,35 @@
 #define RESERVED1_ID_MASK                         0x0800
 #define OBJECT_TYPE_MASK                          0x7000
 #define RESERVED2_ID_MASK                         0x8000
-
+                                                  
 #define OBJECT_ID_SHIFT                           0x00
 #define ENUM_ID_SHIFT                             0x08
 #define OBJECT_TYPE_SHIFT                         0x0C
 
+
 /****************************************************/
 /* Graphics Object family definition                */
 /****************************************************/
-#define CONSTRUCTOBJECTFAMILYID(GRAPHICS_OBJECT_TYPE, GRAPHICS_OBJECT_ID) \
-	(GRAPHICS_OBJECT_TYPE << OBJECT_TYPE_SHIFT | \
-	 GRAPHICS_OBJECT_ID   << OBJECT_ID_SHIFT)
+#define CONSTRUCTOBJECTFAMILYID(GRAPHICS_OBJECT_TYPE, GRAPHICS_OBJECT_ID) (GRAPHICS_OBJECT_TYPE << OBJECT_TYPE_SHIFT | \
+                                                                           GRAPHICS_OBJECT_ID   << OBJECT_ID_SHIFT)
 /****************************************************/
 /* GPU Object ID definition - Shared with BIOS      */
 /****************************************************/
-#define GPU_ENUM_ID1	(GRAPH_OBJECT_TYPE_GPU << OBJECT_TYPE_SHIFT |\
-			 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT)
+#define GPU_ENUM_ID1                            ( GRAPH_OBJECT_TYPE_GPU << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT)
 
 /****************************************************/
 /* Encoder Object ID definition - Shared with BIOS  */
 /****************************************************/
 /*
-#define ENCODER_INTERNAL_LVDS_ENUM_ID1        0x2101
+#define ENCODER_INTERNAL_LVDS_ENUM_ID1        0x2101      
 #define ENCODER_INTERNAL_TMDS1_ENUM_ID1       0x2102
 #define ENCODER_INTERNAL_TMDS2_ENUM_ID1       0x2103
 #define ENCODER_INTERNAL_DAC1_ENUM_ID1        0x2104
 #define ENCODER_INTERNAL_DAC2_ENUM_ID1        0x2105
 #define ENCODER_INTERNAL_SDVOA_ENUM_ID1       0x2106
 #define ENCODER_INTERNAL_SDVOB_ENUM_ID1       0x2107
-#define ENCODER_SIL170B_ENUM_ID1              0x2108
+#define ENCODER_SIL170B_ENUM_ID1              0x2108  
 #define ENCODER_CH7303_ENUM_ID1               0x2109
 #define ENCODER_CH7301_ENUM_ID1               0x210A
 #define ENCODER_INTERNAL_DVO1_ENUM_ID1        0x210B
@@ -175,8 +186,8 @@
 #define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1   0x2113
 #define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1    0x2114
 #define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1    0x2115
-#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1    0x2116
-#define ENCODER_SI178_ENUM_ID1                   0x2117
+#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1    0x2116  
+#define ENCODER_SI178_ENUM_ID1                   0x2117 
 #define ENCODER_MVPU_FPGA_ENUM_ID1               0x2118
 #define ENCODER_INTERNAL_DDI_ENUM_ID1            0x2119
 #define ENCODER_VT1625_ENUM_ID1                  0x211A
@@ -185,205 +196,169 @@
 #define ENCODER_DP_DP501_ENUM_ID1                0x211D
 #define ENCODER_INTERNAL_UNIPHY_ENUM_ID1         0x211E
 */
-#define ENCODER_INTERNAL_LVDS_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_TMDS1_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_INTERNAL_TMDS1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_TMDS2_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_INTERNAL_TMDS2 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_DAC1_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_INTERNAL_DAC1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_DAC2_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_INTERNAL_DAC2 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_SDVOA_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_SDVOA_ENUM_ID2 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_SDVOB_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_INTERNAL_SDVOB << OBJECT_ID_SHIFT)
-
-#define ENCODER_SIL170B_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_SI170B << OBJECT_ID_SHIFT)
-
-#define ENCODER_CH7303_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_CH7303 << OBJECT_ID_SHIFT)
-
-#define ENCODER_CH7301_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_CH7301 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_DVO1_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_INTERNAL_DVO1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
-
-#define ENCODER_EXTERNAL_SDVOA_ENUM_ID2 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
-
-#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_EXTERNAL_SDVOB << OBJECT_ID_SHIFT)
-
-#define ENCODER_TITFP513_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_TITFP513 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_LVTM1_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_INTERNAL_LVTM1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_VT1623_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_VT1623 << OBJECT_ID_SHIFT)
-
-#define ENCODER_HDMI_SI1930_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_HDMI_SI1930 << OBJECT_ID_SHIFT)
-
-#define ENCODER_HDMI_INTERNAL_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_HDMI_INTERNAL << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID2 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 << OBJECT_ID_SHIFT) /* Shared with CV/TV and CRT */
-
-#define ENCODER_SI178_ENUM_ID1  \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_SI178 << OBJECT_ID_SHIFT)
-
-#define ENCODER_MVPU_FPGA_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_MVPU_FPGA << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_DDI_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_INTERNAL_DDI << OBJECT_ID_SHIFT)
-
-#define ENCODER_VT1625_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_VT1625 << OBJECT_ID_SHIFT)
-
-#define ENCODER_HDMI_SI1932_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_HDMI_SI1932 << OBJECT_ID_SHIFT)
-
-#define ENCODER_DP_DP501_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_DP_DP501 << OBJECT_ID_SHIFT)
-
-#define ENCODER_DP_AN9801_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_DP_AN9801 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_UNIPHY_ENUM_ID2 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_KLDSCP_LVTMA_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID2 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
-
-#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID2 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
-
-#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
+#define ENCODER_INTERNAL_LVDS_ENUM_ID1     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_LVDS << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_TMDS1_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_TMDS1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_TMDS2_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_TMDS2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DAC1_ENUM_ID1     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_DAC1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DAC2_ENUM_ID1     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_DAC2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_SDVOA_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_SDVOA_ENUM_ID2    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_SDVOB_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_SDVOB << OBJECT_ID_SHIFT)
+
+#define ENCODER_SIL170B_ENUM_ID1           ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_SI170B << OBJECT_ID_SHIFT)
+
+#define ENCODER_CH7303_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_CH7303 << OBJECT_ID_SHIFT)
+
+#define ENCODER_CH7301_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_CH7301 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DVO1_ENUM_ID1     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_DVO1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_EXTERNAL_SDVOA_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+#define ENCODER_EXTERNAL_SDVOA_ENUM_ID2    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_EXTERNAL_SDVOA << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_EXTERNAL_SDVOB_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_EXTERNAL_SDVOB << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_TITFP513_ENUM_ID1          ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_TITFP513 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_LVTM1_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_LVTM1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_VT1623_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_VT1623 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_SI1930_ENUM_ID1       ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_HDMI_SI1930 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_INTERNAL_ENUM_ID1     ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_HDMI_INTERNAL << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID1   ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_INTERNAL_KLDSCP_TMDS1_ENUM_ID2   ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1 << OBJECT_ID_SHIFT)
+
+
+#define ENCODER_INTERNAL_KLDSCP_DVO1_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_DAC1_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_DAC2_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DAC2 << OBJECT_ID_SHIFT)  // Shared with CV/TV and CRT
+
+#define ENCODER_SI178_ENUM_ID1                    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_SI178 << OBJECT_ID_SHIFT)  
+
+#define ENCODER_MVPU_FPGA_ENUM_ID1                ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                   GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                   ENCODER_OBJECT_ID_MVPU_FPGA << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_DDI_ENUM_ID1     (  GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_INTERNAL_DDI << OBJECT_ID_SHIFT) 
+
+#define ENCODER_VT1625_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_VT1625 << OBJECT_ID_SHIFT)
+
+#define ENCODER_HDMI_SI1932_ENUM_ID1       ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_HDMI_SI1932 << OBJECT_ID_SHIFT)
+
+#define ENCODER_DP_DP501_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_DP_DP501 << OBJECT_ID_SHIFT)
+
+#define ENCODER_DP_AN9801_ENUM_ID1            ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                             GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                             ENCODER_OBJECT_ID_DP_AN9801 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY_ENUM_ID1         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY_ENUM_ID2         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_KLDSCP_LVTMA_ENUM_ID1   ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA << OBJECT_ID_SHIFT)  
+
+#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID1         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY1_ENUM_ID2         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY1 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID1         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_INTERNAL_UNIPHY2_ENUM_ID2         ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 ENCODER_OBJECT_ID_INTERNAL_UNIPHY2 << OBJECT_ID_SHIFT)
+
+#define ENCODER_GENERAL_EXTERNAL_DVO_ENUM_ID1    ( GRAPH_OBJECT_TYPE_ENCODER << OBJECT_TYPE_SHIFT |\
+                                                  GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                  ENCODER_OBJECT_ID_GENERAL_EXTERNAL_DVO << OBJECT_ID_SHIFT)
 
 /****************************************************/
 /* Connector Object ID definition - Shared with BIOS */
@@ -406,167 +381,253 @@
 #define CONNECTOR_7PIN_DIN_ENUM_ID1                 0x310F
 #define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1           0x3110
 */
-#define CONNECTOR_LVDS_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID2 \
-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
-	 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID2 \
-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
-	 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID2 \
-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
-	 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_VGA_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_VGA_ENUM_ID2 \
-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
-	 CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_COMPOSITE_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_SVIDEO_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_YPbPr_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_D_CONNECTOR_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_9PIN_DIN_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_SCART_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_7PIN_DIN_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID2 \
-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
-	 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_CROSSFIRE_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_CROSSFIRE_ENUM_ID2 \
-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
-	 CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_HARDCODE_DVI_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_HARDCODE_DVI_ENUM_ID2 \
-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
-	 CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_DISPLAYPORT_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_DISPLAYPORT_ENUM_ID2 \
-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
-	 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_DISPLAYPORT_ENUM_ID3 \
-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
-	 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
-
-#define CONNECTOR_DISPLAYPORT_ENUM_ID4 \
-	(GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
-	 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+#define CONNECTOR_LVDS_ENUM_ID1                ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_LVDS_ENUM_ID2                ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_LVDS << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_eDP_ENUM_ID1                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_eDP << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_eDP_ENUM_ID2                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_eDP << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID1   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_I_ENUM_ID2   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID1     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_I_ENUM_ID2     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID1   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SINGLE_LINK_DVI_D_ENUM_ID2   ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID1     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID2     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DUAL_LINK_DVI_D_ENUM_ID3     ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_VGA_ENUM_ID1                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_VGA_ENUM_ID2                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_VGA << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_COMPOSITE_ENUM_ID1           ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_COMPOSITE_ENUM_ID2           ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_COMPOSITE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SVIDEO_ENUM_ID1              ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SVIDEO_ENUM_ID2              ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SVIDEO << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_YPbPr_ENUM_ID1               ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_YPbPr_ENUM_ID2               ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_YPbPr << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_D_CONNECTOR_ENUM_ID1         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_D_CONNECTOR_ENUM_ID2         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_D_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_9PIN_DIN_ENUM_ID1            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_9PIN_DIN_ENUM_ID2            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_9PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SCART_ENUM_ID1               ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_SCART_ENUM_ID2               ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_SCART << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID1         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID2         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_A_ENUM_ID3         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_A << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_B_ENUM_ID1         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HDMI_TYPE_B_ENUM_ID2         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HDMI_TYPE_B << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_7PIN_DIN_ENUM_ID1            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
+#define CONNECTOR_7PIN_DIN_ENUM_ID2            ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_7PIN_DIN << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID1      ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_PCIE_CONNECTOR_ENUM_ID2      ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_PCIE_CONNECTOR << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_CROSSFIRE_ENUM_ID1           ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_CROSSFIRE_ENUM_ID2           ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_CROSSFIRE << OBJECT_ID_SHIFT)
+
+
+#define CONNECTOR_HARDCODE_DVI_ENUM_ID1        ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_HARDCODE_DVI_ENUM_ID2        ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_HARDCODE_DVI << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID1         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID2         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID3         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID4         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID5         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_DISPLAYPORT_ENUM_ID6         ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_DISPLAYPORT << OBJECT_ID_SHIFT)
+
+#define CONNECTOR_MXM_ENUM_ID1                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_DP_A
+
+#define CONNECTOR_MXM_ENUM_ID2                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_DP_B
+
+#define CONNECTOR_MXM_ENUM_ID3                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID3 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_DP_C
+
+#define CONNECTOR_MXM_ENUM_ID4                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID4 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_DP_D
+
+#define CONNECTOR_MXM_ENUM_ID5                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID5 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_LVDS_TXxx
+
+#define CONNECTOR_MXM_ENUM_ID6                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID6 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_LVDS_UXxx
+
+#define CONNECTOR_MXM_ENUM_ID7                 ( GRAPH_OBJECT_TYPE_CONNECTOR << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID7 << ENUM_ID_SHIFT |\
+                                                 CONNECTOR_OBJECT_ID_MXM << OBJECT_ID_SHIFT)          //Mapping to MXM_DAC
 
 
 /****************************************************/
 /* Router Object ID definition - Shared with BIOS   */
 /****************************************************/
-#define ROUTER_I2C_EXTENDER_CNTL_ENUM_ID1 \
-	(GRAPH_OBJECT_TYPE_ROUTER << OBJECT_TYPE_SHIFT |\
-	 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
-	 ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL << OBJECT_ID_SHIFT)
+#define ROUTER_I2C_EXTENDER_CNTL_ENUM_ID1      ( GRAPH_OBJECT_TYPE_ROUTER << OBJECT_TYPE_SHIFT |\
+                                                GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                ROUTER_OBJECT_ID_I2C_EXTENDER_CNTL << OBJECT_ID_SHIFT)
 
 /* deleted */
 
+/****************************************************/
+/* Generic Object ID definition - Shared with BIOS  */
+/****************************************************/
+#define GENERICOBJECT_GLSYNC_ENUM_ID1           (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 GENERIC_OBJECT_ID_GLSYNC << OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_PX2_NON_DRIVABLE_ID1       (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 GENERIC_OBJECT_ID_PX2_NON_DRIVABLE<< OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_PX2_NON_DRIVABLE_ID2       (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID2 << ENUM_ID_SHIFT |\
+                                                 GENERIC_OBJECT_ID_PX2_NON_DRIVABLE<< OBJECT_ID_SHIFT)
+
+#define GENERICOBJECT_MXM_OPM_ENUM_ID1           (GRAPH_OBJECT_TYPE_GENERIC << OBJECT_TYPE_SHIFT |\
+                                                 GRAPH_OBJECT_ENUM_ID1 << ENUM_ID_SHIFT |\
+                                                 GENERIC_OBJECT_ID_MXM_OPM << OBJECT_ID_SHIFT)
+
 /****************************************************/
 /* Object Cap definition - Shared with BIOS         */
 /****************************************************/
 #define GRAPHICS_OBJECT_CAP_I2C                 0x00000001L
 #define GRAPHICS_OBJECT_CAP_TABLE_ID            0x00000002L
 
+
 #define GRAPHICS_OBJECT_I2CCOMMAND_TABLE_ID                   0x01
 #define GRAPHICS_OBJECT_HOTPLUGDETECTIONINTERUPT_TABLE_ID     0x02
 #define GRAPHICS_OBJECT_ENCODER_OUTPUT_PROTECTION_TABLE_ID    0x03
@@ -575,4 +636,8 @@
 #pragma pack()
 #endif
 
-#endif /*GRAPHICTYPE */
+#endif  /*GRAPHICTYPE */
+
+
+
+

+ 4 - 2
drivers/gpu/drm/radeon/atombios_dp.c

@@ -468,7 +468,8 @@ void radeon_dp_set_link_config(struct drm_connector *connector,
 	struct radeon_connector *radeon_connector;
 	struct radeon_connector_atom_dig *dig_connector;
 
-	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+	if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) ||
+	    (connector->connector_type != DRM_MODE_CONNECTOR_eDP))
 		return;
 
 	radeon_connector = to_radeon_connector(connector);
@@ -582,7 +583,8 @@ void dp_link_train(struct drm_encoder *encoder,
 	u8 train_set[4];
 	int i;
 
-	if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+	if ((connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort) ||
+	    (connector->connector_type != DRM_MODE_CONNECTOR_eDP))
 		return;
 
 	if (!radeon_encoder->enc_priv)

+ 3 - 1
drivers/gpu/drm/radeon/mkregtable.c

@@ -661,8 +661,10 @@ static int parser_auth(struct table *t, const char *filename)
 	fseek(file, 0, SEEK_SET);
 
 	/* get header */
-	if (fgets(buf, 1024, file) == NULL)
+	if (fgets(buf, 1024, file) == NULL) {
+		fclose(file);
 		return -1;
+	}
 
 	/* first line will contain the last register
 	 * and gpu name */

+ 14 - 9
drivers/gpu/drm/radeon/r100.c

@@ -131,7 +131,8 @@ void r100_hpd_init(struct radeon_device *rdev)
 			break;
 		}
 	}
-	r100_irq_set(rdev);
+	if (rdev->irq.installed)
+		r100_irq_set(rdev);
 }
 
 void r100_hpd_fini(struct radeon_device *rdev)
@@ -243,6 +244,11 @@ int r100_irq_set(struct radeon_device *rdev)
 {
 	uint32_t tmp = 0;
 
+	if (!rdev->irq.installed) {
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+		WREG32(R_000040_GEN_INT_CNTL, 0);
+		return -EINVAL;
+	}
 	if (rdev->irq.sw_int) {
 		tmp |= RADEON_SW_INT_ENABLE;
 	}
@@ -356,6 +362,11 @@ void r100_fence_ring_emit(struct radeon_device *rdev,
 	/* Wait until IDLE & CLEAN */
 	radeon_ring_write(rdev, PACKET0(0x1720, 0));
 	radeon_ring_write(rdev, (1 << 16) | (1 << 17));
+	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(rdev, rdev->config.r100.hdp_cntl |
+				RADEON_HDP_READ_BUFFER_INVALIDATE);
+	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(rdev, rdev->config.r100.hdp_cntl);
 	/* Emit fence sequence & fire IRQ */
 	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
 	radeon_ring_write(rdev, fence->seq);
@@ -1713,14 +1724,6 @@ void r100_gpu_init(struct radeon_device *rdev)
 	r100_hdp_reset(rdev);
 }
 
-void r100_hdp_flush(struct radeon_device *rdev)
-{
-	u32 tmp;
-	tmp = RREG32(RADEON_HOST_PATH_CNTL);
-	tmp |= RADEON_HDP_READ_BUFFER_INVALIDATE;
-	WREG32(RADEON_HOST_PATH_CNTL, tmp);
-}
-
 void r100_hdp_reset(struct radeon_device *rdev)
 {
 	uint32_t tmp;
@@ -3313,6 +3316,7 @@ static int r100_startup(struct radeon_device *rdev)
 	}
 	/* Enable IRQ */
 	r100_irq_set(rdev);
+	rdev->config.r100.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
@@ -3371,6 +3375,7 @@ void r100_fini(struct radeon_device *rdev)
 	radeon_gem_fini(rdev);
 	if (rdev->flags & RADEON_IS_PCI)
 		r100_pci_gart_fini(rdev);
+	radeon_agp_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_bo_fini(rdev);

+ 16 - 1
drivers/gpu/drm/radeon/r300.c

@@ -36,7 +36,15 @@
 #include "rv350d.h"
 #include "r300_reg_safe.h"
 
-/* This files gather functions specifics to: r300,r350,rv350,rv370,rv380 */
+/* This files gather functions specifics to: r300,r350,rv350,rv370,rv380
+ *
+ * GPU Errata:
+ * - HOST_PATH_CNTL: r300 family seems to dislike write to HOST_PATH_CNTL
+ *   using MMIO to flush host path read cache, this lead to HARDLOCKUP.
+ *   However, scheduling such write to the ring seems harmless, i suspect
+ *   the CP read collide with the flush somehow, or maybe the MC, hard to
+ *   tell. (Jerome Glisse)
+ */
 
 /*
  * rv370,rv380 PCIE GART
@@ -178,6 +186,11 @@ void r300_fence_ring_emit(struct radeon_device *rdev,
 	/* Wait until IDLE & CLEAN */
 	radeon_ring_write(rdev, PACKET0(0x1720, 0));
 	radeon_ring_write(rdev, (1 << 17) | (1 << 16)  | (1 << 9));
+	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(rdev, rdev->config.r300.hdp_cntl |
+				RADEON_HDP_READ_BUFFER_INVALIDATE);
+	radeon_ring_write(rdev, PACKET0(RADEON_HOST_PATH_CNTL, 0));
+	radeon_ring_write(rdev, rdev->config.r300.hdp_cntl);
 	/* Emit fence sequence & fire IRQ */
 	radeon_ring_write(rdev, PACKET0(rdev->fence_drv.scratch_reg, 0));
 	radeon_ring_write(rdev, fence->seq);
@@ -1258,6 +1271,7 @@ static int r300_startup(struct radeon_device *rdev)
 	}
 	/* Enable IRQ */
 	r100_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
@@ -1322,6 +1336,7 @@ void r300_fini(struct radeon_device *rdev)
 		rv370_pcie_gart_fini(rdev);
 	if (rdev->flags & RADEON_IS_PCI)
 		r100_pci_gart_fini(rdev);
+	radeon_agp_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_bo_fini(rdev);
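
The GPU errata note added at the top of r300.c above explains why the HDP read-cache flush is now emitted through the CP ring (as in the r300_fence_ring_emit hunk) instead of being written via MMIO. As a rough illustration of that pattern, here is a small standalone C sketch; the packet encoding, register offset and bit value are simplified placeholders, not the driver's real definitions.

/*
 * Illustrative userspace model of the errata note above: instead of poking
 * HOST_PATH_CNTL via MMIO, the flush is scheduled on the CP ring as two
 * register-write packets. All names, offsets and the packet encoding here
 * are placeholders for the sketch, not the actual hardware interface.
 */
#include <stdint.h>
#include <stdio.h>

#define HOST_PATH_CNTL              0x0130       /* placeholder offset */
#define HDP_READ_BUFFER_INVALIDATE  (1u << 27)   /* placeholder bit */

/* simplified "type-0" packet header: write the next word to <reg> (not the real encoding) */
#define PACKET0(reg)                (0x00000000u | ((uint32_t)(reg) >> 2))

struct ring {
	uint32_t buf[64];
	unsigned int wptr;
};

static void ring_write(struct ring *r, uint32_t v)
{
	r->buf[r->wptr++ % 64] = v;   /* the CP drains these asynchronously */
}

/* schedule the HDP flush on the ring instead of writing the register directly */
static void emit_hdp_flush(struct ring *r, uint32_t saved_hdp_cntl)
{
	ring_write(r, PACKET0(HOST_PATH_CNTL));
	ring_write(r, saved_hdp_cntl | HDP_READ_BUFFER_INVALIDATE);
	ring_write(r, PACKET0(HOST_PATH_CNTL));
	ring_write(r, saved_hdp_cntl);            /* restore the value sampled at startup */
}

int main(void)
{
	struct ring r = { { 0 }, 0 };
	uint32_t hdp_cntl = 0x00008000;           /* stand-in for the value read at startup */

	emit_hdp_flush(&r, hdp_cntl);
	for (unsigned int i = 0; i < r.wptr; i++)
		printf("ring[%u] = 0x%08x\n", i, (unsigned int)r.buf[i]);
	return 0;
}

The point of the model is only the ordering: the invalidate and the restore travel down the same command stream the CP is already reading, so the flush cannot race an MMIO register write the way the errata note describes.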

+ 40 - 1
drivers/gpu/drm/radeon/r420.c

@@ -30,7 +30,15 @@
 #include "radeon_reg.h"
 #include "radeon.h"
 #include "atom.h"
+#include "r100d.h"
 #include "r420d.h"
+#include "r420_reg_safe.h"
+
+static void r420_set_reg_safe(struct radeon_device *rdev)
+{
+	rdev->config.r300.reg_safe_bm = r420_reg_safe_bm;
+	rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r420_reg_safe_bm);
+}
 
 int r420_mc_init(struct radeon_device *rdev)
 {
@@ -165,6 +173,34 @@ static void r420_clock_resume(struct radeon_device *rdev)
 	WREG32_PLL(R_00000D_SCLK_CNTL, sclk_cntl);
 }
 
+static void r420_cp_errata_init(struct radeon_device *rdev)
+{
+	/* RV410 and R420 can lock up if CP DMA to host memory happens
+	 * while the 2D engine is busy.
+	 *
+	 * The proper workaround is to queue a RESYNC at the beginning
+	 * of the CP init, apparently.
+	 */
+	radeon_scratch_get(rdev, &rdev->config.r300.resync_scratch);
+	radeon_ring_lock(rdev, 8);
+	radeon_ring_write(rdev, PACKET0(R300_CP_RESYNC_ADDR, 1));
+	radeon_ring_write(rdev, rdev->config.r300.resync_scratch);
+	radeon_ring_write(rdev, 0xDEADBEEF);
+	radeon_ring_unlock_commit(rdev);
+}
+
+static void r420_cp_errata_fini(struct radeon_device *rdev)
+{
+	/* Catch the RESYNC we dispatched all the way back,
+	 * at the very beginning of the CP init.
+	 */
+	radeon_ring_lock(rdev, 8);
+	radeon_ring_write(rdev, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0));
+	radeon_ring_write(rdev, R300_RB3D_DC_FINISH);
+	radeon_ring_unlock_commit(rdev);
+	radeon_scratch_free(rdev, rdev->config.r300.resync_scratch);
+}
+
 static int r420_startup(struct radeon_device *rdev)
 {
 	int r;
@@ -190,12 +226,14 @@ static int r420_startup(struct radeon_device *rdev)
 	r420_pipes_init(rdev);
 	/* Enable IRQ */
 	r100_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {
 		dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
 		return r;
 	}
+	r420_cp_errata_init(rdev);
 	r = r100_wb_init(rdev);
 	if (r) {
 		dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
@@ -238,6 +276,7 @@ int r420_resume(struct radeon_device *rdev)
 
 int r420_suspend(struct radeon_device *rdev)
 {
+	r420_cp_errata_fini(rdev);
 	r100_cp_disable(rdev);
 	r100_wb_disable(rdev);
 	r100_irq_disable(rdev);
@@ -346,7 +385,7 @@ int r420_init(struct radeon_device *rdev)
 		if (r)
 			return r;
 	}
-	r300_set_reg_safe(rdev);
+	r420_set_reg_safe(rdev);
 	rdev->accel_working = true;
 	r = r420_startup(rdev);
 	if (r) {
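
The comments inside r420_cp_errata_init()/r420_cp_errata_fini() above describe a bracket: a RESYNC marker is queued once when the CP comes up and is only caught again at suspend, just before the scratch slot is released. The standalone C sketch below models that pairing; the scratch-register interface and the token handling are simplified stand-ins, not the actual hardware programming.

/*
 * Illustrative model of the r420 CP errata bracket: the init half parks a
 * RESYNC token in a scratch slot, the fini half checks it and frees the slot.
 * Everything here is a placeholder for the sketch, not real driver code.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define NUM_SCRATCH   8
#define RESYNC_TOKEN  0xDEADBEEFu

struct gpu_model {
	uint32_t scratch[NUM_SCRATCH];    /* stand-in for the CP scratch registers */
	int scratch_used[NUM_SCRATCH];
	int resync_slot;
};

static int scratch_get(struct gpu_model *g)
{
	for (int i = 0; i < NUM_SCRATCH; i++) {
		if (!g->scratch_used[i]) {
			g->scratch_used[i] = 1;
			return i;
		}
	}
	return -1;
}

static void scratch_free(struct gpu_model *g, int slot)
{
	g->scratch_used[slot] = 0;
}

/* init half: park the RESYNC token in a scratch slot when the CP comes up */
static void cp_errata_init(struct gpu_model *g)
{
	g->resync_slot = scratch_get(g);
	assert(g->resync_slot >= 0);
	g->scratch[g->resync_slot] = RESYNC_TOKEN;
	printf("init: RESYNC queued in scratch slot %d\n", g->resync_slot);
}

/* fini half: catch the marker dispatched at init, then release the slot */
static void cp_errata_fini(struct gpu_model *g)
{
	assert(g->scratch[g->resync_slot] == RESYNC_TOKEN);
	printf("fini: RESYNC caught, freeing scratch slot %d\n", g->resync_slot);
	scratch_free(g, g->resync_slot);
}

int main(void)
{
	struct gpu_model g = { { 0 }, { 0 }, -1 };

	cp_errata_init(&g);
	/* ... normal command submission would happen here ... */
	cp_errata_fini(&g);
	return 0;
}

In the diff above, r420_suspend() runs the fini half before the CP is disabled, mirroring the init half that runs right after r100_cp_init() in r420_startup().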

+ 1 - 0
drivers/gpu/drm/radeon/r520.c

@@ -186,6 +186,7 @@ static int r520_startup(struct radeon_device *rdev)
 	}
 	/* Enable IRQ */
 	rs600_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {

+ 13 - 8
drivers/gpu/drm/radeon/r600.c

@@ -285,7 +285,8 @@ void r600_hpd_init(struct radeon_device *rdev)
 			}
 		}
 	}
-	r600_irq_set(rdev);
+	if (rdev->irq.installed)
+		r600_irq_set(rdev);
 }
 
 void r600_hpd_fini(struct radeon_device *rdev)
@@ -726,6 +727,10 @@ int r600_mc_init(struct radeon_device *rdev)
 	a.full = rfixed_const(100);
 	rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
 	rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+
+	if (rdev->flags & RADEON_IS_IGP)
+		rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+
 	return 0;
 }
 
@@ -1384,11 +1389,6 @@ void r600_pciep_wreg(struct radeon_device *rdev, u32 reg, u32 v)
 	(void)RREG32(PCIE_PORT_DATA);
 }
 
-void r600_hdp_flush(struct radeon_device *rdev)
-{
-	WREG32(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0x1);
-}
-
 /*
  * CP & Ring
  */
@@ -1785,6 +1785,8 @@ void r600_fence_ring_emit(struct radeon_device *rdev,
 	radeon_ring_write(rdev, PACKET3(PACKET3_SET_CONFIG_REG, 1));
 	radeon_ring_write(rdev, ((rdev->fence_drv.scratch_reg - PACKET3_SET_CONFIG_REG_OFFSET) >> 2));
 	radeon_ring_write(rdev, fence->seq);
+	radeon_ring_write(rdev, PACKET0(R_005480_HDP_MEM_COHERENCY_FLUSH_CNTL, 0));
+	radeon_ring_write(rdev, 1);
 	/* CP_INTERRUPT packet 3 no longer exists, use packet 0 */
 	radeon_ring_write(rdev, PACKET0(CP_INT_STATUS, 0));
 	radeon_ring_write(rdev, RB_INT_STAT);
@@ -2089,8 +2091,7 @@ void r600_fini(struct radeon_device *rdev)
 	radeon_gem_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_clocks_fini(rdev);
-	if (rdev->flags & RADEON_IS_AGP)
-		radeon_agp_fini(rdev);
+	radeon_agp_fini(rdev);
 	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);
@@ -2461,6 +2462,10 @@ int r600_irq_set(struct radeon_device *rdev)
 	u32 mode_int = 0;
 	u32 hpd1, hpd2, hpd3, hpd4 = 0, hpd5 = 0, hpd6 = 0;
 
+	if (!rdev->irq.installed) {
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+		return -EINVAL;
+	}
 	/* don't enable anything if the ih is disabled */
 	if (!rdev->ih.enabled)
 		return 0;

+ 2 - 2
drivers/gpu/drm/radeon/r600_blit_kms.c

@@ -577,9 +577,9 @@ int r600_blit_prepare_copy(struct radeon_device *rdev, int size_bytes)
 	ring_size = num_loops * dwords_per_loop;
 	/* set default  + shaders */
 	ring_size += 40; /* shaders + def state */
-	ring_size += 5; /* fence emit for VB IB */
+	ring_size += 7; /* fence emit for VB IB */
 	ring_size += 5; /* done copy */
-	ring_size += 5; /* fence emit for done copy */
+	ring_size += 7; /* fence emit for done copy */
 	r = radeon_ring_lock(rdev, ring_size);
 	WARN_ON(r);
 

+ 6 - 3
drivers/gpu/drm/radeon/radeon.h

@@ -319,10 +319,12 @@ struct radeon_mc {
 	u64			real_vram_size;
 	int			vram_mtrr;
 	bool			vram_is_ddr;
+	bool                    igp_sideport_enabled;
 };
 
 int radeon_mc_setup(struct radeon_device *rdev);
-
+bool radeon_combios_sideport_present(struct radeon_device *rdev);
+bool radeon_atombios_sideport_present(struct radeon_device *rdev);
 
 /*
  * GPU scratch registers structures, functions & helpers
@@ -654,7 +656,6 @@ struct radeon_asic {
 			       uint32_t offset, uint32_t obj_size);
 	int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
 	void (*bandwidth_update)(struct radeon_device *rdev);
-	void (*hdp_flush)(struct radeon_device *rdev);
 	void (*hpd_init)(struct radeon_device *rdev);
 	void (*hpd_fini)(struct radeon_device *rdev);
 	bool (*hpd_sense)(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -667,11 +668,14 @@ struct radeon_asic {
 struct r100_asic {
 	const unsigned	*reg_safe_bm;
 	unsigned	reg_safe_bm_size;
+	u32		hdp_cntl;
 };
 
 struct r300_asic {
 	const unsigned	*reg_safe_bm;
 	unsigned	reg_safe_bm_size;
+	u32		resync_scratch;
+	u32		hdp_cntl;
 };
 
 struct r600_asic {
@@ -1007,7 +1011,6 @@ static inline void radeon_ring_write(struct radeon_device *rdev, uint32_t v)
 #define radeon_set_surface_reg(rdev, r, f, p, o, s) ((rdev)->asic->set_surface_reg((rdev), (r), (f), (p), (o), (s)))
 #define radeon_clear_surface_reg(rdev, r) ((rdev)->asic->clear_surface_reg((rdev), (r)))
 #define radeon_bandwidth_update(rdev) (rdev)->asic->bandwidth_update((rdev))
-#define radeon_hdp_flush(rdev) (rdev)->asic->hdp_flush((rdev))
 #define radeon_hpd_init(rdev) (rdev)->asic->hpd_init((rdev))
 #define radeon_hpd_fini(rdev) (rdev)->asic->hpd_fini((rdev))
 #define radeon_hpd_sense(rdev, hpd) (rdev)->asic->hpd_sense((rdev), (hpd))

+ 2 - 4
drivers/gpu/drm/radeon/radeon_agp.c

@@ -252,10 +252,8 @@ void radeon_agp_resume(struct radeon_device *rdev)
 void radeon_agp_fini(struct radeon_device *rdev)
 {
 #if __OS_HAS_AGP
-	if (rdev->flags & RADEON_IS_AGP) {
-		if (rdev->ddev->agp && rdev->ddev->agp->acquired) {
-			drm_agp_release(rdev->ddev);
-		}
+	if (rdev->ddev->agp && rdev->ddev->agp->acquired) {
+		drm_agp_release(rdev->ddev);
 	}
 #endif
 }

+ 0 - 12
drivers/gpu/drm/radeon/radeon_asic.h

@@ -77,7 +77,6 @@ int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
 void r100_bandwidth_update(struct radeon_device *rdev);
 void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int r100_ring_test(struct radeon_device *rdev);
-void r100_hdp_flush(struct radeon_device *rdev);
 void r100_hpd_init(struct radeon_device *rdev);
 void r100_hpd_fini(struct radeon_device *rdev);
 bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -114,7 +113,6 @@ static struct radeon_asic r100_asic = {
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &r100_bandwidth_update,
-	.hdp_flush = &r100_hdp_flush,
 	.hpd_init = &r100_hpd_init,
 	.hpd_fini = &r100_hpd_fini,
 	.hpd_sense = &r100_hpd_sense,
@@ -174,7 +172,6 @@ static struct radeon_asic r300_asic = {
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &r100_bandwidth_update,
-	.hdp_flush = &r100_hdp_flush,
 	.hpd_init = &r100_hpd_init,
 	.hpd_fini = &r100_hpd_fini,
 	.hpd_sense = &r100_hpd_sense,
@@ -218,7 +215,6 @@ static struct radeon_asic r420_asic = {
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &r100_bandwidth_update,
-	.hdp_flush = &r100_hdp_flush,
 	.hpd_init = &r100_hpd_init,
 	.hpd_fini = &r100_hpd_fini,
 	.hpd_sense = &r100_hpd_sense,
@@ -267,7 +263,6 @@ static struct radeon_asic rs400_asic = {
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &r100_bandwidth_update,
-	.hdp_flush = &r100_hdp_flush,
 	.hpd_init = &r100_hpd_init,
 	.hpd_fini = &r100_hpd_fini,
 	.hpd_sense = &r100_hpd_sense,
@@ -324,7 +319,6 @@ static struct radeon_asic rs600_asic = {
 	.set_pcie_lanes = NULL,
 	.set_clock_gating = &radeon_atom_set_clock_gating,
 	.bandwidth_update = &rs600_bandwidth_update,
-	.hdp_flush = &r100_hdp_flush,
 	.hpd_init = &rs600_hpd_init,
 	.hpd_fini = &rs600_hpd_fini,
 	.hpd_sense = &rs600_hpd_sense,
@@ -372,7 +366,6 @@ static struct radeon_asic rs690_asic = {
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &rs690_bandwidth_update,
-	.hdp_flush = &r100_hdp_flush,
 	.hpd_init = &rs600_hpd_init,
 	.hpd_fini = &rs600_hpd_fini,
 	.hpd_sense = &rs600_hpd_sense,
@@ -424,7 +417,6 @@ static struct radeon_asic rv515_asic = {
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &rv515_bandwidth_update,
-	.hdp_flush = &r100_hdp_flush,
 	.hpd_init = &rs600_hpd_init,
 	.hpd_fini = &rs600_hpd_fini,
 	.hpd_sense = &rs600_hpd_sense,
@@ -467,7 +459,6 @@ static struct radeon_asic r520_asic = {
 	.set_surface_reg = r100_set_surface_reg,
 	.clear_surface_reg = r100_clear_surface_reg,
 	.bandwidth_update = &rv515_bandwidth_update,
-	.hdp_flush = &r100_hdp_flush,
 	.hpd_init = &rs600_hpd_init,
 	.hpd_fini = &rs600_hpd_fini,
 	.hpd_sense = &rs600_hpd_sense,
@@ -508,7 +499,6 @@ int r600_ring_test(struct radeon_device *rdev);
 int r600_copy_blit(struct radeon_device *rdev,
 		   uint64_t src_offset, uint64_t dst_offset,
 		   unsigned num_pages, struct radeon_fence *fence);
-void r600_hdp_flush(struct radeon_device *rdev);
 void r600_hpd_init(struct radeon_device *rdev);
 void r600_hpd_fini(struct radeon_device *rdev);
 bool r600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
@@ -544,7 +534,6 @@ static struct radeon_asic r600_asic = {
 	.set_surface_reg = r600_set_surface_reg,
 	.clear_surface_reg = r600_clear_surface_reg,
 	.bandwidth_update = &rv515_bandwidth_update,
-	.hdp_flush = &r600_hdp_flush,
 	.hpd_init = &r600_hpd_init,
 	.hpd_fini = &r600_hpd_fini,
 	.hpd_sense = &r600_hpd_sense,
@@ -589,7 +578,6 @@ static struct radeon_asic rv770_asic = {
 	.set_surface_reg = r600_set_surface_reg,
 	.clear_surface_reg = r600_clear_surface_reg,
 	.bandwidth_update = &rv515_bandwidth_update,
-	.hdp_flush = &r600_hdp_flush,
 	.hpd_init = &r600_hpd_init,
 	.hpd_fini = &r600_hpd_fini,
 	.hpd_sense = &r600_hpd_sense,

+ 40 - 1
drivers/gpu/drm/radeon/radeon_atombios.c

@@ -346,7 +346,9 @@ const int object_connector_convert[] = {
 	DRM_MODE_CONNECTOR_Unknown,
 	DRM_MODE_CONNECTOR_Unknown,
 	DRM_MODE_CONNECTOR_Unknown,
-	DRM_MODE_CONNECTOR_DisplayPort
+	DRM_MODE_CONNECTOR_DisplayPort,
+	DRM_MODE_CONNECTOR_eDP,
+	DRM_MODE_CONNECTOR_Unknown
 };
 
 bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
@@ -936,6 +938,43 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
 	return false;
 }
 
+union igp_info {
+	struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+	struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2;
+};
+
+bool radeon_atombios_sideport_present(struct radeon_device *rdev)
+{
+	struct radeon_mode_info *mode_info = &rdev->mode_info;
+	int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
+	union igp_info *igp_info;
+	u8 frev, crev;
+	u16 data_offset;
+
+	atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
+			       &crev, &data_offset);
+
+	igp_info = (union igp_info *)(mode_info->atom_context->bios +
+				      data_offset);
+
+	if (igp_info) {
+		switch (crev) {
+		case 1:
+			if (igp_info->info.ucMemoryType & 0xf0)
+				return true;
+			break;
+		case 2:
+			if (igp_info->info_2.ucMemoryType & 0x0f)
+				return true;
+			break;
+		default:
+			DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev);
+			break;
+		}
+	}
+	return false;
+}
+
 bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
 				   struct radeon_encoder_int_tmds *tmds)
 {

+ 14 - 0
drivers/gpu/drm/radeon/radeon_combios.c

@@ -595,6 +595,20 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
 	return false;
 }
 
+bool radeon_combios_sideport_present(struct radeon_device *rdev)
+{
+	struct drm_device *dev = rdev->ddev;
+	u16 igp_info;
+
+	igp_info = combios_get_table_offset(dev, COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE);
+
+	if (igp_info) {
+		if (RBIOS16(igp_info + 0x4))
+			return true;
+	}
+	return false;
+}
+
 static const uint32_t default_primarydac_adj[CHIP_LAST] = {
 	0x00000808,		/* r100  */
 	0x00000808,		/* rv100 */

+ 17 - 6
drivers/gpu/drm/radeon/radeon_connectors.c

@@ -49,8 +49,10 @@ void radeon_connector_hotplug(struct drm_connector *connector)
 	if (radeon_connector->hpd.hpd != RADEON_HPD_NONE)
 		radeon_hpd_set_polarity(rdev, radeon_connector->hpd.hpd);
 
-	if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
-		if (radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+	if ((connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+	    (connector->connector_type == DRM_MODE_CONNECTOR_eDP)) {
+		if ((radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+		    (radeon_dp_getsinktype(radeon_connector) == CONNECTOR_OBJECT_ID_eDP)) {
 			if (radeon_dp_needs_link_train(radeon_connector)) {
 				if (connector->encoder)
 					dp_link_train(connector->encoder, connector);
@@ -967,7 +969,8 @@ static enum drm_connector_status radeon_dp_detect(struct drm_connector *connecto
 	}
 
 	sink_type = radeon_dp_getsinktype(radeon_connector);
-	if (sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+	if ((sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+	    (sink_type == CONNECTOR_OBJECT_ID_eDP)) {
 		if (radeon_dp_getdpcd(radeon_connector)) {
 			radeon_dig_connector->dp_sink_type = sink_type;
 			ret = connector_status_connected;
@@ -992,7 +995,8 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
 
 	/* XXX check mode bandwidth */
 
-	if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
+	if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+	    (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
 		return radeon_dp_mode_valid_helper(radeon_connector, mode);
 	else
 		return MODE_OK;
@@ -1145,6 +1149,7 @@ radeon_add_atom_connector(struct drm_device *dev,
 		subpixel_order = SubPixelHorizontalRGB;
 		break;
 	case DRM_MODE_CONNECTOR_DisplayPort:
+	case DRM_MODE_CONNECTOR_eDP:
 		radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
 		if (!radeon_dig_connector)
 			goto failed;
@@ -1157,10 +1162,16 @@ radeon_add_atom_connector(struct drm_device *dev,
 			goto failed;
 		if (i2c_bus->valid) {
 			/* add DP i2c bus */
-			radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
+			if (connector_type == DRM_MODE_CONNECTOR_eDP)
+				radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "eDP-auxch");
+			else
+				radeon_dig_connector->dp_i2c_bus = radeon_i2c_create_dp(dev, i2c_bus, "DP-auxch");
 			if (!radeon_dig_connector->dp_i2c_bus)
 				goto failed;
-			radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
+			if (connector_type == DRM_MODE_CONNECTOR_eDP)
+				radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "eDP");
+			else
+				radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
 			if (!radeon_connector->ddc_bus)
 				goto failed;
 		}

+ 5 - 2
drivers/gpu/drm/radeon/radeon_display.c

@@ -234,7 +234,7 @@ static const char *encoder_names[34] = {
 	"INTERNAL_UNIPHY2",
 };
 
-static const char *connector_names[13] = {
+static const char *connector_names[15] = {
 	"Unknown",
 	"VGA",
 	"DVI-I",
@@ -248,6 +248,8 @@ static const char *connector_names[13] = {
 	"DisplayPort",
 	"HDMI-A",
 	"HDMI-B",
+	"TV",
+	"eDP",
 };
 
 static const char *hpd_names[7] = {
@@ -352,7 +354,8 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
 {
 	int ret = 0;
 
-	if (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+	if ((radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_DisplayPort) ||
+	    (radeon_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)) {
 		struct radeon_connector_atom_dig *dig = radeon_connector->con_priv;
 		if (dig->dp_i2c_bus)
 			radeon_connector->edid = drm_get_edid(&radeon_connector->base, &dig->dp_i2c_bus->adapter);

+ 8 - 6
drivers/gpu/drm/radeon/radeon_encoders.c

@@ -596,21 +596,23 @@ atombios_get_encoder_mode(struct drm_encoder *encoder)
 		return ATOM_ENCODER_MODE_LVDS;
 		break;
 	case DRM_MODE_CONNECTOR_DisplayPort:
+	case DRM_MODE_CONNECTOR_eDP:
 		radeon_dig_connector = radeon_connector->con_priv;
-		if (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
+		if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
+		    (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
 			return ATOM_ENCODER_MODE_DP;
 		else if (drm_detect_hdmi_monitor(radeon_connector->edid))
 			return ATOM_ENCODER_MODE_HDMI;
 		else
 			return ATOM_ENCODER_MODE_DVI;
 		break;
-	case CONNECTOR_DVI_A:
-	case CONNECTOR_VGA:
+	case DRM_MODE_CONNECTOR_DVIA:
+	case DRM_MODE_CONNECTOR_VGA:
 		return ATOM_ENCODER_MODE_CRT;
 		break;
-	case CONNECTOR_STV:
-	case CONNECTOR_CTV:
-	case CONNECTOR_DIN:
+	case DRM_MODE_CONNECTOR_Composite:
+	case DRM_MODE_CONNECTOR_SVIDEO:
+	case DRM_MODE_CONNECTOR_9PinDIN:
 		/* fix me */
 		return ATOM_ENCODER_MODE_TV;
 		/*return ATOM_ENCODER_MODE_CV;*/

+ 0 - 2
drivers/gpu/drm/radeon/radeon_gem.c

@@ -131,7 +131,6 @@ int radeon_gem_set_domain(struct drm_gem_object *gobj,
 			printk(KERN_ERR "Failed to wait for object !\n");
 			return r;
 		}
-		radeon_hdp_flush(robj->rdev);
 	}
 	return 0;
 }
@@ -312,7 +311,6 @@ int radeon_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 	mutex_lock(&dev->struct_mutex);
 	drm_gem_object_unreference(gobj);
 	mutex_unlock(&dev->struct_mutex);
-	radeon_hdp_flush(robj->rdev);
 	return r;
 }
 

+ 8 - 2
drivers/gpu/drm/radeon/radeon_irq_kms.c

@@ -97,6 +97,7 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
 	rdev->irq.sw_int = false;
 	for (i = 0; i < 2; i++) {
 		rdev->irq.crtc_vblank_int[i] = false;
+		rdev->irq.hpd[i] = false;
 	}
 	radeon_irq_set(rdev);
 }
@@ -128,17 +129,22 @@ int radeon_irq_kms_init(struct radeon_device *rdev)
 			DRM_INFO("radeon: using MSI.\n");
 		}
 	}
-	drm_irq_install(rdev->ddev);
 	rdev->irq.installed = true;
+	r = drm_irq_install(rdev->ddev);
+	if (r) {
+		rdev->irq.installed = false;
+		return r;
+	}
 	DRM_INFO("radeon: irq initialized.\n");
 	return 0;
 }
 
 void radeon_irq_kms_fini(struct radeon_device *rdev)
 {
+	drm_vblank_cleanup(rdev->ddev);
 	if (rdev->irq.installed) {
-		rdev->irq.installed = false;
 		drm_irq_uninstall(rdev->ddev);
+		rdev->irq.installed = false;
 		if (rdev->msi_enabled)
 			pci_disable_msi(rdev->pdev);
 	}

+ 7 - 7
drivers/gpu/drm/radeon/radeon_legacy_tv.c

@@ -77,7 +77,7 @@ struct radeon_tv_mode_constants {
 	unsigned pix_to_tv;
 };
 
-static const uint16_t hor_timing_NTSC[] = {
+static const uint16_t hor_timing_NTSC[MAX_H_CODE_TIMING_LEN] = {
 	0x0007,
 	0x003f,
 	0x0263,
@@ -98,7 +98,7 @@ static const uint16_t hor_timing_NTSC[] = {
 	0
 };
 
-static const uint16_t vert_timing_NTSC[] = {
+static const uint16_t vert_timing_NTSC[MAX_V_CODE_TIMING_LEN] = {
 	0x2001,
 	0x200d,
 	0x1006,
@@ -115,7 +115,7 @@ static const uint16_t vert_timing_NTSC[] = {
 	0
 };
 
-static const uint16_t hor_timing_PAL[] = {
+static const uint16_t hor_timing_PAL[MAX_H_CODE_TIMING_LEN] = {
 	0x0007,
 	0x0058,
 	0x027c,
@@ -136,7 +136,7 @@ static const uint16_t hor_timing_PAL[] = {
 	0
 };
 
-static const uint16_t vert_timing_PAL[] = 	{
+static const uint16_t vert_timing_PAL[MAX_V_CODE_TIMING_LEN] = {
 	0x2001,
 	0x200c,
 	0x1005,
@@ -623,9 +623,9 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
 	}
 	flicker_removal = (tmp + 500) / 1000;
 
-	if (flicker_removal < 3)
-		flicker_removal = 3;
-	for (i = 0; i < 6; ++i) {
+	if (flicker_removal < 2)
+		flicker_removal = 2;
+	for (i = 0; i < ARRAY_SIZE(SLOPE_limit); ++i) {
 		if (flicker_removal == SLOPE_limit[i])
 			break;
 	}

+ 0 - 26
drivers/gpu/drm/radeon/radeon_mode.h

@@ -46,32 +46,6 @@ struct radeon_device;
 #define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base)
 #define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base)
 
-enum radeon_connector_type {
-	CONNECTOR_NONE,
-	CONNECTOR_VGA,
-	CONNECTOR_DVI_I,
-	CONNECTOR_DVI_D,
-	CONNECTOR_DVI_A,
-	CONNECTOR_STV,
-	CONNECTOR_CTV,
-	CONNECTOR_LVDS,
-	CONNECTOR_DIGITAL,
-	CONNECTOR_SCART,
-	CONNECTOR_HDMI_TYPE_A,
-	CONNECTOR_HDMI_TYPE_B,
-	CONNECTOR_0XC,
-	CONNECTOR_0XD,
-	CONNECTOR_DIN,
-	CONNECTOR_DISPLAY_PORT,
-	CONNECTOR_UNSUPPORTED
-};
-
-enum radeon_dvi_type {
-	DVI_AUTO,
-	DVI_DIGITAL,
-	DVI_ANALOG
-};
-
 enum radeon_rmx_type {
 	RMX_OFF,
 	RMX_FULL,

+ 3 - 2
drivers/gpu/drm/radeon/radeon_object.c

@@ -221,8 +221,9 @@ int radeon_bo_unpin(struct radeon_bo *bo)
 int radeon_bo_evict_vram(struct radeon_device *rdev)
 {
 	if (rdev->flags & RADEON_IS_IGP) {
-		/* Useless to evict on IGP chips */
-		return 0;
+		if (rdev->mc.igp_sideport_enabled == false)
+			/* Useless to evict on IGP chips */
+			return 0;
 	}
 	return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM);
 }

+ 795 - 0
drivers/gpu/drm/radeon/reg_srcs/r420

@@ -0,0 +1,795 @@
+r420 0x4f60
+0x1434 SRC_Y_X
+0x1438 DST_Y_X
+0x143C DST_HEIGHT_WIDTH
+0x146C DP_GUI_MASTER_CNTL
+0x1474 BRUSH_Y_X
+0x1478 DP_BRUSH_BKGD_CLR
+0x147C DP_BRUSH_FRGD_CLR
+0x1480 BRUSH_DATA0
+0x1484 BRUSH_DATA1
+0x1598 DST_WIDTH_HEIGHT
+0x15C0 CLR_CMP_CNTL
+0x15C4 CLR_CMP_CLR_SRC
+0x15C8 CLR_CMP_CLR_DST
+0x15CC CLR_CMP_MSK
+0x15D8 DP_SRC_FRGD_CLR
+0x15DC DP_SRC_BKGD_CLR
+0x1600 DST_LINE_START
+0x1604 DST_LINE_END
+0x1608 DST_LINE_PATCOUNT
+0x16C0 DP_CNTL
+0x16CC DP_WRITE_MSK
+0x16D0 DP_CNTL_XDIR_YDIR_YMAJOR
+0x16E8 DEFAULT_SC_BOTTOM_RIGHT
+0x16EC SC_TOP_LEFT
+0x16F0 SC_BOTTOM_RIGHT
+0x16F4 SRC_SC_BOTTOM_RIGHT
+0x1714 DSTCACHE_CTLSTAT
+0x1720 WAIT_UNTIL
+0x172C RBBM_GUICNTL
+0x1D98 VAP_VPORT_XSCALE
+0x1D9C VAP_VPORT_XOFFSET
+0x1DA0 VAP_VPORT_YSCALE
+0x1DA4 VAP_VPORT_YOFFSET
+0x1DA8 VAP_VPORT_ZSCALE
+0x1DAC VAP_VPORT_ZOFFSET
+0x2080 VAP_CNTL
+0x2090 VAP_OUT_VTX_FMT_0
+0x2094 VAP_OUT_VTX_FMT_1
+0x20B0 VAP_VTE_CNTL
+0x2138 VAP_VF_MIN_VTX_INDX
+0x2140 VAP_CNTL_STATUS
+0x2150 VAP_PROG_STREAM_CNTL_0
+0x2154 VAP_PROG_STREAM_CNTL_1
+0x2158 VAP_PROG_STREAM_CNTL_2
+0x215C VAP_PROG_STREAM_CNTL_3
+0x2160 VAP_PROG_STREAM_CNTL_4
+0x2164 VAP_PROG_STREAM_CNTL_5
+0x2168 VAP_PROG_STREAM_CNTL_6
+0x216C VAP_PROG_STREAM_CNTL_7
+0x2180 VAP_VTX_STATE_CNTL
+0x2184 VAP_VSM_VTX_ASSM
+0x2188 VAP_VTX_STATE_IND_REG_0
+0x218C VAP_VTX_STATE_IND_REG_1
+0x2190 VAP_VTX_STATE_IND_REG_2
+0x2194 VAP_VTX_STATE_IND_REG_3
+0x2198 VAP_VTX_STATE_IND_REG_4
+0x219C VAP_VTX_STATE_IND_REG_5
+0x21A0 VAP_VTX_STATE_IND_REG_6
+0x21A4 VAP_VTX_STATE_IND_REG_7
+0x21A8 VAP_VTX_STATE_IND_REG_8
+0x21AC VAP_VTX_STATE_IND_REG_9
+0x21B0 VAP_VTX_STATE_IND_REG_10
+0x21B4 VAP_VTX_STATE_IND_REG_11
+0x21B8 VAP_VTX_STATE_IND_REG_12
+0x21BC VAP_VTX_STATE_IND_REG_13
+0x21C0 VAP_VTX_STATE_IND_REG_14
+0x21C4 VAP_VTX_STATE_IND_REG_15
+0x21DC VAP_PSC_SGN_NORM_CNTL
+0x21E0 VAP_PROG_STREAM_CNTL_EXT_0
+0x21E4 VAP_PROG_STREAM_CNTL_EXT_1
+0x21E8 VAP_PROG_STREAM_CNTL_EXT_2
+0x21EC VAP_PROG_STREAM_CNTL_EXT_3
+0x21F0 VAP_PROG_STREAM_CNTL_EXT_4
+0x21F4 VAP_PROG_STREAM_CNTL_EXT_5
+0x21F8 VAP_PROG_STREAM_CNTL_EXT_6
+0x21FC VAP_PROG_STREAM_CNTL_EXT_7
+0x2200 VAP_PVS_VECTOR_INDX_REG
+0x2204 VAP_PVS_VECTOR_DATA_REG
+0x2208 VAP_PVS_VECTOR_DATA_REG_128
+0x221C VAP_CLIP_CNTL
+0x2220 VAP_GB_VERT_CLIP_ADJ
+0x2224 VAP_GB_VERT_DISC_ADJ
+0x2228 VAP_GB_HORZ_CLIP_ADJ
+0x222C VAP_GB_HORZ_DISC_ADJ
+0x2230 VAP_PVS_FLOW_CNTL_ADDRS_0
+0x2234 VAP_PVS_FLOW_CNTL_ADDRS_1
+0x2238 VAP_PVS_FLOW_CNTL_ADDRS_2
+0x223C VAP_PVS_FLOW_CNTL_ADDRS_3
+0x2240 VAP_PVS_FLOW_CNTL_ADDRS_4
+0x2244 VAP_PVS_FLOW_CNTL_ADDRS_5
+0x2248 VAP_PVS_FLOW_CNTL_ADDRS_6
+0x224C VAP_PVS_FLOW_CNTL_ADDRS_7
+0x2250 VAP_PVS_FLOW_CNTL_ADDRS_8
+0x2254 VAP_PVS_FLOW_CNTL_ADDRS_9
+0x2258 VAP_PVS_FLOW_CNTL_ADDRS_10
+0x225C VAP_PVS_FLOW_CNTL_ADDRS_11
+0x2260 VAP_PVS_FLOW_CNTL_ADDRS_12
+0x2264 VAP_PVS_FLOW_CNTL_ADDRS_13
+0x2268 VAP_PVS_FLOW_CNTL_ADDRS_14
+0x226C VAP_PVS_FLOW_CNTL_ADDRS_15
+0x2284 VAP_PVS_STATE_FLUSH_REG
+0x2288 VAP_PVS_VTX_TIMEOUT_REG
+0x2290 VAP_PVS_FLOW_CNTL_LOOP_INDEX_0
+0x2294 VAP_PVS_FLOW_CNTL_LOOP_INDEX_1
+0x2298 VAP_PVS_FLOW_CNTL_LOOP_INDEX_2
+0x229C VAP_PVS_FLOW_CNTL_LOOP_INDEX_3
+0x22A0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_4
+0x22A4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_5
+0x22A8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_6
+0x22AC VAP_PVS_FLOW_CNTL_LOOP_INDEX_7
+0x22B0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_8
+0x22B4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_9
+0x22B8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_10
+0x22BC VAP_PVS_FLOW_CNTL_LOOP_INDEX_11
+0x22C0 VAP_PVS_FLOW_CNTL_LOOP_INDEX_12
+0x22C4 VAP_PVS_FLOW_CNTL_LOOP_INDEX_13
+0x22C8 VAP_PVS_FLOW_CNTL_LOOP_INDEX_14
+0x22CC VAP_PVS_FLOW_CNTL_LOOP_INDEX_15
+0x22D0 VAP_PVS_CODE_CNTL_0
+0x22D4 VAP_PVS_CONST_CNTL
+0x22D8 VAP_PVS_CODE_CNTL_1
+0x22DC VAP_PVS_FLOW_CNTL_OPC
+0x342C RB2D_DSTCACHE_CTLSTAT
+0x4000 GB_VAP_RASTER_VTX_FMT_0
+0x4004 GB_VAP_RASTER_VTX_FMT_1
+0x4008 GB_ENABLE
+0x401C GB_SELECT
+0x4020 GB_AA_CONFIG
+0x4024 GB_FIFO_SIZE
+0x4100 TX_INVALTAGS
+0x4200 GA_POINT_S0
+0x4204 GA_POINT_T0
+0x4208 GA_POINT_S1
+0x420C GA_POINT_T1
+0x4214 GA_TRIANGLE_STIPPLE
+0x421C GA_POINT_SIZE
+0x4230 GA_POINT_MINMAX
+0x4234 GA_LINE_CNTL
+0x4238 GA_LINE_STIPPLE_CONFIG
+0x4260 GA_LINE_STIPPLE_VALUE
+0x4264 GA_LINE_S0
+0x4268 GA_LINE_S1
+0x4278 GA_COLOR_CONTROL
+0x427C GA_SOLID_RG
+0x4280 GA_SOLID_BA
+0x4288 GA_POLY_MODE
+0x428C GA_ROUND_MODE
+0x4290 GA_OFFSET
+0x4294 GA_FOG_SCALE
+0x4298 GA_FOG_OFFSET
+0x42A0 SU_TEX_WRAP
+0x42A4 SU_POLY_OFFSET_FRONT_SCALE
+0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
+0x42AC SU_POLY_OFFSET_BACK_SCALE
+0x42B0 SU_POLY_OFFSET_BACK_OFFSET
+0x42B4 SU_POLY_OFFSET_ENABLE
+0x42B8 SU_CULL_MODE
+0x42C0 SU_DEPTH_SCALE
+0x42C4 SU_DEPTH_OFFSET
+0x42C8 SU_REG_DEST
+0x4300 RS_COUNT
+0x4304 RS_INST_COUNT
+0x4310 RS_IP_0
+0x4314 RS_IP_1
+0x4318 RS_IP_2
+0x431C RS_IP_3
+0x4320 RS_IP_4
+0x4324 RS_IP_5
+0x4328 RS_IP_6
+0x432C RS_IP_7
+0x4330 RS_INST_0
+0x4334 RS_INST_1
+0x4338 RS_INST_2
+0x433C RS_INST_3
+0x4340 RS_INST_4
+0x4344 RS_INST_5
+0x4348 RS_INST_6
+0x434C RS_INST_7
+0x4350 RS_INST_8
+0x4354 RS_INST_9
+0x4358 RS_INST_10
+0x435C RS_INST_11
+0x4360 RS_INST_12
+0x4364 RS_INST_13
+0x4368 RS_INST_14
+0x436C RS_INST_15
+0x43A4 SC_HYPERZ_EN
+0x43A8 SC_EDGERULE
+0x43B0 SC_CLIP_0_A
+0x43B4 SC_CLIP_0_B
+0x43B8 SC_CLIP_1_A
+0x43BC SC_CLIP_1_B
+0x43C0 SC_CLIP_2_A
+0x43C4 SC_CLIP_2_B
+0x43C8 SC_CLIP_3_A
+0x43CC SC_CLIP_3_B
+0x43D0 SC_CLIP_RULE
+0x43E0 SC_SCISSOR0
+0x43E8 SC_SCREENDOOR
+0x4440 TX_FILTER1_0
+0x4444 TX_FILTER1_1
+0x4448 TX_FILTER1_2
+0x444C TX_FILTER1_3
+0x4450 TX_FILTER1_4
+0x4454 TX_FILTER1_5
+0x4458 TX_FILTER1_6
+0x445C TX_FILTER1_7
+0x4460 TX_FILTER1_8
+0x4464 TX_FILTER1_9
+0x4468 TX_FILTER1_10
+0x446C TX_FILTER1_11
+0x4470 TX_FILTER1_12
+0x4474 TX_FILTER1_13
+0x4478 TX_FILTER1_14
+0x447C TX_FILTER1_15
+0x4580 TX_CHROMA_KEY_0
+0x4584 TX_CHROMA_KEY_1
+0x4588 TX_CHROMA_KEY_2
+0x458C TX_CHROMA_KEY_3
+0x4590 TX_CHROMA_KEY_4
+0x4594 TX_CHROMA_KEY_5
+0x4598 TX_CHROMA_KEY_6
+0x459C TX_CHROMA_KEY_7
+0x45A0 TX_CHROMA_KEY_8
+0x45A4 TX_CHROMA_KEY_9
+0x45A8 TX_CHROMA_KEY_10
+0x45AC TX_CHROMA_KEY_11
+0x45B0 TX_CHROMA_KEY_12
+0x45B4 TX_CHROMA_KEY_13
+0x45B8 TX_CHROMA_KEY_14
+0x45BC TX_CHROMA_KEY_15
+0x45C0 TX_BORDER_COLOR_0
+0x45C4 TX_BORDER_COLOR_1
+0x45C8 TX_BORDER_COLOR_2
+0x45CC TX_BORDER_COLOR_3
+0x45D0 TX_BORDER_COLOR_4
+0x45D4 TX_BORDER_COLOR_5
+0x45D8 TX_BORDER_COLOR_6
+0x45DC TX_BORDER_COLOR_7
+0x45E0 TX_BORDER_COLOR_8
+0x45E4 TX_BORDER_COLOR_9
+0x45E8 TX_BORDER_COLOR_10
+0x45EC TX_BORDER_COLOR_11
+0x45F0 TX_BORDER_COLOR_12
+0x45F4 TX_BORDER_COLOR_13
+0x45F8 TX_BORDER_COLOR_14
+0x45FC TX_BORDER_COLOR_15
+0x4600 US_CONFIG
+0x4604 US_PIXSIZE
+0x4608 US_CODE_OFFSET
+0x460C US_RESET
+0x4610 US_CODE_ADDR_0
+0x4614 US_CODE_ADDR_1
+0x4618 US_CODE_ADDR_2
+0x461C US_CODE_ADDR_3
+0x4620 US_TEX_INST_0
+0x4624 US_TEX_INST_1
+0x4628 US_TEX_INST_2
+0x462C US_TEX_INST_3
+0x4630 US_TEX_INST_4
+0x4634 US_TEX_INST_5
+0x4638 US_TEX_INST_6
+0x463C US_TEX_INST_7
+0x4640 US_TEX_INST_8
+0x4644 US_TEX_INST_9
+0x4648 US_TEX_INST_10
+0x464C US_TEX_INST_11
+0x4650 US_TEX_INST_12
+0x4654 US_TEX_INST_13
+0x4658 US_TEX_INST_14
+0x465C US_TEX_INST_15
+0x4660 US_TEX_INST_16
+0x4664 US_TEX_INST_17
+0x4668 US_TEX_INST_18
+0x466C US_TEX_INST_19
+0x4670 US_TEX_INST_20
+0x4674 US_TEX_INST_21
+0x4678 US_TEX_INST_22
+0x467C US_TEX_INST_23
+0x4680 US_TEX_INST_24
+0x4684 US_TEX_INST_25
+0x4688 US_TEX_INST_26
+0x468C US_TEX_INST_27
+0x4690 US_TEX_INST_28
+0x4694 US_TEX_INST_29
+0x4698 US_TEX_INST_30
+0x469C US_TEX_INST_31
+0x46A4 US_OUT_FMT_0
+0x46A8 US_OUT_FMT_1
+0x46AC US_OUT_FMT_2
+0x46B0 US_OUT_FMT_3
+0x46B4 US_W_FMT
+0x46B8 US_CODE_BANK
+0x46BC US_CODE_EXT
+0x46C0 US_ALU_RGB_ADDR_0
+0x46C4 US_ALU_RGB_ADDR_1
+0x46C8 US_ALU_RGB_ADDR_2
+0x46CC US_ALU_RGB_ADDR_3
+0x46D0 US_ALU_RGB_ADDR_4
+0x46D4 US_ALU_RGB_ADDR_5
+0x46D8 US_ALU_RGB_ADDR_6
+0x46DC US_ALU_RGB_ADDR_7
+0x46E0 US_ALU_RGB_ADDR_8
+0x46E4 US_ALU_RGB_ADDR_9
+0x46E8 US_ALU_RGB_ADDR_10
+0x46EC US_ALU_RGB_ADDR_11
+0x46F0 US_ALU_RGB_ADDR_12
+0x46F4 US_ALU_RGB_ADDR_13
+0x46F8 US_ALU_RGB_ADDR_14
+0x46FC US_ALU_RGB_ADDR_15
+0x4700 US_ALU_RGB_ADDR_16
+0x4704 US_ALU_RGB_ADDR_17
+0x4708 US_ALU_RGB_ADDR_18
+0x470C US_ALU_RGB_ADDR_19
+0x4710 US_ALU_RGB_ADDR_20
+0x4714 US_ALU_RGB_ADDR_21
+0x4718 US_ALU_RGB_ADDR_22
+0x471C US_ALU_RGB_ADDR_23
+0x4720 US_ALU_RGB_ADDR_24
+0x4724 US_ALU_RGB_ADDR_25
+0x4728 US_ALU_RGB_ADDR_26
+0x472C US_ALU_RGB_ADDR_27
+0x4730 US_ALU_RGB_ADDR_28
+0x4734 US_ALU_RGB_ADDR_29
+0x4738 US_ALU_RGB_ADDR_30
+0x473C US_ALU_RGB_ADDR_31
+0x4740 US_ALU_RGB_ADDR_32
+0x4744 US_ALU_RGB_ADDR_33
+0x4748 US_ALU_RGB_ADDR_34
+0x474C US_ALU_RGB_ADDR_35
+0x4750 US_ALU_RGB_ADDR_36
+0x4754 US_ALU_RGB_ADDR_37
+0x4758 US_ALU_RGB_ADDR_38
+0x475C US_ALU_RGB_ADDR_39
+0x4760 US_ALU_RGB_ADDR_40
+0x4764 US_ALU_RGB_ADDR_41
+0x4768 US_ALU_RGB_ADDR_42
+0x476C US_ALU_RGB_ADDR_43
+0x4770 US_ALU_RGB_ADDR_44
+0x4774 US_ALU_RGB_ADDR_45
+0x4778 US_ALU_RGB_ADDR_46
+0x477C US_ALU_RGB_ADDR_47
+0x4780 US_ALU_RGB_ADDR_48
+0x4784 US_ALU_RGB_ADDR_49
+0x4788 US_ALU_RGB_ADDR_50
+0x478C US_ALU_RGB_ADDR_51
+0x4790 US_ALU_RGB_ADDR_52
+0x4794 US_ALU_RGB_ADDR_53
+0x4798 US_ALU_RGB_ADDR_54
+0x479C US_ALU_RGB_ADDR_55
+0x47A0 US_ALU_RGB_ADDR_56
+0x47A4 US_ALU_RGB_ADDR_57
+0x47A8 US_ALU_RGB_ADDR_58
+0x47AC US_ALU_RGB_ADDR_59
+0x47B0 US_ALU_RGB_ADDR_60
+0x47B4 US_ALU_RGB_ADDR_61
+0x47B8 US_ALU_RGB_ADDR_62
+0x47BC US_ALU_RGB_ADDR_63
+0x47C0 US_ALU_ALPHA_ADDR_0
+0x47C4 US_ALU_ALPHA_ADDR_1
+0x47C8 US_ALU_ALPHA_ADDR_2
+0x47CC US_ALU_ALPHA_ADDR_3
+0x47D0 US_ALU_ALPHA_ADDR_4
+0x47D4 US_ALU_ALPHA_ADDR_5
+0x47D8 US_ALU_ALPHA_ADDR_6
+0x47DC US_ALU_ALPHA_ADDR_7
+0x47E0 US_ALU_ALPHA_ADDR_8
+0x47E4 US_ALU_ALPHA_ADDR_9
+0x47E8 US_ALU_ALPHA_ADDR_10
+0x47EC US_ALU_ALPHA_ADDR_11
+0x47F0 US_ALU_ALPHA_ADDR_12
+0x47F4 US_ALU_ALPHA_ADDR_13
+0x47F8 US_ALU_ALPHA_ADDR_14
+0x47FC US_ALU_ALPHA_ADDR_15
+0x4800 US_ALU_ALPHA_ADDR_16
+0x4804 US_ALU_ALPHA_ADDR_17
+0x4808 US_ALU_ALPHA_ADDR_18
+0x480C US_ALU_ALPHA_ADDR_19
+0x4810 US_ALU_ALPHA_ADDR_20
+0x4814 US_ALU_ALPHA_ADDR_21
+0x4818 US_ALU_ALPHA_ADDR_22
+0x481C US_ALU_ALPHA_ADDR_23
+0x4820 US_ALU_ALPHA_ADDR_24
+0x4824 US_ALU_ALPHA_ADDR_25
+0x4828 US_ALU_ALPHA_ADDR_26
+0x482C US_ALU_ALPHA_ADDR_27
+0x4830 US_ALU_ALPHA_ADDR_28
+0x4834 US_ALU_ALPHA_ADDR_29
+0x4838 US_ALU_ALPHA_ADDR_30
+0x483C US_ALU_ALPHA_ADDR_31
+0x4840 US_ALU_ALPHA_ADDR_32
+0x4844 US_ALU_ALPHA_ADDR_33
+0x4848 US_ALU_ALPHA_ADDR_34
+0x484C US_ALU_ALPHA_ADDR_35
+0x4850 US_ALU_ALPHA_ADDR_36
+0x4854 US_ALU_ALPHA_ADDR_37
+0x4858 US_ALU_ALPHA_ADDR_38
+0x485C US_ALU_ALPHA_ADDR_39
+0x4860 US_ALU_ALPHA_ADDR_40
+0x4864 US_ALU_ALPHA_ADDR_41
+0x4868 US_ALU_ALPHA_ADDR_42
+0x486C US_ALU_ALPHA_ADDR_43
+0x4870 US_ALU_ALPHA_ADDR_44
+0x4874 US_ALU_ALPHA_ADDR_45
+0x4878 US_ALU_ALPHA_ADDR_46
+0x487C US_ALU_ALPHA_ADDR_47
+0x4880 US_ALU_ALPHA_ADDR_48
+0x4884 US_ALU_ALPHA_ADDR_49
+0x4888 US_ALU_ALPHA_ADDR_50
+0x488C US_ALU_ALPHA_ADDR_51
+0x4890 US_ALU_ALPHA_ADDR_52
+0x4894 US_ALU_ALPHA_ADDR_53
+0x4898 US_ALU_ALPHA_ADDR_54
+0x489C US_ALU_ALPHA_ADDR_55
+0x48A0 US_ALU_ALPHA_ADDR_56
+0x48A4 US_ALU_ALPHA_ADDR_57
+0x48A8 US_ALU_ALPHA_ADDR_58
+0x48AC US_ALU_ALPHA_ADDR_59
+0x48B0 US_ALU_ALPHA_ADDR_60
+0x48B4 US_ALU_ALPHA_ADDR_61
+0x48B8 US_ALU_ALPHA_ADDR_62
+0x48BC US_ALU_ALPHA_ADDR_63
+0x48C0 US_ALU_RGB_INST_0
+0x48C4 US_ALU_RGB_INST_1
+0x48C8 US_ALU_RGB_INST_2
+0x48CC US_ALU_RGB_INST_3
+0x48D0 US_ALU_RGB_INST_4
+0x48D4 US_ALU_RGB_INST_5
+0x48D8 US_ALU_RGB_INST_6
+0x48DC US_ALU_RGB_INST_7
+0x48E0 US_ALU_RGB_INST_8
+0x48E4 US_ALU_RGB_INST_9
+0x48E8 US_ALU_RGB_INST_10
+0x48EC US_ALU_RGB_INST_11
+0x48F0 US_ALU_RGB_INST_12
+0x48F4 US_ALU_RGB_INST_13
+0x48F8 US_ALU_RGB_INST_14
+0x48FC US_ALU_RGB_INST_15
+0x4900 US_ALU_RGB_INST_16
+0x4904 US_ALU_RGB_INST_17
+0x4908 US_ALU_RGB_INST_18
+0x490C US_ALU_RGB_INST_19
+0x4910 US_ALU_RGB_INST_20
+0x4914 US_ALU_RGB_INST_21
+0x4918 US_ALU_RGB_INST_22
+0x491C US_ALU_RGB_INST_23
+0x4920 US_ALU_RGB_INST_24
+0x4924 US_ALU_RGB_INST_25
+0x4928 US_ALU_RGB_INST_26
+0x492C US_ALU_RGB_INST_27
+0x4930 US_ALU_RGB_INST_28
+0x4934 US_ALU_RGB_INST_29
+0x4938 US_ALU_RGB_INST_30
+0x493C US_ALU_RGB_INST_31
+0x4940 US_ALU_RGB_INST_32
+0x4944 US_ALU_RGB_INST_33
+0x4948 US_ALU_RGB_INST_34
+0x494C US_ALU_RGB_INST_35
+0x4950 US_ALU_RGB_INST_36
+0x4954 US_ALU_RGB_INST_37
+0x4958 US_ALU_RGB_INST_38
+0x495C US_ALU_RGB_INST_39
+0x4960 US_ALU_RGB_INST_40
+0x4964 US_ALU_RGB_INST_41
+0x4968 US_ALU_RGB_INST_42
+0x496C US_ALU_RGB_INST_43
+0x4970 US_ALU_RGB_INST_44
+0x4974 US_ALU_RGB_INST_45
+0x4978 US_ALU_RGB_INST_46
+0x497C US_ALU_RGB_INST_47
+0x4980 US_ALU_RGB_INST_48
+0x4984 US_ALU_RGB_INST_49
+0x4988 US_ALU_RGB_INST_50
+0x498C US_ALU_RGB_INST_51
+0x4990 US_ALU_RGB_INST_52
+0x4994 US_ALU_RGB_INST_53
+0x4998 US_ALU_RGB_INST_54
+0x499C US_ALU_RGB_INST_55
+0x49A0 US_ALU_RGB_INST_56
+0x49A4 US_ALU_RGB_INST_57
+0x49A8 US_ALU_RGB_INST_58
+0x49AC US_ALU_RGB_INST_59
+0x49B0 US_ALU_RGB_INST_60
+0x49B4 US_ALU_RGB_INST_61
+0x49B8 US_ALU_RGB_INST_62
+0x49BC US_ALU_RGB_INST_63
+0x49C0 US_ALU_ALPHA_INST_0
+0x49C4 US_ALU_ALPHA_INST_1
+0x49C8 US_ALU_ALPHA_INST_2
+0x49CC US_ALU_ALPHA_INST_3
+0x49D0 US_ALU_ALPHA_INST_4
+0x49D4 US_ALU_ALPHA_INST_5
+0x49D8 US_ALU_ALPHA_INST_6
+0x49DC US_ALU_ALPHA_INST_7
+0x49E0 US_ALU_ALPHA_INST_8
+0x49E4 US_ALU_ALPHA_INST_9
+0x49E8 US_ALU_ALPHA_INST_10
+0x49EC US_ALU_ALPHA_INST_11
+0x49F0 US_ALU_ALPHA_INST_12
+0x49F4 US_ALU_ALPHA_INST_13
+0x49F8 US_ALU_ALPHA_INST_14
+0x49FC US_ALU_ALPHA_INST_15
+0x4A00 US_ALU_ALPHA_INST_16
+0x4A04 US_ALU_ALPHA_INST_17
+0x4A08 US_ALU_ALPHA_INST_18
+0x4A0C US_ALU_ALPHA_INST_19
+0x4A10 US_ALU_ALPHA_INST_20
+0x4A14 US_ALU_ALPHA_INST_21
+0x4A18 US_ALU_ALPHA_INST_22
+0x4A1C US_ALU_ALPHA_INST_23
+0x4A20 US_ALU_ALPHA_INST_24
+0x4A24 US_ALU_ALPHA_INST_25
+0x4A28 US_ALU_ALPHA_INST_26
+0x4A2C US_ALU_ALPHA_INST_27
+0x4A30 US_ALU_ALPHA_INST_28
+0x4A34 US_ALU_ALPHA_INST_29
+0x4A38 US_ALU_ALPHA_INST_30
+0x4A3C US_ALU_ALPHA_INST_31
+0x4A40 US_ALU_ALPHA_INST_32
+0x4A44 US_ALU_ALPHA_INST_33
+0x4A48 US_ALU_ALPHA_INST_34
+0x4A4C US_ALU_ALPHA_INST_35
+0x4A50 US_ALU_ALPHA_INST_36
+0x4A54 US_ALU_ALPHA_INST_37
+0x4A58 US_ALU_ALPHA_INST_38
+0x4A5C US_ALU_ALPHA_INST_39
+0x4A60 US_ALU_ALPHA_INST_40
+0x4A64 US_ALU_ALPHA_INST_41
+0x4A68 US_ALU_ALPHA_INST_42
+0x4A6C US_ALU_ALPHA_INST_43
+0x4A70 US_ALU_ALPHA_INST_44
+0x4A74 US_ALU_ALPHA_INST_45
+0x4A78 US_ALU_ALPHA_INST_46
+0x4A7C US_ALU_ALPHA_INST_47
+0x4A80 US_ALU_ALPHA_INST_48
+0x4A84 US_ALU_ALPHA_INST_49
+0x4A88 US_ALU_ALPHA_INST_50
+0x4A8C US_ALU_ALPHA_INST_51
+0x4A90 US_ALU_ALPHA_INST_52
+0x4A94 US_ALU_ALPHA_INST_53
+0x4A98 US_ALU_ALPHA_INST_54
+0x4A9C US_ALU_ALPHA_INST_55
+0x4AA0 US_ALU_ALPHA_INST_56
+0x4AA4 US_ALU_ALPHA_INST_57
+0x4AA8 US_ALU_ALPHA_INST_58
+0x4AAC US_ALU_ALPHA_INST_59
+0x4AB0 US_ALU_ALPHA_INST_60
+0x4AB4 US_ALU_ALPHA_INST_61
+0x4AB8 US_ALU_ALPHA_INST_62
+0x4ABC US_ALU_ALPHA_INST_63
+0x4AC0 US_ALU_EXT_ADDR_0
+0x4AC4 US_ALU_EXT_ADDR_1
+0x4AC8 US_ALU_EXT_ADDR_2
+0x4ACC US_ALU_EXT_ADDR_3
+0x4AD0 US_ALU_EXT_ADDR_4
+0x4AD4 US_ALU_EXT_ADDR_5
+0x4AD8 US_ALU_EXT_ADDR_6
+0x4ADC US_ALU_EXT_ADDR_7
+0x4AE0 US_ALU_EXT_ADDR_8
+0x4AE4 US_ALU_EXT_ADDR_9
+0x4AE8 US_ALU_EXT_ADDR_10
+0x4AEC US_ALU_EXT_ADDR_11
+0x4AF0 US_ALU_EXT_ADDR_12
+0x4AF4 US_ALU_EXT_ADDR_13
+0x4AF8 US_ALU_EXT_ADDR_14
+0x4AFC US_ALU_EXT_ADDR_15
+0x4B00 US_ALU_EXT_ADDR_16
+0x4B04 US_ALU_EXT_ADDR_17
+0x4B08 US_ALU_EXT_ADDR_18
+0x4B0C US_ALU_EXT_ADDR_19
+0x4B10 US_ALU_EXT_ADDR_20
+0x4B14 US_ALU_EXT_ADDR_21
+0x4B18 US_ALU_EXT_ADDR_22
+0x4B1C US_ALU_EXT_ADDR_23
+0x4B20 US_ALU_EXT_ADDR_24
+0x4B24 US_ALU_EXT_ADDR_25
+0x4B28 US_ALU_EXT_ADDR_26
+0x4B2C US_ALU_EXT_ADDR_27
+0x4B30 US_ALU_EXT_ADDR_28
+0x4B34 US_ALU_EXT_ADDR_29
+0x4B38 US_ALU_EXT_ADDR_30
+0x4B3C US_ALU_EXT_ADDR_31
+0x4B40 US_ALU_EXT_ADDR_32
+0x4B44 US_ALU_EXT_ADDR_33
+0x4B48 US_ALU_EXT_ADDR_34
+0x4B4C US_ALU_EXT_ADDR_35
+0x4B50 US_ALU_EXT_ADDR_36
+0x4B54 US_ALU_EXT_ADDR_37
+0x4B58 US_ALU_EXT_ADDR_38
+0x4B5C US_ALU_EXT_ADDR_39
+0x4B60 US_ALU_EXT_ADDR_40
+0x4B64 US_ALU_EXT_ADDR_41
+0x4B68 US_ALU_EXT_ADDR_42
+0x4B6C US_ALU_EXT_ADDR_43
+0x4B70 US_ALU_EXT_ADDR_44
+0x4B74 US_ALU_EXT_ADDR_45
+0x4B78 US_ALU_EXT_ADDR_46
+0x4B7C US_ALU_EXT_ADDR_47
+0x4B80 US_ALU_EXT_ADDR_48
+0x4B84 US_ALU_EXT_ADDR_49
+0x4B88 US_ALU_EXT_ADDR_50
+0x4B8C US_ALU_EXT_ADDR_51
+0x4B90 US_ALU_EXT_ADDR_52
+0x4B94 US_ALU_EXT_ADDR_53
+0x4B98 US_ALU_EXT_ADDR_54
+0x4B9C US_ALU_EXT_ADDR_55
+0x4BA0 US_ALU_EXT_ADDR_56
+0x4BA4 US_ALU_EXT_ADDR_57
+0x4BA8 US_ALU_EXT_ADDR_58
+0x4BAC US_ALU_EXT_ADDR_59
+0x4BB0 US_ALU_EXT_ADDR_60
+0x4BB4 US_ALU_EXT_ADDR_61
+0x4BB8 US_ALU_EXT_ADDR_62
+0x4BBC US_ALU_EXT_ADDR_63
+0x4BC0 FG_FOG_BLEND
+0x4BC4 FG_FOG_FACTOR
+0x4BC8 FG_FOG_COLOR_R
+0x4BCC FG_FOG_COLOR_G
+0x4BD0 FG_FOG_COLOR_B
+0x4BD4 FG_ALPHA_FUNC
+0x4BD8 FG_DEPTH_SRC
+0x4C00 US_ALU_CONST_R_0
+0x4C04 US_ALU_CONST_G_0
+0x4C08 US_ALU_CONST_B_0
+0x4C0C US_ALU_CONST_A_0
+0x4C10 US_ALU_CONST_R_1
+0x4C14 US_ALU_CONST_G_1
+0x4C18 US_ALU_CONST_B_1
+0x4C1C US_ALU_CONST_A_1
+0x4C20 US_ALU_CONST_R_2
+0x4C24 US_ALU_CONST_G_2
+0x4C28 US_ALU_CONST_B_2
+0x4C2C US_ALU_CONST_A_2
+0x4C30 US_ALU_CONST_R_3
+0x4C34 US_ALU_CONST_G_3
+0x4C38 US_ALU_CONST_B_3
+0x4C3C US_ALU_CONST_A_3
+0x4C40 US_ALU_CONST_R_4
+0x4C44 US_ALU_CONST_G_4
+0x4C48 US_ALU_CONST_B_4
+0x4C4C US_ALU_CONST_A_4
+0x4C50 US_ALU_CONST_R_5
+0x4C54 US_ALU_CONST_G_5
+0x4C58 US_ALU_CONST_B_5
+0x4C5C US_ALU_CONST_A_5
+0x4C60 US_ALU_CONST_R_6
+0x4C64 US_ALU_CONST_G_6
+0x4C68 US_ALU_CONST_B_6
+0x4C6C US_ALU_CONST_A_6
+0x4C70 US_ALU_CONST_R_7
+0x4C74 US_ALU_CONST_G_7
+0x4C78 US_ALU_CONST_B_7
+0x4C7C US_ALU_CONST_A_7
+0x4C80 US_ALU_CONST_R_8
+0x4C84 US_ALU_CONST_G_8
+0x4C88 US_ALU_CONST_B_8
+0x4C8C US_ALU_CONST_A_8
+0x4C90 US_ALU_CONST_R_9
+0x4C94 US_ALU_CONST_G_9
+0x4C98 US_ALU_CONST_B_9
+0x4C9C US_ALU_CONST_A_9
+0x4CA0 US_ALU_CONST_R_10
+0x4CA4 US_ALU_CONST_G_10
+0x4CA8 US_ALU_CONST_B_10
+0x4CAC US_ALU_CONST_A_10
+0x4CB0 US_ALU_CONST_R_11
+0x4CB4 US_ALU_CONST_G_11
+0x4CB8 US_ALU_CONST_B_11
+0x4CBC US_ALU_CONST_A_11
+0x4CC0 US_ALU_CONST_R_12
+0x4CC4 US_ALU_CONST_G_12
+0x4CC8 US_ALU_CONST_B_12
+0x4CCC US_ALU_CONST_A_12
+0x4CD0 US_ALU_CONST_R_13
+0x4CD4 US_ALU_CONST_G_13
+0x4CD8 US_ALU_CONST_B_13
+0x4CDC US_ALU_CONST_A_13
+0x4CE0 US_ALU_CONST_R_14
+0x4CE4 US_ALU_CONST_G_14
+0x4CE8 US_ALU_CONST_B_14
+0x4CEC US_ALU_CONST_A_14
+0x4CF0 US_ALU_CONST_R_15
+0x4CF4 US_ALU_CONST_G_15
+0x4CF8 US_ALU_CONST_B_15
+0x4CFC US_ALU_CONST_A_15
+0x4D00 US_ALU_CONST_R_16
+0x4D04 US_ALU_CONST_G_16
+0x4D08 US_ALU_CONST_B_16
+0x4D0C US_ALU_CONST_A_16
+0x4D10 US_ALU_CONST_R_17
+0x4D14 US_ALU_CONST_G_17
+0x4D18 US_ALU_CONST_B_17
+0x4D1C US_ALU_CONST_A_17
+0x4D20 US_ALU_CONST_R_18
+0x4D24 US_ALU_CONST_G_18
+0x4D28 US_ALU_CONST_B_18
+0x4D2C US_ALU_CONST_A_18
+0x4D30 US_ALU_CONST_R_19
+0x4D34 US_ALU_CONST_G_19
+0x4D38 US_ALU_CONST_B_19
+0x4D3C US_ALU_CONST_A_19
+0x4D40 US_ALU_CONST_R_20
+0x4D44 US_ALU_CONST_G_20
+0x4D48 US_ALU_CONST_B_20
+0x4D4C US_ALU_CONST_A_20
+0x4D50 US_ALU_CONST_R_21
+0x4D54 US_ALU_CONST_G_21
+0x4D58 US_ALU_CONST_B_21
+0x4D5C US_ALU_CONST_A_21
+0x4D60 US_ALU_CONST_R_22
+0x4D64 US_ALU_CONST_G_22
+0x4D68 US_ALU_CONST_B_22
+0x4D6C US_ALU_CONST_A_22
+0x4D70 US_ALU_CONST_R_23
+0x4D74 US_ALU_CONST_G_23
+0x4D78 US_ALU_CONST_B_23
+0x4D7C US_ALU_CONST_A_23
+0x4D80 US_ALU_CONST_R_24
+0x4D84 US_ALU_CONST_G_24
+0x4D88 US_ALU_CONST_B_24
+0x4D8C US_ALU_CONST_A_24
+0x4D90 US_ALU_CONST_R_25
+0x4D94 US_ALU_CONST_G_25
+0x4D98 US_ALU_CONST_B_25
+0x4D9C US_ALU_CONST_A_25
+0x4DA0 US_ALU_CONST_R_26
+0x4DA4 US_ALU_CONST_G_26
+0x4DA8 US_ALU_CONST_B_26
+0x4DAC US_ALU_CONST_A_26
+0x4DB0 US_ALU_CONST_R_27
+0x4DB4 US_ALU_CONST_G_27
+0x4DB8 US_ALU_CONST_B_27
+0x4DBC US_ALU_CONST_A_27
+0x4DC0 US_ALU_CONST_R_28
+0x4DC4 US_ALU_CONST_G_28
+0x4DC8 US_ALU_CONST_B_28
+0x4DCC US_ALU_CONST_A_28
+0x4DD0 US_ALU_CONST_R_29
+0x4DD4 US_ALU_CONST_G_29
+0x4DD8 US_ALU_CONST_B_29
+0x4DDC US_ALU_CONST_A_29
+0x4DE0 US_ALU_CONST_R_30
+0x4DE4 US_ALU_CONST_G_30
+0x4DE8 US_ALU_CONST_B_30
+0x4DEC US_ALU_CONST_A_30
+0x4DF0 US_ALU_CONST_R_31
+0x4DF4 US_ALU_CONST_G_31
+0x4DF8 US_ALU_CONST_B_31
+0x4DFC US_ALU_CONST_A_31
+0x4E04 RB3D_BLENDCNTL_R3
+0x4E08 RB3D_ABLENDCNTL_R3
+0x4E0C RB3D_COLOR_CHANNEL_MASK
+0x4E10 RB3D_CONSTANT_COLOR
+0x4E14 RB3D_COLOR_CLEAR_VALUE
+0x4E18 RB3D_ROPCNTL_R3
+0x4E1C RB3D_CLRCMP_FLIPE_R3
+0x4E20 RB3D_CLRCMP_CLR_R3
+0x4E24 RB3D_CLRCMP_MSK_R3
+0x4E48 RB3D_DEBUG_CTL
+0x4E4C RB3D_DSTCACHE_CTLSTAT_R3
+0x4E50 RB3D_DITHER_CTL
+0x4E54 RB3D_CMASK_OFFSET0
+0x4E58 RB3D_CMASK_OFFSET1
+0x4E5C RB3D_CMASK_OFFSET2
+0x4E60 RB3D_CMASK_OFFSET3
+0x4E64 RB3D_CMASK_PITCH0
+0x4E68 RB3D_CMASK_PITCH1
+0x4E6C RB3D_CMASK_PITCH2
+0x4E70 RB3D_CMASK_PITCH3
+0x4E74 RB3D_CMASK_WRINDEX
+0x4E78 RB3D_CMASK_DWORD
+0x4E7C RB3D_CMASK_RDINDEX
+0x4E80 RB3D_AARESOLVE_OFFSET
+0x4E84 RB3D_AARESOLVE_PITCH
+0x4E88 RB3D_AARESOLVE_CTL
+0x4EA0 RB3D_DISCARD_SRC_PIXEL_LTE_THRESHOLD
+0x4EA4 RB3D_DISCARD_SRC_PIXEL_GTE_THRESHOLD
+0x4F04 ZB_ZSTENCILCNTL
+0x4F08 ZB_STENCILREFMASK
+0x4F14 ZB_ZTOP
+0x4F18 ZB_ZCACHE_CTLSTAT
+0x4F1C ZB_BW_CNTL
+0x4F28 ZB_DEPTHCLEARVALUE
+0x4F30 ZB_ZMASK_OFFSET
+0x4F34 ZB_ZMASK_PITCH
+0x4F38 ZB_ZMASK_WRINDEX
+0x4F3C ZB_ZMASK_DWORD
+0x4F40 ZB_ZMASK_RDINDEX
+0x4F44 ZB_HIZ_OFFSET
+0x4F48 ZB_HIZ_WRINDEX
+0x4F4C ZB_HIZ_DWORD
+0x4F50 ZB_HIZ_RDINDEX
+0x4F54 ZB_HIZ_PITCH
+0x4F58 ZB_ZPASS_DATA

+ 67 - 1
drivers/gpu/drm/radeon/reg_srcs/rs600

@@ -153,7 +153,7 @@ rs600 0x6d40
 0x42A4 SU_POLY_OFFSET_FRONT_SCALE
 0x42A8 SU_POLY_OFFSET_FRONT_OFFSET
 0x42AC SU_POLY_OFFSET_BACK_SCALE
-0x42B0 SU_POLY_OFFSET_BACK_OFFSET 
+0x42B0 SU_POLY_OFFSET_BACK_OFFSET
 0x42B4 SU_POLY_OFFSET_ENABLE
 0x42B8 SU_CULL_MODE
 0x42C0 SU_DEPTH_SCALE
@@ -291,6 +291,8 @@ rs600 0x6d40
 0x46AC US_OUT_FMT_2
 0x46B0 US_OUT_FMT_3
 0x46B4 US_W_FMT
+0x46B8 US_CODE_BANK
+0x46BC US_CODE_EXT
 0x46C0 US_ALU_RGB_ADDR_0
 0x46C4 US_ALU_RGB_ADDR_1
 0x46C8 US_ALU_RGB_ADDR_2
@@ -547,6 +549,70 @@ rs600 0x6d40
 0x4AB4 US_ALU_ALPHA_INST_61
 0x4AB8 US_ALU_ALPHA_INST_62
 0x4ABC US_ALU_ALPHA_INST_63
+0x4AC0 US_ALU_EXT_ADDR_0
+0x4AC4 US_ALU_EXT_ADDR_1
+0x4AC8 US_ALU_EXT_ADDR_2
+0x4ACC US_ALU_EXT_ADDR_3
+0x4AD0 US_ALU_EXT_ADDR_4
+0x4AD4 US_ALU_EXT_ADDR_5
+0x4AD8 US_ALU_EXT_ADDR_6
+0x4ADC US_ALU_EXT_ADDR_7
+0x4AE0 US_ALU_EXT_ADDR_8
+0x4AE4 US_ALU_EXT_ADDR_9
+0x4AE8 US_ALU_EXT_ADDR_10
+0x4AEC US_ALU_EXT_ADDR_11
+0x4AF0 US_ALU_EXT_ADDR_12
+0x4AF4 US_ALU_EXT_ADDR_13
+0x4AF8 US_ALU_EXT_ADDR_14
+0x4AFC US_ALU_EXT_ADDR_15
+0x4B00 US_ALU_EXT_ADDR_16
+0x4B04 US_ALU_EXT_ADDR_17
+0x4B08 US_ALU_EXT_ADDR_18
+0x4B0C US_ALU_EXT_ADDR_19
+0x4B10 US_ALU_EXT_ADDR_20
+0x4B14 US_ALU_EXT_ADDR_21
+0x4B18 US_ALU_EXT_ADDR_22
+0x4B1C US_ALU_EXT_ADDR_23
+0x4B20 US_ALU_EXT_ADDR_24
+0x4B24 US_ALU_EXT_ADDR_25
+0x4B28 US_ALU_EXT_ADDR_26
+0x4B2C US_ALU_EXT_ADDR_27
+0x4B30 US_ALU_EXT_ADDR_28
+0x4B34 US_ALU_EXT_ADDR_29
+0x4B38 US_ALU_EXT_ADDR_30
+0x4B3C US_ALU_EXT_ADDR_31
+0x4B40 US_ALU_EXT_ADDR_32
+0x4B44 US_ALU_EXT_ADDR_33
+0x4B48 US_ALU_EXT_ADDR_34
+0x4B4C US_ALU_EXT_ADDR_35
+0x4B50 US_ALU_EXT_ADDR_36
+0x4B54 US_ALU_EXT_ADDR_37
+0x4B58 US_ALU_EXT_ADDR_38
+0x4B5C US_ALU_EXT_ADDR_39
+0x4B60 US_ALU_EXT_ADDR_40
+0x4B64 US_ALU_EXT_ADDR_41
+0x4B68 US_ALU_EXT_ADDR_42
+0x4B6C US_ALU_EXT_ADDR_43
+0x4B70 US_ALU_EXT_ADDR_44
+0x4B74 US_ALU_EXT_ADDR_45
+0x4B78 US_ALU_EXT_ADDR_46
+0x4B7C US_ALU_EXT_ADDR_47
+0x4B80 US_ALU_EXT_ADDR_48
+0x4B84 US_ALU_EXT_ADDR_49
+0x4B88 US_ALU_EXT_ADDR_50
+0x4B8C US_ALU_EXT_ADDR_51
+0x4B90 US_ALU_EXT_ADDR_52
+0x4B94 US_ALU_EXT_ADDR_53
+0x4B98 US_ALU_EXT_ADDR_54
+0x4B9C US_ALU_EXT_ADDR_55
+0x4BA0 US_ALU_EXT_ADDR_56
+0x4BA4 US_ALU_EXT_ADDR_57
+0x4BA8 US_ALU_EXT_ADDR_58
+0x4BAC US_ALU_EXT_ADDR_59
+0x4BB0 US_ALU_EXT_ADDR_60
+0x4BB4 US_ALU_EXT_ADDR_61
+0x4BB8 US_ALU_EXT_ADDR_62
+0x4BBC US_ALU_EXT_ADDR_63
 0x4BC0 FG_FOG_BLEND
 0x4BC4 FG_FOG_FACTOR
 0x4BC8 FG_FOG_COLOR_R

+ 6 - 0
drivers/gpu/drm/radeon/reg_srcs/rv515

@@ -161,7 +161,12 @@ rv515 0x6d40
 0x401C GB_SELECT
 0x4020 GB_AA_CONFIG
 0x4024 GB_FIFO_SIZE
+0x4028 GB_Z_PEQ_CONFIG
 0x4100 TX_INVALTAGS
+0x4114 SU_TEX_WRAP_PS3
+0x4118 PS3_ENABLE
+0x411c PS3_VTX_FMT
+0x4120 PS3_TEX_SOURCE
 0x4200 GA_POINT_S0
 0x4204 GA_POINT_T0
 0x4208 GA_POINT_S1
@@ -171,6 +176,7 @@ rv515 0x6d40
 0x4230 GA_POINT_MINMAX
 0x4234 GA_LINE_CNTL
 0x4238 GA_LINE_STIPPLE_CONFIG
+0x4258 GA_COLOR_CONTROL_PS3
 0x4260 GA_LINE_STIPPLE_VALUE
 0x4264 GA_LINE_S0
 0x4268 GA_LINE_S1

+ 2 - 0
drivers/gpu/drm/radeon/rs400.c

@@ -356,6 +356,7 @@ static int rs400_mc_init(struct radeon_device *rdev)
 	rdev->mc.vram_location = G_00015C_MC_FB_START(tmp) << 16;
 	rdev->mc.gtt_location = 0xFFFFFFFFUL;
 	r = radeon_mc_setup(rdev);
+	rdev->mc.igp_sideport_enabled = radeon_combios_sideport_present(rdev);
 	if (r)
 		return r;
 	return 0;
@@ -395,6 +396,7 @@ static int rs400_startup(struct radeon_device *rdev)
 		return r;
 	/* Enable IRQ */
 	r100_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {

+ 9 - 1
drivers/gpu/drm/radeon/rs600.c

@@ -56,6 +56,7 @@ int rs600_mc_init(struct radeon_device *rdev)
 	rdev->mc.vram_location = G_000004_MC_FB_START(tmp) << 16;
 	rdev->mc.gtt_location = 0xffffffffUL;
 	r = radeon_mc_setup(rdev);
+	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
 	if (r)
 		return r;
 	return 0;
@@ -134,7 +135,8 @@ void rs600_hpd_init(struct radeon_device *rdev)
 			break;
 		}
 	}
-	rs600_irq_set(rdev);
+	if (rdev->irq.installed)
+		rs600_irq_set(rdev);
 }
 
 void rs600_hpd_fini(struct radeon_device *rdev)
@@ -315,6 +317,11 @@ int rs600_irq_set(struct radeon_device *rdev)
 	u32 hpd2 = RREG32(R_007D18_DC_HOT_PLUG_DETECT2_INT_CONTROL) &
 		~S_007D18_DC_HOT_PLUG_DETECT2_INT_EN(1);
 
+	if (!rdev->irq.installed) {
+		WARN(1, "Can't enable IRQ/MSI because no handler is installed.\n");
+		WREG32(R_000040_GEN_INT_CNTL, 0);
+		return -EINVAL;
+	}
 	if (rdev->irq.sw_int) {
 		tmp |= S_000040_SW_INT_EN(1);
 	}
@@ -553,6 +560,7 @@ static int rs600_startup(struct radeon_device *rdev)
 		return r;
 	/* Enable IRQ */
 	rs600_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {

+ 2 - 0
drivers/gpu/drm/radeon/rs690.c

@@ -172,6 +172,7 @@ static int rs690_mc_init(struct radeon_device *rdev)
 	rdev->mc.vram_location = G_000100_MC_FB_START(tmp) << 16;
 	rdev->mc.gtt_location = 0xFFFFFFFFUL;
 	r = radeon_mc_setup(rdev);
+	rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
 	if (r)
 		return r;
 	return 0;
@@ -625,6 +626,7 @@ static int rs690_startup(struct radeon_device *rdev)
 		return r;
 	/* Enable IRQ */
 	rs600_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {

+ 1 - 0
drivers/gpu/drm/radeon/rv515.c

@@ -479,6 +479,7 @@ static int rv515_startup(struct radeon_device *rdev)
 	}
 	/* Enable IRQ */
 	rs600_irq_set(rdev);
+	rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL);
 	/* 1M ring buffer */
 	r = r100_cp_init(rdev, 1024 * 1024);
 	if (r) {

+ 1 - 2
drivers/gpu/drm/radeon/rv770.c

@@ -1096,8 +1096,7 @@ void rv770_fini(struct radeon_device *rdev)
 	radeon_gem_fini(rdev);
 	radeon_fence_driver_fini(rdev);
 	radeon_clocks_fini(rdev);
-	if (rdev->flags & RADEON_IS_AGP)
-		radeon_agp_fini(rdev);
+	radeon_agp_fini(rdev);
 	radeon_bo_fini(rdev);
 	radeon_atombios_fini(rdev);
 	kfree(rdev->bios);

+ 1 - 0
include/drm/drm_mode.h

@@ -160,6 +160,7 @@ struct drm_mode_get_encoder {
 #define DRM_MODE_CONNECTOR_HDMIA	11
 #define DRM_MODE_CONNECTOR_HDMIB	12
 #define DRM_MODE_CONNECTOR_TV		13
+#define DRM_MODE_CONNECTOR_eDP		14
 
 struct drm_mode_get_connector {