
Merge branch 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6

* 'drm-core-next' of git://git.kernel.org/pub/scm/linux/kernel/git/airlied/drm-2.6: (476 commits)
  vmwgfx: Implement a proper GMR eviction mechanism
  drm/radeon/kms: fix r6xx/7xx 1D tiling CS checker v2
  drm/radeon/kms: properly compute group_size on 6xx/7xx
  drm/radeon/kms: fix 2D tile height alignment in the r600 CS checker
  drm/radeon/kms/evergreen: set the clear state to the blit state
  drm/radeon/kms: don't poll dac load detect.
  gpu: Add Intel GMA500(Poulsbo) Stub Driver
  drm/radeon/kms: MC vram map needs to be >= pci aperture size
  drm/radeon/kms: implement display watermark support for evergreen
  drm/radeon/kms/evergreen: add some additional safe regs v2
  drm/radeon/r600: fix tiling issues in CS checker.
  drm/i915: Move gpu_write_list to per-ring
  drm/i915: Invalidate the to-ring, flush the old-ring when updating domains
  drm/i915/ringbuffer: Write the value passed in to the tail register
  agp/intel: Restore valid PTE bit for Sandybridge after bdd3072
  drm/i915: Fix flushing regression from 9af90d19f
  drm/i915/sdvo: Remove unused encoding member
  i915: enable AVI infoframe for intel_hdmi.c [v4]
  drm/i915: Fix current fb blocking for page flip
  drm/i915: IS_IRONLAKE is synonymous with gen == 5
  ...

Fix up conflicts in
 - drivers/gpu/drm/i915/{i915_gem.c, i915/intel_overlay.c}: due to the
   new simplified stack-based kmap_atomic() interface
 - drivers/gpu/drm/vmwgfx/vmwgfx_drv.c: added .llseek entry due to BKL
   removal cleanups.
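
For context, the kmap_atomic() interface change behind the i915 conflicts dropped the explicit KM_* slot argument in favour of per-task stacked slots. A minimal before/after sketch, illustrative only and not code from this merge (page, data and len are placeholders):

	/* old interface: caller names a fixed kmap slot */
	void *vaddr = kmap_atomic(page, KM_USER0);
	memcpy(vaddr, data, len);
	kunmap_atomic(vaddr, KM_USER0);

	/* new stack-based interface: no slot argument */
	void *vaddr = kmap_atomic(page);
	memcpy(vaddr, data, len);
	kunmap_atomic(vaddr);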
Linus Torvalds, 14 years ago
commit c48c43e422
100 files changed, 8705 additions and 5908 deletions
  1. MAINTAINERS (+9 -0)
  2. drivers/char/agp/Makefile (+1 -0)
  3. drivers/char/agp/agp.h (+0 -5)
  4. drivers/char/agp/amd-k7-agp.c (+4 -2)
  5. drivers/char/agp/backend.c (+1 -21)
  6. drivers/char/agp/generic.c (+0 -8)
  7. drivers/char/agp/intel-agp.c (+24 -177)
  8. drivers/char/agp/intel-agp.h (+3 -40)
  9. drivers/char/agp/intel-gtt.c (+428 -368)
  10. drivers/gpu/Makefile (+1 -1)
  11. drivers/gpu/drm/Makefile (+1 -1)
  12. drivers/gpu/drm/drm_agpsupport.c (+4 -36)
  13. drivers/gpu/drm/drm_context.c (+0 -8)
  14. drivers/gpu/drm/drm_crtc.c (+2 -1)
  15. drivers/gpu/drm/drm_debugfs.c (+0 -1)
  16. drivers/gpu/drm/drm_drawable.c (+0 -198)
  17. drivers/gpu/drm/drm_drv.c (+3 -7)
  18. drivers/gpu/drm/drm_edid.c (+78 -15)
  19. drivers/gpu/drm/drm_fb_helper.c (+29 -3)
  20. drivers/gpu/drm/drm_gem.c (+0 -14)
  21. drivers/gpu/drm/drm_info.c (+0 -14)
  22. drivers/gpu/drm/drm_lock.c (+5 -25)
  23. drivers/gpu/drm/drm_memory.c (+4 -10)
  24. drivers/gpu/drm/drm_proc.c (+0 -14)
  25. drivers/gpu/drm/drm_scatter.c (+0 -2)
  26. drivers/gpu/drm/drm_stub.c (+0 -4)
  27. drivers/gpu/drm/drm_vm.c (+2 -11)
  28. drivers/gpu/drm/i810/i810_drv.c (+0 -2)
  29. drivers/gpu/drm/i830/i830_drv.c (+0 -2)
  30. drivers/gpu/drm/i915/Makefile (+3 -1)
  31. drivers/gpu/drm/i915/dvo_ch7017.c (+27 -39)
  32. drivers/gpu/drm/i915/dvo_ch7xxx.c (+4 -6)
  33. drivers/gpu/drm/i915/dvo_ivch.c (+4 -6)
  34. drivers/gpu/drm/i915/dvo_sil164.c (+4 -6)
  35. drivers/gpu/drm/i915/dvo_tfp410.c (+4 -6)
  36. drivers/gpu/drm/i915/i915_debugfs.c (+267 -69)
  37. drivers/gpu/drm/i915/i915_dma.c (+109 -251)
  38. drivers/gpu/drm/i915/i915_drv.c (+149 -65)
  39. drivers/gpu/drm/i915/i915_drv.h (+187 -84)
  40. drivers/gpu/drm/i915/i915_gem.c (+327 -235)
  41. drivers/gpu/drm/i915/i915_gem_debug.c (+98 -50)
  42. drivers/gpu/drm/i915/i915_gem_evict.c (+9 -63)
  43. drivers/gpu/drm/i915/i915_gem_tiling.c (+26 -28)
  44. drivers/gpu/drm/i915/i915_irq.c (+144 -115)
  45. drivers/gpu/drm/i915/i915_reg.h (+211 -124)
  46. drivers/gpu/drm/i915/i915_suspend.c (+13 -15)
  47. drivers/gpu/drm/i915/intel_acpi.c (+286 -0)
  48. drivers/gpu/drm/i915/intel_bios.c (+163 -71)
  49. drivers/gpu/drm/i915/intel_bios.h (+4 -2)
  50. drivers/gpu/drm/i915/intel_crt.c (+64 -63)
  51. drivers/gpu/drm/i915/intel_display.c (+760 -654)
  52. drivers/gpu/drm/i915/intel_dp.c (+338 -212)
  53. drivers/gpu/drm/i915/intel_drv.h (+103 -57)
  54. drivers/gpu/drm/i915/intel_dvo.c (+28 -41)
  55. drivers/gpu/drm/i915/intel_fb.c (+7 -22)
  56. drivers/gpu/drm/i915/intel_hdmi.c (+157 -36)
  57. drivers/gpu/drm/i915/intel_i2c.c (+357 -127)
  58. drivers/gpu/drm/i915/intel_lvds.c (+230 -205)
  59. drivers/gpu/drm/i915/intel_modes.c (+4 -12)
  60. drivers/gpu/drm/i915/intel_opregion.c (+68 -113)
  61. drivers/gpu/drm/i915/intel_overlay.c (+373 -364)
  62. drivers/gpu/drm/i915/intel_panel.c (+109 -0)
  63. drivers/gpu/drm/i915/intel_ringbuffer.c (+248 -209)
  64. drivers/gpu/drm/i915/intel_ringbuffer.h (+47 -34)
  65. drivers/gpu/drm/i915/intel_sdvo.c (+294 -377)
  66. drivers/gpu/drm/i915/intel_tv.c (+80 -85)
  67. drivers/gpu/drm/mga/mga_drv.c (+0 -2)
  68. drivers/gpu/drm/nouveau/Kconfig (+1 -0)
  69. drivers/gpu/drm/nouveau/Makefile (+4 -2)
  70. drivers/gpu/drm/nouveau/nouveau_acpi.c (+1 -1)
  71. drivers/gpu/drm/nouveau/nouveau_bios.c (+220 -146)
  72. drivers/gpu/drm/nouveau/nouveau_bios.h (+37 -6)
  73. drivers/gpu/drm/nouveau/nouveau_bo.c (+194 -96)
  74. drivers/gpu/drm/nouveau/nouveau_calc.c (+5 -5)
  75. drivers/gpu/drm/nouveau/nouveau_channel.c (+9 -14)
  76. drivers/gpu/drm/nouveau/nouveau_connector.c (+53 -1)
  77. drivers/gpu/drm/nouveau/nouveau_connector.h (+3 -0)
  78. drivers/gpu/drm/nouveau/nouveau_debugfs.c (+16 -0)
  79. drivers/gpu/drm/nouveau/nouveau_dma.c (+6 -26)
  80. drivers/gpu/drm/nouveau/nouveau_dma.h (+1 -0)
  81. drivers/gpu/drm/nouveau/nouveau_dp.c (+7 -3)
  82. drivers/gpu/drm/nouveau/nouveau_drv.c (+18 -5)
  83. drivers/gpu/drm/nouveau/nouveau_drv.h (+162 -91)
  84. drivers/gpu/drm/nouveau/nouveau_encoder.h (+1 -0)
  85. drivers/gpu/drm/nouveau/nouveau_fbcon.c (+6 -0)
  86. drivers/gpu/drm/nouveau/nouveau_fence.c (+301 -17)
  87. drivers/gpu/drm/nouveau/nouveau_gem.c (+2 -2)
  88. drivers/gpu/drm/nouveau/nouveau_grctx.h (+1 -1)
  89. drivers/gpu/drm/nouveau/nouveau_hw.c (+21 -24)
  90. drivers/gpu/drm/nouveau/nouveau_i2c.c (+6 -2)
  91. drivers/gpu/drm/nouveau/nouveau_i2c.h (+4 -1)
  92. drivers/gpu/drm/nouveau/nouveau_irq.c (+64 -59)
  93. drivers/gpu/drm/nouveau/nouveau_mem.c (+313 -50)
  94. drivers/gpu/drm/nouveau/nouveau_notifier.c (+5 -4)
  95. drivers/gpu/drm/nouveau/nouveau_object.c (+198 -513)
  96. drivers/gpu/drm/nouveau/nouveau_perf.c (+205 -0)
  97. drivers/gpu/drm/nouveau/nouveau_pm.c (+518 -0)
  98. drivers/gpu/drm/nouveau/nouveau_pm.h (+74 -0)
  99. drivers/gpu/drm/nouveau/nouveau_ramht.c (+289 -0)
  100. drivers/gpu/drm/nouveau/nouveau_ramht.h (+55 -0)

+ 9 - 0
MAINTAINERS

@@ -2060,6 +2060,15 @@ S:	Maintained
 F:	drivers/gpu/drm/
 F:	include/drm/
 
+INTEL DRM DRIVERS (excluding Poulsbo, Moorestown and derivative chipsets)
+M:	Chris Wilson <chris@chris-wilson.co.uk>
+L:	intel-gfx@lists.freedesktop.org
+L:	dri-devel@lists.freedesktop.org
+T:	git git://git.kernel.org/pub/scm/linux/kernel/git/ickle/drm-intel.git
+S:	Supported
+F:	drivers/gpu/drm/i915
+F:	include/drm/i915*
+
 DSCC4 DRIVER
 M:	Francois Romieu <romieu@fr.zoreil.com>
 L:	netdev@vger.kernel.org

+ 1 - 0
drivers/char/agp/Makefile

@@ -13,6 +13,7 @@ obj-$(CONFIG_AGP_HP_ZX1)	+= hp-agp.o
 obj-$(CONFIG_AGP_PARISC)	+= parisc-agp.o
 obj-$(CONFIG_AGP_I460)		+= i460-agp.o
 obj-$(CONFIG_AGP_INTEL)		+= intel-agp.o
+obj-$(CONFIG_AGP_INTEL)		+= intel-gtt.o
 obj-$(CONFIG_AGP_NVIDIA)	+= nvidia-agp.o
 obj-$(CONFIG_AGP_SGI_TIOCA)	+= sgi-agp.o
 obj-$(CONFIG_AGP_SIS)		+= sis-agp.o

+ 0 - 5
drivers/char/agp/agp.h

@@ -121,11 +121,6 @@ struct agp_bridge_driver {
 	void (*agp_destroy_pages)(struct agp_memory *);
 	int (*agp_type_to_mask_type) (struct agp_bridge_data *, int);
 	void (*chipset_flush)(struct agp_bridge_data *);
-
-	int (*agp_map_page)(struct page *page, dma_addr_t *ret);
-	void (*agp_unmap_page)(struct page *page, dma_addr_t dma);
-	int (*agp_map_memory)(struct agp_memory *mem);
-	void (*agp_unmap_memory)(struct agp_memory *mem);
 };
 
 struct agp_bridge_data {

+ 4 - 2
drivers/char/agp/amd-k7-agp.c

@@ -309,7 +309,8 @@ static int amd_insert_memory(struct agp_memory *mem, off_t pg_start, int type)
 
 	num_entries = A_SIZE_LVL2(agp_bridge->current_size)->num_entries;
 
-	if (type != 0 || mem->type != 0)
+	if (type != mem->type ||
+	    agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type))
 		return -EINVAL;
 
 	if ((pg_start + mem->page_count) > num_entries)
@@ -348,7 +349,8 @@ static int amd_remove_memory(struct agp_memory *mem, off_t pg_start, int type)
 	unsigned long __iomem *cur_gatt;
 	unsigned long addr;
 
-	if (type != 0 || mem->type != 0)
+	if (type != mem->type ||
+	    agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type))
 		return -EINVAL;
 
 	for (i = pg_start; i < (mem->page_count + pg_start); i++) {

+ 1 - 21
drivers/char/agp/backend.c

@@ -151,17 +151,7 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
 		}
 
 		bridge->scratch_page_page = page;
-		if (bridge->driver->agp_map_page) {
-			if (bridge->driver->agp_map_page(page,
-							 &bridge->scratch_page_dma)) {
-				dev_err(&bridge->dev->dev,
-					"unable to dma-map scratch page\n");
-				rc = -ENOMEM;
-				goto err_out_nounmap;
-			}
-		} else {
-			bridge->scratch_page_dma = page_to_phys(page);
-		}
+		bridge->scratch_page_dma = page_to_phys(page);
 
 	bridge->scratch_page = bridge->driver->mask_memory(bridge,
 						   bridge->scratch_page_dma, 0);
@@ -204,12 +194,6 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
 	return 0;
 
 err_out:
-	if (bridge->driver->needs_scratch_page &&
-	    bridge->driver->agp_unmap_page) {
-		bridge->driver->agp_unmap_page(bridge->scratch_page_page,
-					       bridge->scratch_page_dma);
-	}
-err_out_nounmap:
 	if (bridge->driver->needs_scratch_page) {
 		void *va = page_address(bridge->scratch_page_page);
 
@@ -240,10 +224,6 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge)
 	    bridge->driver->needs_scratch_page) {
 		void *va = page_address(bridge->scratch_page_page);
 
-		if (bridge->driver->agp_unmap_page)
-			bridge->driver->agp_unmap_page(bridge->scratch_page_page,
-						       bridge->scratch_page_dma);
-
 		bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_UNMAP);
 		bridge->driver->agp_destroy_page(va, AGP_PAGE_DESTROY_FREE);
 	}

+ 0 - 8
drivers/char/agp/generic.c

@@ -437,11 +437,6 @@ int agp_bind_memory(struct agp_memory *curr, off_t pg_start)
 		curr->is_flushed = true;
 	}
 
-	if (curr->bridge->driver->agp_map_memory) {
-		ret_val = curr->bridge->driver->agp_map_memory(curr);
-		if (ret_val)
-			return ret_val;
-	}
 	ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type);
 
 	if (ret_val != 0)
@@ -483,9 +478,6 @@ int agp_unbind_memory(struct agp_memory *curr)
 	if (ret_val != 0)
 		return ret_val;
 
-	if (curr->bridge->driver->agp_unmap_memory)
-		curr->bridge->driver->agp_unmap_memory(curr);
-
 	curr->is_bound = false;
 	curr->pg_start = 0;
 	spin_lock(&curr->bridge->mapped_lock);

+ 24 - 177
drivers/char/agp/intel-agp.c

@@ -12,9 +12,6 @@
 #include <asm/smp.h>
 #include "agp.h"
 #include "intel-agp.h"
-#include <linux/intel-gtt.h>
-
-#include "intel-gtt.c"
 
 int intel_agp_enabled;
 EXPORT_SYMBOL(intel_agp_enabled);
@@ -703,179 +700,37 @@ static const struct agp_bridge_driver intel_7505_driver = {
 	.agp_type_to_mask_type  = agp_generic_type_to_mask_type,
 };
 
-static int find_gmch(u16 device)
-{
-	struct pci_dev *gmch_device;
-
-	gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL, device, NULL);
-	if (gmch_device && PCI_FUNC(gmch_device->devfn) != 0) {
-		gmch_device = pci_get_device(PCI_VENDOR_ID_INTEL,
-					     device, gmch_device);
-	}
-
-	if (!gmch_device)
-		return 0;
-
-	intel_private.pcidev = gmch_device;
-	return 1;
-}
-
 /* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
 /* Table to describe Intel GMCH and AGP/PCIE GART drivers.  At least one of
  * driver and gmch_driver must be non-null, and find_gmch will determine
  * which one should be used if a gmch_chip_id is present.
  */
+static const struct intel_agp_driver_description {
 	unsigned int chip_id;
 	unsigned int chip_id;
 	char *name;
 	char *name;
 	const struct agp_bridge_driver *driver;
 } intel_agp_chipsets[] = {
 } intel_agp_chipsets[] = {
-	{ PCI_DEVICE_ID_INTEL_82443BX_0, 0, "440BX", &intel_generic_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82443GX_0, 0, "440GX", &intel_generic_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82810_MC1, PCI_DEVICE_ID_INTEL_82810_IG1, "i810",
-		NULL, &intel_810_driver },
-	{ PCI_DEVICE_ID_INTEL_82810_MC3, PCI_DEVICE_ID_INTEL_82810_IG3, "i810",
-		NULL, &intel_810_driver },
-	{ PCI_DEVICE_ID_INTEL_82810E_MC, PCI_DEVICE_ID_INTEL_82810E_IG, "i810",
-		NULL, &intel_810_driver },
-	{ PCI_DEVICE_ID_INTEL_82815_MC, PCI_DEVICE_ID_INTEL_82815_CGC, "i815",
-		&intel_815_driver, &intel_810_driver },
-	{ PCI_DEVICE_ID_INTEL_82820_HB, 0, "i820", &intel_820_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82820_UP_HB, 0, "i820", &intel_820_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82830_HB, PCI_DEVICE_ID_INTEL_82830_CGC, "830M",
-		&intel_830mp_driver, &intel_830_driver },
-	{ PCI_DEVICE_ID_INTEL_82840_HB, 0, "i840", &intel_840_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82845_HB, 0, "845G", &intel_845_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82845G_HB, PCI_DEVICE_ID_INTEL_82845G_IG, "830M",
-		&intel_845_driver, &intel_830_driver },
-	{ PCI_DEVICE_ID_INTEL_82850_HB, 0, "i850", &intel_850_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82854_HB, PCI_DEVICE_ID_INTEL_82854_IG, "854",
-		&intel_845_driver, &intel_830_driver },
-	{ PCI_DEVICE_ID_INTEL_82855PM_HB, 0, "855PM", &intel_845_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82855GM_HB, PCI_DEVICE_ID_INTEL_82855GM_IG, "855GM",
-		&intel_845_driver, &intel_830_driver },
-	{ PCI_DEVICE_ID_INTEL_82860_HB, 0, "i860", &intel_860_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_82865_HB, PCI_DEVICE_ID_INTEL_82865_IG, "865",
-		&intel_845_driver, &intel_830_driver },
-	{ PCI_DEVICE_ID_INTEL_82875_HB, 0, "i875", &intel_845_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_E7221_HB, PCI_DEVICE_ID_INTEL_E7221_IG, "E7221 (i915)",
-		NULL, &intel_915_driver },
-	{ PCI_DEVICE_ID_INTEL_82915G_HB, PCI_DEVICE_ID_INTEL_82915G_IG, "915G",
-		NULL, &intel_915_driver },
-	{ PCI_DEVICE_ID_INTEL_82915GM_HB, PCI_DEVICE_ID_INTEL_82915GM_IG, "915GM",
-		NULL, &intel_915_driver },
-	{ PCI_DEVICE_ID_INTEL_82945G_HB, PCI_DEVICE_ID_INTEL_82945G_IG, "945G",
-		NULL, &intel_915_driver },
-	{ PCI_DEVICE_ID_INTEL_82945GM_HB, PCI_DEVICE_ID_INTEL_82945GM_IG, "945GM",
-		NULL, &intel_915_driver },
-	{ PCI_DEVICE_ID_INTEL_82945GME_HB, PCI_DEVICE_ID_INTEL_82945GME_IG, "945GME",
-		NULL, &intel_915_driver },
-	{ PCI_DEVICE_ID_INTEL_82946GZ_HB, PCI_DEVICE_ID_INTEL_82946GZ_IG, "946GZ",
-		NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_82G35_HB, PCI_DEVICE_ID_INTEL_82G35_IG, "G35",
-		NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_82965Q_HB, PCI_DEVICE_ID_INTEL_82965Q_IG, "965Q",
-		NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_82965G_HB, PCI_DEVICE_ID_INTEL_82965G_IG, "965G",
-		NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_82965GM_HB, PCI_DEVICE_ID_INTEL_82965GM_IG, "965GM",
-		NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_82965GME_HB, PCI_DEVICE_ID_INTEL_82965GME_IG, "965GME/GLE",
-		NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_7505_0, 0, "E7505", &intel_7505_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_7205_0, 0, "E7205", &intel_7505_driver, NULL },
-	{ PCI_DEVICE_ID_INTEL_G33_HB, PCI_DEVICE_ID_INTEL_G33_IG, "G33",
-		NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_Q35_HB, PCI_DEVICE_ID_INTEL_Q35_IG, "Q35",
-		NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_Q33_HB, PCI_DEVICE_ID_INTEL_Q33_IG, "Q33",
-		NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_M_IG, "GMA3150",
-		NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_PINEVIEW_HB, PCI_DEVICE_ID_INTEL_PINEVIEW_IG, "GMA3150",
-		NULL, &intel_g33_driver },
-	{ PCI_DEVICE_ID_INTEL_GM45_HB, PCI_DEVICE_ID_INTEL_GM45_IG,
-	    "GM45", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_EAGLELAKE_HB, PCI_DEVICE_ID_INTEL_EAGLELAKE_IG,
-	    "Eaglelake", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_Q45_HB, PCI_DEVICE_ID_INTEL_Q45_IG,
-	    "Q45/Q43", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_G45_HB, PCI_DEVICE_ID_INTEL_G45_IG,
-	    "G45/G43", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_B43_HB, PCI_DEVICE_ID_INTEL_B43_IG,
-	    "B43", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_B43_1_HB, PCI_DEVICE_ID_INTEL_B43_1_IG,
-	    "B43", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_G41_HB, PCI_DEVICE_ID_INTEL_G41_IG,
-	    "G41", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG,
-	    "HD Graphics", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
-	    "HD Graphics", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
-	    "HD Graphics", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB, PCI_DEVICE_ID_INTEL_IRONLAKE_M_IG,
-	    "HD Graphics", NULL, &intel_i965_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT1_IG,
-	    "Sandybridge", NULL, &intel_gen6_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_IG,
-	    "Sandybridge", NULL, &intel_gen6_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_GT2_PLUS_IG,
-	    "Sandybridge", NULL, &intel_gen6_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT1_IG,
-	    "Sandybridge", NULL, &intel_gen6_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_IG,
-	    "Sandybridge", NULL, &intel_gen6_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_GT2_PLUS_IG,
-	    "Sandybridge", NULL, &intel_gen6_driver },
-	{ PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB, PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG,
-	    "Sandybridge", NULL, &intel_gen6_driver },
-	{ 0, 0, NULL, NULL, NULL }
+	{ PCI_DEVICE_ID_INTEL_82443LX_0, "440LX", &intel_generic_driver },
+	{ PCI_DEVICE_ID_INTEL_82443BX_0, "440BX", &intel_generic_driver },
+	{ PCI_DEVICE_ID_INTEL_82443GX_0, "440GX", &intel_generic_driver },
+	{ PCI_DEVICE_ID_INTEL_82815_MC, "i815", &intel_815_driver },
+	{ PCI_DEVICE_ID_INTEL_82820_HB, "i820", &intel_820_driver },
+	{ PCI_DEVICE_ID_INTEL_82820_UP_HB, "i820", &intel_820_driver },
+	{ PCI_DEVICE_ID_INTEL_82830_HB, "830M", &intel_830mp_driver },
+	{ PCI_DEVICE_ID_INTEL_82840_HB, "i840", &intel_840_driver },
+	{ PCI_DEVICE_ID_INTEL_82845_HB, "845G", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82845G_HB, "830M", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82850_HB, "i850", &intel_850_driver },
+	{ PCI_DEVICE_ID_INTEL_82854_HB, "854", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82855PM_HB, "855PM", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82855GM_HB, "855GM", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82860_HB, "i860", &intel_860_driver },
+	{ PCI_DEVICE_ID_INTEL_82865_HB, "865", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_82875_HB, "i875", &intel_845_driver },
+	{ PCI_DEVICE_ID_INTEL_7505_0, "E7505", &intel_7505_driver },
+	{ PCI_DEVICE_ID_INTEL_7205_0, "E7205", &intel_7505_driver },
+	{ 0, NULL, NULL }
 };
 
-static int __devinit intel_gmch_probe(struct pci_dev *pdev,
-				      struct agp_bridge_data *bridge)
-{
-	int i, mask;
-
-	bridge->driver = NULL;
-
-	for (i = 0; intel_agp_chipsets[i].name != NULL; i++) {
-		if ((intel_agp_chipsets[i].gmch_chip_id != 0) &&
-			find_gmch(intel_agp_chipsets[i].gmch_chip_id)) {
-			bridge->driver =
-				intel_agp_chipsets[i].gmch_driver;
-			break;
-		}
-	}
-
-	if (!bridge->driver)
-		return 0;
-
-	bridge->dev_private_data = &intel_private;
-	bridge->dev = pdev;
-
-	dev_info(&pdev->dev, "Intel %s Chipset\n", intel_agp_chipsets[i].name);
-
-	if (bridge->driver->mask_memory == intel_gen6_mask_memory)
-		mask = 40;
-	else if (bridge->driver->mask_memory == intel_i965_mask_memory)
-		mask = 36;
-	else
-		mask = 32;
-
-	if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(mask)))
-		dev_err(&intel_private.pcidev->dev,
-			"set gfx device dma mask %d-bit failed!\n", mask);
-	else
-		pci_set_consistent_dma_mask(intel_private.pcidev,
-					    DMA_BIT_MASK(mask));
-
-	return 1;
-}
-
 static int __devinit agp_intel_probe(struct pci_dev *pdev,
 				     const struct pci_device_id *ent)
 {
@@ -905,7 +760,7 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
 		}
 	}
 
-	if (intel_agp_chipsets[i].name == NULL) {
+	if (!bridge->driver) {
 		if (cap_ptr)
 			dev_warn(&pdev->dev, "unsupported Intel chipset [%04x/%04x]\n",
 				 pdev->vendor, pdev->device);
@@ -913,14 +768,6 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
 		return -ENODEV;
 	}
 
-	if (!bridge->driver) {
-		if (cap_ptr)
-			dev_warn(&pdev->dev, "can't find bridge device (chip_id: %04x)\n",
-			    	 intel_agp_chipsets[i].gmch_chip_id);
-		agp_put_bridge(bridge);
-		return -ENODEV;
-	}
-
 	bridge->dev = pdev;
 	bridge->dev_private_data = NULL;
 
@@ -972,8 +819,7 @@ static void __devexit agp_intel_remove(struct pci_dev *pdev)
 
 	agp_remove_bridge(bridge);
 
-	if (intel_private.pcidev)
-		pci_dev_put(intel_private.pcidev);
+	intel_gmch_remove(pdev);
 
 	agp_put_bridge(bridge);
 }
@@ -1049,6 +895,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
 	ID(PCI_DEVICE_ID_INTEL_G45_HB),
 	ID(PCI_DEVICE_ID_INTEL_G41_HB),
 	ID(PCI_DEVICE_ID_INTEL_B43_HB),
+	ID(PCI_DEVICE_ID_INTEL_B43_1_HB),
 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
 	ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),

+ 3 - 40
drivers/char/agp/intel-agp.h

@@ -215,44 +215,7 @@
 #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB		0x0108  /* Server */
 #define PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_IG		0x010A
 
-/* cover 915 and 945 variants */
-#define IS_I915 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_E7221_HB || \
-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915G_HB || \
-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82915GM_HB || \
-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945G_HB || \
-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GM_HB || \
-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82945GME_HB)
-
-#define IS_I965 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82946GZ_HB || \
-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82G35_HB || \
-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965Q_HB || \
-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965G_HB || \
-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GM_HB || \
-		 agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_82965GME_HB)
-
-#define IS_G33 (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G33_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q35_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q33_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
-
-#define IS_PINEVIEW (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_M_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_PINEVIEW_HB)
-
-#define IS_SNB (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_SANDYBRIDGE_S_HB)
-
-#define IS_G4X (agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_EAGLELAKE_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_Q45_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G45_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_GM45_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_G41_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB || \
-		agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB || \
-		IS_SNB)
-
+int intel_gmch_probe(struct pci_dev *pdev,
+			       struct agp_bridge_data *bridge);
+void intel_gmch_remove(struct pci_dev *pdev);
 #endif

File diff suppressed because it is too large
+ 428 - 368
drivers/char/agp/intel-gtt.c


+ 1 - 1
drivers/gpu/Makefile

@@ -1 +1 @@
-obj-y			+= drm/ vga/
+obj-y			+= drm/ vga/ stub/

+ 1 - 1
drivers/gpu/drm/Makefile

@@ -5,7 +5,7 @@
 ccflags-y := -Iinclude/drm
 
 drm-y       :=	drm_auth.o drm_buffer.o drm_bufs.o drm_cache.o \
-		drm_context.o drm_dma.o drm_drawable.o \
+		drm_context.o drm_dma.o \
 		drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
 		drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
 		drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \

+ 4 - 36
drivers/gpu/drm/drm_agpsupport.c

@@ -193,7 +193,7 @@ int drm_agp_enable_ioctl(struct drm_device *dev, void *data,
  * \return zero on success or a negative number on failure.
  *
  * Verifies the AGP device is present and has been acquired, allocates the
- * memory via alloc_agp() and creates a drm_agp_mem entry for it.
+ * memory via agp_allocate_memory() and creates a drm_agp_mem entry for it.
  */
 int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
 {
@@ -211,7 +211,7 @@ int drm_agp_alloc(struct drm_device *dev, struct drm_agp_buffer *request)
 
 	pages = (request->size + PAGE_SIZE - 1) / PAGE_SIZE;
 	type = (u32) request->type;
-	if (!(memory = drm_alloc_agp(dev, pages, type))) {
+	if (!(memory = agp_allocate_memory(dev->agp->bridge, pages, type))) {
 		kfree(entry);
 		return -ENOMEM;
 	}
@@ -423,38 +423,6 @@ struct drm_agp_head *drm_agp_init(struct drm_device *dev)
 	return head;
 }
 
-/** Calls agp_allocate_memory() */
-DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data * bridge,
-				     size_t pages, u32 type)
-{
-	return agp_allocate_memory(bridge, pages, type);
-}
-
-/** Calls agp_free_memory() */
-int drm_agp_free_memory(DRM_AGP_MEM * handle)
-{
-	if (!handle)
-		return 0;
-	agp_free_memory(handle);
-	return 1;
-}
-
-/** Calls agp_bind_memory() */
-int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start)
-{
-	if (!handle)
-		return -EINVAL;
-	return agp_bind_memory(handle, start);
-}
-
-/** Calls agp_unbind_memory() */
-int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
-{
-	if (!handle)
-		return -EINVAL;
-	return agp_unbind_memory(handle);
-}
-
 /**
  * Binds a collection of pages into AGP memory at the given offset, returning
  * the AGP memory structure containing them.
@@ -474,7 +442,7 @@ drm_agp_bind_pages(struct drm_device *dev,
 
 	DRM_DEBUG("\n");
 
-	mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages,
+	mem = agp_allocate_memory(dev->agp->bridge, num_pages,
 				      type);
 	if (mem == NULL) {
 		DRM_ERROR("Failed to allocate memory for %ld pages\n",
@@ -487,7 +455,7 @@ drm_agp_bind_pages(struct drm_device *dev,
 	mem->page_count = num_pages;
 
 	mem->is_flushed = true;
-	ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
+	ret = agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
 	if (ret != 0) {
 		DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
 		agp_free_memory(mem);

+ 0 - 8
drivers/gpu/drm/drm_context.c

@@ -333,14 +333,6 @@ int drm_addctx(struct drm_device *dev, void *data,
 		return -ENOMEM;
 	}
 
-	if (ctx->handle != DRM_KERNEL_CONTEXT) {
-		if (dev->driver->context_ctor)
-			if (!dev->driver->context_ctor(dev, ctx->handle)) {
-				DRM_DEBUG("Running out of ctxs or memory.\n");
-				return -ENOMEM;
-			}
-	}
-
 	ctx_entry = kmalloc(sizeof(*ctx_entry), GFP_KERNEL);
 	if (!ctx_entry) {
 		DRM_DEBUG("out of memory\n");

+ 2 - 1
drivers/gpu/drm/drm_crtc.c

@@ -1854,7 +1854,8 @@ int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
 	}
 
 	if (fb->funcs->dirty) {
-		ret = fb->funcs->dirty(fb, flags, r->color, clips, num_clips);
+		ret = fb->funcs->dirty(fb, file_priv, flags, r->color,
+				       clips, num_clips);
 	} else {
 		ret = -ENOSYS;
 		goto out_err2;

+ 0 - 1
drivers/gpu/drm/drm_debugfs.c

@@ -48,7 +48,6 @@ static struct drm_info_list drm_debugfs_list[] = {
 	{"queues", drm_queues_info, 0},
 	{"bufs", drm_bufs_info, 0},
 	{"gem_names", drm_gem_name_info, DRIVER_GEM},
-	{"gem_objects", drm_gem_object_info, DRIVER_GEM},
 #if DRM_DEBUG_CODE
 	{"vma", drm_vma_info, 0},
 #endif

+ 0 - 198
drivers/gpu/drm/drm_drawable.c

@@ -1,198 +0,0 @@
-/**
- * \file drm_drawable.c
- * IOCTLs for drawables
- *
- * \author Rickard E. (Rik) Faith <faith@valinux.com>
- * \author Gareth Hughes <gareth@valinux.com>
- * \author Michel Dänzer <michel@tungstengraphics.com>
- */
-
-/*
- * Created: Tue Feb  2 08:37:54 1999 by faith@valinux.com
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright 2006 Tungsten Graphics, Inc., Bismarck, North Dakota.
- * All Rights Reserved.
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#include "drmP.h"
-
-/**
- * Allocate drawable ID and memory to store information about it.
- */
-int drm_adddraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	unsigned long irqflags;
-	struct drm_draw *draw = data;
-	int new_id = 0;
-	int ret;
-
-again:
-	if (idr_pre_get(&dev->drw_idr, GFP_KERNEL) == 0) {
-		DRM_ERROR("Out of memory expanding drawable idr\n");
-		return -ENOMEM;
-	}
-
-	spin_lock_irqsave(&dev->drw_lock, irqflags);
-	ret = idr_get_new_above(&dev->drw_idr, NULL, 1, &new_id);
-	if (ret == -EAGAIN) {
-		spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-		goto again;
-	}
-
-	spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-
-	draw->handle = new_id;
-
-	DRM_DEBUG("%d\n", draw->handle);
-
-	return 0;
-}
-
-/**
- * Free drawable ID and memory to store information about it.
- */
-int drm_rmdraw(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	struct drm_draw *draw = data;
-	unsigned long irqflags;
-	struct drm_drawable_info *info;
-
-	spin_lock_irqsave(&dev->drw_lock, irqflags);
-
-	info = drm_get_drawable_info(dev, draw->handle);
-	if (info == NULL) {
-		spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-		return -EINVAL;
-	}
-	kfree(info->rects);
-	kfree(info);
-
-	idr_remove(&dev->drw_idr, draw->handle);
-
-	spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-	DRM_DEBUG("%d\n", draw->handle);
-	return 0;
-}
-
-int drm_update_drawable_info(struct drm_device *dev, void *data, struct drm_file *file_priv)
-{
-	struct drm_update_draw *update = data;
-	unsigned long irqflags;
-	struct drm_clip_rect *rects;
-	struct drm_drawable_info *info;
-	int err;
-
-	info = idr_find(&dev->drw_idr, update->handle);
-	if (!info) {
-		info = kzalloc(sizeof(*info), GFP_KERNEL);
-		if (!info)
-			return -ENOMEM;
-		if (IS_ERR(idr_replace(&dev->drw_idr, info, update->handle))) {
-			DRM_ERROR("No such drawable %d\n", update->handle);
-			kfree(info);
-			return -EINVAL;
-		}
-	}
-
-	switch (update->type) {
-	case DRM_DRAWABLE_CLIPRECTS:
-		if (update->num == 0)
-			rects = NULL;
-		else if (update->num != info->num_rects) {
-			rects = kmalloc(update->num *
-					sizeof(struct drm_clip_rect),
-					GFP_KERNEL);
-		} else
-			rects = info->rects;
-
-		if (update->num && !rects) {
-			DRM_ERROR("Failed to allocate cliprect memory\n");
-			err = -ENOMEM;
-			goto error;
-		}
-
-		if (update->num && DRM_COPY_FROM_USER(rects,
-						     (struct drm_clip_rect __user *)
-						     (unsigned long)update->data,
-						     update->num *
-						     sizeof(*rects))) {
-			DRM_ERROR("Failed to copy cliprects from userspace\n");
-			err = -EFAULT;
-			goto error;
-		}
-
-		spin_lock_irqsave(&dev->drw_lock, irqflags);
-
-		if (rects != info->rects) {
-			kfree(info->rects);
-		}
-
-		info->rects = rects;
-		info->num_rects = update->num;
-
-		spin_unlock_irqrestore(&dev->drw_lock, irqflags);
-
-		DRM_DEBUG("Updated %d cliprects for drawable %d\n",
-			  info->num_rects, update->handle);
-		break;
-	default:
-		DRM_ERROR("Invalid update type %d\n", update->type);
-		return -EINVAL;
-	}
-
-	return 0;
-
-error:
-	if (rects != info->rects)
-		kfree(rects);
-
-	return err;
-}
-
-/**
- * Caller must hold the drawable spinlock!
- */
-struct drm_drawable_info *drm_get_drawable_info(struct drm_device *dev, drm_drawable_t id)
-{
-	return idr_find(&dev->drw_idr, id);
-}
-EXPORT_SYMBOL(drm_get_drawable_info);
-
-static int drm_drawable_free(int idr, void *p, void *data)
-{
-	struct drm_drawable_info *info = p;
-
-	if (info) {
-		kfree(info->rects);
-		kfree(info);
-	}
-
-	return 0;
-}
-
-void drm_drawable_free_all(struct drm_device *dev)
-{
-	idr_for_each(&dev->drw_idr, drm_drawable_free, NULL);
-	idr_remove_all(&dev->drw_idr);
-}

+ 3 - 7
drivers/gpu/drm/drm_drv.c

@@ -91,8 +91,8 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_IOCTL_NEW_CTX, drm_newctx, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_IOCTL_RES_CTX, drm_resctx, DRM_AUTH),
 
-	DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_adddraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
-	DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_rmdraw, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_ADD_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_RM_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_LOCK, drm_lock, DRM_AUTH),
 	DRM_IOCTL_DEF(DRM_IOCTL_UNLOCK, drm_unlock, DRM_AUTH),
@@ -127,7 +127,7 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 
 	DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
 
-	DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_noop, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 
 	DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, DRM_UNLOCKED),
 	DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH|DRM_UNLOCKED),
@@ -180,10 +180,6 @@ int drm_lastclose(struct drm_device * dev)
 
 	mutex_lock(&dev->struct_mutex);
 
-	/* Free drawable information memory */
-	drm_drawable_free_all(dev);
-	del_timer(&dev->timer);
-
 	/* Clear AGP information */
 	if (drm_core_has_AGP(dev) && dev->agp &&
 			!drm_core_check_feature(dev, DRIVER_MODESET)) {

+ 78 - 15
drivers/gpu/drm/drm_edid.c

@@ -30,7 +30,6 @@
 #include <linux/kernel.h>
 #include <linux/slab.h>
 #include <linux/i2c.h>
-#include <linux/i2c-algo-bit.h>
 #include "drmP.h"
 #include "drm_edid.h"
 #include "drm_edid_modes.h"
@@ -1268,34 +1267,51 @@ add_detailed_modes(struct drm_connector *connector, struct edid *edid,
 }
 
 #define HDMI_IDENTIFIER 0x000C03
+#define AUDIO_BLOCK	0x01
 #define VENDOR_BLOCK    0x03
+#define EDID_BASIC_AUDIO	(1 << 6)
+
 /**
- * drm_detect_hdmi_monitor - detect whether monitor is hdmi.
- * @edid: monitor EDID information
- *
- * Parse the CEA extension according to CEA-861-B.
- * Return true if HDMI, false if not or unknown.
+ * Search EDID for CEA extension block.
  */
  */
+static u8 *drm_find_cea_extension(struct edid *edid)
 {
 {
-	int i, hdmi_id;
-	int start_offset, end_offset;
-	bool is_hdmi = false;
+	u8 *edid_ext = NULL;
+	int i;
 
 
 	/* No EDID or EDID extensions */
 	/* No EDID or EDID extensions */
 	if (edid == NULL || edid->extensions == 0)
 	if (edid == NULL || edid->extensions == 0)
-		goto end;
+		return NULL;
 
 
 	/* Find CEA extension */
 	/* Find CEA extension */
 	for (i = 0; i < edid->extensions; i++) {
 	for (i = 0; i < edid->extensions; i++) {
-		edid_ext = (char *)edid + EDID_LENGTH * (i + 1);
-		/* This block is CEA extension */
-		if (edid_ext[0] == 0x02)
+		edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
+		if (edid_ext[0] == CEA_EXT)
 			break;
 			break;
 	}
 
 	if (i == edid->extensions)
+
+	return edid_ext;
+}
+
+/**
+ * drm_detect_hdmi_monitor - detect whether monitor is hdmi.
+ * @edid: monitor EDID information
+ *
+ * Parse the CEA extension according to CEA-861-B.
+ * Return true if HDMI, false if not or unknown.
+ */
+bool drm_detect_hdmi_monitor(struct edid *edid)
+{
+	u8 *edid_ext;
+	int i, hdmi_id;
+	int start_offset, end_offset;
+	bool is_hdmi = false;
+
+	edid_ext = drm_find_cea_extension(edid);
+	if (!edid_ext)
 		goto end;
 
 	/* Data block offset in CEA extension block */
@@ -1325,6 +1341,53 @@ end:
 }
 EXPORT_SYMBOL(drm_detect_hdmi_monitor);
 
+/**
+ * drm_detect_monitor_audio - check monitor audio capability
+ *
+ * Monitor should have CEA extension block.
+ * If monitor has 'basic audio', but no CEA audio blocks, it's 'basic
+ * audio' only. If there is any audio extension block and supported
+ * audio format, assume at least 'basic audio' support, even if 'basic
+ * audio' is not defined in EDID.
+ *
+ */
+bool drm_detect_monitor_audio(struct edid *edid)
+{
+	u8 *edid_ext;
+	int i, j;
+	bool has_audio = false;
+	int start_offset, end_offset;
+
+	edid_ext = drm_find_cea_extension(edid);
+	if (!edid_ext)
+		goto end;
+
+	has_audio = ((edid_ext[3] & EDID_BASIC_AUDIO) != 0);
+
+	if (has_audio) {
+		DRM_DEBUG_KMS("Monitor has basic audio support\n");
+		goto end;
+	}
+
+	/* Data block offset in CEA extension block */
+	start_offset = 4;
+	end_offset = edid_ext[2];
+
+	for (i = start_offset; i < end_offset;
+			i += ((edid_ext[i] & 0x1f) + 1)) {
+		if ((edid_ext[i] >> 5) == AUDIO_BLOCK) {
+			has_audio = true;
+			for (j = 1; j < (edid_ext[i] & 0x1f); j += 3)
+				DRM_DEBUG_KMS("CEA audio format %d\n",
+					      (edid_ext[i + j] >> 3) & 0xf);
+			goto end;
+		}
+	}
+end:
+	return has_audio;
+}
+EXPORT_SYMBOL(drm_detect_monitor_audio);
+
 /**
  * drm_add_edid_modes - add modes from EDID data, if available
  * @connector: connector we're probing

+ 29 - 3
drivers/gpu/drm/drm_fb_helper.c

@@ -242,6 +242,30 @@ static int drm_fb_helper_parse_command_line(struct drm_fb_helper *fb_helper)
 	return 0;
 }
 
+static void drm_fb_helper_save_lut_atomic(struct drm_crtc *crtc, struct drm_fb_helper *helper)
+{
+	uint16_t *r_base, *g_base, *b_base;
+	int i;
+
+	r_base = crtc->gamma_store;
+	g_base = r_base + crtc->gamma_size;
+	b_base = g_base + crtc->gamma_size;
+
+	for (i = 0; i < crtc->gamma_size; i++)
+		helper->funcs->gamma_get(crtc, &r_base[i], &g_base[i], &b_base[i], i);
+}
+
+static void drm_fb_helper_restore_lut_atomic(struct drm_crtc *crtc)
+{
+	uint16_t *r_base, *g_base, *b_base;
+
+	r_base = crtc->gamma_store;
+	g_base = r_base + crtc->gamma_size;
+	b_base = g_base + crtc->gamma_size;
+
+	crtc->funcs->gamma_set(crtc, r_base, g_base, b_base, 0, crtc->gamma_size);
+}
+
 int drm_fb_helper_debug_enter(struct fb_info *info)
 {
 	struct drm_fb_helper *helper = info->par;
@@ -260,11 +284,12 @@ int drm_fb_helper_debug_enter(struct fb_info *info)
 				continue;
 
 			funcs =	mode_set->crtc->helper_private;
+			drm_fb_helper_save_lut_atomic(mode_set->crtc, helper);
 			funcs->mode_set_base_atomic(mode_set->crtc,
 						    mode_set->fb,
 						    mode_set->x,
-						    mode_set->y);
-
+						    mode_set->y,
+						    ENTER_ATOMIC_MODE_SET);
 		}
 	}
 
@@ -308,8 +333,9 @@ int drm_fb_helper_debug_leave(struct fb_info *info)
 			continue;
 		}
 
+		drm_fb_helper_restore_lut_atomic(mode_set->crtc);
 		funcs->mode_set_base_atomic(mode_set->crtc, fb, crtc->x,
-					    crtc->y);
+					    crtc->y, LEAVE_ATOMIC_MODE_SET);
 	}
 
 	return 0;

+ 0 - 14
drivers/gpu/drm/drm_gem.c

@@ -92,12 +92,6 @@ drm_gem_init(struct drm_device *dev)
 
 	spin_lock_init(&dev->object_name_lock);
 	idr_init(&dev->object_name_idr);
-	atomic_set(&dev->object_count, 0);
-	atomic_set(&dev->object_memory, 0);
-	atomic_set(&dev->pin_count, 0);
-	atomic_set(&dev->pin_memory, 0);
-	atomic_set(&dev->gtt_count, 0);
-	atomic_set(&dev->gtt_memory, 0);
 
 	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
 	if (!mm) {
@@ -151,9 +145,6 @@ int drm_gem_object_init(struct drm_device *dev,
 	atomic_set(&obj->handle_count, 0);
 	obj->size = size;
 
-	atomic_inc(&dev->object_count);
-	atomic_add(obj->size, &dev->object_memory);
-
 	return 0;
 }
 EXPORT_SYMBOL(drm_gem_object_init);
@@ -180,8 +171,6 @@ drm_gem_object_alloc(struct drm_device *dev, size_t size)
 	return obj;
 fput:
 	/* Object_init mangles the global counters - readjust them. */
-	atomic_dec(&dev->object_count);
-	atomic_sub(obj->size, &dev->object_memory);
 	fput(obj->filp);
 free:
 	kfree(obj);
@@ -436,10 +425,7 @@ drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
 void
 drm_gem_object_release(struct drm_gem_object *obj)
 {
-	struct drm_device *dev = obj->dev;
 	fput(obj->filp);
-	atomic_dec(&dev->object_count);
-	atomic_sub(obj->size, &dev->object_memory);
 }
 EXPORT_SYMBOL(drm_gem_object_release);
 

+ 0 - 14
drivers/gpu/drm/drm_info.c

@@ -270,20 +270,6 @@ int drm_gem_name_info(struct seq_file *m, void *data)
 	return 0;
 }
 
-int drm_gem_object_info(struct seq_file *m, void* data)
-{
-	struct drm_info_node *node = (struct drm_info_node *) m->private;
-	struct drm_device *dev = node->minor->dev;
-
-	seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
-	seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
-	seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
-	seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
-	seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
-	seq_printf(m, "%d gtt total\n", dev->gtt_total);
-	return 0;
-}
-
 #if DRM_DEBUG_CODE
 
 int drm_vma_info(struct seq_file *m, void *data)

+ 5 - 25
drivers/gpu/drm/drm_lock.c

@@ -37,6 +37,8 @@
 
 static int drm_notifier(void *priv);
 
+static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
+
 /**
  * Lock ioctl.
  *
@@ -124,9 +126,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 		block_all_signals(drm_notifier, &dev->sigdata, &dev->sigmask);
 	}
 
-	if (dev->driver->dma_ready && (lock->flags & _DRM_LOCK_READY))
-		dev->driver->dma_ready(dev);
-
 	if (dev->driver->dma_quiescent && (lock->flags & _DRM_LOCK_QUIESCENT))
 	{
 		if (dev->driver->dma_quiescent(dev)) {
@@ -136,12 +135,6 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 		}
 	}
 
-	if (dev->driver->kernel_context_switch &&
-	    dev->last_context != lock->context) {
-		dev->driver->kernel_context_switch(dev, dev->last_context,
-						   lock->context);
-	}
-
 	return 0;
 }
 
@@ -169,15 +162,8 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
 
 	atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
 
-	/* kernel_context_switch isn't used by any of the x86 drm
-	 * modules but is required by the Sparc driver.
-	 */
-	if (dev->driver->kernel_context_switch_unlock)
-		dev->driver->kernel_context_switch_unlock(dev);
-	else {
-		if (drm_lock_free(&master->lock, lock->context)) {
-			/* FIXME: Should really bail out here. */
-		}
+	if (drm_lock_free(&master->lock, lock->context)) {
+		/* FIXME: Should really bail out here. */
 	}
 
 	unblock_all_signals();
@@ -193,6 +179,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
  *
  * Attempt to mark the lock as held by the given context, via the \p cmpxchg instruction.
  */
+static
 int drm_lock_take(struct drm_lock_data *lock_data,
 		  unsigned int context)
 {
@@ -229,7 +216,6 @@ int drm_lock_take(struct drm_lock_data *lock_data,
 	}
 	return 0;
 }
-EXPORT_SYMBOL(drm_lock_take);
 
 /**
  * This takes a lock forcibly and hands it to context.	Should ONLY be used
@@ -297,7 +283,6 @@ int drm_lock_free(struct drm_lock_data *lock_data, unsigned int context)
 	wake_up_interruptible(&lock_data->lock_queue);
 	return 0;
 }
-EXPORT_SYMBOL(drm_lock_free);
 
 /**
  * If we get here, it means that the process has called DRM_IOCTL_LOCK
@@ -360,7 +345,6 @@ void drm_idlelock_take(struct drm_lock_data *lock_data)
 	}
 	spin_unlock_bh(&lock_data->spinlock);
 }
-EXPORT_SYMBOL(drm_idlelock_take);
 
 void drm_idlelock_release(struct drm_lock_data *lock_data)
 {
@@ -380,8 +364,6 @@ void drm_idlelock_release(struct drm_lock_data *lock_data)
 	}
 	spin_unlock_bh(&lock_data->spinlock);
 }
-EXPORT_SYMBOL(drm_idlelock_release);
-
 
 int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
 {
@@ -390,5 +372,3 @@ int drm_i_have_hw_lock(struct drm_device *dev, struct drm_file *file_priv)
 		_DRM_LOCK_IS_HELD(master->lock.hw_lock->lock) &&
 		master->lock.file_priv == file_priv);
 }
-
-EXPORT_SYMBOL(drm_i_have_hw_lock);

+ 4 - 10
drivers/gpu/drm/drm_memory.c

@@ -99,29 +99,23 @@ static void *agp_remap(unsigned long offset, unsigned long size,
 	return addr;
 }
 
-/** Wrapper around agp_allocate_memory() */
-DRM_AGP_MEM *drm_alloc_agp(struct drm_device * dev, int pages, u32 type)
-{
-	return drm_agp_allocate_memory(dev->agp->bridge, pages, type);
-}
-
 /** Wrapper around agp_free_memory() */
-int drm_free_agp(DRM_AGP_MEM * handle, int pages)
+void drm_free_agp(DRM_AGP_MEM * handle, int pages)
 {
-	return drm_agp_free_memory(handle) ? 0 : -EINVAL;
+	agp_free_memory(handle);
 }
 EXPORT_SYMBOL(drm_free_agp);
 
 /** Wrapper around agp_bind_memory() */
 int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
 {
-	return drm_agp_bind_memory(handle, start);
+	return agp_bind_memory(handle, start);
 }
 
 /** Wrapper around agp_unbind_memory() */
 int drm_unbind_agp(DRM_AGP_MEM * handle)
 {
-	return drm_agp_unbind_memory(handle);
+	return agp_unbind_memory(handle);
 }
 EXPORT_SYMBOL(drm_unbind_agp);
 

+ 0 - 14
drivers/gpu/drm/drm_proc.c

@@ -55,7 +55,6 @@ static struct drm_info_list drm_proc_list[] = {
 	{"queues", drm_queues_info, 0},
 	{"bufs", drm_bufs_info, 0},
 	{"gem_names", drm_gem_name_info, DRIVER_GEM},
-	{"gem_objects", drm_gem_object_info, DRIVER_GEM},
 #if DRM_DEBUG_CODE
 	{"vma", drm_vma_info, 0},
 #endif
@@ -151,7 +150,6 @@ fail:
 int drm_proc_init(struct drm_minor *minor, int minor_id,
 		  struct proc_dir_entry *root)
 {
-	struct drm_device *dev = minor->dev;
 	char name[64];
 	int ret;
 
@@ -172,14 +170,6 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
 		return ret;
 	}
 
-	if (dev->driver->proc_init) {
-		ret = dev->driver->proc_init(minor);
-		if (ret) {
-			DRM_ERROR("DRM: Driver failed to initialize "
-				  "/proc/dri.\n");
-			return ret;
-		}
-	}
 	return 0;
 }
 
@@ -216,15 +206,11 @@ int drm_proc_remove_files(struct drm_info_list *files, int count,
  */
 int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
 {
-	struct drm_device *dev = minor->dev;
 	char name[64];
 
 	if (!root || !minor->proc_root)
 		return 0;
 
-	if (dev->driver->proc_cleanup)
-		dev->driver->proc_cleanup(minor);
-
 	drm_proc_remove_files(drm_proc_list, DRM_PROC_ENTRIES, minor);
 
 	sprintf(name, "%d", minor->index);

+ 0 - 2
drivers/gpu/drm/drm_scatter.c

@@ -184,8 +184,6 @@ int drm_sg_alloc(struct drm_device *dev, struct drm_scatter_gather * request)
 	drm_sg_cleanup(entry);
 	return -ENOMEM;
 }
-EXPORT_SYMBOL(drm_sg_alloc);
-
 
 int drm_sg_alloc_ioctl(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv)

+ 0 - 4
drivers/gpu/drm/drm_stub.c

@@ -240,14 +240,10 @@ int drm_fill_in_dev(struct drm_device *dev,
	INIT_LIST_HEAD(&dev->vblank_event_list);

	spin_lock_init(&dev->count_lock);
-	spin_lock_init(&dev->drw_lock);
	spin_lock_init(&dev->event_lock);
-	init_timer(&dev->timer);
	mutex_init(&dev->struct_mutex);
	mutex_init(&dev->ctxlist_mutex);

-	idr_init(&dev->drw_idr);
-
	if (drm_ht_create(&dev->map_hash, 12)) {
		return -ENOMEM;
	}

+ 2 - 11
drivers/gpu/drm/drm_vm.c

@@ -523,14 +523,7 @@ static int drm_mmap_dma(struct file *filp, struct vm_area_struct *vma)
	return 0;
}

-resource_size_t drm_core_get_map_ofs(struct drm_local_map * map)
-{
-	return map->offset;
-}
-
-EXPORT_SYMBOL(drm_core_get_map_ofs);
-
-resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
+static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
{
#ifdef __alpha__
	return dev->hose->dense_mem_base - dev->hose->mem_space->start;
@@ -539,8 +532,6 @@ resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
#endif
}

-EXPORT_SYMBOL(drm_core_get_reg_ofs);
-
/**
 * mmap DMA memory.
 *
@@ -627,7 +618,7 @@ int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
#endif
	case _DRM_FRAME_BUFFER:
	case _DRM_REGISTERS:
-		offset = dev->driver->get_reg_ofs(dev);
+		offset = drm_core_get_reg_ofs(dev);
		vma->vm_flags |= VM_IO;	/* not in core dump */
		vma->vm_page_prot = drm_io_prot(map->type, vma);
#if !defined(__arm__)

+ 0 - 2
drivers/gpu/drm/i810/i810_drv.c

@@ -52,8 +52,6 @@ static struct drm_driver driver = {
	.device_is_agp = i810_driver_device_is_agp,
	.reclaim_buffers_locked = i810_driver_reclaim_buffers_locked,
	.dma_quiescent = i810_driver_dma_quiescent,
-	.get_map_ofs = drm_core_get_map_ofs,
-	.get_reg_ofs = drm_core_get_reg_ofs,
	.ioctls = i810_ioctls,
	.fops = {
		 .owner = THIS_MODULE,

+ 0 - 2
drivers/gpu/drm/i830/i830_drv.c

@@ -57,8 +57,6 @@ static struct drm_driver driver = {
	.device_is_agp = i830_driver_device_is_agp,
	.reclaim_buffers_locked = i830_driver_reclaim_buffers_locked,
	.dma_quiescent = i830_driver_dma_quiescent,
-	.get_map_ofs = drm_core_get_map_ofs,
-	.get_reg_ofs = drm_core_get_reg_ofs,
#if USE_IRQS
	.irq_preinstall = i830_driver_irq_preinstall,
	.irq_postinstall = i830_driver_irq_postinstall,

+ 3 - 1
drivers/gpu/drm/i915/Makefile

@@ -26,15 +26,17 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
	  intel_dvo.o \
	  intel_ringbuffer.o \
	  intel_overlay.o \
+	  intel_opregion.o \
	  dvo_ch7xxx.o \
	  dvo_ch7017.o \
	  dvo_ivch.o \
	  dvo_tfp410.o \
	  dvo_sil164.o

-i915-$(CONFIG_ACPI)	+= i915_opregion.o
i915-$(CONFIG_COMPAT)   += i915_ioc32.o

+i915-$(CONFIG_ACPI)	+= intel_acpi.o
+
obj-$(CONFIG_DRM_I915)  += i915.o

CFLAGS_i915_trace_points.o := -I$(src)

+ 27 - 39
drivers/gpu/drm/i915/dvo_ch7017.c

@@ -165,67 +165,44 @@ struct ch7017_priv {
static void ch7017_dump_regs(struct intel_dvo_device *dvo);
static void ch7017_dpms(struct intel_dvo_device *dvo, int mode);

-static bool ch7017_read(struct intel_dvo_device *dvo, int addr, uint8_t *val)
+static bool ch7017_read(struct intel_dvo_device *dvo, u8 addr, u8 *val)
{
-	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
-	u8 out_buf[2];
-	u8 in_buf[2];
-
	struct i2c_msg msgs[] = {
		{
			.addr = dvo->slave_addr,
			.flags = 0,
			.len = 1,
-			.buf = out_buf,
+			.buf = &addr,
		},
		{
			.addr = dvo->slave_addr,
			.flags = I2C_M_RD,
			.len = 1,
-			.buf = in_buf,
+			.buf = val,
		}
	};
-
-	out_buf[0] = addr;
-	out_buf[1] = 0;
-
-	if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
-		*val= in_buf[0];
-		return true;
-	};
-
-	return false;
+	return i2c_transfer(dvo->i2c_bus, msgs, 2) == 2;
}

-static bool ch7017_write(struct intel_dvo_device *dvo, int addr, uint8_t val)
+static bool ch7017_write(struct intel_dvo_device *dvo, u8 addr, u8 val)
{
-	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
-	uint8_t out_buf[2];
+	uint8_t buf[2] = { addr, val };
	struct i2c_msg msg = {
		.addr = dvo->slave_addr,
		.flags = 0,
		.len = 2,
-		.buf = out_buf,
+		.buf = buf,
	};
-
-	out_buf[0] = addr;
-	out_buf[1] = val;
-
-	if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
-		return true;
-
-	return false;
+	return i2c_transfer(dvo->i2c_bus, &msg, 1) == 1;
}

/** Probes for a CH7017 on the given bus and slave address. */
static bool ch7017_init(struct intel_dvo_device *dvo,
			struct i2c_adapter *adapter)
{
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
	struct ch7017_priv *priv;
-	uint8_t val;
+	const char *str;
+	u8 val;

	priv = kzalloc(sizeof(struct ch7017_priv), GFP_KERNEL);
	if (priv == NULL)
@@ -237,16 +214,27 @@ static bool ch7017_init(struct intel_dvo_device *dvo,
	if (!ch7017_read(dvo, CH7017_DEVICE_ID, &val))
		goto fail;

-	if (val != CH7017_DEVICE_ID_VALUE &&
-	    val != CH7018_DEVICE_ID_VALUE &&
-	    val != CH7019_DEVICE_ID_VALUE) {
+	switch (val) {
+	case CH7017_DEVICE_ID_VALUE:
+		str = "ch7017";
+		break;
+	case CH7018_DEVICE_ID_VALUE:
+		str = "ch7018";
+		break;
+	case CH7019_DEVICE_ID_VALUE:
+		str = "ch7019";
+		break;
+	default:
		DRM_DEBUG_KMS("ch701x not detected, got %d: from %s "
-				"Slave %d.\n",
-			  val, i2cbus->adapter.name,dvo->slave_addr);
+			      "slave %d.\n",
+			      val, adapter->name,dvo->slave_addr);
		goto fail;
	}

+	DRM_DEBUG_KMS("%s detected on %s, addr %d\n",
+		      str, adapter->name, dvo->slave_addr);
	return true;
+
fail:
	kfree(priv);
	return false;
@@ -368,7 +356,7 @@ static void ch7017_dpms(struct intel_dvo_device *dvo, int mode)
	}

	/* XXX: Should actually wait for update power status somehow */
-	udelay(20000);
+	msleep(20);
}

static void ch7017_dump_regs(struct intel_dvo_device *dvo)

+ 4 - 6
drivers/gpu/drm/i915/dvo_ch7xxx.c

@@ -113,7 +113,6 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
	struct ch7xxx_priv *ch7xxx= dvo->dev_priv;
	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
	u8 out_buf[2];
	u8 in_buf[2];

@@ -135,14 +134,14 @@ static bool ch7xxx_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
	out_buf[0] = addr;
	out_buf[1] = 0;

-	if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+	if (i2c_transfer(adapter, msgs, 2) == 2) {
		*ch = in_buf[0];
		return true;
	};

	if (!ch7xxx->quiet) {
		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
-			  addr, i2cbus->adapter.name, dvo->slave_addr);
+			  addr, adapter->name, dvo->slave_addr);
	}
	return false;
}
@@ -152,7 +151,6 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
	struct ch7xxx_priv *ch7xxx = dvo->dev_priv;
	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
	uint8_t out_buf[2];
	struct i2c_msg msg = {
		.addr = dvo->slave_addr,
@@ -164,12 +162,12 @@ static bool ch7xxx_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
	out_buf[0] = addr;
	out_buf[1] = ch;

-	if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+	if (i2c_transfer(adapter, &msg, 1) == 1)
		return true;

	if (!ch7xxx->quiet) {
		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
-			  addr, i2cbus->adapter.name, dvo->slave_addr);
+			  addr, adapter->name, dvo->slave_addr);
	}

	return false;

+ 4 - 6
drivers/gpu/drm/i915/dvo_ivch.c

@@ -167,7 +167,6 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
{
	struct ivch_priv *priv = dvo->dev_priv;
	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
	u8 out_buf[1];
	u8 in_buf[2];

@@ -193,7 +192,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)

	out_buf[0] = addr;

-	if (i2c_transfer(&i2cbus->adapter, msgs, 3) == 3) {
+	if (i2c_transfer(adapter, msgs, 3) == 3) {
		*data = (in_buf[1] << 8) | in_buf[0];
		return true;
	};
@@ -201,7 +200,7 @@ static bool ivch_read(struct intel_dvo_device *dvo, int addr, uint16_t *data)
	if (!priv->quiet) {
		DRM_DEBUG_KMS("Unable to read register 0x%02x from "
				"%s:%02x.\n",
-			  addr, i2cbus->adapter.name, dvo->slave_addr);
+			  addr, adapter->name, dvo->slave_addr);
	}
	return false;
}
@@ -211,7 +210,6 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
{
	struct ivch_priv *priv = dvo->dev_priv;
	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
	u8 out_buf[3];
	struct i2c_msg msg = {
		.addr = dvo->slave_addr,
@@ -224,12 +222,12 @@ static bool ivch_write(struct intel_dvo_device *dvo, int addr, uint16_t data)
	out_buf[1] = data & 0xff;
	out_buf[2] = data >> 8;

-	if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+	if (i2c_transfer(adapter, &msg, 1) == 1)
		return true;

	if (!priv->quiet) {
		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
-			  addr, i2cbus->adapter.name, dvo->slave_addr);
+			  addr, adapter->name, dvo->slave_addr);
	}

	return false;

+ 4 - 6
drivers/gpu/drm/i915/dvo_sil164.c

@@ -69,7 +69,6 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
	struct sil164_priv *sil = dvo->dev_priv;
	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
	u8 out_buf[2];
	u8 in_buf[2];

@@ -91,14 +90,14 @@ static bool sil164_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
	out_buf[0] = addr;
	out_buf[1] = 0;

-	if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+	if (i2c_transfer(adapter, msgs, 2) == 2) {
		*ch = in_buf[0];
		return true;
	};

	if (!sil->quiet) {
		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
-			  addr, i2cbus->adapter.name, dvo->slave_addr);
+			  addr, adapter->name, dvo->slave_addr);
	}
	return false;
}
@@ -107,7 +106,6 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
	struct sil164_priv *sil= dvo->dev_priv;
	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
	uint8_t out_buf[2];
	struct i2c_msg msg = {
		.addr = dvo->slave_addr,
@@ -119,12 +117,12 @@ static bool sil164_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
	out_buf[0] = addr;
	out_buf[1] = ch;

-	if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+	if (i2c_transfer(adapter, &msg, 1) == 1)
		return true;

	if (!sil->quiet) {
		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
-			  addr, i2cbus->adapter.name, dvo->slave_addr);
+			  addr, adapter->name, dvo->slave_addr);
	}

	return false;

+ 4 - 6
drivers/gpu/drm/i915/dvo_tfp410.c

@@ -94,7 +94,6 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
{
	struct tfp410_priv *tfp = dvo->dev_priv;
	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
	u8 out_buf[2];
	u8 in_buf[2];

@@ -116,14 +115,14 @@ static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch)
	out_buf[0] = addr;
	out_buf[1] = 0;

-	if (i2c_transfer(&i2cbus->adapter, msgs, 2) == 2) {
+	if (i2c_transfer(adapter, msgs, 2) == 2) {
		*ch = in_buf[0];
		return true;
	};

	if (!tfp->quiet) {
		DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n",
-			  addr, i2cbus->adapter.name, dvo->slave_addr);
+			  addr, adapter->name, dvo->slave_addr);
	}
	return false;
}
@@ -132,7 +131,6 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
{
	struct tfp410_priv *tfp = dvo->dev_priv;
	struct i2c_adapter *adapter = dvo->i2c_bus;
-	struct intel_i2c_chan *i2cbus = container_of(adapter, struct intel_i2c_chan, adapter);
	uint8_t out_buf[2];
	struct i2c_msg msg = {
		.addr = dvo->slave_addr,
@@ -144,12 +142,12 @@ static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch)
	out_buf[0] = addr;
	out_buf[1] = ch;

-	if (i2c_transfer(&i2cbus->adapter, &msg, 1) == 1)
+	if (i2c_transfer(adapter, &msg, 1) == 1)
		return true;

	if (!tfp->quiet) {
		DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n",
-			  addr, i2cbus->adapter.name, dvo->slave_addr);
+			  addr, adapter->name, dvo->slave_addr);
	}

	return false;

+ 267 - 69
drivers/gpu/drm/i915/i915_debugfs.c

@@ -40,9 +40,51 @@

#if defined(CONFIG_DEBUG_FS)

-#define ACTIVE_LIST	1
-#define FLUSHING_LIST	2
-#define INACTIVE_LIST	3
+enum {
+	ACTIVE_LIST,
+	FLUSHING_LIST,
+	INACTIVE_LIST,
+	PINNED_LIST,
+	DEFERRED_FREE_LIST,
+};
+
+static const char *yesno(int v)
+{
+	return v ? "yes" : "no";
+}
+
+static int i915_capabilities(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	const struct intel_device_info *info = INTEL_INFO(dev);
+
+	seq_printf(m, "gen: %d\n", info->gen);
+#define B(x) seq_printf(m, #x ": %s\n", yesno(info->x))
+	B(is_mobile);
+	B(is_i85x);
+	B(is_i915g);
+	B(is_i945gm);
+	B(is_g33);
+	B(need_gfx_hws);
+	B(is_g4x);
+	B(is_pineview);
+	B(is_broadwater);
+	B(is_crestline);
+	B(has_fbc);
+	B(has_rc6);
+	B(has_pipe_cxsr);
+	B(has_hotplug);
+	B(cursor_needs_physical);
+	B(has_overlay);
+	B(overlay_needs_physical);
+	B(supports_tv);
+	B(has_bsd_ring);
+	B(has_blt_ring);
+#undef B
+
+	return 0;
+}

static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
{
@@ -64,6 +106,29 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
    }
}

+static void
+describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
+{
+	seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s",
+		   &obj->base,
+		   get_pin_flag(obj),
+		   get_tiling_flag(obj),
+		   obj->base.size,
+		   obj->base.read_domains,
+		   obj->base.write_domain,
+		   obj->last_rendering_seqno,
+		   obj->dirty ? " dirty" : "",
+		   obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
+	if (obj->base.name)
+		seq_printf(m, " (name: %d)", obj->base.name);
+	if (obj->fence_reg != I915_FENCE_REG_NONE)
+		seq_printf(m, " (fence: %d)", obj->fence_reg);
+	if (obj->gtt_space != NULL)
+		seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset);
+	if (obj->ring != NULL)
+		seq_printf(m, " (%s)", obj->ring->name);
+}
+
static int i915_gem_object_list_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -72,56 +137,80 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
-	spinlock_t *lock = NULL;
+	size_t total_obj_size, total_gtt_size;
+	int count, ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
 
 
	switch (list) {
	case ACTIVE_LIST:
		seq_printf(m, "Active:\n");
-		lock = &dev_priv->mm.active_list_lock;
-		head = &dev_priv->render_ring.active_list;
+		head = &dev_priv->mm.active_list;
		break;
	case INACTIVE_LIST:
		seq_printf(m, "Inactive:\n");
		head = &dev_priv->mm.inactive_list;
		break;
+	case PINNED_LIST:
+		seq_printf(m, "Pinned:\n");
+		head = &dev_priv->mm.pinned_list;
+		break;
	case FLUSHING_LIST:
		seq_printf(m, "Flushing:\n");
		head = &dev_priv->mm.flushing_list;
		break;
+	case DEFERRED_FREE_LIST:
+		seq_printf(m, "Deferred free:\n");
+		head = &dev_priv->mm.deferred_free_list;
+		break;
	default:
-		DRM_INFO("Ooops, unexpected list\n");
-		return 0;
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
	}

-	if (lock)
-		spin_lock(lock);
-	list_for_each_entry(obj_priv, head, list)
-	{
-		seq_printf(m, "    %p: %s %8zd %08x %08x %d%s%s",
-			   &obj_priv->base,
-			   get_pin_flag(obj_priv),
-			   obj_priv->base.size,
-			   obj_priv->base.read_domains,
-			   obj_priv->base.write_domain,
-			   obj_priv->last_rendering_seqno,
-			   obj_priv->dirty ? " dirty" : "",
-			   obj_priv->madv == I915_MADV_DONTNEED ? " purgeable" : "");
-
-		if (obj_priv->base.name)
-			seq_printf(m, " (name: %d)", obj_priv->base.name);
-		if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
-			seq_printf(m, " (fence: %d)", obj_priv->fence_reg);
-		if (obj_priv->gtt_space != NULL)
-			seq_printf(m, " (gtt_offset: %08x)", obj_priv->gtt_offset);
-
+	total_obj_size = total_gtt_size = count = 0;
+	list_for_each_entry(obj_priv, head, mm_list) {
+		seq_printf(m, "   ");
+		describe_obj(m, obj_priv);
		seq_printf(m, "\n");
+		total_obj_size += obj_priv->base.size;
+		total_gtt_size += obj_priv->gtt_space->size;
+		count++;
	}
+	mutex_unlock(&dev->struct_mutex);

-	if (lock)
-	    spin_unlock(lock);
+	seq_printf(m, "Total %d objects, %zu bytes, %zu GTT size\n",
+		   count, total_obj_size, total_gtt_size);
	return 0;
}

+static int i915_gem_object_info(struct seq_file *m, void* data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	seq_printf(m, "%u objects\n", dev_priv->mm.object_count);
+	seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory);
+	seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count);
+	seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory);
+	seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count);
+	seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory);
+	seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+
static int i915_gem_pageflip_info(struct seq_file *m, void *data)
{
	struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -176,6 +265,11 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_request *gem_request;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;

	seq_printf(m, "Request:\n");
	list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
@@ -184,6 +278,8 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
			   gem_request->seqno,
			   (int) (jiffies - gem_request->emitted_jiffies));
	}
+	mutex_unlock(&dev->struct_mutex);
+
	return 0;
}

@@ -192,16 +288,24 @@ static int i915_gem_seqno_info(struct seq_file *m, void *data)
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;

	if (dev_priv->render_ring.status_page.page_addr != NULL) {
		seq_printf(m, "Current sequence: %d\n",
-			   i915_get_gem_seqno(dev,  &dev_priv->render_ring));
+			   dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
	} else {
		seq_printf(m, "Current sequence: hws uninitialized\n");
	}
	seq_printf(m, "Waiter sequence:  %d\n",
			dev_priv->mm.waiting_gem_seqno);
	seq_printf(m, "IRQ sequence:     %d\n", dev_priv->mm.irq_gem_seqno);
+
+	mutex_unlock(&dev->struct_mutex);
+
	return 0;
}

@@ -211,6 +315,11 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;

	if (!HAS_PCH_SPLIT(dev)) {
		seq_printf(m, "Interrupt enable:    %08x\n",
@@ -247,7 +356,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
		   atomic_read(&dev_priv->irq_received));
	if (dev_priv->render_ring.status_page.page_addr != NULL) {
		seq_printf(m, "Current sequence:    %d\n",
-			   i915_get_gem_seqno(dev,  &dev_priv->render_ring));
+			   dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
	} else {
		seq_printf(m, "Current sequence:    hws uninitialized\n");
	}
@@ -255,6 +364,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
		   dev_priv->mm.waiting_gem_seqno);
	seq_printf(m, "IRQ sequence:        %d\n",
		   dev_priv->mm.irq_gem_seqno);
+	mutex_unlock(&dev->struct_mutex);
+
	return 0;
}

@@ -263,7 +374,11 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
-	int i;
+	int i, ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;

	seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
	seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
@@ -289,6 +404,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
			seq_printf(m, "\n");
		}
	}
+	mutex_unlock(&dev->struct_mutex);

	return 0;
}
@@ -313,16 +429,19 @@ static int i915_hws_info(struct seq_file *m, void *data)
	return 0;
}

-static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_count)
+static void i915_dump_object(struct seq_file *m,
+			     struct io_mapping *mapping,
+			     struct drm_i915_gem_object *obj_priv)
{
-	int page, i;
-	uint32_t *mem;
+	int page, page_count, i;

+	page_count = obj_priv->base.size / PAGE_SIZE;
	for (page = 0; page < page_count; page++) {
-		mem = kmap_atomic(pages[page], KM_USER0);
+		u32 *mem = io_mapping_map_wc(mapping,
+					     obj_priv->gtt_offset + page * PAGE_SIZE);
		for (i = 0; i < PAGE_SIZE; i += 4)
			seq_printf(m, "%08x :  %08x\n", i, mem[i / 4]);
-		kunmap_atomic(mem, KM_USER0);
+		io_mapping_unmap(mem);
	}
}

@@ -335,27 +454,20 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
	struct drm_i915_gem_object *obj_priv;
	int ret;

-	spin_lock(&dev_priv->mm.active_list_lock);
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;

-	list_for_each_entry(obj_priv, &dev_priv->render_ring.active_list,
-			list) {
+	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
		obj = &obj_priv->base;
		if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
-		    ret = i915_gem_object_get_pages(obj, 0);
-		    if (ret) {
-			    DRM_ERROR("Failed to get pages: %d\n", ret);
-			    spin_unlock(&dev_priv->mm.active_list_lock);
-			    return ret;
-		    }
-
-		    seq_printf(m, "--- gtt_offset = 0x%08x\n", obj_priv->gtt_offset);
-		    i915_dump_pages(m, obj_priv->pages, obj->size / PAGE_SIZE);
-
-		    i915_gem_object_put_pages(obj);
+		    seq_printf(m, "--- gtt_offset = 0x%08x\n",
+			       obj_priv->gtt_offset);
+		    i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv);
		}
	}

-	spin_unlock(&dev_priv->mm.active_list_lock);
+	mutex_unlock(&dev->struct_mutex);

	return 0;
}
@@ -365,20 +477,24 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
	struct drm_info_node *node = (struct drm_info_node *) m->private;
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
-	u8 *virt;
-	uint32_t *ptr, off;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;

	if (!dev_priv->render_ring.gem_object) {
		seq_printf(m, "No ringbuffer setup\n");
-		return 0;
-	}
-
-	virt = dev_priv->render_ring.virtual_start;
+	} else {
+		u8 *virt = dev_priv->render_ring.virtual_start;
+		uint32_t off;
 
 
-	for (off = 0; off < dev_priv->render_ring.size; off += 4) {
-		ptr = (uint32_t *)(virt + off);
-		seq_printf(m, "%08x :  %08x\n", off, *ptr);
+		for (off = 0; off < dev_priv->render_ring.size; off += 4) {
+			uint32_t *ptr = (uint32_t *)(virt + off);
+			seq_printf(m, "%08x :  %08x\n", off, *ptr);
+		}
	}
+	mutex_unlock(&dev->struct_mutex);

	return 0;
}
@@ -396,7 +512,7 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
	seq_printf(m, "RingHead :  %08x\n", head);
	seq_printf(m, "RingTail :  %08x\n", tail);
	seq_printf(m, "RingSize :  %08lx\n", dev_priv->render_ring.size);
-	seq_printf(m, "Acthd :     %08x\n", I915_READ(IS_I965G(dev) ? ACTHD_I965 : ACTHD));
+	seq_printf(m, "Acthd :     %08x\n", I915_READ(INTEL_INFO(dev)->gen >= 4 ? ACTHD_I965 : ACTHD));

	return 0;
}
@@ -458,7 +574,7 @@ static int i915_error_state(struct seq_file *m, void *unused)
	seq_printf(m, "  IPEHR: 0x%08x\n", error->ipehr);
	seq_printf(m, "  INSTDONE: 0x%08x\n", error->instdone);
	seq_printf(m, "  ACTHD: 0x%08x\n", error->acthd);
-	if (IS_I965G(dev)) {
+	if (INTEL_INFO(dev)->gen >= 4) {
		seq_printf(m, "  INSTPS: 0x%08x\n", error->instps);
		seq_printf(m, "  INSTDONE1: 0x%08x\n", error->instdone1);
	}
@@ -642,6 +758,9 @@ static int i915_fbc_status(struct seq_file *m, void *unused)
	} else {
		seq_printf(m, "FBC disabled: ");
		switch (dev_priv->no_fbc_reason) {
+		case FBC_NO_OUTPUT:
+			seq_printf(m, "no outputs");
+			break;
		case FBC_STOLEN_TOO_SMALL:
			seq_printf(m, "not enough stolen memory");
			break;
@@ -675,15 +794,17 @@ static int i915_sr_status(struct seq_file *m, void *unused)
	drm_i915_private_t *dev_priv = dev->dev_private;
	bool sr_enabled = false;

-	if (IS_I965GM(dev) || IS_I945G(dev) || IS_I945GM(dev))
+	if (IS_GEN5(dev))
+		sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
+	else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
		sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
	else if (IS_I915GM(dev))
		sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
	else if (IS_PINEVIEW(dev))
		sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;

-	seq_printf(m, "self-refresh: %s\n", sr_enabled ? "enabled" :
-		   "disabled");
+	seq_printf(m, "self-refresh: %s\n",
+		   sr_enabled ? "enabled" : "disabled");

	return 0;
}
@@ -694,10 +815,16 @@ static int i915_emon_status(struct seq_file *m, void *unused)
	struct drm_device *dev = node->minor->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long temp, chipset, gfx;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;

	temp = i915_mch_val(dev_priv);
	chipset = i915_chipset_val(dev_priv);
	gfx = i915_gfx_val(dev_priv);
+	mutex_unlock(&dev->struct_mutex);

	seq_printf(m, "GMCH temp: %ld\n", temp);
	seq_printf(m, "Chipset power: %ld\n", chipset);
@@ -718,6 +845,68 @@ static int i915_gfxec(struct seq_file *m, void *unused)
	return 0;
}

+static int i915_opregion(struct seq_file *m, void *unused)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_opregion *opregion = &dev_priv->opregion;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	if (opregion->header)
+		seq_write(m, opregion->header, OPREGION_SIZE);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_device *dev = node->minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct intel_fbdev *ifbdev;
+	struct intel_framebuffer *fb;
+	int ret;
+
+	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+	if (ret)
+		return ret;
+
+	ifbdev = dev_priv->fbdev;
+	fb = to_intel_framebuffer(ifbdev->helper.fb);
+
+	seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, obj ",
+		   fb->base.width,
+		   fb->base.height,
+		   fb->base.depth,
+		   fb->base.bits_per_pixel);
+	describe_obj(m, to_intel_bo(fb->obj));
+	seq_printf(m, "\n");
+
+	list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
+		if (&fb->base == ifbdev->helper.fb)
+			continue;
+
+		seq_printf(m, "user size: %d x %d, depth %d, %d bpp, obj ",
+			   fb->base.width,
+			   fb->base.height,
+			   fb->base.depth,
+			   fb->base.bits_per_pixel);
+		describe_obj(m, to_intel_bo(fb->obj));
+		seq_printf(m, "\n");
+	}
+
+	mutex_unlock(&dev->mode_config.mutex);
+
+	return 0;
+}
+
static int
i915_wedged_open(struct inode *inode,
		 struct file *filp)
@@ -741,6 +930,9 @@ i915_wedged_read(struct file *filp,
		       "wedged :  %d\n",
		       atomic_read(&dev_priv->mm.wedged));

+	if (len > sizeof (buf))
+		len = sizeof (buf);
+
	return simple_read_from_buffer(ubuf, max, ppos, buf, len);
}

@@ -770,7 +962,7 @@ i915_wedged_write(struct file *filp,

	atomic_set(&dev_priv->mm.wedged, val);
	if (val) {
-		DRM_WAKEUP(&dev_priv->irq_queue);
+		wake_up_all(&dev_priv->irq_queue);
		queue_work(dev_priv->wq, &dev_priv->error_work);
	}

@@ -824,9 +1016,13 @@ static int i915_wedged_create(struct dentry *root, struct drm_minor *minor)
}

static struct drm_info_list i915_debugfs_list[] = {
+	{"i915_capabilities", i915_capabilities, 0, 0},
+	{"i915_gem_objects", i915_gem_object_info, 0},
	{"i915_gem_active", i915_gem_object_list_info, 0, (void *) ACTIVE_LIST},
	{"i915_gem_flushing", i915_gem_object_list_info, 0, (void *) FLUSHING_LIST},
	{"i915_gem_inactive", i915_gem_object_list_info, 0, (void *) INACTIVE_LIST},
+	{"i915_gem_pinned", i915_gem_object_list_info, 0, (void *) PINNED_LIST},
+	{"i915_gem_deferred_free", i915_gem_object_list_info, 0, (void *) DEFERRED_FREE_LIST},
	{"i915_gem_pageflip", i915_gem_pageflip_info, 0},
	{"i915_gem_request", i915_gem_request_info, 0},
	{"i915_gem_seqno", i915_gem_seqno_info, 0},
@@ -846,6 +1042,8 @@ static struct drm_info_list i915_debugfs_list[] = {
	{"i915_gfxec", i915_gfxec, 0},
	{"i915_fbc_status", i915_fbc_status, 0},
	{"i915_sr_status", i915_sr_status, 0},
+	{"i915_opregion", i915_opregion, 0},
+	{"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)


+ 109 - 251
drivers/gpu/drm/i915/i915_dma.c

@@ -40,8 +40,7 @@
#include <linux/pnp.h>
#include <linux/vga_switcheroo.h>
#include <linux/slab.h>
-
-extern int intel_max_stolen; /* from AGP driver */
+#include <acpi/video.h>

/**
 * Sets up the hardware status page for devices that need a physical address
@@ -64,7 +63,7 @@ static int i915_init_phys_hws(struct drm_device *dev)

	memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);

-	if (IS_I965G(dev))
+	if (INTEL_INFO(dev)->gen >= 4)
		dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
					     0xf0;

@@ -133,8 +132,8 @@ static int i915_dma_cleanup(struct drm_device * dev)

	mutex_lock(&dev->struct_mutex);
	intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
-	if (HAS_BSD(dev))
-		intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+	intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+	intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
	mutex_unlock(&dev->struct_mutex);

	/* Clear the HWS virtual address at teardown */
@@ -222,7 +221,7 @@ static int i915_dma_resume(struct drm_device * dev)
	DRM_DEBUG_DRIVER("hw status page @ %p\n",
				ring->status_page.page_addr);
	if (ring->status_page.gfx_addr != 0)
-		ring->setup_status_page(dev, ring);
+		intel_ring_setup_status_page(dev, ring);
	else
		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);

@@ -377,7 +376,7 @@ i915_emit_box(struct drm_device *dev,
		return -EINVAL;
	}

-	if (IS_I965G(dev)) {
+	if (INTEL_INFO(dev)->gen >= 4) {
		BEGIN_LP_RING(4);
		OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
		OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
@@ -481,7 +480,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,

		if (!IS_I830(dev) && !IS_845G(dev)) {
			BEGIN_LP_RING(2);
-			if (IS_I965G(dev)) {
+			if (INTEL_INFO(dev)->gen >= 4) {
				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
				OUT_RING(batch->start);
			} else {
@@ -500,7 +499,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
	}


-	if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
+	if (IS_G4X(dev) || IS_GEN5(dev)) {
		BEGIN_LP_RING(2);
		OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
		OUT_RING(MI_NOOP);
@@ -765,6 +764,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
	case I915_PARAM_HAS_BSD:
		value = HAS_BSD(dev);
		break;
+	case I915_PARAM_HAS_BLT:
+		value = HAS_BLT(dev);
+		break;
	default:
		DRM_DEBUG_DRIVER("Unknown parameter %d\n",
				 param->param);
@@ -888,12 +890,12 @@ static int
intel_alloc_mchbar_resource(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
-	int reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+	int reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp_lo, temp_hi = 0;
	u64 mchbar_addr;
	int ret;

-	if (IS_I965G(dev))
+	if (INTEL_INFO(dev)->gen >= 4)
		pci_read_config_dword(dev_priv->bridge_dev, reg + 4, &temp_hi);
	pci_read_config_dword(dev_priv->bridge_dev, reg, &temp_lo);
	mchbar_addr = ((u64)temp_hi << 32) | temp_lo;
@@ -920,7 +922,7 @@ intel_alloc_mchbar_resource(struct drm_device *dev)
		return ret;
	}

-	if (IS_I965G(dev))
+	if (INTEL_INFO(dev)->gen >= 4)
		pci_write_config_dword(dev_priv->bridge_dev, reg + 4,
				       upper_32_bits(dev_priv->mch_res.start));

@@ -934,7 +936,7 @@ static void
intel_setup_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
-	int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;
	bool enabled;

@@ -971,7 +973,7 @@ static void
intel_teardown_mchbar(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
-	int mchbar_reg = IS_I965G(dev) ? MCHBAR_I965 : MCHBAR_I915;
+	int mchbar_reg = INTEL_INFO(dev)->gen >= 4 ? MCHBAR_I965 : MCHBAR_I915;
	u32 temp;

	if (dev_priv->mchbar_need_disable) {
@@ -990,174 +992,6 @@ intel_teardown_mchbar(struct drm_device *dev)
		release_resource(&dev_priv->mch_res);
}

-/**
- * i915_probe_agp - get AGP bootup configuration
- * @pdev: PCI device
- * @aperture_size: returns AGP aperture configured size
- * @preallocated_size: returns size of BIOS preallocated AGP space
- *
- * Since Intel integrated graphics are UMA, the BIOS has to set aside
- * some RAM for the framebuffer at early boot.  This code figures out
- * how much was set aside so we can use it for our own purposes.
- */
-static int i915_probe_agp(struct drm_device *dev, uint32_t *aperture_size,
-			  uint32_t *preallocated_size,
-			  uint32_t *start)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u16 tmp = 0;
-	unsigned long overhead;
-	unsigned long stolen;
-
-	/* Get the fb aperture size and "stolen" memory amount. */
-	pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &tmp);
-
-	*aperture_size = 1024 * 1024;
-	*preallocated_size = 1024 * 1024;
-
-	switch (dev->pdev->device) {
-	case PCI_DEVICE_ID_INTEL_82830_CGC:
-	case PCI_DEVICE_ID_INTEL_82845G_IG:
-	case PCI_DEVICE_ID_INTEL_82855GM_IG:
-	case PCI_DEVICE_ID_INTEL_82865_IG:
-		if ((tmp & INTEL_GMCH_MEM_MASK) == INTEL_GMCH_MEM_64M)
-			*aperture_size *= 64;
-		else
-			*aperture_size *= 128;
-		break;
-	default:
-		/* 9xx supports large sizes, just look at the length */
-		*aperture_size = pci_resource_len(dev->pdev, 2);
-		break;
-	}
-
-	/*
-	 * Some of the preallocated space is taken by the GTT
-	 * and popup.  GTT is 1K per MB of aperture size, and popup is 4K.
-	 */
-	if (IS_G4X(dev) || IS_PINEVIEW(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev))
-		overhead = 4096;
-	else
-		overhead = (*aperture_size / 1024) + 4096;
-
-	if (IS_GEN6(dev)) {
-		/* SNB has memory control reg at 0x50.w */
-		pci_read_config_word(dev->pdev, SNB_GMCH_CTRL, &tmp);
-
-		switch (tmp & SNB_GMCH_GMS_STOLEN_MASK) {
-		case INTEL_855_GMCH_GMS_DISABLED:
-			DRM_ERROR("video memory is disabled\n");
-			return -1;
-		case SNB_GMCH_GMS_STOLEN_32M:
-			stolen = 32 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_64M:
-			stolen = 64 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_96M:
-			stolen = 96 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_128M:
-			stolen = 128 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_160M:
-			stolen = 160 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_192M:
-			stolen = 192 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_224M:
-			stolen = 224 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_256M:
-			stolen = 256 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_288M:
-			stolen = 288 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_320M:
-			stolen = 320 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_352M:
-			stolen = 352 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_384M:
-			stolen = 384 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_416M:
-			stolen = 416 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_448M:
-			stolen = 448 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_480M:
-			stolen = 480 * 1024 * 1024;
-			break;
-		case SNB_GMCH_GMS_STOLEN_512M:
-			stolen = 512 * 1024 * 1024;
-			break;
-		default:
-			DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
-				  tmp & SNB_GMCH_GMS_STOLEN_MASK);
-			return -1;
-		}
-	} else {
-		switch (tmp & INTEL_GMCH_GMS_MASK) {
-		case INTEL_855_GMCH_GMS_DISABLED:
-			DRM_ERROR("video memory is disabled\n");
-			return -1;
-		case INTEL_855_GMCH_GMS_STOLEN_1M:
-			stolen = 1 * 1024 * 1024;
-			break;
-		case INTEL_855_GMCH_GMS_STOLEN_4M:
-			stolen = 4 * 1024 * 1024;
-			break;
-		case INTEL_855_GMCH_GMS_STOLEN_8M:
-			stolen = 8 * 1024 * 1024;
-			break;
-		case INTEL_855_GMCH_GMS_STOLEN_16M:
-			stolen = 16 * 1024 * 1024;
-			break;
-		case INTEL_855_GMCH_GMS_STOLEN_32M:
-			stolen = 32 * 1024 * 1024;
-			break;
-		case INTEL_915G_GMCH_GMS_STOLEN_48M:
-			stolen = 48 * 1024 * 1024;
-			break;
-		case INTEL_915G_GMCH_GMS_STOLEN_64M:
-			stolen = 64 * 1024 * 1024;
-			break;
-		case INTEL_GMCH_GMS_STOLEN_128M:
-			stolen = 128 * 1024 * 1024;
-			break;
-		case INTEL_GMCH_GMS_STOLEN_256M:
-			stolen = 256 * 1024 * 1024;
-			break;
-		case INTEL_GMCH_GMS_STOLEN_96M:
-			stolen = 96 * 1024 * 1024;
-			break;
-		case INTEL_GMCH_GMS_STOLEN_160M:
-			stolen = 160 * 1024 * 1024;
-			break;
-		case INTEL_GMCH_GMS_STOLEN_224M:
-			stolen = 224 * 1024 * 1024;
-			break;
-		case INTEL_GMCH_GMS_STOLEN_352M:
-			stolen = 352 * 1024 * 1024;
-			break;
-		default:
-			DRM_ERROR("unexpected GMCH_GMS value: 0x%02x\n",
-				  tmp & INTEL_GMCH_GMS_MASK);
-			return -1;
-		}
-	}
-
-	*preallocated_size = stolen - overhead;
-	*start = overhead;
-
-	return 0;
-}
-
 #define PTE_ADDRESS_MASK		0xfffff000
#define PTE_ADDRESS_MASK		0xfffff000
#define PTE_ADDRESS_MASK_HIGH		0x000000f0 /* i915+ */
#define PTE_MAPPING_TYPE_UNCACHED	(0 << 1)
 {
{
	unsigned long *gtt;
	unsigned long entry, phys;
-	int gtt_bar = IS_I9XX(dev) ? 0 : 1;
+	int gtt_bar = IS_GEN2(dev) ? 1 : 0;
	int gtt_offset, gtt_size;

-		if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
+	if (INTEL_INFO(dev)->gen >= 4) {
+		if (IS_G4X(dev) || INTEL_INFO(dev)->gen > 4) {
 			gtt_offset = 2*1024*1024;
			gtt_offset = 2*1024*1024;
			gtt_size = 2*1024*1024;
		} else {
 	DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
	DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);

	/* Mask out these reserved bits on this hardware. */
-	if (!IS_I9XX(dev) || IS_I915G(dev) || IS_I915GM(dev) ||
-	    IS_I945G(dev) || IS_I945GM(dev)) {
+	if (INTEL_INFO(dev)->gen < 4 && !IS_G33(dev))
		entry &= ~PTE_ADDRESS_MASK_HIGH;
-	}

	/* If it's not a mapping type we know, then bail. */
	if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
 	unsigned long ll_base = 0;
	unsigned long ll_base = 0;

	/* Leave 1M for line length buffer & misc. */
-	compressed_fb = drm_mm_search_free(&dev_priv->vram, size, 4096, 0);
+	compressed_fb = drm_mm_search_free(&dev_priv->mm.vram, size, 4096, 0);
	if (!compressed_fb) {
		dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
		i915_warn_stolen(dev);
 	}
	}

	if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) {
-		compressed_llb = drm_mm_search_free(&dev_priv->vram, 4096,
+		compressed_llb = drm_mm_search_free(&dev_priv->mm.vram, 4096,
						    4096, 0);
		if (!compressed_llb) {
			i915_warn_stolen(dev);
 		/* i915 resume handler doesn't set to D0 */
		/* i915 resume handler doesn't set to D0 */
		pci_set_power_state(dev->pdev, PCI_D0);
		i915_resume(dev);
-		drm_kms_helper_poll_enable(dev);
	} else {
		printk(KERN_ERR "i915: switched off\n");
-		drm_kms_helper_poll_disable(dev);
		i915_suspend(dev, pmm);
	}
}
 }
}

static int i915_load_modeset_init(struct drm_device *dev,
-				  unsigned long prealloc_start,
				  unsigned long prealloc_size,
				  unsigned long agp_size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
-	int fb_bar = IS_I9XX(dev) ? 2 : 0;
	int ret = 0;

-		0xff000000;
-
-	/* Basic memrange allocator for stolen space (aka vram) */
-	drm_mm_init(&dev_priv->vram, 0, prealloc_size);
-	DRM_INFO("set up %ldM of stolen space\n", prealloc_size / (1024*1024));
-
-	/* We're off and running w/KMS */
-	dev_priv->mm.suspended = 0;
+	/* Basic memrange allocator for stolen space (aka mm.vram) */
+	drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size);
 
 
 	/* Let GEM Manage from end of prealloc space to end of aperture.
 	/* Let GEM Manage from end of prealloc space to end of aperture.
 	 *
 	 *
@@ -1414,7 +1235,7 @@ static int i915_load_modeset_init(struct drm_device *dev,
 	 */
	 */
	dev_priv->allow_batchbuffer = 1;

-	ret = intel_init_bios(dev);
+	ret = intel_parse_bios(dev);
	if (ret)
		DRM_INFO("failed to find VBIOS tables\n");

 	if (ret)
	if (ret)
		goto cleanup_ringbuffer;

+	intel_register_dsm_handler();
+
	ret = vga_switcheroo_register_client(dev->pdev,
					     i915_switcheroo_set_state,
					     i915_switcheroo_can_switch);
 	/* FIXME: do pre/post-mode set stuff in core KMS code */
 	dev->vblank_disable_allowed = 1;

-	/*
-	 * Initialize the hardware status page IRQ location.
-	 */
-
-	I915_WRITE(INSTPM, (1 << 5) | (1 << 21));
-
 	ret = intel_fbdev_init(dev);
 	if (ret)
 		goto cleanup_irq;

 	drm_kms_helper_poll_init(dev);
+
+	/* We're off and running w/KMS */
+	dev_priv->mm.suspended = 0;
+
 	return 0;

 cleanup_irq:
@@ -1907,7 +1728,7 @@ static struct drm_i915_private *i915_mch_dev;
  *   - dev_priv->fmax
  *   - dev_priv->gpu_busy
  */
-DEFINE_SPINLOCK(mchdev_lock);
+static DEFINE_SPINLOCK(mchdev_lock);

 /**
  * i915_read_mch_val - return value for IPS use
@@ -2062,7 +1883,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	struct drm_i915_private *dev_priv;
 	resource_size_t base, size;
 	int ret = 0, mmio_bar;
-	uint32_t agp_size, prealloc_size, prealloc_start;
+	uint32_t agp_size, prealloc_size;
 	/* i915 has 4 more counters */
 	dev->counters += 4;
 	dev->types[6] = _DRM_STAT_IRQ;
@@ -2079,7 +1900,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	dev_priv->info = (struct intel_device_info *) flags;

 	/* Add register map (needed for suspend/resume) */
-	mmio_bar = IS_I9XX(dev) ? 0 : 1;
+	mmio_bar = IS_GEN2(dev) ? 1 : 0;
 	base = pci_resource_start(dev->pdev, mmio_bar);
 	size = pci_resource_len(dev->pdev, mmio_bar);

@@ -2121,17 +1942,32 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 			 "performance may suffer.\n");
 	}

-	ret = i915_probe_agp(dev, &agp_size, &prealloc_size, &prealloc_start);
-	if (ret)
+	dev_priv->mm.gtt = intel_gtt_get();
+	if (!dev_priv->mm.gtt) {
+		DRM_ERROR("Failed to initialize GTT\n");
+		ret = -ENODEV;
 		goto out_iomapfree;
-
-	if (prealloc_size > intel_max_stolen) {
-		DRM_INFO("detected %dM stolen memory, trimming to %dM\n",
-			 prealloc_size >> 20, intel_max_stolen >> 20);
-		prealloc_size = intel_max_stolen;
 	}

-	dev_priv->wq = create_singlethread_workqueue("i915");
+	prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT;
+	agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
+	/* The i915 workqueue is primarily used for batched retirement of
+	 * requests (and thus managing bo) once the task has been completed
+	 * by the GPU. i915_gem_retire_requests() is called directly when we
+	 * need high-priority retirement, such as waiting for an explicit
+	 * bo.
+	 *
+	 * It is also used for periodic low-priority events, such as
+	 * idle-timers and hangcheck.
+	 *
+	 * All tasks on the workqueue are expected to acquire the dev mutex
+	 * so there is no point in running more than one instance of the
+	 * workqueue at any time: max_active = 1 and NON_REENTRANT.
+	 */
+	dev_priv->wq = alloc_workqueue("i915",
+				       WQ_UNBOUND | WQ_NON_REENTRANT,
+				       1);
 	if (dev_priv->wq == NULL) {
 		DRM_ERROR("Failed to create our workqueue.\n");
 		ret = -ENOMEM;
@@ -2159,13 +1995,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)

 	dev->driver->get_vblank_counter = i915_get_vblank_counter;
 	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
-	if (IS_G4X(dev) || IS_IRONLAKE(dev) || IS_GEN6(dev)) {
+	if (IS_G4X(dev) || IS_GEN5(dev) || IS_GEN6(dev)) {
 		dev->max_vblank_count = 0xffffffff; /* full 32 bit counter */
 		dev->driver->get_vblank_counter = gm45_get_vblank_counter;
 	}

 	/* Try to make sure MCHBAR is enabled before poking at it */
 	intel_setup_mchbar(dev);
+	intel_setup_gmbus(dev);
+	intel_opregion_setup(dev);
+
+	/* Make sure the bios did its job and set up vital registers */
+	intel_setup_bios(dev);

 	i915_gem_load(dev);

@@ -2178,7 +2019,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)

 	if (IS_PINEVIEW(dev))
 		i915_pineview_get_mem_freq(dev);
-	else if (IS_IRONLAKE(dev))
+	else if (IS_GEN5(dev))
 		i915_ironlake_get_mem_freq(dev);

 	/* On the 945G/GM, the chipset reports the MSI capability on the
@@ -2212,8 +2053,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	intel_detect_pch(dev);

 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		ret = i915_load_modeset_init(dev, prealloc_start,
-					     prealloc_size, agp_size);
+		ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
 		if (ret < 0) {
 			DRM_ERROR("failed to init modeset\n");
 			goto out_workqueue_free;
@@ -2221,7 +2061,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	}

 	/* Must be done after probing outputs */
-	intel_opregion_init(dev, 0);
+	intel_opregion_init(dev);
+	acpi_video_register();

 	setup_timer(&dev_priv->hangcheck_timer, i915_hangcheck_elapsed,
 		    (unsigned long) dev);
@@ -2231,9 +2072,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	dev_priv->mchdev_lock = &mchdev_lock;
 	spin_unlock(&mchdev_lock);

-	/* XXX Prevent module unload due to memory corruption bugs. */
-	__module_get(THIS_MODULE);
-
 	return 0;

 out_workqueue_free:
@@ -2252,15 +2090,20 @@ free_priv:
 int i915_driver_unload(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	i915_destroy_error_state(dev);
+	int ret;

 	spin_lock(&mchdev_lock);
 	i915_mch_dev = NULL;
 	spin_unlock(&mchdev_lock);

-	destroy_workqueue(dev_priv->wq);
-	del_timer_sync(&dev_priv->hangcheck_timer);
+	mutex_lock(&dev->struct_mutex);
+	ret = i915_gpu_idle(dev);
+	if (ret)
+		DRM_ERROR("failed to idle hardware: %d\n", ret);
+	mutex_unlock(&dev->struct_mutex);
+
+	/* Cancel the retire work handler, which should be idle now. */
+	cancel_delayed_work_sync(&dev_priv->mm.retire_work);

 	io_mapping_free(dev_priv->mm.gtt_mapping);
 	if (dev_priv->mm.gtt_mtrr >= 0) {
@@ -2269,7 +2112,10 @@ int i915_driver_unload(struct drm_device *dev)
 		dev_priv->mm.gtt_mtrr = -1;
 	}

+	acpi_video_unregister();
+
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		intel_fbdev_fini(dev);
 		intel_modeset_cleanup(dev);

 		/*
@@ -2281,20 +2127,25 @@ int i915_driver_unload(struct drm_device *dev)
 			dev_priv->child_dev = NULL;
 			dev_priv->child_dev_num = 0;
 		}
-		drm_irq_uninstall(dev);
+
 		vga_switcheroo_unregister_client(dev->pdev);
 		vga_client_register(dev->pdev, NULL, NULL, NULL);
 	}

+	/* Free error state after interrupts are fully disabled. */
+	del_timer_sync(&dev_priv->hangcheck_timer);
+	cancel_work_sync(&dev_priv->error_work);
+	i915_destroy_error_state(dev);
+
 	if (dev->pdev->msi_enabled)
 		pci_disable_msi(dev->pdev);

-	if (dev_priv->regs != NULL)
-		iounmap(dev_priv->regs);
-
-	intel_opregion_free(dev, 0);
+	intel_opregion_fini(dev);

 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+		/* Flush any outstanding unpin_work. */
+		flush_workqueue(dev_priv->wq);
+
 		i915_gem_free_all_phys_object(dev);

 		mutex_lock(&dev->struct_mutex);
@@ -2302,34 +2153,41 @@ int i915_driver_unload(struct drm_device *dev)
 		mutex_unlock(&dev->struct_mutex);
 		if (I915_HAS_FBC(dev) && i915_powersave)
 			i915_cleanup_compression(dev);
-		drm_mm_takedown(&dev_priv->vram);
-		i915_gem_lastclose(dev);
+		drm_mm_takedown(&dev_priv->mm.vram);

 		intel_cleanup_overlay(dev);
+
+		if (!I915_NEED_GFX_HWS(dev))
+			i915_free_hws(dev);
 	}

+	if (dev_priv->regs != NULL)
+		iounmap(dev_priv->regs);
+
+	intel_teardown_gmbus(dev);
 	intel_teardown_mchbar(dev);

+	destroy_workqueue(dev_priv->wq);
+
 	pci_dev_put(dev_priv->bridge_dev);
 	kfree(dev->dev_private);

 	return 0;
 }

-int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
+int i915_driver_open(struct drm_device *dev, struct drm_file *file)
 {
-	struct drm_i915_file_private *i915_file_priv;
+	struct drm_i915_file_private *file_priv;

 	DRM_DEBUG_DRIVER("\n");
-	i915_file_priv = (struct drm_i915_file_private *)
-	    kmalloc(sizeof(*i915_file_priv), GFP_KERNEL);
-
-	if (!i915_file_priv)
+	file_priv = kmalloc(sizeof(*file_priv), GFP_KERNEL);
+	if (!file_priv)
 		return -ENOMEM;

-	file_priv->driver_priv = i915_file_priv;
+	file->driver_priv = file_priv;
-	INIT_LIST_HEAD(&i915_file_priv->mm.request_list);
+	spin_lock_init(&file_priv->mm.lock);
+	INIT_LIST_HEAD(&file_priv->mm.request_list);

 	return 0;
 }
@@ -2372,11 +2230,11 @@ void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
 		i915_mem_release(dev, file_priv, dev_priv->agp_heap);
 }

-void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
+void i915_driver_postclose(struct drm_device *dev, struct drm_file *file)
 {
-	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+	struct drm_i915_file_private *file_priv = file->driver_priv;
-	kfree(i915_file_priv);
+	kfree(file_priv);
 }

 struct drm_ioctl_desc i915_ioctls[] = {

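A note on the workqueue change in the i915_driver_load() hunk above: the driver moves from create_singlethread_workqueue() to alloc_workqueue() with max_active = 1 and WQ_NON_REENTRANT, on the reasoning given in the new comment that every work item takes the same device mutex anyway. The stand-alone module below is only an illustrative sketch of that allocation pattern; the demo_* names are invented here and are not part of the patch.

/* Illustrative sketch only -- shows the alloc_workqueue(max_active = 1,
 * WQ_UNBOUND | WQ_NON_REENTRANT) pattern used above; not driver code. */
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static void demo_fn(struct work_struct *work)
{
	pr_info("retire-style work item ran\n");
}
static DECLARE_WORK(demo_work, demo_fn);

static int __init demo_init(void)
{
	/* At most one item runs at a time, and items may sleep. */
	demo_wq = alloc_workqueue("demo", WQ_UNBOUND | WQ_NON_REENTRANT, 1);
	if (!demo_wq)
		return -ENOMEM;
	queue_work(demo_wq, &demo_work);
	return 0;
}

static void __exit demo_exit(void)
{
	flush_workqueue(demo_wq);	/* let queued work finish */
	destroy_workqueue(demo_wq);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");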
+ 149 - 65
drivers/gpu/drm/i915/i915_drv.c

@@ -32,6 +32,7 @@
 #include "drm.h"
 #include "drm.h"
 #include "i915_drm.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
 #include "i915_drv.h"
+#include "intel_drv.h"
 
 
 #include <linux/console.h>
 #include <linux/console.h>
 #include "drm_crtc_helper.h"
 #include "drm_crtc_helper.h"
@@ -61,86 +62,110 @@ extern int intel_agp_enabled;
 	.driver_data = (unsigned long) info }
 	.driver_data = (unsigned long) info }
 
 
 static const struct intel_device_info intel_i830_info = {
 static const struct intel_device_info intel_i830_info = {
-	.gen = 2, .is_i8xx = 1, .is_mobile = 1, .cursor_needs_physical = 1,
+	.gen = 2, .is_mobile = 1, .cursor_needs_physical = 1,
+	.has_overlay = 1, .overlay_needs_physical = 1,
 };
 };
 
 
 static const struct intel_device_info intel_845g_info = {
 static const struct intel_device_info intel_845g_info = {
-	.gen = 2, .is_i8xx = 1,
+	.gen = 2,
+	.has_overlay = 1, .overlay_needs_physical = 1,
 };
 };
 
 
 static const struct intel_device_info intel_i85x_info = {
 static const struct intel_device_info intel_i85x_info = {
-	.gen = 2, .is_i8xx = 1, .is_i85x = 1, .is_mobile = 1,
+	.gen = 2, .is_i85x = 1, .is_mobile = 1,
 	.cursor_needs_physical = 1,
 	.cursor_needs_physical = 1,
+	.has_overlay = 1, .overlay_needs_physical = 1,
 };
 };
 
 
 static const struct intel_device_info intel_i865g_info = {
 static const struct intel_device_info intel_i865g_info = {
-	.gen = 2, .is_i8xx = 1,
+	.gen = 2,
+	.has_overlay = 1, .overlay_needs_physical = 1,
 };
 };
 
 
 static const struct intel_device_info intel_i915g_info = {
 static const struct intel_device_info intel_i915g_info = {
-	.gen = 3, .is_i915g = 1, .is_i9xx = 1, .cursor_needs_physical = 1,
+	.gen = 3, .is_i915g = 1, .cursor_needs_physical = 1,
+	.has_overlay = 1, .overlay_needs_physical = 1,
 };
 };
 static const struct intel_device_info intel_i915gm_info = {
 static const struct intel_device_info intel_i915gm_info = {
-	.gen = 3, .is_i9xx = 1,  .is_mobile = 1,
+	.gen = 3, .is_mobile = 1,
 	.cursor_needs_physical = 1,
 	.cursor_needs_physical = 1,
+	.has_overlay = 1, .overlay_needs_physical = 1,
+	.supports_tv = 1,
 };
 };
 static const struct intel_device_info intel_i945g_info = {
 static const struct intel_device_info intel_i945g_info = {
-	.gen = 3, .is_i9xx = 1, .has_hotplug = 1, .cursor_needs_physical = 1,
+	.gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1,
+	.has_overlay = 1, .overlay_needs_physical = 1,
 };
 };
 static const struct intel_device_info intel_i945gm_info = {
 static const struct intel_device_info intel_i945gm_info = {
-	.gen = 3, .is_i945gm = 1, .is_i9xx = 1, .is_mobile = 1,
+	.gen = 3, .is_i945gm = 1, .is_mobile = 1,
 	.has_hotplug = 1, .cursor_needs_physical = 1,
 	.has_hotplug = 1, .cursor_needs_physical = 1,
+	.has_overlay = 1, .overlay_needs_physical = 1,
+	.supports_tv = 1,
 };
 };
 
 
 static const struct intel_device_info intel_i965g_info = {
 static const struct intel_device_info intel_i965g_info = {
-	.gen = 4, .is_broadwater = 1, .is_i965g = 1, .is_i9xx = 1,
+	.gen = 4, .is_broadwater = 1,
 	.has_hotplug = 1,
 	.has_hotplug = 1,
+	.has_overlay = 1,
 };
 };
 
 
 static const struct intel_device_info intel_i965gm_info = {
 static const struct intel_device_info intel_i965gm_info = {
-	.gen = 4, .is_crestline = 1, .is_i965g = 1, .is_i965gm = 1, .is_i9xx = 1,
+	.gen = 4, .is_crestline = 1,
 	.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
 	.is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
+	.has_overlay = 1,
+	.supports_tv = 1,
 };
 };
 
 
 static const struct intel_device_info intel_g33_info = {
 static const struct intel_device_info intel_g33_info = {
-	.gen = 3, .is_g33 = 1, .is_i9xx = 1,
+	.gen = 3, .is_g33 = 1,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_overlay = 1,
 };
 };
 
 
 static const struct intel_device_info intel_g45_info = {
 static const struct intel_device_info intel_g45_info = {
-	.gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1, .need_gfx_hws = 1,
+	.gen = 4, .is_g4x = 1, .need_gfx_hws = 1,
 	.has_pipe_cxsr = 1, .has_hotplug = 1,
 	.has_pipe_cxsr = 1, .has_hotplug = 1,
+	.has_bsd_ring = 1,
 };
 };
 
 
 static const struct intel_device_info intel_gm45_info = {
 static const struct intel_device_info intel_gm45_info = {
-	.gen = 4, .is_i965g = 1, .is_g4x = 1, .is_i9xx = 1,
+	.gen = 4, .is_g4x = 1,
 	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
 	.is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
 	.has_pipe_cxsr = 1, .has_hotplug = 1,
 	.has_pipe_cxsr = 1, .has_hotplug = 1,
+	.supports_tv = 1,
+	.has_bsd_ring = 1,
 };
 };
 
 
 static const struct intel_device_info intel_pineview_info = {
 static const struct intel_device_info intel_pineview_info = {
-	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .is_i9xx = 1,
+	.gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_overlay = 1,
 };
 };
 
 
 static const struct intel_device_info intel_ironlake_d_info = {
 static const struct intel_device_info intel_ironlake_d_info = {
-	.gen = 5, .is_ironlake = 1, .is_i965g = 1, .is_i9xx = 1,
+	.gen = 5,
 	.need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
 	.need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1,
+	.has_bsd_ring = 1,
 };
 };
 
 
 static const struct intel_device_info intel_ironlake_m_info = {
 static const struct intel_device_info intel_ironlake_m_info = {
-	.gen = 5, .is_ironlake = 1, .is_mobile = 1, .is_i965g = 1, .is_i9xx = 1,
+	.gen = 5, .is_mobile = 1,
 	.need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
 	.need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
+	.has_bsd_ring = 1,
 };
 };
 
 
 static const struct intel_device_info intel_sandybridge_d_info = {
 static const struct intel_device_info intel_sandybridge_d_info = {
-	.gen = 6, .is_i965g = 1, .is_i9xx = 1,
+	.gen = 6,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_bsd_ring = 1,
+	.has_blt_ring = 1,
 };
 };
 
 
 static const struct intel_device_info intel_sandybridge_m_info = {
 static const struct intel_device_info intel_sandybridge_m_info = {
-	.gen = 6, .is_i965g = 1, .is_mobile = 1, .is_i9xx = 1,
+	.gen = 6, .is_mobile = 1,
 	.need_gfx_hws = 1, .has_hotplug = 1,
 	.need_gfx_hws = 1, .has_hotplug = 1,
+	.has_bsd_ring = 1,
+	.has_blt_ring = 1,
 };
 };
 
 
 static const struct pci_device_id pciidlist[] = {		/* aka */
 static const struct pci_device_id pciidlist[] = {		/* aka */
@@ -237,7 +262,7 @@ static int i915_drm_freeze(struct drm_device *dev)
 
 
 	i915_save_state(dev);
 	i915_save_state(dev);
 
 
-	intel_opregion_free(dev, 1);
+	intel_opregion_fini(dev);
 
 
 	/* Modeset on resume, not lid events */
 	/* Modeset on resume, not lid events */
 	dev_priv->modeset_on_lid = 0;
 	dev_priv->modeset_on_lid = 0;
@@ -258,6 +283,8 @@ int i915_suspend(struct drm_device *dev, pm_message_t state)
 	if (state.event == PM_EVENT_PRETHAW)
 	if (state.event == PM_EVENT_PRETHAW)
 		return 0;
 		return 0;
 
 
+	drm_kms_helper_poll_disable(dev);
+
 	error = i915_drm_freeze(dev);
 	error = i915_drm_freeze(dev);
 	if (error)
 	if (error)
 		return error;
 		return error;
@@ -277,8 +304,7 @@ static int i915_drm_thaw(struct drm_device *dev)
 	int error = 0;
 	int error = 0;
 
 
 	i915_restore_state(dev);
 	i915_restore_state(dev);
-
-	intel_opregion_init(dev, 1);
+	intel_opregion_setup(dev);
 
 
 	/* KMS EnterVT equivalent */
 	/* KMS EnterVT equivalent */
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
 	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
@@ -294,6 +320,8 @@ static int i915_drm_thaw(struct drm_device *dev)
 		drm_helper_resume_force_mode(dev);
 		drm_helper_resume_force_mode(dev);
 	}
 	}
 
 
+	intel_opregion_init(dev);
+
 	dev_priv->modeset_on_lid = 0;
 	dev_priv->modeset_on_lid = 0;
 
 
 	return error;
 	return error;
@@ -301,12 +329,79 @@ static int i915_drm_thaw(struct drm_device *dev)
 
 
 int i915_resume(struct drm_device *dev)
 int i915_resume(struct drm_device *dev)
 {
 {
+	int ret;
+
 	if (pci_enable_device(dev->pdev))
 	if (pci_enable_device(dev->pdev))
 		return -EIO;
 		return -EIO;
 
 
 	pci_set_master(dev->pdev);
 	pci_set_master(dev->pdev);
 
 
-	return i915_drm_thaw(dev);
+	ret = i915_drm_thaw(dev);
+	if (ret)
+		return ret;
+
+	drm_kms_helper_poll_enable(dev);
+	return 0;
+}
+
+static int i8xx_do_reset(struct drm_device *dev, u8 flags)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (IS_I85X(dev))
+		return -ENODEV;
+
+	I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
+	POSTING_READ(D_STATE);
+
+	if (IS_I830(dev) || IS_845G(dev)) {
+		I915_WRITE(DEBUG_RESET_I830,
+			   DEBUG_RESET_DISPLAY |
+			   DEBUG_RESET_RENDER |
+			   DEBUG_RESET_FULL);
+		POSTING_READ(DEBUG_RESET_I830);
+		msleep(1);
+
+		I915_WRITE(DEBUG_RESET_I830, 0);
+		POSTING_READ(DEBUG_RESET_I830);
+	}
+
+	msleep(1);
+
+	I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
+	POSTING_READ(D_STATE);
+
+	return 0;
+}
+
+static int i965_reset_complete(struct drm_device *dev)
+{
+	u8 gdrst;
+	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
+	return gdrst & 0x1;
+}
+
+static int i965_do_reset(struct drm_device *dev, u8 flags)
+{
+	u8 gdrst;
+
+	/*
+	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
+	 * well as the reset bit (GR/bit 0).  Setting the GR bit
+	 * triggers the reset; when done, the hardware will clear it.
+	 */
+	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
+	pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1);
+
+	return wait_for(i965_reset_complete(dev), 500);
+}
+
+static int ironlake_do_reset(struct drm_device *dev, u8 flags)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
+	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1);
+	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
 }
 }
 
 
 /**
 /**
@@ -325,54 +420,39 @@ int i915_resume(struct drm_device *dev)
  *   - re-init interrupt state
  *   - re-init interrupt state
  *   - re-init display
  *   - re-init display
  */
  */
-int i965_reset(struct drm_device *dev, u8 flags)
+int i915_reset(struct drm_device *dev, u8 flags)
 {
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	unsigned long timeout;
-	u8 gdrst;
 	/*
 	/*
 	 * We really should only reset the display subsystem if we actually
 	 * We really should only reset the display subsystem if we actually
 	 * need to
 	 * need to
 	 */
 	 */
 	bool need_display = true;
 	bool need_display = true;
+	int ret;
 
 
 	mutex_lock(&dev->struct_mutex);
 	mutex_lock(&dev->struct_mutex);
 
 
-	/*
-	 * Clear request list
-	 */
-	i915_gem_retire_requests(dev);
-
-	if (need_display)
-		i915_save_display(dev);
-
-	if (IS_I965G(dev) || IS_G4X(dev)) {
-		/*
-		 * Set the domains we want to reset, then the reset bit (bit 0).
-		 * Clear the reset bit after a while and wait for hardware status
-		 * bit (bit 1) to be set
-		 */
-		pci_read_config_byte(dev->pdev, GDRST, &gdrst);
-		pci_write_config_byte(dev->pdev, GDRST, gdrst | flags | ((flags == GDRST_FULL) ? 0x1 : 0x0));
-		udelay(50);
-		pci_write_config_byte(dev->pdev, GDRST, gdrst & 0xfe);
-
-		/* ...we don't want to loop forever though, 500ms should be plenty */
-	       timeout = jiffies + msecs_to_jiffies(500);
-		do {
-			udelay(100);
-			pci_read_config_byte(dev->pdev, GDRST, &gdrst);
-		} while ((gdrst & 0x1) && time_after(timeout, jiffies));
-
-		if (gdrst & 0x1) {
-			WARN(true, "i915: Failed to reset chip\n");
-			mutex_unlock(&dev->struct_mutex);
-			return -EIO;
-		}
-	} else {
-		DRM_ERROR("Error occurred. Don't know how to reset this chip.\n");
+	i915_gem_reset(dev);
+
+	ret = -ENODEV;
+	if (get_seconds() - dev_priv->last_gpu_reset < 5) {
+		DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
+	} else switch (INTEL_INFO(dev)->gen) {
+	case 5:
+		ret = ironlake_do_reset(dev, flags);
+		break;
+	case 4:
+		ret = i965_do_reset(dev, flags);
+		break;
+	case 2:
+		ret = i8xx_do_reset(dev, flags);
+		break;
+	}
+	dev_priv->last_gpu_reset = get_seconds();
+	if (ret) {
+		DRM_ERROR("Failed to reset chip.\n");
 		mutex_unlock(&dev->struct_mutex);
 		mutex_unlock(&dev->struct_mutex);
-		return -ENODEV;
+		return ret;
 	}
 	}
 
 
 	/* Ok, now get things going again... */
 	/* Ok, now get things going again... */
@@ -400,13 +480,19 @@ int i965_reset(struct drm_device *dev, u8 flags)
 		mutex_lock(&dev->struct_mutex);
 		mutex_lock(&dev->struct_mutex);
 	}
 	}
 
 
+	mutex_unlock(&dev->struct_mutex);
+
 	/*
 	/*
-	 * Display needs restore too...
+	 * Perform a full modeset as on later generations, e.g. Ironlake, we may
+	 * need to retrain the display link and cannot just restore the register
+	 * values.
 	 */
 	 */
-	if (need_display)
-		i915_restore_display(dev);
+	if (need_display) {
+		mutex_lock(&dev->mode_config.mutex);
+		drm_helper_resume_force_mode(dev);
+		mutex_unlock(&dev->mode_config.mutex);
+	}
 
 
-	mutex_unlock(&dev->struct_mutex);
 	return 0;
 	return 0;
 }
 }
 
 
@@ -524,8 +610,6 @@ static struct drm_driver driver = {
 	.irq_uninstall = i915_driver_irq_uninstall,
 	.irq_uninstall = i915_driver_irq_uninstall,
 	.irq_handler = i915_driver_irq_handler,
 	.irq_handler = i915_driver_irq_handler,
 	.reclaim_buffers = drm_core_reclaim_buffers,
 	.reclaim_buffers = drm_core_reclaim_buffers,
-	.get_map_ofs = drm_core_get_map_ofs,
-	.get_reg_ofs = drm_core_get_reg_ofs,
 	.master_create = i915_master_create,
 	.master_create = i915_master_create,
 	.master_destroy = i915_master_destroy,
 	.master_destroy = i915_master_destroy,
 #if defined(CONFIG_DEBUG_FS)
 #if defined(CONFIG_DEBUG_FS)

+ 187 - 84
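Worth calling out from the i915_drv.c hunks above: the reworked reset path (i8xx_do_reset/i965_do_reset/ironlake_do_reset) polls a completion bit with a 500 ms budget via the driver's wait_for() macro instead of the old open-coded udelay loop. Purely as an illustration of that poll-with-timeout shape, here is a small user-space sketch; hardware_done() is a made-up stand-in for reading the GDRST completion bit, not an i915 function.

/* Illustrative sketch only: poll a condition once per millisecond until it
 * holds or the timeout expires, in the spirit of the driver's wait_for(). */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool hardware_done(void)
{
	static int polls;
	return ++polls > 3;	/* pretend the reset completes after a few polls */
}

/* Return 0 if cond() became true within timeout_ms, -1 on timeout. */
static int wait_for_done(bool (*cond)(void), int timeout_ms)
{
	struct timespec delay = { 0, 1000 * 1000 };	/* 1 ms per poll */
	int waited;

	for (waited = 0; waited < timeout_ms; waited++) {
		if (cond())
			return 0;
		nanosleep(&delay, NULL);
	}
	return -1;
}

int main(void)
{
	printf("reset %s\n",
	       wait_for_done(hardware_done, 500) ? "timed out" : "complete");
	return 0;
}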
drivers/gpu/drm/i915/i915_drv.h

@@ -34,6 +34,8 @@
 #include "intel_bios.h"
 #include "intel_bios.h"
 #include "intel_ringbuffer.h"
 #include "intel_ringbuffer.h"
 #include <linux/io-mapping.h>
 #include <linux/io-mapping.h>
+#include <linux/i2c.h>
+#include <drm/intel-gtt.h>
 
 
 /* General customization:
 /* General customization:
  */
  */
@@ -73,11 +75,9 @@ enum plane {
 #define DRIVER_PATCHLEVEL	0
 #define DRIVER_PATCHLEVEL	0
 
 
 #define WATCH_COHERENCY	0
 #define WATCH_COHERENCY	0
-#define WATCH_BUF	0
 #define WATCH_EXEC	0
 #define WATCH_EXEC	0
-#define WATCH_LRU	0
 #define WATCH_RELOC	0
 #define WATCH_RELOC	0
-#define WATCH_INACTIVE	0
+#define WATCH_LISTS	0
 #define WATCH_PWRITE	0
 #define WATCH_PWRITE	0
 
 
 #define I915_GEM_PHYS_CURSOR_0 1
 #define I915_GEM_PHYS_CURSOR_0 1
@@ -110,8 +110,9 @@ struct intel_opregion {
 	struct opregion_acpi *acpi;
 	struct opregion_acpi *acpi;
 	struct opregion_swsci *swsci;
 	struct opregion_swsci *swsci;
 	struct opregion_asle *asle;
 	struct opregion_asle *asle;
-	int enabled;
+	void *vbt;
 };
 };
+#define OPREGION_SIZE            (8*1024)
 
 
 struct intel_overlay;
 struct intel_overlay;
 struct intel_overlay_error_state;
 struct intel_overlay_error_state;
@@ -125,13 +126,16 @@ struct drm_i915_master_private {
 struct drm_i915_fence_reg {
 struct drm_i915_fence_reg {
 	struct drm_gem_object *obj;
 	struct drm_gem_object *obj;
 	struct list_head lru_list;
 	struct list_head lru_list;
+	bool gpu;
 };
 };
 
 
 struct sdvo_device_mapping {
 struct sdvo_device_mapping {
+	u8 initialized;
 	u8 dvo_port;
 	u8 dvo_port;
 	u8 slave_addr;
 	u8 slave_addr;
 	u8 dvo_wiring;
 	u8 dvo_wiring;
-	u8 initialized;
+	u8 i2c_pin;
+	u8 i2c_speed;
 	u8 ddc_pin;
 	u8 ddc_pin;
 };
 };
 
 
@@ -193,28 +197,29 @@ struct drm_i915_display_funcs {
 struct intel_device_info {
 struct intel_device_info {
 	u8 gen;
 	u8 gen;
 	u8 is_mobile : 1;
 	u8 is_mobile : 1;
-	u8 is_i8xx : 1;
 	u8 is_i85x : 1;
 	u8 is_i85x : 1;
 	u8 is_i915g : 1;
 	u8 is_i915g : 1;
-	u8 is_i9xx : 1;
 	u8 is_i945gm : 1;
 	u8 is_i945gm : 1;
-	u8 is_i965g : 1;
-	u8 is_i965gm : 1;
 	u8 is_g33 : 1;
 	u8 is_g33 : 1;
 	u8 need_gfx_hws : 1;
 	u8 need_gfx_hws : 1;
 	u8 is_g4x : 1;
 	u8 is_g4x : 1;
 	u8 is_pineview : 1;
 	u8 is_pineview : 1;
 	u8 is_broadwater : 1;
 	u8 is_broadwater : 1;
 	u8 is_crestline : 1;
 	u8 is_crestline : 1;
-	u8 is_ironlake : 1;
 	u8 has_fbc : 1;
 	u8 has_fbc : 1;
 	u8 has_rc6 : 1;
 	u8 has_rc6 : 1;
 	u8 has_pipe_cxsr : 1;
 	u8 has_pipe_cxsr : 1;
 	u8 has_hotplug : 1;
 	u8 has_hotplug : 1;
 	u8 cursor_needs_physical : 1;
 	u8 cursor_needs_physical : 1;
+	u8 has_overlay : 1;
+	u8 overlay_needs_physical : 1;
+	u8 supports_tv : 1;
+	u8 has_bsd_ring : 1;
+	u8 has_blt_ring : 1;
 };
 };
 
 
 enum no_fbc_reason {
 enum no_fbc_reason {
+	FBC_NO_OUTPUT, /* no outputs enabled to compress */
 	FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
 	FBC_STOLEN_TOO_SMALL, /* not enough space to hold compressed buffers */
 	FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
 	FBC_UNSUPPORTED_MODE, /* interlace or doublescanned mode */
 	FBC_MODE_TOO_LARGE, /* mode too large for compression */
 	FBC_MODE_TOO_LARGE, /* mode too large for compression */
@@ -241,9 +246,16 @@ typedef struct drm_i915_private {
 
 
 	void __iomem *regs;
 	void __iomem *regs;
 
 
+	struct intel_gmbus {
+		struct i2c_adapter adapter;
+		struct i2c_adapter *force_bit;
+		u32 reg0;
+	} *gmbus;
+
 	struct pci_dev *bridge_dev;
 	struct pci_dev *bridge_dev;
 	struct intel_ring_buffer render_ring;
 	struct intel_ring_buffer render_ring;
 	struct intel_ring_buffer bsd_ring;
 	struct intel_ring_buffer bsd_ring;
+	struct intel_ring_buffer blt_ring;
 	uint32_t next_seqno;
 	uint32_t next_seqno;
 
 
 	drm_dma_handle_t *status_page_dmah;
 	drm_dma_handle_t *status_page_dmah;
@@ -263,6 +275,9 @@ typedef struct drm_i915_private {
 	int front_offset;
 	int front_offset;
 	int current_page;
 	int current_page;
 	int page_flipping;
 	int page_flipping;
+#define I915_DEBUG_READ (1<<0)
+#define I915_DEBUG_WRITE (1<<1)
+	unsigned long debug_flags;
 
 
 	wait_queue_head_t irq_queue;
 	wait_queue_head_t irq_queue;
 	atomic_t irq_received;
 	atomic_t irq_received;
@@ -289,24 +304,21 @@ typedef struct drm_i915_private {
 	unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
 	unsigned int sr01, adpa, ppcr, dvob, dvoc, lvds;
 	int vblank_pipe;
 	int vblank_pipe;
 	int num_pipe;
 	int num_pipe;
-	u32 flush_rings;
-#define FLUSH_RENDER_RING	0x1
-#define FLUSH_BSD_RING		0x2
 
 
 	/* For hangcheck timer */
 	/* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 75 /* in jiffies */
+#define DRM_I915_HANGCHECK_PERIOD 250 /* in ms */
 	struct timer_list hangcheck_timer;
 	struct timer_list hangcheck_timer;
 	int hangcheck_count;
 	int hangcheck_count;
 	uint32_t last_acthd;
 	uint32_t last_acthd;
 	uint32_t last_instdone;
 	uint32_t last_instdone;
 	uint32_t last_instdone1;
 	uint32_t last_instdone1;
 
 
-	struct drm_mm vram;
-
 	unsigned long cfb_size;
 	unsigned long cfb_size;
 	unsigned long cfb_pitch;
 	unsigned long cfb_pitch;
+	unsigned long cfb_offset;
 	int cfb_fence;
 	int cfb_fence;
 	int cfb_plane;
 	int cfb_plane;
+	int cfb_y;
 
 
 	int irq_enabled;
 	int irq_enabled;
 
 
@@ -316,8 +328,7 @@ typedef struct drm_i915_private {
 	struct intel_overlay *overlay;
 	struct intel_overlay *overlay;
 
 
 	/* LVDS info */
 	/* LVDS info */
-	int backlight_duty_cycle;  /* restore backlight to this value */
-	bool panel_wants_dither;
+	int backlight_level;  /* restore backlight to this value */
 	struct drm_display_mode *panel_fixed_mode;
 	struct drm_display_mode *panel_fixed_mode;
 	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
 	struct drm_display_mode *lfp_lvds_vbt_mode; /* if any */
 	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
 	struct drm_display_mode *sdvo_lvds_vbt_mode; /* if any */
@@ -328,13 +339,23 @@ typedef struct drm_i915_private {
 	unsigned int lvds_vbt:1;
 	unsigned int lvds_vbt:1;
 	unsigned int int_crt_support:1;
 	unsigned int int_crt_support:1;
 	unsigned int lvds_use_ssc:1;
 	unsigned int lvds_use_ssc:1;
-	unsigned int edp_support:1;
 	int lvds_ssc_freq;
 	int lvds_ssc_freq;
-	int edp_bpp;
+	struct {
+		int rate;
+		int lanes;
+		int preemphasis;
+		int vswing;
+
+		bool initialized;
+		bool support;
+		int bpp;
+		struct edp_power_seq pps;
+	} edp;
+	bool no_aux_handshake;
 
 
 	struct notifier_block lid_notifier;
 	struct notifier_block lid_notifier;
 
 
-	int crt_ddc_bus; /* 0 = unknown, else GPIO to use for CRT DDC */
+	int crt_ddc_pin;
 	struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
 	struct drm_i915_fence_reg fence_regs[16]; /* assume 965 */
 	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
 	int fence_reg_start; /* 4 if userland hasn't ioctl'd us yet */
 	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
 	int num_fence_regs; /* 8 on pre-965, 16 otherwise */
@@ -344,6 +365,7 @@ typedef struct drm_i915_private {
 	spinlock_t error_lock;
 	spinlock_t error_lock;
 	struct drm_i915_error_state *first_error;
 	struct drm_i915_error_state *first_error;
 	struct work_struct error_work;
 	struct work_struct error_work;
+	struct completion error_completion;
 	struct workqueue_struct *wq;
 	struct workqueue_struct *wq;
 
 
 	/* Display functions */
 	/* Display functions */
@@ -507,6 +529,11 @@ typedef struct drm_i915_private {
 	u32 saveMCHBAR_RENDER_STANDBY;
 	u32 saveMCHBAR_RENDER_STANDBY;
 
 
 	struct {
 	struct {
+		/** Bridge to intel-gtt-ko */
+		struct intel_gtt *gtt;
+		/** Memory allocator for GTT stolen memory */
+		struct drm_mm vram;
+		/** Memory allocator for GTT */
 		struct drm_mm gtt_space;
 		struct drm_mm gtt_space;
 
 
 		struct io_mapping *gtt_mapping;
 		struct io_mapping *gtt_mapping;
@@ -521,7 +548,16 @@ typedef struct drm_i915_private {
 		 */
 		 */
 		struct list_head shrink_list;
 		struct list_head shrink_list;
 
 
-		spinlock_t active_list_lock;
+		/**
+		 * List of objects currently involved in rendering.
+		 *
+		 * Includes buffers having the contents of their GPU caches
+		 * flushed, not necessarily primitives.  last_rendering_seqno
+		 * represents when the rendering involved will be completed.
+		 *
+		 * A reference is held on the buffer while on this list.
+		 */
+		struct list_head active_list;
 
 
 		/**
 		/**
 		 * List of objects which are not in the ringbuffer but which
 		 * List of objects which are not in the ringbuffer but which
@@ -534,15 +570,6 @@ typedef struct drm_i915_private {
 		 */
 		 */
 		struct list_head flushing_list;
 		struct list_head flushing_list;
 
 
-		/**
-		 * List of objects currently pending a GPU write flush.
-		 *
-		 * All elements on this list will belong to either the
-		 * active_list or flushing_list, last_rendering_seqno can
-		 * be used to differentiate between the two elements.
-		 */
-		struct list_head gpu_write_list;
-
 		/**
 		/**
 		 * LRU list of objects which are not in the ringbuffer and
 		 * LRU list of objects which are not in the ringbuffer and
 		 * are ready to unbind, but are still in the GTT.
 		 * are ready to unbind, but are still in the GTT.
@@ -555,6 +582,12 @@ typedef struct drm_i915_private {
 		 */
 		 */
 		struct list_head inactive_list;
 		struct list_head inactive_list;
 
 
+		/**
+		 * LRU list of objects which are not in the ringbuffer but
+		 * are still pinned in the GTT.
+		 */
+		struct list_head pinned_list;
+
 		/** LRU list of objects with fence regs on them. */
 		/** LRU list of objects with fence regs on them. */
 		struct list_head fence_list;
 		struct list_head fence_list;
 
 
@@ -611,6 +644,17 @@ typedef struct drm_i915_private {
 
 
 		/* storage for physical objects */
 		/* storage for physical objects */
 		struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
 		struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
+
+		uint32_t flush_rings;
+
+		/* accounting, useful for userland debugging */
+		size_t object_memory;
+		size_t pin_memory;
+		size_t gtt_memory;
+		size_t gtt_total;
+		u32 object_count;
+		u32 pin_count;
+		u32 gtt_count;
 	} mm;
 	} mm;
 	struct sdvo_device_mapping sdvo_mappings[2];
 	struct sdvo_device_mapping sdvo_mappings[2];
 	/* indicate whether the LVDS_BORDER should be enabled or not */
 	/* indicate whether the LVDS_BORDER should be enabled or not */
@@ -626,8 +670,6 @@ typedef struct drm_i915_private {
 	/* Reclocking support */
 	/* Reclocking support */
 	bool render_reclock_avail;
 	bool render_reclock_avail;
 	bool lvds_downclock_avail;
 	bool lvds_downclock_avail;
-	/* indicate whether the LVDS EDID is OK */
-	bool lvds_edid_good;
 	/* indicates the reduced downclock for LVDS*/
 	/* indicates the reduced downclock for LVDS*/
 	int lvds_downclock;
 	int lvds_downclock;
 	struct work_struct idle_work;
 	struct work_struct idle_work;
@@ -661,6 +703,8 @@ typedef struct drm_i915_private {
 	struct drm_mm_node *compressed_fb;
 	struct drm_mm_node *compressed_fb;
 	struct drm_mm_node *compressed_llb;
 	struct drm_mm_node *compressed_llb;
 
 
+	unsigned long last_gpu_reset;
+
 	/* list of fbdev register on this device */
 	/* list of fbdev register on this device */
 	struct intel_fbdev *fbdev;
 	struct intel_fbdev *fbdev;
 } drm_i915_private_t;
 } drm_i915_private_t;
@@ -673,7 +717,8 @@ struct drm_i915_gem_object {
 	struct drm_mm_node *gtt_space;
 	struct drm_mm_node *gtt_space;
 
 
 	/** This object's place on the active/flushing/inactive lists */
 	/** This object's place on the active/flushing/inactive lists */
-	struct list_head list;
+	struct list_head ring_list;
+	struct list_head mm_list;
 	/** This object's place on GPU write list */
 	/** This object's place on GPU write list */
 	struct list_head gpu_write_list;
 	struct list_head gpu_write_list;
 	/** This object's place on eviction list */
 	/** This object's place on eviction list */
@@ -816,12 +861,14 @@ struct drm_i915_gem_request {
 	/** global list entry for this request */
 	/** global list entry for this request */
 	struct list_head list;
 	struct list_head list;
 
 
+	struct drm_i915_file_private *file_priv;
 	/** file_priv list entry for this request */
 	/** file_priv list entry for this request */
 	struct list_head client_list;
 	struct list_head client_list;
 };
 };
 
 
 struct drm_i915_file_private {
 struct drm_i915_file_private {
 	struct {
 	struct {
+		struct spinlock lock;
 		struct list_head request_list;
 		struct list_head request_list;
 	} mm;
 	} mm;
 };
 };
@@ -862,7 +909,7 @@ extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
 extern int i915_emit_box(struct drm_device *dev,
 extern int i915_emit_box(struct drm_device *dev,
 			 struct drm_clip_rect *boxes,
 			 struct drm_clip_rect *boxes,
 			 int i, int DR1, int DR4);
 			 int i, int DR1, int DR4);
-extern int i965_reset(struct drm_device *dev, u8 flags);
+extern int i915_reset(struct drm_device *dev, u8 flags);
 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
 extern unsigned long i915_gfx_val(struct drm_i915_private *dev_priv);
@@ -871,7 +918,6 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
 
 
 /* i915_irq.c */
 /* i915_irq.c */
 void i915_hangcheck_elapsed(unsigned long data);
 void i915_hangcheck_elapsed(unsigned long data);
-void i915_destroy_error_state(struct drm_device *dev);
 extern int i915_irq_emit(struct drm_device *dev, void *data,
 extern int i915_irq_emit(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv);
 			 struct drm_file *file_priv);
 extern int i915_irq_wait(struct drm_device *dev, void *data,
 extern int i915_irq_wait(struct drm_device *dev, void *data,
@@ -908,6 +954,12 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
 
 
 void intel_enable_asle (struct drm_device *dev);
 void intel_enable_asle (struct drm_device *dev);
 
 
+#ifdef CONFIG_DEBUG_FS
+extern void i915_destroy_error_state(struct drm_device *dev);
+#else
+#define i915_destroy_error_state(x)
+#endif
+
 
 
 /* i915_mem.c */
 /* i915_mem.c */
 extern int i915_mem_alloc(struct drm_device *dev, void *data,
 extern int i915_mem_alloc(struct drm_device *dev, void *data,
@@ -922,6 +974,7 @@ extern void i915_mem_takedown(struct mem_block **heap);
 extern void i915_mem_release(struct drm_device * dev,
 extern void i915_mem_release(struct drm_device * dev,
 			     struct drm_file *file_priv, struct mem_block *heap);
 			     struct drm_file *file_priv, struct mem_block *heap);
 /* i915_gem.c */
 /* i915_gem.c */
+int i915_gem_check_is_wedged(struct drm_device *dev);
 int i915_gem_init_ioctl(struct drm_device *dev, void *data,
 int i915_gem_init_ioctl(struct drm_device *dev, void *data,
 			struct drm_file *file_priv);
 			struct drm_file *file_priv);
 int i915_gem_create_ioctl(struct drm_device *dev, void *data,
 int i915_gem_create_ioctl(struct drm_device *dev, void *data,
@@ -972,13 +1025,22 @@ void i915_gem_object_unpin(struct drm_gem_object *obj);
 int i915_gem_object_unbind(struct drm_gem_object *obj);
 int i915_gem_object_unbind(struct drm_gem_object *obj);
 void i915_gem_release_mmap(struct drm_gem_object *obj);
 void i915_gem_release_mmap(struct drm_gem_object *obj);
 void i915_gem_lastclose(struct drm_device *dev);
 void i915_gem_lastclose(struct drm_device *dev);
-uint32_t i915_get_gem_seqno(struct drm_device *dev,
-		struct intel_ring_buffer *ring);
-bool i915_seqno_passed(uint32_t seq1, uint32_t seq2);
-int i915_gem_object_get_fence_reg(struct drm_gem_object *obj);
-int i915_gem_object_put_fence_reg(struct drm_gem_object *obj);
+
+/**
+ * Returns true if seq1 is later than seq2.
+ */
+static inline bool
+i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+{
+	return (int32_t)(seq1 - seq2) >= 0;
+}
+
+int i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
+				  bool interruptible);
+int i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
+				  bool interruptible);
 void i915_gem_retire_requests(struct drm_device *dev);
 void i915_gem_retire_requests(struct drm_device *dev);
-void i915_gem_retire_work_handler(struct work_struct *work);
+void i915_gem_reset(struct drm_device *dev);
 void i915_gem_clflush_object(struct drm_gem_object *obj);
 void i915_gem_clflush_object(struct drm_gem_object *obj);
 int i915_gem_object_set_domain(struct drm_gem_object *obj,
 int i915_gem_object_set_domain(struct drm_gem_object *obj,
 			       uint32_t read_domains,
 			       uint32_t read_domains,
@@ -990,16 +1052,18 @@ int i915_gem_do_init(struct drm_device *dev, unsigned long start,
 int i915_gpu_idle(struct drm_device *dev);
 int i915_gpu_idle(struct drm_device *dev);
 int i915_gem_idle(struct drm_device *dev);
 int i915_gem_idle(struct drm_device *dev);
 uint32_t i915_add_request(struct drm_device *dev,
 uint32_t i915_add_request(struct drm_device *dev,
-		struct drm_file *file_priv,
-		uint32_t flush_domains,
-		struct intel_ring_buffer *ring);
+			  struct drm_file *file_priv,
+			  struct drm_i915_gem_request *request,
+			  struct intel_ring_buffer *ring);
 int i915_do_wait_request(struct drm_device *dev,
 int i915_do_wait_request(struct drm_device *dev,
-		uint32_t seqno, int interruptible,
-		struct intel_ring_buffer *ring);
+			 uint32_t seqno,
+			 bool interruptible,
+			 struct intel_ring_buffer *ring);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
 int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
 int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
 				      int write);
 				      int write);
-int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj);
+int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
+					 bool pipelined);
 int i915_gem_attach_phys_object(struct drm_device *dev,
 int i915_gem_attach_phys_object(struct drm_device *dev,
 				struct drm_gem_object *obj,
 				struct drm_gem_object *obj,
 				int id,
 				int id,
@@ -1007,10 +1071,7 @@ int i915_gem_attach_phys_object(struct drm_device *dev,
 void i915_gem_detach_phys_object(struct drm_device *dev,
 void i915_gem_detach_phys_object(struct drm_device *dev,
 				 struct drm_gem_object *obj);
 				 struct drm_gem_object *obj);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
 void i915_gem_free_all_phys_object(struct drm_device *dev);
-int i915_gem_object_get_pages(struct drm_gem_object *obj, gfp_t gfpmask);
-void i915_gem_object_put_pages(struct drm_gem_object *obj);
 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
 void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
-int i915_gem_object_flush_write_domain(struct drm_gem_object *obj);
 
 
 void i915_gem_shrinker_init(void);
 void i915_gem_shrinker_init(void);
 void i915_gem_shrinker_exit(void);
 void i915_gem_shrinker_exit(void);
@@ -1032,15 +1093,14 @@ bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj,
 /* i915_gem_debug.c */
 /* i915_gem_debug.c */
 void i915_gem_dump_object(struct drm_gem_object *obj, int len,
 void i915_gem_dump_object(struct drm_gem_object *obj, int len,
 			  const char *where, uint32_t mark);
 			  const char *where, uint32_t mark);
-#if WATCH_INACTIVE
-void i915_verify_inactive(struct drm_device *dev, char *file, int line);
+#if WATCH_LISTS
+int i915_verify_lists(struct drm_device *dev);
 #else
 #else
-#define i915_verify_inactive(dev, file, line)
+#define i915_verify_lists(dev) 0
 #endif
 #endif
 void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
 void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
 void i915_gem_dump_object(struct drm_gem_object *obj, int len,
 void i915_gem_dump_object(struct drm_gem_object *obj, int len,
 			  const char *where, uint32_t mark);
 			  const char *where, uint32_t mark);
-void i915_dump_lru(struct drm_device *dev, const char *where);
 
 
 /* i915_debugfs.c */
 /* i915_debugfs.c */
 int i915_debugfs_init(struct drm_minor *minor);
 int i915_debugfs_init(struct drm_minor *minor);
@@ -1054,21 +1114,42 @@ extern int i915_restore_state(struct drm_device *dev);
 extern int i915_save_state(struct drm_device *dev);
 extern int i915_save_state(struct drm_device *dev);
 extern int i915_restore_state(struct drm_device *dev);
 extern int i915_restore_state(struct drm_device *dev);
 
 
+/* intel_i2c.c */
+extern int intel_setup_gmbus(struct drm_device *dev);
+extern void intel_teardown_gmbus(struct drm_device *dev);
+extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
+extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
+extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
+{
+	return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
+}
+extern void intel_i2c_reset(struct drm_device *dev);
+
+/* intel_opregion.c */
+extern int intel_opregion_setup(struct drm_device *dev);
 #ifdef CONFIG_ACPI
 #ifdef CONFIG_ACPI
-/* i915_opregion.c */
-extern int intel_opregion_init(struct drm_device *dev, int resume);
-extern void intel_opregion_free(struct drm_device *dev, int suspend);
-extern void opregion_asle_intr(struct drm_device *dev);
-extern void ironlake_opregion_gse_intr(struct drm_device *dev);
-extern void opregion_enable_asle(struct drm_device *dev);
+extern void intel_opregion_init(struct drm_device *dev);
+extern void intel_opregion_fini(struct drm_device *dev);
+extern void intel_opregion_asle_intr(struct drm_device *dev);
+extern void intel_opregion_gse_intr(struct drm_device *dev);
+extern void intel_opregion_enable_asle(struct drm_device *dev);
 #else
 #else
-static inline int intel_opregion_init(struct drm_device *dev, int resume) { return 0; }
-static inline void intel_opregion_free(struct drm_device *dev, int suspend) { return; }
-static inline void opregion_asle_intr(struct drm_device *dev) { return; }
-static inline void ironlake_opregion_gse_intr(struct drm_device *dev) { return; }
-static inline void opregion_enable_asle(struct drm_device *dev) { return; }
+static inline void intel_opregion_init(struct drm_device *dev) { return; }
+static inline void intel_opregion_fini(struct drm_device *dev) { return; }
+static inline void intel_opregion_asle_intr(struct drm_device *dev) { return; }
+static inline void intel_opregion_gse_intr(struct drm_device *dev) { return; }
+static inline void intel_opregion_enable_asle(struct drm_device *dev) { return; }
 #endif
 #endif
 
 
+/* intel_acpi.c */
+#ifdef CONFIG_ACPI
+extern void intel_register_dsm_handler(void);
+extern void intel_unregister_dsm_handler(void);
+#else
+static inline void intel_register_dsm_handler(void) { return; }
+static inline void intel_unregister_dsm_handler(void) { return; }
+#endif /* CONFIG_ACPI */
+
 /* modesetting */
 /* modesetting */
 extern void intel_modeset_init(struct drm_device *dev);
 extern void intel_modeset_init(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
 extern void intel_modeset_cleanup(struct drm_device *dev);
@@ -1084,8 +1165,10 @@ extern void intel_detect_pch (struct drm_device *dev);
 extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
 extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
 
 
 /* overlay */
 /* overlay */
+#ifdef CONFIG_DEBUG_FS
 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
 extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
 extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
 extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
+#endif
 
 
 /**
 /**
  * Lock test for when it's just for synchronization of ring access.
  * Lock test for when it's just for synchronization of ring access.
@@ -1099,8 +1182,26 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
 		LOCK_TEST_WITH_RETURN(dev, file_priv);			\
 		LOCK_TEST_WITH_RETURN(dev, file_priv);			\
 } while (0)
 } while (0)
 
 
-#define I915_READ(reg)          readl(dev_priv->regs + (reg))
-#define I915_WRITE(reg, val)     writel(val, dev_priv->regs + (reg))
+static inline u32 i915_read(struct drm_i915_private *dev_priv, u32 reg)
+{
+	u32 val;
+
+	val = readl(dev_priv->regs + reg);
+	if (dev_priv->debug_flags & I915_DEBUG_READ)
+		printk(KERN_ERR "read 0x%08x from 0x%08x\n", val, reg);
+	return val;
+}
+
+static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
+			      u32 val)
+{
+	writel(val, dev_priv->regs + reg);
+	if (dev_priv->debug_flags & I915_DEBUG_WRITE)
+		printk(KERN_ERR "wrote 0x%08x to 0x%08x\n", val, reg);
+}
+
+#define I915_READ(reg)          i915_read(dev_priv, (reg))
+#define I915_WRITE(reg, val)    i915_write(dev_priv, (reg), (val))
 #define I915_READ16(reg)	readw(dev_priv->regs + (reg))
 #define I915_READ16(reg)	readw(dev_priv->regs + (reg))
 #define I915_WRITE16(reg, val)	writel(val, dev_priv->regs + (reg))
 #define I915_WRITE16(reg, val)	writel(val, dev_priv->regs + (reg))
 #define I915_READ8(reg)		readb(dev_priv->regs + (reg))
 #define I915_READ8(reg)		readb(dev_priv->regs + (reg))
@@ -1110,6 +1211,11 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
 #define POSTING_READ(reg)	(void)I915_READ(reg)
 #define POSTING_READ(reg)	(void)I915_READ(reg)
 #define POSTING_READ16(reg)	(void)I915_READ16(reg)
 #define POSTING_READ16(reg)	(void)I915_READ16(reg)
 
 
+#define I915_DEBUG_ENABLE_IO() (dev_priv->debug_flags |= I915_DEBUG_READ | \
+				I915_DEBUG_WRITE)
+#define I915_DEBUG_DISABLE_IO() (dev_priv->debug_flags &= ~(I915_DEBUG_READ | \
+							    I915_DEBUG_WRITE))
+
 #define I915_VERBOSE 0
 #define I915_VERBOSE 0
 
 
 #define BEGIN_LP_RING(n)  do { \
 #define BEGIN_LP_RING(n)  do { \
@@ -1166,8 +1272,6 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
 #define IS_I915GM(dev)		((dev)->pci_device == 0x2592)
 #define IS_I915GM(dev)		((dev)->pci_device == 0x2592)
 #define IS_I945G(dev)		((dev)->pci_device == 0x2772)
 #define IS_I945G(dev)		((dev)->pci_device == 0x2772)
 #define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
 #define IS_I945GM(dev)		(INTEL_INFO(dev)->is_i945gm)
-#define IS_I965G(dev)		(INTEL_INFO(dev)->is_i965g)
-#define IS_I965GM(dev)		(INTEL_INFO(dev)->is_i965gm)
 #define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
 #define IS_BROADWATER(dev)	(INTEL_INFO(dev)->is_broadwater)
 #define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
 #define IS_CRESTLINE(dev)	(INTEL_INFO(dev)->is_crestline)
 #define IS_GM45(dev)		((dev)->pci_device == 0x2A42)
 #define IS_GM45(dev)		((dev)->pci_device == 0x2A42)
@@ -1178,8 +1282,6 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
 #define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
 #define IS_G33(dev)		(INTEL_INFO(dev)->is_g33)
 #define IS_IRONLAKE_D(dev)	((dev)->pci_device == 0x0042)
 #define IS_IRONLAKE_D(dev)	((dev)->pci_device == 0x0042)
 #define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
 #define IS_IRONLAKE_M(dev)	((dev)->pci_device == 0x0046)
-#define IS_IRONLAKE(dev)	(INTEL_INFO(dev)->is_ironlake)
-#define IS_I9XX(dev)		(INTEL_INFO(dev)->is_i9xx)
 #define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
 #define IS_MOBILE(dev)		(INTEL_INFO(dev)->is_mobile)
 
 
 #define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
 #define IS_GEN2(dev)	(INTEL_INFO(dev)->gen == 2)
@@ -1188,33 +1290,34 @@ extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_ove
 #define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
 #define IS_GEN5(dev)	(INTEL_INFO(dev)->gen == 5)
 #define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
 #define IS_GEN6(dev)	(INTEL_INFO(dev)->gen == 6)
 
 
-#define HAS_BSD(dev)            (IS_IRONLAKE(dev) || IS_G4X(dev))
+#define HAS_BSD(dev)            (INTEL_INFO(dev)->has_bsd_ring)
+#define HAS_BLT(dev)            (INTEL_INFO(dev)->has_blt_ring)
 #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
 #define I915_NEED_GFX_HWS(dev)	(INTEL_INFO(dev)->need_gfx_hws)
 
 
+#define HAS_OVERLAY(dev) 		(INTEL_INFO(dev)->has_overlay)
+#define OVERLAY_NEEDS_PHYSICAL(dev)	(INTEL_INFO(dev)->overlay_needs_physical)
+
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
 /* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
  * rows, which changed the alignment requirements and fence programming.
  * rows, which changed the alignment requirements and fence programming.
  */
  */
-#define HAS_128_BYTE_Y_TILING(dev) (IS_I9XX(dev) && !(IS_I915G(dev) || \
+#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
 						      IS_I915GM(dev)))
 						      IS_I915GM(dev)))
-#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(IS_I9XX(dev) && !IS_PINEVIEW(dev))
-#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_IRONLAKE(dev))
-#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_IRONLAKE(dev))
+#define SUPPORTS_DIGITAL_OUTPUTS(dev)	(!IS_GEN2(dev) && !IS_PINEVIEW(dev))
+#define SUPPORTS_INTEGRATED_HDMI(dev)	(IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_INTEGRATED_DP(dev)	(IS_G4X(dev) || IS_GEN5(dev))
 #define SUPPORTS_EDP(dev)		(IS_IRONLAKE_M(dev))
-#define SUPPORTS_TV(dev)		(IS_I9XX(dev) && IS_MOBILE(dev) && \
-					!IS_IRONLAKE(dev) && !IS_PINEVIEW(dev) && \
-					!IS_GEN6(dev))
+#define SUPPORTS_TV(dev)		(INTEL_INFO(dev)->supports_tv)
 #define I915_HAS_HOTPLUG(dev)		 (INTEL_INFO(dev)->has_hotplug)
 /* dsparb controlled by hw only */
 #define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
 
-#define HAS_FW_BLC(dev) (IS_I9XX(dev) || IS_G4X(dev) || IS_IRONLAKE(dev))
+#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
 #define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
 #define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
 #define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
 
-#define HAS_PCH_SPLIT(dev) (IS_IRONLAKE(dev) ||	\
-			    IS_GEN6(dev))
-#define HAS_PIPE_CONTROL(dev) (IS_IRONLAKE(dev) || IS_GEN6(dev))
+#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev))
+#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev))
 
 #define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
 #define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)

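The i915_drv.h hunk above drops the chipset-name checks (IS_I9XX, IS_IRONLAKE) in favour of a generation number and per-device feature bits read through INTEL_INFO(dev). As a rough standalone sketch of that table-lookup pattern only — the struct layout and the demo scaffolding below are simplified assumptions, not the driver's actual definitions:

    /* Illustrative sketch of gen/feature-flag checks; not the kernel's code. */
    #include <stdio.h>

    struct intel_device_info {
            int gen;                  /* hardware generation, e.g. 5 = Ironlake */
            unsigned has_bsd_ring:1;  /* replaces IS_IRONLAKE(dev) || IS_G4X(dev) */
            unsigned has_blt_ring:1;
    };

    #define INTEL_INFO(dev) (dev)
    #define IS_GEN5(dev)    (INTEL_INFO(dev)->gen == 5)
    #define HAS_BSD(dev)    (INTEL_INFO(dev)->has_bsd_ring)
    #define HAS_BLT(dev)    (INTEL_INFO(dev)->has_blt_ring)

    int main(void)
    {
            static const struct intel_device_info ironlake = { .gen = 5, .has_bsd_ring = 1 };
            const struct intel_device_info *dev = &ironlake;

            printf("gen5=%d bsd=%d blt=%d\n", IS_GEN5(dev), HAS_BSD(dev), HAS_BLT(dev));
            return 0;
    }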
+ 327 - 235
drivers/gpu/drm/i915/i915_gem.c

File diff suppressed because it is too large


+ 98 - 50
drivers/gpu/drm/i915/i915_gem_debug.c

@@ -30,29 +30,112 @@
 #include "i915_drm.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
 #include "i915_drv.h"
 
 
-#if WATCH_INACTIVE
-void
-i915_verify_inactive(struct drm_device *dev, char *file, int line)
+#if WATCH_LISTS
+int
+i915_verify_lists(struct drm_device *dev)
 {
+	static int warned;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_gem_object *obj;
-	struct drm_i915_gem_object *obj_priv;
-
-	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
-		obj = &obj_priv->base;
-		if (obj_priv->pin_count || obj_priv->active ||
-		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
-					   I915_GEM_DOMAIN_GTT)))
-			DRM_ERROR("inactive %p (p %d a %d w %x)  %s:%d\n",
+	struct drm_i915_gem_object *obj;
+	int err = 0;
+
+	if (warned)
+		return 0;
+
+	list_for_each_entry(obj, &dev_priv->render_ring.active_list, list) {
+		if (obj->base.dev != dev ||
+		    !atomic_read(&obj->base.refcount.refcount)) {
+			DRM_ERROR("freed render active %p\n", obj);
+			err++;
+			break;
+		} else if (!obj->active ||
+			   (obj->base.read_domains & I915_GEM_GPU_DOMAINS) == 0) {
+			DRM_ERROR("invalid render active %p (a %d r %x)\n",
+				  obj,
+				  obj->active,
+				  obj->base.read_domains);
+			err++;
+		} else if (obj->base.write_domain && list_empty(&obj->gpu_write_list)) {
+			DRM_ERROR("invalid render active %p (w %x, gwl %d)\n",
+				  obj,
+				  obj->base.write_domain,
+				  !list_empty(&obj->gpu_write_list));
+			err++;
+		}
+	}
+
+	list_for_each_entry(obj, &dev_priv->mm.flushing_list, list) {
+		if (obj->base.dev != dev ||
+		    !atomic_read(&obj->base.refcount.refcount)) {
+			DRM_ERROR("freed flushing %p\n", obj);
+			err++;
+			break;
+		} else if (!obj->active ||
+			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0 ||
+			   list_empty(&obj->gpu_write_list)){
+			DRM_ERROR("invalid flushing %p (a %d w %x gwl %d)\n",
 				  obj,
-				  obj_priv->pin_count, obj_priv->active,
-				  obj->write_domain, file, line);
+				  obj->active,
+				  obj->base.write_domain,
+				  !list_empty(&obj->gpu_write_list));
+			err++;
+		}
+	}
+
+	list_for_each_entry(obj, &dev_priv->mm.gpu_write_list, gpu_write_list) {
+		if (obj->base.dev != dev ||
+		    !atomic_read(&obj->base.refcount.refcount)) {
+			DRM_ERROR("freed gpu write %p\n", obj);
+			err++;
+			break;
+		} else if (!obj->active ||
+			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0) {
+			DRM_ERROR("invalid gpu write %p (a %d w %x)\n",
+				  obj,
+				  obj->active,
+				  obj->base.write_domain);
+			err++;
+		}
+	}
+
+	list_for_each_entry(obj, &dev_priv->mm.inactive_list, list) {
+		if (obj->base.dev != dev ||
+		    !atomic_read(&obj->base.refcount.refcount)) {
+			DRM_ERROR("freed inactive %p\n", obj);
+			err++;
+			break;
+		} else if (obj->pin_count || obj->active ||
+			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
+			DRM_ERROR("invalid inactive %p (p %d a %d w %x)\n",
+				  obj,
+				  obj->pin_count, obj->active,
+				  obj->base.write_domain);
+			err++;
+		}
 	}
+
+	list_for_each_entry(obj, &dev_priv->mm.pinned_list, list) {
+		if (obj->base.dev != dev ||
+		    !atomic_read(&obj->base.refcount.refcount)) {
+			DRM_ERROR("freed pinned %p\n", obj);
+			err++;
+			break;
+		} else if (!obj->pin_count || obj->active ||
+			   (obj->base.write_domain & I915_GEM_GPU_DOMAINS)) {
+			DRM_ERROR("invalid pinned %p (p %d a %d w %x)\n",
+				  obj,
+				  obj->pin_count, obj->active,
+				  obj->base.write_domain);
+			err++;
+		}
+	}
+
+	return warned = err;
 }
 #endif /* WATCH_INACTIVE */
 
 
-#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE
+#if WATCH_EXEC | WATCH_PWRITE
 static void
 i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
 		   uint32_t bias, uint32_t mark)
@@ -97,41 +180,6 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
 }
 #endif
 
-#if WATCH_LRU
-void
-i915_dump_lru(struct drm_device *dev, const char *where)
-{
-	drm_i915_private_t		*dev_priv = dev->dev_private;
-	struct drm_i915_gem_object	*obj_priv;
-
-	DRM_INFO("active list %s {\n", where);
-	spin_lock(&dev_priv->mm.active_list_lock);
-	list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
-			    list)
-	{
-		DRM_INFO("    %p: %08x\n", obj_priv,
-			 obj_priv->last_rendering_seqno);
-	}
-	spin_unlock(&dev_priv->mm.active_list_lock);
-	DRM_INFO("}\n");
-	DRM_INFO("flushing list %s {\n", where);
-	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
-			    list)
-	{
-		DRM_INFO("    %p: %08x\n", obj_priv,
-			 obj_priv->last_rendering_seqno);
-	}
-	DRM_INFO("}\n");
-	DRM_INFO("inactive %s {\n", where);
-	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
-		DRM_INFO("    %p: %08x\n", obj_priv,
-			 obj_priv->last_rendering_seqno);
-	}
-	DRM_INFO("}\n");
-}
-#endif
-
-
 #if WATCH_COHERENCY
 void
 i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)

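The new i915_verify_lists() above walks each GEM list, counts objects that violate that list's invariants, and latches the result in a static so only the first failure is reported. A toy standalone sketch of the same walk-count-latch pattern (the object fields and list here are invented for illustration):

    #include <stdio.h>

    struct obj { int active; int pin_count; };

    /* Report invariant violations once, then stay quiet on later calls. */
    static int verify_inactive(const struct obj *o, int n)
    {
            static int warned;
            int err = 0, i;

            if (warned)
                    return 0;
            for (i = 0; i < n; i++)
                    if (o[i].active || o[i].pin_count) {
                            fprintf(stderr, "invalid inactive object %d\n", i);
                            err++;
                    }
            return warned = err;
    }

    int main(void)
    {
            const struct obj objs[] = { {0, 0}, {1, 0}, {0, 2} };
            int first = verify_inactive(objs, 3);
            int second = verify_inactive(objs, 3);

            printf("first run: %d errors, second run: %d errors\n", first, second);
            return 0;
    }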
+ 9 - 63
drivers/gpu/drm/i915/i915_gem_evict.c

@@ -31,49 +31,6 @@
 #include "i915_drv.h"
 #include "i915_drv.h"
 #include "i915_drm.h"
 #include "i915_drm.h"
 
 
-static struct drm_i915_gem_object *
-i915_gem_next_active_object(struct drm_device *dev,
-			    struct list_head **render_iter,
-			    struct list_head **bsd_iter)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	struct drm_i915_gem_object *render_obj = NULL, *bsd_obj = NULL;
-
-	if (*render_iter != &dev_priv->render_ring.active_list)
-		render_obj = list_entry(*render_iter,
-					struct drm_i915_gem_object,
-					list);
-
-	if (HAS_BSD(dev)) {
-		if (*bsd_iter != &dev_priv->bsd_ring.active_list)
-			bsd_obj = list_entry(*bsd_iter,
-					     struct drm_i915_gem_object,
-					     list);
-
-		if (render_obj == NULL) {
-			*bsd_iter = (*bsd_iter)->next;
-			return bsd_obj;
-		}
-
-		if (bsd_obj == NULL) {
-			*render_iter = (*render_iter)->next;
-			return render_obj;
-		}
-
-		/* XXX can we handle seqno wrapping? */
-		if (render_obj->last_rendering_seqno < bsd_obj->last_rendering_seqno) {
-			*render_iter = (*render_iter)->next;
-			return render_obj;
-		} else {
-			*bsd_iter = (*bsd_iter)->next;
-			return bsd_obj;
-		}
-	} else {
-		*render_iter = (*render_iter)->next;
-		return render_obj;
-	}
-}
-
 static bool
 mark_free(struct drm_i915_gem_object *obj_priv,
 	   struct list_head *unwind)
@@ -83,18 +40,12 @@ mark_free(struct drm_i915_gem_object *obj_priv,
 	return drm_mm_scan_add_block(obj_priv->gtt_space);
 }
 
-#define i915_for_each_active_object(OBJ, R, B) \
-	*(R) = dev_priv->render_ring.active_list.next; \
-	*(B) = dev_priv->bsd_ring.active_list.next; \
-	while (((OBJ) = i915_gem_next_active_object(dev, (R), (B))) != NULL)
-
 int
 i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct list_head eviction_list, unwind_list;
 	struct drm_i915_gem_object *obj_priv;
-	struct list_head *render_iter, *bsd_iter;
 	int ret = 0;
 
 	i915_gem_retire_requests(dev);
@@ -131,13 +82,13 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
 	drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
 
 	/* First see if there is a large enough contiguous idle region... */
-	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
 		if (mark_free(obj_priv, &unwind_list))
 			goto found;
 	}
 
 	/* Now merge in the soon-to-be-expired objects... */
-	i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
+	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
 		/* Does the object require an outstanding flush? */
 		if (obj_priv->base.write_domain || obj_priv->pin_count)
 			continue;
@@ -147,14 +98,14 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
 	}
 
 	/* Finally add anything with a pending flush (in order of retirement) */
-	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
 		if (obj_priv->pin_count)
 			continue;
 
 		if (mark_free(obj_priv, &unwind_list))
 			goto found;
 	}
-	i915_for_each_active_object(obj_priv, &render_iter, &bsd_iter) {
+	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
 		if (! obj_priv->base.write_domain || obj_priv->pin_count)
 			continue;
 
@@ -212,14 +163,11 @@ i915_gem_evict_everything(struct drm_device *dev)
 	int ret;
 	bool lists_empty;
 
-	spin_lock(&dev_priv->mm.active_list_lock);
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
 		       list_empty(&dev_priv->render_ring.active_list) &&
-		       (!HAS_BSD(dev)
-			|| list_empty(&dev_priv->bsd_ring.active_list)));
-	spin_unlock(&dev_priv->mm.active_list_lock);
-
+		       list_empty(&dev_priv->bsd_ring.active_list) &&
+		       list_empty(&dev_priv->blt_ring.active_list));
 	if (lists_empty)
 		return -ENOSPC;
 
@@ -234,13 +182,11 @@ i915_gem_evict_everything(struct drm_device *dev)
 	if (ret)
 		return ret;
 
-	spin_lock(&dev_priv->mm.active_list_lock);
 	lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
 		       list_empty(&dev_priv->mm.flushing_list) &&
 		       list_empty(&dev_priv->render_ring.active_list) &&
-		       (!HAS_BSD(dev)
-			|| list_empty(&dev_priv->bsd_ring.active_list)));
-	spin_unlock(&dev_priv->mm.active_list_lock);
+		       list_empty(&dev_priv->bsd_ring.active_list) &&
+		       list_empty(&dev_priv->blt_ring.active_list));
 	BUG_ON(!lists_empty);
 
 	return 0;
@@ -258,7 +204,7 @@ i915_gem_evict_inactive(struct drm_device *dev)
 
 		obj = &list_first_entry(&dev_priv->mm.inactive_list,
 					struct drm_i915_gem_object,
-					list)->base;
+					mm_list)->base;
 
 		ret = i915_gem_object_unbind(obj);
 		if (ret != 0) {

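The rewritten i915_gem_evict_something() above now walks a single retirement-ordered list in three passes: idle buffers first, then active-but-clean ones, then buffers still awaiting a GPU flush, stopping as soon as enough space has been marked. A rough standalone sketch of that phased candidate selection (sizes and the threshold are made up):

    #include <stdio.h>

    /* Accumulate candidate sizes until the request can be satisfied. */
    static int scan(const int *sizes, int n, int *freed, int needed)
    {
            int i;
            for (i = 0; i < n && *freed < needed; i++)
                    *freed += sizes[i];
            return *freed >= needed;
    }

    int main(void)
    {
            const int inactive[] = { 4, 8 }, active_clean[] = { 16 }, flushing[] = { 32 };
            int freed = 0, needed = 24;

            if (scan(inactive, 2, &freed, needed) ||
                scan(active_clean, 1, &freed, needed) ||
                scan(flushing, 1, &freed, needed))
                    printf("marked %d units for eviction\n", freed);
            return 0;
    }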
+ 26 - 28
drivers/gpu/drm/i915/i915_gem_tiling.c

@@ -92,13 +92,13 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
 	uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
 	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
 
-	if (IS_IRONLAKE(dev) || IS_GEN6(dev)) {
+	if (IS_GEN5(dev) || IS_GEN6(dev)) {
 		/* On Ironlake whatever DRAM config, GPU always do
 		 * same swizzling setup.
 		 */
 		swizzle_x = I915_BIT_6_SWIZZLE_9_10;
 		swizzle_y = I915_BIT_6_SWIZZLE_9;
-	} else if (!IS_I9XX(dev)) {
+	} else if (IS_GEN2(dev)) {
 		/* As far as we know, the 865 doesn't have these bit 6
 		 * swizzling issues.
 		 */
@@ -190,19 +190,19 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 	if (tiling_mode == I915_TILING_NONE)
 		return true;
 
-	if (!IS_I9XX(dev) ||
+	if (IS_GEN2(dev) ||
 	    (tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
 		tile_width = 128;
 	else
 		tile_width = 512;
 
 	/* check maximum stride & object size */
-	if (IS_I965G(dev)) {
+	if (INTEL_INFO(dev)->gen >= 4) {
 		/* i965 stores the end address of the gtt mapping in the fence
 		 * reg, so dont bother to check the size */
 		if (stride / 128 > I965_FENCE_MAX_PITCH_VAL)
 			return false;
-	} else if (IS_GEN3(dev) || IS_GEN2(dev)) {
+	} else {
 		if (stride > 8192)
 			return false;
 
@@ -216,7 +216,7 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
 	}
 
 	/* 965+ just needs multiples of tile width */
-	if (IS_I965G(dev)) {
+	if (INTEL_INFO(dev)->gen >= 4) {
 		if (stride & (tile_width - 1))
 			return false;
 		return true;
@@ -244,16 +244,18 @@ i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
 	if (tiling_mode == I915_TILING_NONE)
 		return true;
 
-	if (!IS_I965G(dev)) {
-		if (obj_priv->gtt_offset & (obj->size - 1))
+	if (INTEL_INFO(dev)->gen >= 4)
+		return true;
+
+	if (obj_priv->gtt_offset & (obj->size - 1))
+		return false;
+
+	if (IS_GEN3(dev)) {
+		if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
+			return false;
+	} else {
+		if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
 			return false;
-		if (IS_I9XX(dev)) {
-			if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
-				return false;
-		} else {
-			if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
-				return false;
-		}
 	}
 
 	return true;
@@ -271,7 +273,11 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_gem_object *obj;
 	struct drm_i915_gem_object *obj_priv;
-	int ret = 0;
+	int ret;
+
+	ret = i915_gem_check_is_wedged(dev);
+	if (ret)
+		return ret;
 
 	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
 	if (obj == NULL)
@@ -328,7 +334,7 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
 		if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode))
 			ret = i915_gem_object_unbind(obj);
 		else if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
-			ret = i915_gem_object_put_fence_reg(obj);
+			ret = i915_gem_object_put_fence_reg(obj, true);
 		else
 			i915_gem_release_mmap(obj);
 
@@ -399,16 +405,14 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
  * bit 17 of its physical address and therefore being interpreted differently
  * by the GPU.
  */
-static int
+static void
 i915_gem_swizzle_page(struct page *page)
 {
+	char temp[64];
 	char *vaddr;
 	int i;
-	char temp[64];
 
 	vaddr = kmap(page);
-	if (vaddr == NULL)
-		return -ENOMEM;
 
 	for (i = 0; i < PAGE_SIZE; i += 128) {
 		memcpy(temp, &vaddr[i], 64);
@@ -417,8 +421,6 @@ i915_gem_swizzle_page(struct page *page)
 	}
 
 	kunmap(page);
-
-	return 0;
 }
 
 void
@@ -440,11 +442,7 @@ i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
 		char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
 		if ((new_bit_17 & 0x1) !=
 		    (test_bit(i, obj_priv->bit_17) != 0)) {
-			int ret = i915_gem_swizzle_page(obj_priv->pages[i]);
-			if (ret != 0) {
-				DRM_ERROR("Failed to swizzle page\n");
-				return;
-			}
+			i915_gem_swizzle_page(obj_priv->pages[i]);
 			set_page_dirty(obj_priv->pages[i]);
 		}
 	}

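i915_gem_swizzle_page() in the hunk above undoes bit-17-based swizzling by swapping the two 64-byte halves of every 128-byte block in a page; the error path disappears along with the kmap() NULL check. A standalone sketch of just that swap (PAGE_SIZE hard-coded here for illustration, not taken from kernel headers):

    #include <string.h>

    #define PAGE_SIZE 4096

    /* Swap the 64-byte halves of each 128-byte chunk in one page. */
    static void swizzle_page(unsigned char *vaddr)
    {
            unsigned char temp[64];
            int i;

            for (i = 0; i < PAGE_SIZE; i += 128) {
                    memcpy(temp, &vaddr[i], 64);
                    memcpy(&vaddr[i], &vaddr[i + 64], 64);
                    memcpy(&vaddr[i + 64], temp, 64);
            }
    }

    int main(void)
    {
            static unsigned char page[PAGE_SIZE];
            swizzle_page(page);
            return 0;
    }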
+ 144 - 115
drivers/gpu/drm/i915/i915_irq.c

@@ -85,7 +85,7 @@ ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
 }
 
 /* For display hotplug interrupt */
-void
+static void
 ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
 {
 	if ((dev_priv->irq_mask_reg & mask) != 0) {
@@ -172,7 +172,7 @@ void intel_enable_asle (struct drm_device *dev)
 	else {
 		i915_enable_pipestat(dev_priv, 1,
 				     PIPE_LEGACY_BLC_EVENT_ENABLE);
-		if (IS_I965G(dev))
+		if (INTEL_INFO(dev)->gen >= 4)
 			i915_enable_pipestat(dev_priv, 0,
 					     PIPE_LEGACY_BLC_EVENT_ENABLE);
 	}
@@ -191,12 +191,7 @@ static int
 i915_pipe_enabled(struct drm_device *dev, int pipe)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
-
-	if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
-		return 1;
-
-	return 0;
+	return I915_READ(PIPECONF(pipe)) & PIPECONF_ENABLE;
 }
 
 /* Called from drm generic code, passed a 'crtc', which
@@ -207,10 +202,7 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long high_frame;
 	unsigned long low_frame;
-	u32 high1, high2, low, count;
-
-	high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
-	low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
+	u32 high1, high2, low;
 
 	if (!i915_pipe_enabled(dev, pipe)) {
 		DRM_DEBUG_DRIVER("trying to get vblank count for disabled "
@@ -218,23 +210,23 @@ u32 i915_get_vblank_counter(struct drm_device *dev, int pipe)
 		return 0;
 	}
 
+	high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
+	low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
+
 	/*
 	 * High & low register fields aren't synchronized, so make sure
 	 * we get a low value that's stable across two reads of the high
 	 * register.
 	 */
 	do {
-		high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
-			 PIPE_FRAME_HIGH_SHIFT);
-		low =  ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
-			PIPE_FRAME_LOW_SHIFT);
-		high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
-			 PIPE_FRAME_HIGH_SHIFT);
+		high1 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
+		low   = I915_READ(low_frame)  & PIPE_FRAME_LOW_MASK;
+		high2 = I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK;
 	} while (high1 != high2);
 
-	count = (high1 << 8) | low;
-
-	return count;
+	high1 >>= PIPE_FRAME_HIGH_SHIFT;
+	low >>= PIPE_FRAME_LOW_SHIFT;
+	return (high1 << 8) | low;
 }
 
 u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
@@ -260,16 +252,12 @@ static void i915_hotplug_work_func(struct work_struct *work)
 						    hotplug_work);
 	struct drm_device *dev = dev_priv->dev;
 	struct drm_mode_config *mode_config = &dev->mode_config;
-	struct drm_encoder *encoder;
-
-	if (mode_config->num_encoder) {
-		list_for_each_entry(encoder, &mode_config->encoder_list, head) {
-			struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	
-			if (intel_encoder->hot_plug)
-				(*intel_encoder->hot_plug) (intel_encoder);
-		}
-	}
+	struct intel_encoder *encoder;
+
+	list_for_each_entry(encoder, &mode_config->encoder_list, base.head)
+		if (encoder->hot_plug)
+			encoder->hot_plug(encoder);
+
 	/* Just fire off a uevent and let userspace tell us what to do */
 	drm_helper_hpd_irq_event(dev);
 }
@@ -305,13 +293,30 @@ static void i915_handle_rps_change(struct drm_device *dev)
 	return;
 }
 
-irqreturn_t ironlake_irq_handler(struct drm_device *dev)
+static void notify_ring(struct drm_device *dev,
+			struct intel_ring_buffer *ring)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 seqno = ring->get_seqno(dev, ring);
+	ring->irq_gem_seqno = seqno;
+	trace_i915_gem_request_complete(dev, seqno);
+	wake_up_all(&ring->irq_queue);
+	dev_priv->hangcheck_count = 0;
+	mod_timer(&dev_priv->hangcheck_timer,
+		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
+}
+
+static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	int ret = IRQ_NONE;
 	u32 de_iir, gt_iir, de_ier, pch_iir;
+	u32 hotplug_mask;
 	struct drm_i915_master_private *master_priv;
-	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+	u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
+
+	if (IS_GEN6(dev))
+		bsd_usr_interrupt = GT_GEN6_BSD_USER_INTERRUPT;
 
 	/* disable master interrupt before clearing iir  */
 	de_ier = I915_READ(DEIER);
@@ -325,6 +330,11 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 	if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
 		goto done;
 
+	if (HAS_PCH_CPT(dev))
+		hotplug_mask = SDE_HOTPLUG_MASK_CPT;
+	else
+		hotplug_mask = SDE_HOTPLUG_MASK;
+
 	ret = IRQ_HANDLED;
 
 	if (dev->primary->master) {
@@ -334,29 +344,24 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 				READ_BREADCRUMB(dev_priv);
 	}
 
-	if (gt_iir & GT_PIPE_NOTIFY) {
-		u32 seqno = render_ring->get_gem_seqno(dev, render_ring);
-		render_ring->irq_gem_seqno = seqno;
-		trace_i915_gem_request_complete(dev, seqno);
-		DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
-		dev_priv->hangcheck_count = 0;
-		mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
-	}
-	if (gt_iir & GT_BSD_USER_INTERRUPT)
-		DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
-
+	if (gt_iir & GT_PIPE_NOTIFY)
+		notify_ring(dev, &dev_priv->render_ring);
+	if (gt_iir & bsd_usr_interrupt)
+		notify_ring(dev, &dev_priv->bsd_ring);
+	if (HAS_BLT(dev) && gt_iir & GT_BLT_USER_INTERRUPT)
+		notify_ring(dev, &dev_priv->blt_ring);
 
 	if (de_iir & DE_GSE)
-		ironlake_opregion_gse_intr(dev);
+		intel_opregion_gse_intr(dev);
 
 	if (de_iir & DE_PLANEA_FLIP_DONE) {
 		intel_prepare_page_flip(dev, 0);
-		intel_finish_page_flip(dev, 0);
+		intel_finish_page_flip_plane(dev, 0);
 	}
 
 	if (de_iir & DE_PLANEB_FLIP_DONE) {
 		intel_prepare_page_flip(dev, 1);
-		intel_finish_page_flip(dev, 1);
+		intel_finish_page_flip_plane(dev, 1);
 	}
 
 	if (de_iir & DE_PIPEA_VBLANK)
@@ -366,10 +371,8 @@ irqreturn_t ironlake_irq_handler(struct drm_device *dev)
 		drm_handle_vblank(dev, 1);
 
 	/* check event from PCH */
-	if ((de_iir & DE_PCH_EVENT) &&
-	    (pch_iir & SDE_HOTPLUG_MASK)) {
+	if ((de_iir & DE_PCH_EVENT) && (pch_iir & hotplug_mask))
 		queue_work(dev_priv->wq, &dev_priv->hotplug_work);
-	}
 
 	if (de_iir & DE_PCU_EVENT) {
 		I915_WRITE16(MEMINTRSTS, I915_READ(MEMINTRSTS));
@@ -404,23 +407,20 @@ static void i915_error_work_func(struct work_struct *work)
 	char *reset_event[] = { "RESET=1", NULL };
 	char *reset_done_event[] = { "ERROR=0", NULL };
 
-	DRM_DEBUG_DRIVER("generating error event\n");
 	kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, error_event);
 
 	if (atomic_read(&dev_priv->mm.wedged)) {
-		if (IS_I965G(dev)) {
-			DRM_DEBUG_DRIVER("resetting chip\n");
-			kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
-			if (!i965_reset(dev, GDRST_RENDER)) {
-				atomic_set(&dev_priv->mm.wedged, 0);
-				kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
-			}
-		} else {
-			DRM_DEBUG_DRIVER("reboot required\n");
+		DRM_DEBUG_DRIVER("resetting chip\n");
+		kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_event);
+		if (!i915_reset(dev, GRDOM_RENDER)) {
+			atomic_set(&dev_priv->mm.wedged, 0);
+			kobject_uevent_env(&dev->primary->kdev.kobj, KOBJ_CHANGE, reset_done_event);
 		}
 		}
+		complete_all(&dev_priv->error_completion);
 	}
 }
 
+#ifdef CONFIG_DEBUG_FS
 static struct drm_i915_error_object *
 i915_error_object_create(struct drm_device *dev,
 			 struct drm_gem_object *src)
@@ -510,7 +510,7 @@ i915_get_bbaddr(struct drm_device *dev, u32 *ring)
 
 	if (IS_I830(dev) || IS_845G(dev))
 		cmd = MI_BATCH_BUFFER;
-	else if (IS_I965G(dev))
+	else if (INTEL_INFO(dev)->gen >= 4)
 		cmd = (MI_BATCH_BUFFER_START | (2 << 6) |
 		       MI_BATCH_NON_SECURE_I965);
 	else
@@ -583,13 +583,16 @@ static void i915_capture_error_state(struct drm_device *dev)
 		return;
 	}
 
-	error->seqno = i915_get_gem_seqno(dev, &dev_priv->render_ring);
+	DRM_DEBUG_DRIVER("generating error event\n");
+
+	error->seqno =
+		dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring);
 	error->eir = I915_READ(EIR);
 	error->pgtbl_er = I915_READ(PGTBL_ER);
 	error->pipeastat = I915_READ(PIPEASTAT);
 	error->pipebstat = I915_READ(PIPEBSTAT);
 	error->instpm = I915_READ(INSTPM);
-	if (!IS_I965G(dev)) {
+	if (INTEL_INFO(dev)->gen < 4) {
 		error->ipeir = I915_READ(IPEIR);
 		error->ipehr = I915_READ(IPEHR);
 		error->instdone = I915_READ(INSTDONE);
@@ -611,9 +614,7 @@ static void i915_capture_error_state(struct drm_device *dev)
 	batchbuffer[0] = NULL;
 	batchbuffer[1] = NULL;
 	count = 0;
-	list_for_each_entry(obj_priv,
-			&dev_priv->render_ring.active_list, list) {
-
+	list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
 		struct drm_gem_object *obj = &obj_priv->base;
 		struct drm_gem_object *obj = &obj_priv->base;
 
 
 		if (batchbuffer[0] == NULL &&
 		if (batchbuffer[0] == NULL &&
@@ -630,7 +631,7 @@ static void i915_capture_error_state(struct drm_device *dev)
 	}
 	}
 	/* Scan the other lists for completeness for those bizarre errors. */
 	/* Scan the other lists for completeness for those bizarre errors. */
 	if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
 	if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
-		list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, list) {
+		list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
 			struct drm_gem_object *obj = &obj_priv->base;
 			struct drm_gem_object *obj = &obj_priv->base;
 
 
 			if (batchbuffer[0] == NULL &&
 			if (batchbuffer[0] == NULL &&
@@ -648,7 +649,7 @@ static void i915_capture_error_state(struct drm_device *dev)
 		}
 		}
 	}
 	}
 	if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
 	if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
-		list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+		list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
 			struct drm_gem_object *obj = &obj_priv->base;
 			struct drm_gem_object *obj = &obj_priv->base;
 
 
 			if (batchbuffer[0] == NULL &&
 			if (batchbuffer[0] == NULL &&
@@ -667,7 +668,7 @@ static void i915_capture_error_state(struct drm_device *dev)
 	}
 	}
 
 
 	/* We need to copy these to an anonymous buffer as the simplest
 	/* We need to copy these to an anonymous buffer as the simplest
-	 * method to avoid being overwritten by userpace.
+	 * method to avoid being overwritten by userspace.
 	 */
 	error->batchbuffer[0] = i915_error_object_create(dev, batchbuffer[0]);
 	if (batchbuffer[1] != batchbuffer[0])
@@ -689,8 +690,7 @@ static void i915_capture_error_state(struct drm_device *dev)
 
 	if (error->active_bo) {
 		int i = 0;
-		list_for_each_entry(obj_priv,
-				&dev_priv->render_ring.active_list, list) {
+		list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
 			struct drm_gem_object *obj = &obj_priv->base;
 
 			error->active_bo[i].size = obj->size;
@@ -743,6 +743,9 @@ void i915_destroy_error_state(struct drm_device *dev)
 	if (error)
 		i915_error_state_free(dev, error);
 }
+#else
+#define i915_capture_error_state(x)
+#endif
 
 static void i915_report_and_clear_eir(struct drm_device *dev)
 {
@@ -784,7 +787,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
 		}
 	}
 
-	if (IS_I9XX(dev)) {
+	if (!IS_GEN2(dev)) {
 		if (eir & I915_ERROR_PAGE_TABLE) {
 			u32 pgtbl_err = I915_READ(PGTBL_ER);
 			printk(KERN_ERR "page table error\n");
@@ -810,7 +813,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
 		printk(KERN_ERR "instruction error\n");
 		printk(KERN_ERR "  INSTPM: 0x%08x\n",
 		       I915_READ(INSTPM));
-		if (!IS_I965G(dev)) {
+		if (INTEL_INFO(dev)->gen < 4) {
 			u32 ipeir = I915_READ(IPEIR);
 
 			printk(KERN_ERR "  IPEIR: 0x%08x\n",
@@ -875,12 +878,17 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
 	i915_report_and_clear_eir(dev);
 
 	if (wedged) {
+		INIT_COMPLETION(dev_priv->error_completion);
 		atomic_set(&dev_priv->mm.wedged, 1);
 
 		/*
 		 * Wakeup waiting processes so they don't hang
 		 */
-		DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
+		wake_up_all(&dev_priv->render_ring.irq_queue);
+		if (HAS_BSD(dev))
+			wake_up_all(&dev_priv->bsd_ring.irq_queue);
+		if (HAS_BLT(dev))
+			wake_up_all(&dev_priv->blt_ring.irq_queue);
 	}
 
 	queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -911,7 +919,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
 
 	/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
 	obj_priv = to_intel_bo(work->pending_flip_obj);
-	if(IS_I965G(dev)) {
+	if (INTEL_INFO(dev)->gen >= 4) {
 		int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
 		stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset;
 	} else {
@@ -941,7 +949,6 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 	unsigned long irqflags;
 	int irq_received;
 	int ret = IRQ_NONE;
-	struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
 
 	atomic_inc(&dev_priv->irq_received);
 
@@ -950,7 +957,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 
 	iir = I915_READ(IIR);
 
-	if (IS_I965G(dev))
+	if (INTEL_INFO(dev)->gen >= 4)
 		vblank_status = PIPE_START_VBLANK_INTERRUPT_STATUS;
 	else
 		vblank_status = PIPE_VBLANK_INTERRUPT_STATUS;
@@ -1018,18 +1025,10 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 					READ_BREADCRUMB(dev_priv);
 		}
 
-		if (iir & I915_USER_INTERRUPT) {
-			u32 seqno =
-				render_ring->get_gem_seqno(dev, render_ring);
-			render_ring->irq_gem_seqno = seqno;
-			trace_i915_gem_request_complete(dev, seqno);
-			DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
-			dev_priv->hangcheck_count = 0;
-			mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
-		}
-
+		if (iir & I915_USER_INTERRUPT)
+			notify_ring(dev, &dev_priv->render_ring);
 		if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
-			DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+			notify_ring(dev, &dev_priv->bsd_ring);
 
 		if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
 			intel_prepare_page_flip(dev, 0);
@@ -1064,7 +1063,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 		if ((pipea_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
 		    (pipeb_stats & PIPE_LEGACY_BLC_EVENT_STATUS) ||
 		    (iir & I915_ASLE_INTERRUPT))
-			opregion_asle_intr(dev);
+			intel_opregion_asle_intr(dev);
 
 		/* With MSI, interrupts are only generated when iir
 		 * transitions from zero to nonzero.  If another bit got
@@ -1206,18 +1205,15 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
-	int pipeconf_reg = (pipe == 0) ? PIPEACONF : PIPEBCONF;
-	u32 pipeconf;
 
-	pipeconf = I915_READ(pipeconf_reg);
-	if (!(pipeconf & PIPEACONF_ENABLE))
+	if (!i915_pipe_enabled(dev, pipe))
 		return -EINVAL;
 
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
 	if (HAS_PCH_SPLIT(dev))
 		ironlake_enable_display_irq(dev_priv, (pipe == 0) ? 
 					    DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
-	else if (IS_I965G(dev))
+	else if (INTEL_INFO(dev)->gen >= 4)
 		i915_enable_pipestat(dev_priv, pipe,
 				     PIPE_START_VBLANK_INTERRUPT_ENABLE);
 	else
@@ -1251,7 +1247,7 @@ void i915_enable_interrupt (struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
 	if (!HAS_PCH_SPLIT(dev))
-		opregion_enable_asle(dev);
+		intel_opregion_enable_asle(dev);
 	dev_priv->irq_enabled = 1;
 }
 
@@ -1310,7 +1306,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
 	return -EINVAL;
 }
 
-struct drm_i915_gem_request *
+static struct drm_i915_gem_request *
 i915_get_tail_request(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -1330,11 +1326,7 @@ void i915_hangcheck_elapsed(unsigned long data)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	uint32_t acthd, instdone, instdone1;
 
-	/* No reset support on this chip yet. */
-	if (IS_GEN6(dev))
-		return;
-
-	if (!IS_I965G(dev)) {
+	if (INTEL_INFO(dev)->gen < 4) {
 		acthd = I915_READ(ACTHD);
 		instdone = I915_READ(INSTDONE);
 		instdone1 = 0;
@@ -1346,9 +1338,8 @@ void i915_hangcheck_elapsed(unsigned long data)
 
 	/* If all work is done then ACTHD clearly hasn't advanced. */
 	if (list_empty(&dev_priv->render_ring.request_list) ||
-		i915_seqno_passed(i915_get_gem_seqno(dev,
-				&dev_priv->render_ring),
-			i915_get_tail_request(dev)->seqno)) {
+		i915_seqno_passed(dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring),
+				  i915_get_tail_request(dev)->seqno)) {
 		bool missed_wakeup = false;
 
 		dev_priv->hangcheck_count = 0;
@@ -1356,13 +1347,19 @@ void i915_hangcheck_elapsed(unsigned long data)
 		/* Issue a wake-up to catch stuck h/w. */
 		if (dev_priv->render_ring.waiting_gem_seqno &&
 		    waitqueue_active(&dev_priv->render_ring.irq_queue)) {
-			DRM_WAKEUP(&dev_priv->render_ring.irq_queue);
+			wake_up_all(&dev_priv->render_ring.irq_queue);
 			missed_wakeup = true;
 		}
 
 		if (dev_priv->bsd_ring.waiting_gem_seqno &&
 		    waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
-			DRM_WAKEUP(&dev_priv->bsd_ring.irq_queue);
+			wake_up_all(&dev_priv->bsd_ring.irq_queue);
+			missed_wakeup = true;
+		}
+
+		if (dev_priv->blt_ring.waiting_gem_seqno &&
+		    waitqueue_active(&dev_priv->blt_ring.irq_queue)) {
+			wake_up_all(&dev_priv->blt_ring.irq_queue);
 			missed_wakeup = true;
 		}
 
@@ -1376,6 +1373,21 @@ void i915_hangcheck_elapsed(unsigned long data)
 	    dev_priv->last_instdone1 == instdone1) {
 		if (dev_priv->hangcheck_count++ > 1) {
 			DRM_ERROR("Hangcheck timer elapsed... GPU hung\n");
+
+			if (!IS_GEN2(dev)) {
+				/* Is the chip hanging on a WAIT_FOR_EVENT?
+				 * If so we can simply poke the RB_WAIT bit
+				 * and break the hang. This should work on
+				 * all but the second generation chipsets.
+				 */
+				u32 tmp = I915_READ(PRB0_CTL);
+				if (tmp & RING_WAIT) {
+					I915_WRITE(PRB0_CTL, tmp);
+					POSTING_READ(PRB0_CTL);
+					goto out;
+				}
+			}
+
 			i915_handle_error(dev, true);
 			return;
 		}
@@ -1387,8 +1399,10 @@ void i915_hangcheck_elapsed(unsigned long data)
 		dev_priv->last_instdone1 = instdone1;
 	}
 
+out:
 	/* Reset timer case chip hangs without another request being added */
-	mod_timer(&dev_priv->hangcheck_timer, jiffies + DRM_I915_HANGCHECK_PERIOD);
+	mod_timer(&dev_priv->hangcheck_timer,
+		  jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
 }
 
 /* drm_dma.h hooks
@@ -1423,8 +1437,7 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 	u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
 			   DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
 	u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT;
-	u32 hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
-			   SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
+	u32 hotplug_mask;
 
 	dev_priv->irq_mask_reg = ~display_mask;
 	dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
@@ -1435,20 +1448,35 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
 	I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
 	(void) I915_READ(DEIER);
 
-	/* Gen6 only needs render pipe_control now */
-	if (IS_GEN6(dev))
-		render_mask = GT_PIPE_NOTIFY;
+	if (IS_GEN6(dev)) {
+		render_mask =
+			GT_PIPE_NOTIFY |
+			GT_GEN6_BSD_USER_INTERRUPT |
+			GT_BLT_USER_INTERRUPT;
+	}
 
 	dev_priv->gt_irq_mask_reg = ~render_mask;
 	dev_priv->gt_irq_enable_reg = render_mask;
 
 	I915_WRITE(GTIIR, I915_READ(GTIIR));
 	I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
-	if (IS_GEN6(dev))
+	if (IS_GEN6(dev)) {
 		I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT);
+		I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT);
+		I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT);
+	}
+
 	I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
 	(void) I915_READ(GTIER);
 
+	if (HAS_PCH_CPT(dev)) {
+		hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT  |
+			       SDE_PORTC_HOTPLUG_CPT | SDE_PORTD_HOTPLUG_CPT ;
+	} else {
+		hotplug_mask = SDE_CRT_HOTPLUG | SDE_PORTB_HOTPLUG |
+			       SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
+	}
+
 	dev_priv->pch_irq_mask_reg = ~hotplug_mask;
 	dev_priv->pch_irq_enable_reg = hotplug_mask;
 
@@ -1505,9 +1533,10 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
 	u32 error_mask;
 
 	DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue);
-
 	if (HAS_BSD(dev))
 		DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue);
+	if (HAS_BLT(dev))
+		DRM_INIT_WAITQUEUE(&dev_priv->blt_ring.irq_queue);
 
 	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
 
@@ -1577,7 +1606,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
 		I915_WRITE(PORT_HOTPLUG_EN, hotplug_en);
 	}
 
-	opregion_enable_asle(dev);
+	intel_opregion_enable_asle(dev);
 
 	return 0;
 }

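The reworked i915_get_vblank_counter() above reads the split frame counter with a high/low/high sequence and retries until both reads of the high half agree, so a wrap of the low field between reads cannot produce a torn value. A standalone sketch of that read loop, with plain variables standing in for the MMIO registers:

    #include <stdint.h>
    #include <stdio.h>

    static volatile uint32_t high_frame = 0x2a; /* stand-in for PIPE*FRAMEHIGH */
    static volatile uint32_t low_frame  = 0x7f; /* stand-in for PIPE*FRAMEPIXEL */

    static uint32_t frame_count(void)
    {
            uint32_t high1, high2, low;

            do {                      /* retry if the high half changed under us */
                    high1 = high_frame;
                    low   = low_frame;
                    high2 = high_frame;
            } while (high1 != high2);

            return (high1 << 8) | low;
    }

    int main(void)
    {
            printf("frame count: 0x%x\n", frame_count());
            return 0;
    }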
+ 211 - 124
drivers/gpu/drm/i915/i915_reg.h

@@ -25,52 +25,16 @@
 #ifndef _I915_REG_H_
 #define _I915_REG_H_
 
+#define _PIPE(pipe, a, b) ((a) + (pipe)*((b)-(a)))
+
 /*
  * The Bridge device's PCI config space has information about the
  * fb aperture size and the amount of pre-reserved memory.
+ * This is all handled in the intel-gtt.ko module. i915.ko only
+ * cares about the vga bit for the vga rbiter.
  */
 #define INTEL_GMCH_CTRL		0x52
 #define INTEL_GMCH_VGA_DISABLE  (1 << 1)
-#define INTEL_GMCH_ENABLED	0x4
-#define INTEL_GMCH_MEM_MASK	0x1
-#define INTEL_GMCH_MEM_64M	0x1
-#define INTEL_GMCH_MEM_128M	0
-
-#define INTEL_GMCH_GMS_MASK		(0xf << 4)
-#define INTEL_855_GMCH_GMS_DISABLED	(0x0 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_1M	(0x1 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_4M	(0x2 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_8M	(0x3 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_16M	(0x4 << 4)
-#define INTEL_855_GMCH_GMS_STOLEN_32M	(0x5 << 4)
-
-#define INTEL_915G_GMCH_GMS_STOLEN_48M	(0x6 << 4)
-#define INTEL_915G_GMCH_GMS_STOLEN_64M	(0x7 << 4)
-#define INTEL_GMCH_GMS_STOLEN_128M	(0x8 << 4)
-#define INTEL_GMCH_GMS_STOLEN_256M	(0x9 << 4)
-#define INTEL_GMCH_GMS_STOLEN_96M	(0xa << 4)
-#define INTEL_GMCH_GMS_STOLEN_160M	(0xb << 4)
-#define INTEL_GMCH_GMS_STOLEN_224M	(0xc << 4)
-#define INTEL_GMCH_GMS_STOLEN_352M	(0xd << 4)
-
-#define SNB_GMCH_CTRL	0x50
-#define SNB_GMCH_GMS_STOLEN_MASK	0xF8
-#define SNB_GMCH_GMS_STOLEN_32M		(1 << 3)
-#define SNB_GMCH_GMS_STOLEN_64M		(2 << 3)
-#define SNB_GMCH_GMS_STOLEN_96M		(3 << 3)
-#define SNB_GMCH_GMS_STOLEN_128M	(4 << 3)
-#define SNB_GMCH_GMS_STOLEN_160M	(5 << 3)
-#define SNB_GMCH_GMS_STOLEN_192M	(6 << 3)
-#define SNB_GMCH_GMS_STOLEN_224M	(7 << 3)
-#define SNB_GMCH_GMS_STOLEN_256M	(8 << 3)
-#define SNB_GMCH_GMS_STOLEN_288M	(9 << 3)
-#define SNB_GMCH_GMS_STOLEN_320M	(0xa << 3)
-#define SNB_GMCH_GMS_STOLEN_352M	(0xb << 3)
-#define SNB_GMCH_GMS_STOLEN_384M	(0xc << 3)
-#define SNB_GMCH_GMS_STOLEN_416M	(0xd << 3)
-#define SNB_GMCH_GMS_STOLEN_448M	(0xe << 3)
-#define SNB_GMCH_GMS_STOLEN_480M	(0xf << 3)
-#define SNB_GMCH_GMS_STOLEN_512M	(0x10 << 3)
 
 /* PCI config space */
 
@@ -106,10 +70,13 @@
 #define   I915_GC_RENDER_CLOCK_200_MHZ	(1 << 0)
 #define   I915_GC_RENDER_CLOCK_333_MHZ	(4 << 0)
 #define LBB	0xf4
-#define GDRST 0xc0
-#define  GDRST_FULL	(0<<2)
-#define  GDRST_RENDER	(1<<2)
-#define  GDRST_MEDIA	(3<<2)
+
+/* Graphics reset regs */
+#define I965_GDRST 0xc0 /* PCI config register */
+#define ILK_GDSR 0x2ca4 /* MCHBAR offset */
+#define  GRDOM_FULL	(0<<2)
+#define  GRDOM_RENDER	(1<<2)
+#define  GRDOM_MEDIA	(3<<2)
 
 /* VGA stuff */
 
@@ -192,11 +159,11 @@
 #define MI_STORE_DWORD_INDEX	MI_INSTR(0x21, 1)
 #define   MI_STORE_DWORD_INDEX_SHIFT 2
 #define MI_LOAD_REGISTER_IMM	MI_INSTR(0x22, 1)
+#define MI_FLUSH_DW		MI_INSTR(0x26, 2) /* for GEN6 */
 #define MI_BATCH_BUFFER		MI_INSTR(0x30, 1)
 #define   MI_BATCH_NON_SECURE	(1)
 #define   MI_BATCH_NON_SECURE_I965 (1<<8)
 #define MI_BATCH_BUFFER_START	MI_INSTR(0x31, 0)
-
 /*
  * 3D instructions used by the kernel
  */
@@ -249,6 +216,16 @@
 #define   PIPE_CONTROL_GLOBAL_GTT (1<<2) /* in addr dword */
 #define   PIPE_CONTROL_STALL_EN	(1<<1) /* in addr word, Ironlake+ only */
 
+
+/*
+ * Reset registers
+ */
+#define DEBUG_RESET_I830		0x6070
+#define  DEBUG_RESET_FULL		(1<<7)
+#define  DEBUG_RESET_RENDER		(1<<8)
+#define  DEBUG_RESET_DISPLAY		(1<<9)
+
+
 /*
  * Fence registers
  */
@@ -283,6 +260,17 @@
 #define PRB0_HEAD	0x02034
 #define PRB0_START	0x02038
 #define PRB0_CTL	0x0203c
+#define RENDER_RING_BASE	0x02000
+#define BSD_RING_BASE		0x04000
+#define GEN6_BSD_RING_BASE	0x12000
+#define BLT_RING_BASE		0x22000
+#define RING_TAIL(base)		((base)+0x30)
+#define RING_HEAD(base)		((base)+0x34)
+#define RING_START(base)	((base)+0x38)
+#define RING_CTL(base)		((base)+0x3c)
+#define RING_HWS_PGA(base)	((base)+0x80)
+#define RING_HWS_PGA_GEN6(base)	((base)+0x2080)
+#define RING_ACTHD(base)	((base)+0x74)
 #define   TAIL_ADDR		0x001FFFF8
 #define   HEAD_WRAP_COUNT	0xFFE00000
 #define   HEAD_WRAP_ONE		0x00200000
@@ -295,6 +283,8 @@
 #define   RING_VALID_MASK	0x00000001
 #define   RING_VALID		0x00000001
 #define   RING_INVALID		0x00000000
+#define   RING_WAIT_I8XX	(1<<0) /* gen2, PRBx_HEAD */
+#define   RING_WAIT		(1<<11) /* gen3+, PRBx_CTL */
 #define PRB1_TAIL	0x02040 /* 915+ only */
 #define PRB1_HEAD	0x02044 /* 915+ only */
 #define PRB1_START	0x02048 /* 915+ only */
@@ -306,7 +296,6 @@
 #define INSTDONE1	0x0207c /* 965+ only */
 #define ACTHD_I965	0x02074
 #define HWS_PGA		0x02080
-#define HWS_PGA_GEN6	0x04080
 #define HWS_ADDRESS_MASK	0xfffff000
 #define HWS_START_ADDRESS_SHIFT	4
 #define PWRCTXA		0x2088 /* 965GM+ only */
@@ -464,17 +453,17 @@
 #define   GEN6_BLITTER_COMMAND_PARSER_MASTER_ERROR	(1 << 25)
 #define   GEN6_BLITTER_SYNC_STATUS			(1 << 24)
 #define   GEN6_BLITTER_USER_INTERRUPT			(1 << 22)
-/*
- * BSD (bit stream decoder instruction and interrupt control register defines
- * (G4X and Ironlake only)
- */
 
-#define BSD_RING_TAIL          0x04030
-#define BSD_RING_HEAD          0x04034
-#define BSD_RING_START         0x04038
-#define BSD_RING_CTL           0x0403c
-#define BSD_RING_ACTHD         0x04074
-#define BSD_HWS_PGA            0x04080
+#define GEN6_BSD_SLEEP_PSMI_CONTROL	0x12050
+#define   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK	(1 << 16)
+#define   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE		(1 << 0)
+#define   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE		0
+#define   GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR			(1 << 3)
+
+#define GEN6_BSD_IMR			0x120a8
+#define   GEN6_BSD_IMR_USER_INTERRUPT	(1 << 12)
+
+#define GEN6_BSD_RNCID			0x12198
 
 /*
  * Framebuffer compression (915+ only)
@@ -579,12 +568,51 @@
 # define GPIO_DATA_VAL_IN		(1 << 12)
 # define GPIO_DATA_PULLUP_DISABLE	(1 << 13)
 
-#define GMBUS0			0x5100
-#define GMBUS1			0x5104
-#define GMBUS2			0x5108
-#define GMBUS3			0x510c
-#define GMBUS4			0x5110
-#define GMBUS5			0x5120
+#define GMBUS0			0x5100 /* clock/port select */
+#define   GMBUS_RATE_100KHZ	(0<<8)
+#define   GMBUS_RATE_50KHZ	(1<<8)
+#define   GMBUS_RATE_400KHZ	(2<<8) /* reserved on Pineview */
+#define   GMBUS_RATE_1MHZ	(3<<8) /* reserved on Pineview */
+#define   GMBUS_HOLD_EXT	(1<<7) /* 300ns hold time, rsvd on Pineview */
+#define   GMBUS_PORT_DISABLED	0
+#define   GMBUS_PORT_SSC	1
+#define   GMBUS_PORT_VGADDC	2
+#define   GMBUS_PORT_PANEL	3
+#define   GMBUS_PORT_DPC	4 /* HDMIC */
+#define   GMBUS_PORT_DPB	5 /* SDVO, HDMIB */
+				  /* 6 reserved */
+#define   GMBUS_PORT_DPD	7 /* HDMID */
+#define   GMBUS_NUM_PORTS       8
+#define GMBUS1			0x5104 /* command/status */
+#define   GMBUS_SW_CLR_INT	(1<<31)
+#define   GMBUS_SW_RDY		(1<<30)
+#define   GMBUS_ENT		(1<<29) /* enable timeout */
+#define   GMBUS_CYCLE_NONE	(0<<25)
+#define   GMBUS_CYCLE_WAIT	(1<<25)
+#define   GMBUS_CYCLE_INDEX	(2<<25)
+#define   GMBUS_CYCLE_STOP	(4<<25)
+#define   GMBUS_BYTE_COUNT_SHIFT 16
+#define   GMBUS_SLAVE_INDEX_SHIFT 8
+#define   GMBUS_SLAVE_ADDR_SHIFT 1
+#define   GMBUS_SLAVE_READ	(1<<0)
+#define   GMBUS_SLAVE_WRITE	(0<<0)
+#define GMBUS2			0x5108 /* status */
+#define   GMBUS_INUSE		(1<<15)
+#define   GMBUS_HW_WAIT_PHASE	(1<<14)
+#define   GMBUS_STALL_TIMEOUT	(1<<13)
+#define   GMBUS_INT		(1<<12)
+#define   GMBUS_HW_RDY		(1<<11)
+#define   GMBUS_SATOER		(1<<10)
+#define   GMBUS_ACTIVE		(1<<9)
+#define GMBUS3			0x510c /* data buffer bytes 3-0 */
+#define GMBUS4			0x5110 /* interrupt mask (Pineview+) */
+#define   GMBUS_SLAVE_TIMEOUT_EN (1<<4)
+#define   GMBUS_NAK_EN		(1<<3)
+#define   GMBUS_IDLE_EN		(1<<2)
+#define   GMBUS_HW_WAIT_EN	(1<<1)
+#define   GMBUS_HW_RDY_EN	(1<<0)
+#define GMBUS5			0x5120 /* byte index */
+#define   GMBUS_2BYTE_INDEX_EN	(1<<31)
 
 /*
  * Clock control & power management
@@ -603,6 +631,7 @@
 #define   VGA1_PD_P1_MASK	(0x1f << 8)
 #define   VGA1_PD_P1_MASK	(0x1f << 8)
 #define DPLL_A	0x06014
 #define DPLL_A	0x06014
 #define DPLL_B	0x06018
 #define DPLL_B	0x06018
+#define DPLL(pipe) _PIPE(pipe, DPLL_A, DPLL_B)
 #define   DPLL_VCO_ENABLE		(1 << 31)
 #define   DPLL_VCO_ENABLE		(1 << 31)
 #define   DPLL_DVO_HIGH_SPEED		(1 << 30)
 #define   DPLL_DVO_HIGH_SPEED		(1 << 30)
 #define   DPLL_SYNCLOCK_ENABLE		(1 << 29)
@@ -633,31 +662,6 @@
 #define LVDS			0x61180
 #define LVDS_ON			(1<<31)
 
-#define ADPA			0x61100
-#define ADPA_DPMS_MASK		(~(3<<10))
-#define ADPA_DPMS_ON		(0<<10)
-#define ADPA_DPMS_SUSPEND	(1<<10)
-#define ADPA_DPMS_STANDBY	(2<<10)
-#define ADPA_DPMS_OFF		(3<<10)
-
-#define RING_TAIL		0x00
-#define TAIL_ADDR		0x001FFFF8
-#define RING_HEAD		0x04
-#define HEAD_WRAP_COUNT		0xFFE00000
-#define HEAD_WRAP_ONE		0x00200000
-#define HEAD_ADDR		0x001FFFFC
-#define RING_START		0x08
-#define START_ADDR		0xFFFFF000
-#define RING_LEN		0x0C
-#define RING_NR_PAGES		0x001FF000
-#define RING_REPORT_MASK	0x00000006
-#define RING_REPORT_64K		0x00000002
-#define RING_REPORT_128K	0x00000004
-#define RING_NO_REPORT		0x00000000
-#define RING_VALID_MASK		0x00000001
-#define RING_VALID		0x00000001
-#define RING_INVALID		0x00000000
-
 /* Scratch pad debug 0 reg:
  */
 #define   DPLL_FPA01_P1_POST_DIV_MASK_I830	0x001f0000
@@ -736,10 +740,13 @@
 #define   DPLL_MD_VGA_UDI_MULTIPLIER_MASK	0x0000003f
 #define   DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT	0
 #define DPLL_B_MD 0x06020 /* 965+ only */
+#define DPLL_MD(pipe) _PIPE(pipe, DPLL_A_MD, DPLL_B_MD)
 #define FPA0	0x06040
 #define FPA1	0x06044
 #define FPB0	0x06048
 #define FPB1	0x0604c
+#define FP0(pipe) _PIPE(pipe, FPA0, FPB0)
+#define FP1(pipe) _PIPE(pipe, FPA1, FPB1)
 #define   FP_N_DIV_MASK		0x003f0000
 #define   FP_N_PINEVIEW_DIV_MASK	0x00ff0000
 #define   FP_N_DIV_SHIFT		16
@@ -760,6 +767,7 @@
 #define   DPLLA_TEST_M_BYPASS		(1 << 2)
 #define   DPLLA_INPUT_BUFFER_ENABLE	(1 << 0)
 #define D_STATE		0x6104
+#define  DSTATE_GFX_RESET_I830			(1<<6)
 #define  DSTATE_PLL_D3_OFF			(1<<3)
 #define  DSTATE_GFX_CLOCK_GATING		(1<<1)
 #define  DSTATE_DOT_CLOCK_GATING		(1<<0)
@@ -926,6 +934,8 @@
 #define CLKCFG_MEM_800					(3 << 4)
 #define CLKCFG_MEM_MASK					(7 << 4)
 
+#define TSC1			0x11001
+#define   TSE			(1<<0)
 #define TR1			0x11006
 #define TSFS			0x11020
 #define   TSFS_SLOPE_MASK	0x0000ff00
@@ -1070,6 +1080,8 @@
 #define   MEMSTAT_SRC_CTL_STDBY 3
 #define RCPREVBSYTUPAVG		0x113b8
 #define RCPREVBSYTDNAVG		0x113bc
+#define PMMISC			0x11214
+#define   MCPPCE_EN		(1<<0) /* enable PM_MSG from PCH->MPC */
 #define SDEW			0x1124c
 #define CSIEW0			0x11250
 #define CSIEW1			0x11254
@@ -1150,6 +1162,15 @@
 #define PIPEBSRC	0x6101c
 #define BCLRPAT_B	0x61020
 
+#define HTOTAL(pipe) _PIPE(pipe, HTOTAL_A, HTOTAL_B)
+#define HBLANK(pipe) _PIPE(pipe, HBLANK_A, HBLANK_B)
+#define HSYNC(pipe) _PIPE(pipe, HSYNC_A, HSYNC_B)
+#define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B)
+#define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B)
+#define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B)
+#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC)
+#define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B)
+
 /* VGA port control */
 #define ADPA			0x61100
 #define   ADPA_DAC_ENABLE	(1<<31)
@@ -1173,6 +1194,7 @@
 #define   ADPA_DPMS_STANDBY	(2<<10)
 #define   ADPA_DPMS_OFF		(3<<10)
 
+
 /* Hotplug control (945+ only) */
 #define PORT_HOTPLUG_EN		0x61110
 #define   HDMIB_HOTPLUG_INT_EN			(1 << 29)
@@ -1331,6 +1353,22 @@
 #define   LVDS_B0B3_POWER_DOWN		(0 << 2)
 #define   LVDS_B0B3_POWER_UP		(3 << 2)
 
+/* Video Data Island Packet control */
+#define VIDEO_DIP_DATA		0x61178
+#define VIDEO_DIP_CTL		0x61170
+#define   VIDEO_DIP_ENABLE		(1 << 31)
+#define   VIDEO_DIP_PORT_B		(1 << 29)
+#define   VIDEO_DIP_PORT_C		(2 << 29)
+#define   VIDEO_DIP_ENABLE_AVI		(1 << 21)
+#define   VIDEO_DIP_ENABLE_VENDOR	(2 << 21)
+#define   VIDEO_DIP_ENABLE_SPD		(8 << 21)
+#define   VIDEO_DIP_SELECT_AVI		(0 << 19)
+#define   VIDEO_DIP_SELECT_VENDOR	(1 << 19)
+#define   VIDEO_DIP_SELECT_SPD		(3 << 19)
+#define   VIDEO_DIP_FREQ_ONCE		(0 << 16)
+#define   VIDEO_DIP_FREQ_VSYNC		(1 << 16)
+#define   VIDEO_DIP_FREQ_2VSYNC		(2 << 16)
+
 /* Panel power sequencing */
 #define PP_STATUS	0x61200
 #define   PP_ON		(1 << 31)
@@ -1346,6 +1384,9 @@
 #define   PP_SEQUENCE_ON	(1 << 28)
 #define   PP_SEQUENCE_OFF	(2 << 28)
 #define   PP_SEQUENCE_MASK	0x30000000
+#define   PP_CYCLE_DELAY_ACTIVE	(1 << 27)
+#define   PP_SEQUENCE_STATE_ON_IDLE (1 << 3)
+#define   PP_SEQUENCE_STATE_MASK 0x0000000f
 #define PP_CONTROL	0x61204
 #define   POWER_TARGET_ON	(1 << 0)
 #define PP_ON_DELAYS	0x61208
@@ -1481,6 +1522,7 @@
 # define TV_TEST_MODE_MASK		(7 << 0)
 
 #define TV_DAC			0x68004
+# define TV_DAC_SAVE		0x00ffff00
 /**
  * Reports that DAC state change logic has reported change (RO).
  *
@@ -2075,29 +2117,35 @@
 
 /* Display & cursor control */
 
-/* dithering flag on Ironlake */
-#define PIPE_ENABLE_DITHER		(1 << 4)
-#define PIPE_DITHER_TYPE_MASK		(3 << 2)
-#define PIPE_DITHER_TYPE_SPATIAL	(0 << 2)
-#define PIPE_DITHER_TYPE_ST01		(1 << 2)
 /* Pipe A */
 #define PIPEADSL		0x70000
-#define   DSL_LINEMASK	       	0x00000fff
+#define   DSL_LINEMASK		0x00000fff
 #define PIPEACONF		0x70008
-#define   PIPEACONF_ENABLE	(1<<31)
-#define   PIPEACONF_DISABLE	0
-#define   PIPEACONF_DOUBLE_WIDE	(1<<30)
+#define   PIPECONF_ENABLE	(1<<31)
+#define   PIPECONF_DISABLE	0
+#define   PIPECONF_DOUBLE_WIDE	(1<<30)
 #define   I965_PIPECONF_ACTIVE	(1<<30)
-#define   PIPEACONF_SINGLE_WIDE	0
-#define   PIPEACONF_PIPE_UNLOCKED 0
-#define   PIPEACONF_PIPE_LOCKED	(1<<25)
-#define   PIPEACONF_PALETTE	0
-#define   PIPEACONF_GAMMA		(1<<24)
+#define   PIPECONF_SINGLE_WIDE	0
+#define   PIPECONF_PIPE_UNLOCKED 0
+#define   PIPECONF_PIPE_LOCKED	(1<<25)
+#define   PIPECONF_PALETTE	0
+#define   PIPECONF_GAMMA		(1<<24)
 #define   PIPECONF_FORCE_BORDER	(1<<25)
 #define   PIPECONF_PROGRESSIVE	(0 << 21)
 #define   PIPECONF_INTERLACE_W_FIELD_INDICATION	(6 << 21)
 #define   PIPECONF_INTERLACE_FIELD_0_ONLY		(7 << 21)
 #define   PIPECONF_CXSR_DOWNCLOCK	(1<<16)
+#define   PIPECONF_BPP_MASK	(0x000000e0)
+#define   PIPECONF_BPP_8	(0<<5)
+#define   PIPECONF_BPP_10	(1<<5)
+#define   PIPECONF_BPP_6	(2<<5)
+#define   PIPECONF_BPP_12	(3<<5)
+#define   PIPECONF_DITHER_EN	(1<<4)
+#define   PIPECONF_DITHER_TYPE_MASK (0x0000000c)
+#define   PIPECONF_DITHER_TYPE_SP (0<<2)
+#define   PIPECONF_DITHER_TYPE_ST1 (1<<2)
+#define   PIPECONF_DITHER_TYPE_ST2 (2<<2)
+#define   PIPECONF_DITHER_TYPE_TEMP (3<<2)
 #define PIPEASTAT		0x70024
 #define   PIPE_FIFO_UNDERRUN_STATUS		(1UL<<31)
 #define   PIPE_CRC_ERROR_ENABLE			(1UL<<29)
@@ -2128,12 +2176,15 @@
 #define   PIPE_START_VBLANK_INTERRUPT_STATUS	(1UL<<2) /* 965 or later */
 #define   PIPE_VBLANK_INTERRUPT_STATUS		(1UL<<1)
 #define   PIPE_OVERLAY_UPDATED_STATUS		(1UL<<0)
-#define   PIPE_BPC_MASK 			(7 << 5) /* Ironlake */
+#define   PIPE_BPC_MASK				(7 << 5) /* Ironlake */
 #define   PIPE_8BPC				(0 << 5)
 #define   PIPE_10BPC				(1 << 5)
 #define   PIPE_6BPC				(2 << 5)
 #define   PIPE_12BPC				(3 << 5)
 
+#define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF)
+#define PIPEDSL(pipe)  _PIPE(pipe, PIPEADSL, PIPEBDSL)
+
 #define DSPARB			0x70030
 #define   DSPARB_CSTART_MASK	(0x7f << 7)
 #define   DSPARB_CSTART_SHIFT	7
@@ -2206,8 +2257,8 @@
 #define  WM1_LP_SR_EN		(1<<31)
 #define  WM1_LP_LATENCY_SHIFT	24
 #define  WM1_LP_LATENCY_MASK	(0x7f<<24)
-#define  WM1_LP_FBC_LP1_MASK	(0xf<<20)
-#define  WM1_LP_FBC_LP1_SHIFT	20
+#define  WM1_LP_FBC_MASK	(0xf<<20)
+#define  WM1_LP_FBC_SHIFT	20
 #define  WM1_LP_SR_MASK		(0x1ff<<8)
 #define  WM1_LP_SR_SHIFT	8
 #define  WM1_LP_CURSOR_MASK	(0x3f)
@@ -2333,6 +2384,14 @@
 #define DSPASURF		0x7019C /* 965+ only */
 #define DSPATILEOFF		0x701A4 /* 965+ only */
 
+#define DSPCNTR(plane) _PIPE(plane, DSPACNTR, DSPBCNTR)
+#define DSPADDR(plane) _PIPE(plane, DSPAADDR, DSPBADDR)
+#define DSPSTRIDE(plane) _PIPE(plane, DSPASTRIDE, DSPBSTRIDE)
+#define DSPPOS(plane) _PIPE(plane, DSPAPOS, DSPBPOS)
+#define DSPSIZE(plane) _PIPE(plane, DSPASIZE, DSPBSIZE)
+#define DSPSURF(plane) _PIPE(plane, DSPASURF, DSPBSURF)
+#define DSPTILEOFF(plane) _PIPE(plane, DSPATILEOFF, DSPBTILEOFF)
+
 /* VBIOS flags */
 #define SWF00			0x71410
 #define SWF01			0x71414
@@ -2397,6 +2456,7 @@
 #define  RR_HW_HIGH_POWER_FRAMES_MASK   0xff00
 
 #define FDI_PLL_BIOS_0  0x46000
+#define  FDI_PLL_FB_CLOCK_MASK  0xff
 #define FDI_PLL_BIOS_1  0x46004
 #define FDI_PLL_BIOS_2  0x46008
 #define DISPLAY_PORT_PLL_BIOS_0         0x4600c
@@ -2420,46 +2480,47 @@
 #define PIPEA_DATA_M1           0x60030
 #define  TU_SIZE(x)             (((x)-1) << 25) /* default size 64 */
 #define  TU_SIZE_MASK           0x7e000000
-#define  PIPEA_DATA_M1_OFFSET   0
+#define  PIPE_DATA_M1_OFFSET    0
 #define PIPEA_DATA_N1           0x60034
-#define  PIPEA_DATA_N1_OFFSET   0
+#define  PIPE_DATA_N1_OFFSET    0
 
 #define PIPEA_DATA_M2           0x60038
-#define  PIPEA_DATA_M2_OFFSET   0
+#define  PIPE_DATA_M2_OFFSET    0
 #define PIPEA_DATA_N2           0x6003c
-#define  PIPEA_DATA_N2_OFFSET   0
+#define  PIPE_DATA_N2_OFFSET    0
 
 #define PIPEA_LINK_M1           0x60040
-#define  PIPEA_LINK_M1_OFFSET   0
+#define  PIPE_LINK_M1_OFFSET    0
 #define PIPEA_LINK_N1           0x60044
-#define  PIPEA_LINK_N1_OFFSET   0
+#define  PIPE_LINK_N1_OFFSET    0
 
 #define PIPEA_LINK_M2           0x60048
-#define  PIPEA_LINK_M2_OFFSET   0
+#define  PIPE_LINK_M2_OFFSET    0
 #define PIPEA_LINK_N2           0x6004c
-#define  PIPEA_LINK_N2_OFFSET   0
+#define  PIPE_LINK_N2_OFFSET    0
 
 /* PIPEB timing regs are same start from 0x61000 */
 
 #define PIPEB_DATA_M1           0x61030
-#define  PIPEB_DATA_M1_OFFSET   0
 #define PIPEB_DATA_N1           0x61034
-#define  PIPEB_DATA_N1_OFFSET   0
 
 #define PIPEB_DATA_M2           0x61038
-#define  PIPEB_DATA_M2_OFFSET   0
 #define PIPEB_DATA_N2           0x6103c
-#define  PIPEB_DATA_N2_OFFSET   0
 
 #define PIPEB_LINK_M1           0x61040
-#define  PIPEB_LINK_M1_OFFSET   0
 #define PIPEB_LINK_N1           0x61044
-#define  PIPEB_LINK_N1_OFFSET   0
 
 #define PIPEB_LINK_M2           0x61048
-#define  PIPEB_LINK_M2_OFFSET   0
 #define PIPEB_LINK_N2           0x6104c
-#define  PIPEB_LINK_N2_OFFSET   0
+
+#define PIPE_DATA_M1(pipe) _PIPE(pipe, PIPEA_DATA_M1, PIPEB_DATA_M1)
+#define PIPE_DATA_N1(pipe) _PIPE(pipe, PIPEA_DATA_N1, PIPEB_DATA_N1)
+#define PIPE_DATA_M2(pipe) _PIPE(pipe, PIPEA_DATA_M2, PIPEB_DATA_M2)
+#define PIPE_DATA_N2(pipe) _PIPE(pipe, PIPEA_DATA_N2, PIPEB_DATA_N2)
+#define PIPE_LINK_M1(pipe) _PIPE(pipe, PIPEA_LINK_M1, PIPEB_LINK_M1)
+#define PIPE_LINK_N1(pipe) _PIPE(pipe, PIPEA_LINK_N1, PIPEB_LINK_N1)
+#define PIPE_LINK_M2(pipe) _PIPE(pipe, PIPEA_LINK_M2, PIPEB_LINK_M2)
+#define PIPE_LINK_N2(pipe) _PIPE(pipe, PIPEA_LINK_N2, PIPEB_LINK_N2)
 
 /* CPU panel fitter */
 #define PFA_CTL_1               0x68080
@@ -2516,7 +2577,8 @@
 #define GT_SYNC_STATUS          (1 << 2)
 #define GT_USER_INTERRUPT       (1 << 0)
 #define GT_BSD_USER_INTERRUPT   (1 << 5)
-
+#define GT_GEN6_BSD_USER_INTERRUPT	(1 << 12)
+#define GT_BLT_USER_INTERRUPT	(1 << 22)
 
 #define GTISR   0x44010
 #define GTIMR   0x44014
@@ -2551,6 +2613,10 @@
 #define SDE_PORTD_HOTPLUG_CPT	(1 << 23)
 #define SDE_PORTC_HOTPLUG_CPT	(1 << 22)
 #define SDE_PORTB_HOTPLUG_CPT	(1 << 21)
+#define SDE_HOTPLUG_MASK_CPT	(SDE_CRT_HOTPLUG_CPT |		\
+				 SDE_PORTD_HOTPLUG_CPT |	\
+				 SDE_PORTC_HOTPLUG_CPT |	\
+				 SDE_PORTB_HOTPLUG_CPT)
 
 #define SDEISR  0xc4000
 #define SDEIMR  0xc4004
@@ -2600,11 +2666,14 @@
 
 #define PCH_DPLL_A              0xc6014
 #define PCH_DPLL_B              0xc6018
+#define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B)
 
 #define PCH_FPA0                0xc6040
 #define PCH_FPA1                0xc6044
 #define PCH_FPB0                0xc6048
 #define PCH_FPB1                0xc604c
+#define PCH_FP0(pipe) _PIPE(pipe, PCH_FPA0, PCH_FPB0)
+#define PCH_FP1(pipe) _PIPE(pipe, PCH_FPA1, PCH_FPB1)
 
 #define PCH_DPLL_TEST           0xc606c
 
@@ -2690,6 +2759,13 @@
 #define TRANS_VBLANK_B          0xe1010
 #define TRANS_VSYNC_B           0xe1014
 
+#define TRANS_HTOTAL(pipe) _PIPE(pipe, TRANS_HTOTAL_A, TRANS_HTOTAL_B)
+#define TRANS_HBLANK(pipe) _PIPE(pipe, TRANS_HBLANK_A, TRANS_HBLANK_B)
+#define TRANS_HSYNC(pipe) _PIPE(pipe, TRANS_HSYNC_A, TRANS_HSYNC_B)
+#define TRANS_VTOTAL(pipe) _PIPE(pipe, TRANS_VTOTAL_A, TRANS_VTOTAL_B)
+#define TRANS_VBLANK(pipe) _PIPE(pipe, TRANS_VBLANK_A, TRANS_VBLANK_B)
+#define TRANS_VSYNC(pipe) _PIPE(pipe, TRANS_VSYNC_A, TRANS_VSYNC_B)
+
 #define TRANSB_DATA_M1          0xe1030
 #define TRANSB_DATA_N1          0xe1034
 #define TRANSB_DATA_M2          0xe1038
@@ -2701,6 +2777,7 @@
 
 #define TRANSACONF              0xf0008
 #define TRANSBCONF              0xf1008
+#define TRANSCONF(plane) _PIPE(plane, TRANSACONF, TRANSBCONF)
 #define  TRANS_DISABLE          (0<<31)
 #define  TRANS_ENABLE           (1<<31)
 #define  TRANS_STATE_MASK       (1<<30)
@@ -2721,10 +2798,15 @@
 #define FDI_RXA_CHICKEN         0xc200c
 #define FDI_RXB_CHICKEN         0xc2010
 #define  FDI_RX_PHASE_SYNC_POINTER_ENABLE       (1)
+#define FDI_RX_CHICKEN(pipe) _PIPE(pipe, FDI_RXA_CHICKEN, FDI_RXB_CHICKEN)
+
+#define SOUTH_DSPCLK_GATE_D	0xc2020
+#define  PCH_DPLSUNIT_CLOCK_GATE_DISABLE (1<<29)
 
 /* CPU: FDI_TX */
 #define FDI_TXA_CTL             0x60100
 #define FDI_TXB_CTL             0x61100
+#define FDI_TX_CTL(pipe) _PIPE(pipe, FDI_TXA_CTL, FDI_TXB_CTL)
 #define  FDI_TX_DISABLE         (0<<31)
 #define  FDI_TX_ENABLE          (1<<31)
 #define  FDI_LINK_TRAIN_PATTERN_1       (0<<28)
@@ -2766,8 +2848,8 @@
 /* FDI_RX, FDI_X is hard-wired to Transcoder_X */
 #define FDI_RXA_CTL             0xf000c
 #define FDI_RXB_CTL             0xf100c
+#define FDI_RX_CTL(pipe) _PIPE(pipe, FDI_RXA_CTL, FDI_RXB_CTL)
 #define  FDI_RX_ENABLE          (1<<31)
-#define  FDI_RX_DISABLE         (0<<31)
 /* train, dp width same as FDI_TX */
 #define  FDI_DP_PORT_WIDTH_X8           (7<<19)
 #define  FDI_8BPC                       (0<<16)
@@ -2782,8 +2864,7 @@
 #define  FDI_FS_ERR_REPORT_ENABLE       (1<<9)
 #define  FDI_FE_ERR_REPORT_ENABLE       (1<<8)
 #define  FDI_RX_ENHANCE_FRAME_ENABLE    (1<<6)
-#define  FDI_SEL_RAWCLK                 (0<<4)
-#define  FDI_SEL_PCDCLK                 (1<<4)
+#define  FDI_PCDCLK	                (1<<4)
 /* CPT */
 #define  FDI_AUTO_TRAINING			(1<<10)
 #define  FDI_LINK_TRAIN_PATTERN_1_CPT		(0<<8)
@@ -2798,6 +2879,9 @@
 #define FDI_RXA_TUSIZE2         0xf0038
 #define FDI_RXB_TUSIZE1         0xf1030
 #define FDI_RXB_TUSIZE2         0xf1038
+#define FDI_RX_MISC(pipe) _PIPE(pipe, FDI_RXA_MISC, FDI_RXB_MISC)
+#define FDI_RX_TUSIZE1(pipe) _PIPE(pipe, FDI_RXA_TUSIZE1, FDI_RXB_TUSIZE1)
+#define FDI_RX_TUSIZE2(pipe) _PIPE(pipe, FDI_RXA_TUSIZE2, FDI_RXB_TUSIZE2)
 
 /* FDI_RX interrupt register format */
 #define FDI_RX_INTER_LANE_ALIGN         (1<<10)
@@ -2816,6 +2900,8 @@
 #define FDI_RXA_IMR             0xf0018
 #define FDI_RXB_IIR             0xf1014
 #define FDI_RXB_IMR             0xf1018
+#define FDI_RX_IIR(pipe) _PIPE(pipe, FDI_RXA_IIR, FDI_RXB_IIR)
+#define FDI_RX_IMR(pipe) _PIPE(pipe, FDI_RXA_IMR, FDI_RXB_IMR)
 
 #define FDI_PLL_CTL_1           0xfe000
 #define FDI_PLL_CTL_2           0xfe004
@@ -2935,6 +3021,7 @@
 #define TRANS_DP_CTL_A		0xe0300
 #define TRANS_DP_CTL_B		0xe1300
 #define TRANS_DP_CTL_C		0xe2300
+#define TRANS_DP_CTL(pipe)	(TRANS_DP_CTL_A + (pipe) * 0x01000)
 #define  TRANS_DP_OUTPUT_ENABLE	(1<<31)
 #define  TRANS_DP_PORT_SEL_B	(0<<29)
 #define  TRANS_DP_PORT_SEL_C	(1<<29)

+ 13 - 15
drivers/gpu/drm/i915/i915_suspend.c

@@ -256,7 +256,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
 		dev_priv->saveFPA1 = I915_READ(FPA1);
 		dev_priv->saveDPLL_A = I915_READ(DPLL_A);
 	}
-	if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
+	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
 		dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
 	dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
 	dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
@@ -294,7 +294,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
 	dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
 	dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
 	dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
-	if (IS_I965G(dev)) {
+	if (INTEL_INFO(dev)->gen >= 4) {
 		dev_priv->saveDSPASURF = I915_READ(DSPASURF);
 		dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
 	}
@@ -313,7 +313,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
 		dev_priv->saveFPB1 = I915_READ(FPB1);
 		dev_priv->saveDPLL_B = I915_READ(DPLL_B);
 	}
-	if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
+	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
 		dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
 	dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
 	dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
@@ -351,7 +351,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
 	dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
 	dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
 	dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
-	if (IS_I965GM(dev) || IS_GM45(dev)) {
+	if (INTEL_INFO(dev)->gen >= 4) {
 		dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
 		dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
 	}
@@ -404,7 +404,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
 	I915_WRITE(dpll_a_reg, dev_priv->saveDPLL_A);
 	POSTING_READ(dpll_a_reg);
 	udelay(150);
-	if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
+	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
 		I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
 		POSTING_READ(DPLL_A_MD);
 	}
@@ -448,7 +448,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
 	I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
 	I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
 	I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
-	if (IS_I965G(dev)) {
+	if (INTEL_INFO(dev)->gen >= 4) {
 		I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
 		I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
 	}
@@ -473,7 +473,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
 	I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
 	POSTING_READ(dpll_b_reg);
 	udelay(150);
-	if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
+	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
 		I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
 		POSTING_READ(DPLL_B_MD);
 	}
@@ -517,7 +517,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
 	I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
 	I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
 	I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
-	if (IS_I965G(dev)) {
+	if (INTEL_INFO(dev)->gen >= 4) {
 		I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
 		I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
 	}
@@ -550,7 +550,7 @@ void i915_save_display(struct drm_device *dev)
 	dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
 	dev_priv->saveCURBPOS = I915_READ(CURBPOS);
 	dev_priv->saveCURBBASE = I915_READ(CURBBASE);
-	if (!IS_I9XX(dev))
+	if (IS_GEN2(dev))
 		dev_priv->saveCURSIZE = I915_READ(CURSIZE);
 
 	/* CRT state */
@@ -573,7 +573,7 @@ void i915_save_display(struct drm_device *dev)
 		dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
 		dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
 		dev_priv->saveBLC_HIST_CTL = I915_READ(BLC_HIST_CTL);
-		if (IS_I965G(dev))
+		if (INTEL_INFO(dev)->gen >= 4)
 			dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
 		if (IS_MOBILE(dev) && !IS_I830(dev))
 			dev_priv->saveLVDS = I915_READ(LVDS);
@@ -664,7 +664,7 @@ void i915_restore_display(struct drm_device *dev)
 	I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
 	I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
 	I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
-	if (!IS_I9XX(dev))
+	if (IS_GEN2(dev))
 		I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
 
 	/* CRT state */
@@ -674,7 +674,7 @@ void i915_restore_display(struct drm_device *dev)
 		I915_WRITE(ADPA, dev_priv->saveADPA);
 
 	/* LVDS state */
-	if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev))
+	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev))
 		I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
 
 	if (HAS_PCH_SPLIT(dev)) {
@@ -878,9 +878,7 @@ int i915_restore_state(struct drm_device *dev)
 	for (i = 0; i < 3; i++)
 		I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
 
-	/* I2C state */
-	intel_i2c_reset_gmbus(dev);
+	intel_i2c_reset(dev);
 
 	return 0;
 }
-

+ 286 - 0
drivers/gpu/drm/i915/intel_acpi.c

@@ -0,0 +1,286 @@
+/*
+ * Intel ACPI functions
+ *
+ * _DSM related code stolen from nouveau_acpi.c.
+ */
+#include <linux/pci.h>
+#include <linux/acpi.h>
+#include <linux/vga_switcheroo.h>
+#include <acpi/acpi_drivers.h>
+
+#include "drmP.h"
+
+#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
+
+#define INTEL_DSM_FN_SUPPORTED_FUNCTIONS 0 /* No args */
+#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */
+
+static struct intel_dsm_priv {
+	acpi_handle dhandle;
+} intel_dsm_priv;
+
+static const u8 intel_dsm_guid[] = {
+	0xd3, 0x73, 0xd8, 0x7e,
+	0xd0, 0xc2,
+	0x4f, 0x4e,
+	0xa8, 0x54,
+	0x0f, 0x13, 0x17, 0xb0, 0x1c, 0x2c
+};
+
+static int intel_dsm(acpi_handle handle, int func, int arg)
+{
+	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+	struct acpi_object_list input;
+	union acpi_object params[4];
+	union acpi_object *obj;
+	u32 result;
+	int ret = 0;
+
+	input.count = 4;
+	input.pointer = params;
+	params[0].type = ACPI_TYPE_BUFFER;
+	params[0].buffer.length = sizeof(intel_dsm_guid);
+	params[0].buffer.pointer = (char *)intel_dsm_guid;
+	params[1].type = ACPI_TYPE_INTEGER;
+	params[1].integer.value = INTEL_DSM_REVISION_ID;
+	params[2].type = ACPI_TYPE_INTEGER;
+	params[2].integer.value = func;
+	params[3].type = ACPI_TYPE_INTEGER;
+	params[3].integer.value = arg;
+
+	ret = acpi_evaluate_object(handle, "_DSM", &input, &output);
+	if (ret) {
+		DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret);
+		return ret;
+	}
+
+	obj = (union acpi_object *)output.pointer;
+
+	result = 0;
+	switch (obj->type) {
+	case ACPI_TYPE_INTEGER:
+		result = obj->integer.value;
+		break;
+
+	case ACPI_TYPE_BUFFER:
+		if (obj->buffer.length == 4) {
+			result =(obj->buffer.pointer[0] |
+				(obj->buffer.pointer[1] <<  8) |
+				(obj->buffer.pointer[2] << 16) |
+				(obj->buffer.pointer[3] << 24));
+			break;
+		}
+	default:
+		ret = -EINVAL;
+		break;
+	}
+	if (result == 0x80000002)
+		ret = -ENODEV;
+
+	kfree(output.pointer);
+	return ret;
+}
+
+static char *intel_dsm_port_name(u8 id)
+{
+	switch (id) {
+	case 0:
+		return "Reserved";
+	case 1:
+		return "Analog VGA";
+	case 2:
+		return "LVDS";
+	case 3:
+		return "Reserved";
+	case 4:
+		return "HDMI/DVI_B";
+	case 5:
+		return "HDMI/DVI_C";
+	case 6:
+		return "HDMI/DVI_D";
+	case 7:
+		return "DisplayPort_A";
+	case 8:
+		return "DisplayPort_B";
+	case 9:
+		return "DisplayPort_C";
+	case 0xa:
+		return "DisplayPort_D";
+	case 0xb:
+	case 0xc:
+	case 0xd:
+		return "Reserved";
+	case 0xe:
+		return "WiDi";
+	default:
+		return "bad type";
+	}
+}
+
+static char *intel_dsm_mux_type(u8 type)
+{
+	switch (type) {
+	case 0:
+		return "unknown";
+	case 1:
+		return "No MUX, iGPU only";
+	case 2:
+		return "No MUX, dGPU only";
+	case 3:
+		return "MUXed between iGPU and dGPU";
+	default:
+		return "bad type";
+	}
+}
+
+static void intel_dsm_platform_mux_info(void)
+{
+	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+	struct acpi_object_list input;
+	union acpi_object params[4];
+	union acpi_object *pkg;
+	int i, ret;
+
+	input.count = 4;
+	input.pointer = params;
+	params[0].type = ACPI_TYPE_BUFFER;
+	params[0].buffer.length = sizeof(intel_dsm_guid);
+	params[0].buffer.pointer = (char *)intel_dsm_guid;
+	params[1].type = ACPI_TYPE_INTEGER;
+	params[1].integer.value = INTEL_DSM_REVISION_ID;
+	params[2].type = ACPI_TYPE_INTEGER;
+	params[2].integer.value = INTEL_DSM_FN_PLATFORM_MUX_INFO;
+	params[3].type = ACPI_TYPE_INTEGER;
+	params[3].integer.value = 0;
+
+	ret = acpi_evaluate_object(intel_dsm_priv.dhandle, "_DSM", &input,
+				   &output);
+	if (ret) {
+		DRM_DEBUG_DRIVER("failed to evaluate _DSM: %d\n", ret);
+		goto out;
+	}
+
+	pkg = (union acpi_object *)output.pointer;
+
+	if (pkg->type == ACPI_TYPE_PACKAGE) {
+		union acpi_object *connector_count = &pkg->package.elements[0];
+		DRM_DEBUG_DRIVER("MUX info connectors: %lld\n",
+			  (unsigned long long)connector_count->integer.value);
+		for (i = 1; i < pkg->package.count; i++) {
+			union acpi_object *obj = &pkg->package.elements[i];
+			union acpi_object *connector_id =
+				&obj->package.elements[0];
+			union acpi_object *info = &obj->package.elements[1];
+			DRM_DEBUG_DRIVER("Connector id: 0x%016llx\n",
+				  (unsigned long long)connector_id->integer.value);
+			DRM_DEBUG_DRIVER("  port id: %s\n",
+			       intel_dsm_port_name(info->buffer.pointer[0]));
+			DRM_DEBUG_DRIVER("  display mux info: %s\n",
+			       intel_dsm_mux_type(info->buffer.pointer[1]));
+			DRM_DEBUG_DRIVER("  aux/dc mux info: %s\n",
+			       intel_dsm_mux_type(info->buffer.pointer[2]));
+			DRM_DEBUG_DRIVER("  hpd mux info: %s\n",
+			       intel_dsm_mux_type(info->buffer.pointer[3]));
+		}
+	} else {
+		DRM_ERROR("MUX INFO call failed\n");
+	}
+
+out:
+	kfree(output.pointer);
+}
+
+static int intel_dsm_switchto(enum vga_switcheroo_client_id id)
+{
+	return 0;
+}
+
+static int intel_dsm_power_state(enum vga_switcheroo_client_id id,
+				 enum vga_switcheroo_state state)
+{
+	return 0;
+}
+
+static int intel_dsm_init(void)
+{
+	return 0;
+}
+
+static int intel_dsm_get_client_id(struct pci_dev *pdev)
+{
+	if (intel_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
+		return VGA_SWITCHEROO_IGD;
+	else
+		return VGA_SWITCHEROO_DIS;
+}
+
+static struct vga_switcheroo_handler intel_dsm_handler = {
+	.switchto = intel_dsm_switchto,
+	.power_state = intel_dsm_power_state,
+	.init = intel_dsm_init,
+	.get_client_id = intel_dsm_get_client_id,
+};
+
+static bool intel_dsm_pci_probe(struct pci_dev *pdev)
+{
+	acpi_handle dhandle, intel_handle;
+	acpi_status status;
+	int ret;
+
+	dhandle = DEVICE_ACPI_HANDLE(&pdev->dev);
+	if (!dhandle)
+		return false;
+
+	status = acpi_get_handle(dhandle, "_DSM", &intel_handle);
+	if (ACPI_FAILURE(status)) {
+		DRM_DEBUG_KMS("no _DSM method for intel device\n");
+		return false;
+	}
+
+	ret = intel_dsm(dhandle, INTEL_DSM_FN_SUPPORTED_FUNCTIONS, 0);
+	if (ret < 0) {
+		DRM_ERROR("failed to get supported _DSM functions\n");
+		return false;
+	}
+
+	intel_dsm_priv.dhandle = dhandle;
+
+	intel_dsm_platform_mux_info();
+	return true;
+}
+
+static bool intel_dsm_detect(void)
+{
+	char acpi_method_name[255] = { 0 };
+	struct acpi_buffer buffer = {sizeof(acpi_method_name), acpi_method_name};
+	struct pci_dev *pdev = NULL;
+	bool has_dsm = false;
+	int vga_count = 0;
+
+	while ((pdev = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, pdev)) != NULL) {
+		vga_count++;
+		has_dsm |= intel_dsm_pci_probe(pdev);
+	}
+
+	if (vga_count == 2 && has_dsm) {
+		acpi_get_name(intel_dsm_priv.dhandle, ACPI_FULL_PATHNAME, &buffer);
+		DRM_DEBUG_DRIVER("VGA switcheroo: detected DSM switching method %s handle\n",
+				 acpi_method_name);
+		return true;
+	}
+
+	return false;
+}
+
+void intel_register_dsm_handler(void)
+{
+	if (!intel_dsm_detect())
+		return;
+
+	vga_switcheroo_register_handler(&intel_dsm_handler);
+}
+
+void intel_unregister_dsm_handler(void)
+{
+	vga_switcheroo_unregister_handler();
+}

+ 163 - 71
drivers/gpu/drm/i915/intel_bios.c

@@ -24,6 +24,7 @@
  *    Eric Anholt <eric@anholt.net>
  *
  */
+#include <drm/drm_dp_helper.h>
 #include "drmP.h"
 #include "drm.h"
 #include "i915_drm.h"
@@ -129,10 +130,6 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
 	int i, temp_downclock;
 	struct drm_display_mode *temp_mode;
 
-	/* Defaults if we can't find VBT info */
-	dev_priv->lvds_dither = 0;
-	dev_priv->lvds_vbt = 0;
-
 	lvds_options = find_section(bdb, BDB_LVDS_OPTIONS);
 	if (!lvds_options)
 		return;
@@ -140,6 +137,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
 	dev_priv->lvds_dither = lvds_options->pixel_dither;
 	if (lvds_options->panel_type == 0xff)
 		return;
+
 	panel_type = lvds_options->panel_type;
 
 	lvds_lfp_data = find_section(bdb, BDB_LVDS_LFP_DATA);
@@ -169,6 +167,8 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv,
 			((unsigned char *)entry + dvo_timing_offset);
 
 	panel_fixed_mode = kzalloc(sizeof(*panel_fixed_mode), GFP_KERNEL);
+	if (!panel_fixed_mode)
+		return;
 
 	fill_detail_timing_data(panel_fixed_mode, dvo_timing);
 
@@ -230,8 +230,6 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
 	struct lvds_dvo_timing *dvo_timing;
 	struct drm_display_mode *panel_fixed_mode;
 
-	dev_priv->sdvo_lvds_vbt_mode = NULL;
-
 	sdvo_lvds_options = find_section(bdb, BDB_SDVO_LVDS_OPTIONS);
 	if (!sdvo_lvds_options)
 		return;
@@ -260,10 +258,6 @@ parse_general_features(struct drm_i915_private *dev_priv,
 	struct drm_device *dev = dev_priv->dev;
 	struct bdb_general_features *general;
 
-	/* Set sensible defaults in case we can't find the general block */
-	dev_priv->int_tv_support = 1;
-	dev_priv->int_crt_support = 1;
-
 	general = find_section(bdb, BDB_GENERAL_FEATURES);
 	if (general) {
 		dev_priv->int_tv_support = general->int_tv_support;
@@ -271,10 +265,10 @@ parse_general_features(struct drm_i915_private *dev_priv,
 		dev_priv->lvds_use_ssc = general->enable_ssc;
 
 		if (dev_priv->lvds_use_ssc) {
-			if (IS_I85X(dev_priv->dev))
+			if (IS_I85X(dev))
 				dev_priv->lvds_ssc_freq =
 					general->ssc_freq ? 66 : 48;
-			else if (IS_IRONLAKE(dev_priv->dev) || IS_GEN6(dev))
+			else if (IS_GEN5(dev) || IS_GEN6(dev))
 				dev_priv->lvds_ssc_freq =
 					general->ssc_freq ? 100 : 120;
 			else
@@ -289,14 +283,6 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
 			  struct bdb_header *bdb)
 {
 	struct bdb_general_definitions *general;
-	const int crt_bus_map_table[] = {
-		GPIOB,
-		GPIOA,
-		GPIOC,
-		GPIOD,
-		GPIOE,
-		GPIOF,
-	};
 
 	general = find_section(bdb, BDB_GENERAL_DEFINITIONS);
 	if (general) {
@@ -304,10 +290,8 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
 		if (block_size >= sizeof(*general)) {
 			int bus_pin = general->crt_ddc_gmbus_pin;
 			DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
-			if ((bus_pin >= 1) && (bus_pin <= 6)) {
-				dev_priv->crt_ddc_bus =
-					crt_bus_map_table[bus_pin-1];
-			}
+			if (bus_pin >= 1 && bus_pin <= 6)
+				dev_priv->crt_ddc_pin = bus_pin;
 		} else {
 			DRM_DEBUG_KMS("BDB_GD too small (%d). Invalid.\n",
 				  block_size);
@@ -317,7 +301,7 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
 
 static void
 parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
-		       struct bdb_header *bdb)
+			  struct bdb_header *bdb)
 {
 	struct sdvo_device_mapping *p_mapping;
 	struct bdb_general_definitions *p_defs;
@@ -327,7 +311,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
 
 	p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
 	if (!p_defs) {
-		DRM_DEBUG_KMS("No general definition block is found\n");
+		DRM_DEBUG_KMS("No general definition block is found, unable to construct sdvo mapping.\n");
 		return;
 	}
 	/* judge whether the size of child device meets the requirements.
@@ -377,7 +361,16 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv,
 			p_mapping->slave_addr = p_child->slave_addr;
 			p_mapping->dvo_wiring = p_child->dvo_wiring;
 			p_mapping->ddc_pin = p_child->ddc_pin;
+			p_mapping->i2c_pin = p_child->i2c_pin;
+			p_mapping->i2c_speed = p_child->i2c_speed;
 			p_mapping->initialized = 1;
 			p_mapping->initialized = 1;
+			DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d, i2c_speed=%d\n",
+				      p_mapping->dvo_port,
+				      p_mapping->slave_addr,
+				      p_mapping->dvo_wiring,
+				      p_mapping->ddc_pin,
+				      p_mapping->i2c_pin,
+				      p_mapping->i2c_speed);
 		} else {
 			DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
 					 "two SDVO device.\n");
@@ -409,14 +402,11 @@ parse_driver_features(struct drm_i915_private *dev_priv,
 	if (!driver)
 		return;
 
-	if (driver && SUPPORTS_EDP(dev) &&
-	    driver->lvds_config == BDB_DRIVER_FEATURE_EDP) {
-		dev_priv->edp_support = 1;
-	} else {
-		dev_priv->edp_support = 0;
-	}
+	if (SUPPORTS_EDP(dev) &&
+	    driver->lvds_config == BDB_DRIVER_FEATURE_EDP)
+		dev_priv->edp.support = 1;
 
-	if (driver && driver->dual_frequency)
+	if (driver->dual_frequency)
 		dev_priv->render_reclock_avail = true;
 }
 
@@ -424,27 +414,78 @@ static void
 parse_edp(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
 {
 	struct bdb_edp *edp;
+	struct edp_power_seq *edp_pps;
+	struct edp_link_params *edp_link_params;
 
 	edp = find_section(bdb, BDB_EDP);
 	if (!edp) {
-		if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp_support) {
+		if (SUPPORTS_EDP(dev_priv->dev) && dev_priv->edp.support) {
 			DRM_DEBUG_KMS("No eDP BDB found but eDP panel "
-				      "supported, assume 18bpp panel color "
-				      "depth.\n");
-			dev_priv->edp_bpp = 18;
+				      "supported, assume %dbpp panel color "
+				      "depth.\n",
+				      dev_priv->edp.bpp);
 		}
 		return;
 	}
 
 	switch ((edp->color_depth >> (panel_type * 2)) & 3) {
 	case EDP_18BPP:
-		dev_priv->edp_bpp = 18;
+		dev_priv->edp.bpp = 18;
 		break;
 	case EDP_24BPP:
-		dev_priv->edp_bpp = 24;
+		dev_priv->edp.bpp = 24;
 		break;
 	case EDP_30BPP:
-		dev_priv->edp_bpp = 30;
+		dev_priv->edp.bpp = 30;
+		break;
+	}
+
+	/* Get the eDP sequencing and link info */
+	edp_pps = &edp->power_seqs[panel_type];
+	edp_link_params = &edp->link_params[panel_type];
+
+	dev_priv->edp.pps = *edp_pps;
+
+	dev_priv->edp.rate = edp_link_params->rate ? DP_LINK_BW_2_7 :
+		DP_LINK_BW_1_62;
+	switch (edp_link_params->lanes) {
+	case 0:
+		dev_priv->edp.lanes = 1;
+		break;
+	case 1:
+		dev_priv->edp.lanes = 2;
+		break;
+	case 3:
+	default:
+		dev_priv->edp.lanes = 4;
+		break;
+	}
+	switch (edp_link_params->preemphasis) {
+	case 0:
+		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_0;
+		break;
+	case 1:
+		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_3_5;
+		break;
+	case 2:
+		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_6;
+		break;
+	case 3:
+		dev_priv->edp.preemphasis = DP_TRAIN_PRE_EMPHASIS_9_5;
+		break;
+	}
+	switch (edp_link_params->vswing) {
+	case 0:
+		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_400;
+		break;
+	case 1:
+		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_600;
+		break;
+	case 2:
+		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_800;
+		break;
+	case 3:
+		dev_priv->edp.vswing = DP_TRAIN_VOLTAGE_SWING_1200;
 		break;
 	}
 }
@@ -460,7 +501,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
 
 	p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
 	if (!p_defs) {
-		DRM_DEBUG_KMS("No general definition block is found\n");
+		DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
 		return;
 	}
 	/* judge whether the size of child device meets the requirements.
@@ -513,50 +554,83 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
 	}
 	return;
 }
+
+static void
+init_vbt_defaults(struct drm_i915_private *dev_priv)
+{
+	dev_priv->crt_ddc_pin = GMBUS_PORT_VGADDC;
+
+	/* LFP panel data */
+	dev_priv->lvds_dither = 1;
+	dev_priv->lvds_vbt = 0;
+
+	/* SDVO panel data */
+	dev_priv->sdvo_lvds_vbt_mode = NULL;
+
+	/* general features */
+	dev_priv->int_tv_support = 1;
+	dev_priv->int_crt_support = 1;
+	dev_priv->lvds_use_ssc = 0;
+
+	/* eDP data */
+	dev_priv->edp.bpp = 18;
+}
+
 /**
- * intel_init_bios - initialize VBIOS settings & find VBT
+ * intel_parse_bios - find VBT and initialize settings from the BIOS
  * @dev: DRM device
  * @dev: DRM device
  *
  *
  * Loads the Video BIOS and checks that the VBT exists.  Sets scratch registers
  * to appropriate values.
  * to appropriate values.
  *
  *
- * VBT existence is a sanity check that is relied on by other i830_bios.c code.
- * Note that it would be better to use a BIOS call to get the VBT, as BIOSes may
- * feed an updated VBT back through that, compared to what we'll fetch using
- * this method of groping around in the BIOS data.
- *
  * Returns 0 on success, nonzero on failure.
  * Returns 0 on success, nonzero on failure.
  */
  */
 bool
 bool
-intel_init_bios(struct drm_device *dev)
+intel_parse_bios(struct drm_device *dev)
 {
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct pci_dev *pdev = dev->pdev;
 	struct pci_dev *pdev = dev->pdev;
-	struct vbt_header *vbt = NULL;
-	struct bdb_header *bdb;
-	u8 __iomem *bios;
-	size_t size;
-	int i;
-
-	bios = pci_map_rom(pdev, &size);
-	if (!bios)
-		return -1;
-
-	/* Scour memory looking for the VBT signature */
-	for (i = 0; i + 4 < size; i++) {
-		if (!memcmp(bios + i, "$VBT", 4)) {
-			vbt = (struct vbt_header *)(bios + i);
-			break;
-		}
+	struct bdb_header *bdb = NULL;
+	u8 __iomem *bios = NULL;
+
+	init_vbt_defaults(dev_priv);
+
+	/* XXX Should this validation be moved to intel_opregion.c? */
+	if (dev_priv->opregion.vbt) {
+		struct vbt_header *vbt = dev_priv->opregion.vbt;
+		if (memcmp(vbt->signature, "$VBT", 4) == 0) {
+			DRM_DEBUG_DRIVER("Using VBT from OpRegion: %20s\n",
+					 vbt->signature);
+			bdb = (struct bdb_header *)((char *)vbt + vbt->bdb_offset);
+		} else
+			dev_priv->opregion.vbt = NULL;
 	}
 	}
 
 
-		DRM_ERROR("VBT signature missing\n");
-		pci_unmap_rom(pdev, bios);
-		return -1;
-	}
+	if (bdb == NULL) {
+		struct vbt_header *vbt = NULL;
+		size_t size;
+		int i;
 
 
+		bios = pci_map_rom(pdev, &size);
+		if (!bios)
+			return -1;
+
+		/* Scour memory looking for the VBT signature */
+		for (i = 0; i + 4 < size; i++) {
+			if (!memcmp(bios + i, "$VBT", 4)) {
+				vbt = (struct vbt_header *)(bios + i);
+				break;
+			}
+		}
+
+		if (!vbt) {
+			DRM_ERROR("VBT signature missing\n");
+			pci_unmap_rom(pdev, bios);
+			return -1;
+		}
+
+		bdb = (struct bdb_header *)(bios + i + vbt->bdb_offset);
+	}
 
 	/* Grab useful general definitions */
 	parse_general_features(dev_priv, bdb);
@@ -568,7 +642,25 @@ intel_init_bios(struct drm_device *dev)
 	parse_driver_features(dev_priv, bdb);
 	parse_edp(dev_priv, bdb);
 
-	pci_unmap_rom(pdev, bios);
+	if (bios)
+		pci_unmap_rom(pdev, bios);
 
 	return 0;
 }
+
+/* Ensure that vital registers have been initialised, even if the BIOS
+ * is absent or just failing to do its job.
+ */
+void intel_setup_bios(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	 /* Set the Panel Power On/Off timings if uninitialized. */
+	if ((I915_READ(PP_ON_DELAYS) == 0) && (I915_READ(PP_OFF_DELAYS) == 0)) {
+		/* Set T2 to 40ms and T5 to 200ms */
+		I915_WRITE(PP_ON_DELAYS, 0x019007d0);
+
+		/* Set T3 to 35ms and Tx to 200ms */
+		I915_WRITE(PP_OFF_DELAYS, 0x015e07d0);
+	}
+}

+ 4 - 2
drivers/gpu/drm/i915/intel_bios.h

@@ -197,7 +197,8 @@ struct bdb_general_features {
 struct child_device_config {
 	u16 handle;
 	u16 device_type;
-	u8  device_id[10]; /* See DEVICE_TYPE_* above */
+	u8  i2c_speed;
+	u8  rsvd[9];
 	u16 addin_offset;
 	u8  dvo_port; /* See Device_PORT_* above */
 	u8  i2c_pin;
@@ -466,7 +467,8 @@ struct bdb_edp {
 	struct edp_link_params link_params[16];
 } __attribute__ ((packed));
 
-bool intel_init_bios(struct drm_device *dev);
+void intel_setup_bios(struct drm_device *dev);
+bool intel_parse_bios(struct drm_device *dev);
 
 /*
  * Driver<->VBIOS interaction occurs through scratch bits in

+ 64 - 63
drivers/gpu/drm/i915/intel_crt.c

@@ -79,7 +79,7 @@ static int intel_crt_mode_valid(struct drm_connector *connector,
 	if (mode->clock < 25000)
 		return MODE_CLOCK_LOW;
 
-	if (!IS_I9XX(dev))
+	if (IS_GEN2(dev))
 		max_clock = 350000;
 	else
 		max_clock = 400000;
@@ -123,7 +123,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
 	 * Disable separate mode multiplier used when cloning SDVO to CRT
 	 * XXX this needs to be adjusted when we really are cloning
 	 */
-	if (IS_I965G(dev) && !HAS_PCH_SPLIT(dev)) {
+	if (INTEL_INFO(dev)->gen >= 4 && !HAS_PCH_SPLIT(dev)) {
 		dpll_md = I915_READ(dpll_md_reg);
 		I915_WRITE(dpll_md_reg,
 			   dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
@@ -187,11 +187,12 @@ static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
 	I915_WRITE(PCH_ADPA, adpa);
 
 	if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
-		     1000, 1))
+		     1000))
 		DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
 
 	if (turn_off_dac) {
-		I915_WRITE(PCH_ADPA, temp);
+		/* Make sure hotplug is enabled */
+		I915_WRITE(PCH_ADPA, temp | ADPA_CRT_HOTPLUG_ENABLE);
 		(void)I915_READ(PCH_ADPA);
 	}
 
@@ -244,7 +245,7 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
 		/* wait for FORCE_DETECT to go off */
 		if (wait_for((I915_READ(PORT_HOTPLUG_EN) &
 			      CRT_HOTPLUG_FORCE_DETECT) == 0,
-			     1000, 1))
+			     1000))
 			DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
 	}
 
@@ -261,21 +262,47 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
 	return ret;
 }
 
+static bool intel_crt_ddc_probe(struct drm_i915_private *dev_priv, int ddc_bus)
+{
+	u8 buf;
+	struct i2c_msg msgs[] = {
+		{
+			.addr = 0xA0,
+			.flags = 0,
+			.len = 1,
+			.buf = &buf,
+		},
+	};
+	/* DDC monitor detect: Does it ACK a write to 0xA0? */
+	return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1;
+}
+
 static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
 {
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+	struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
+	struct drm_i915_private *dev_priv = encoder->dev->dev_private;
 
 	/* CRT should always be at 0, but check anyway */
 	if (intel_encoder->type != INTEL_OUTPUT_ANALOG)
 		return false;
 
-	return intel_ddc_probe(intel_encoder);
+	if (intel_crt_ddc_probe(dev_priv, dev_priv->crt_ddc_pin)) {
+		DRM_DEBUG_KMS("CRT detected via DDC:0xa0\n");
+		return true;
+	}
+
+	if (intel_ddc_probe(intel_encoder, dev_priv->crt_ddc_pin)) {
+		DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
+		return true;
+	}
+
+	return false;
 }
 
 static enum drm_connector_status
 intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
 {
-	struct drm_encoder *encoder = &intel_encoder->enc;
+	struct drm_encoder *encoder = &intel_encoder->base;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -295,6 +322,8 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
 	uint8_t	st00;
 	enum drm_connector_status status;
 
+	DRM_DEBUG_KMS("starting load-detect on CRT\n");
+
 	if (pipe == 0) {
 		bclrpat_reg = BCLRPAT_A;
 		vtotal_reg = VTOTAL_A;
 		vtotal_reg = VTOTAL_A;
@@ -324,9 +353,10 @@ intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder
 	/* Set the border color to purple. */
 	/* Set the border color to purple. */
 	I915_WRITE(bclrpat_reg, 0x500050);
 	I915_WRITE(bclrpat_reg, 0x500050);
 
 
-	if (IS_I9XX(dev)) {
+	if (!IS_GEN2(dev)) {
 		uint32_t pipeconf = I915_READ(pipeconf_reg);
 		uint32_t pipeconf = I915_READ(pipeconf_reg);
 		I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
 		I915_WRITE(pipeconf_reg, pipeconf | PIPECONF_FORCE_BORDER);
+		POSTING_READ(pipeconf_reg);
 		/* Wait for next Vblank to substitue
 		/* Wait for next Vblank to substitue
 		 * border color for Color info */
 		 * border color for Color info */
 		intel_wait_for_vblank(dev, pipe);
 		intel_wait_for_vblank(dev, pipe);
@@ -404,34 +434,37 @@ static enum drm_connector_status
 intel_crt_detect(struct drm_connector *connector, bool force)
 intel_crt_detect(struct drm_connector *connector, bool force)
 {
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_device *dev = connector->dev;
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
+	struct intel_encoder *encoder = intel_attached_encoder(connector);
 	struct drm_crtc *crtc;
 	struct drm_crtc *crtc;
 	int dpms_mode;
 	int dpms_mode;
 	enum drm_connector_status status;
 	enum drm_connector_status status;
 
 
-	if (IS_I9XX(dev) && !IS_I915G(dev) && !IS_I915GM(dev)) {
-		if (intel_crt_detect_hotplug(connector))
+	if (I915_HAS_HOTPLUG(dev)) {
+		if (intel_crt_detect_hotplug(connector)) {
+			DRM_DEBUG_KMS("CRT detected via hotplug\n");
 			return connector_status_connected;
 			return connector_status_connected;
-		else
+		} else
 			return connector_status_disconnected;
 			return connector_status_disconnected;
 	}
 	}
 
 
-	if (intel_crt_detect_ddc(encoder))
+	if (intel_crt_detect_ddc(&encoder->base))
 		return connector_status_connected;
 		return connector_status_connected;
 
 
 	if (!force)
 	if (!force)
 		return connector->status;
 		return connector->status;
 
 
 	/* for pre-945g platforms use load detect */
 	/* for pre-945g platforms use load detect */
-	if (encoder->crtc && encoder->crtc->enabled) {
-		status = intel_crt_load_detect(encoder->crtc, intel_encoder);
+	if (encoder->base.crtc && encoder->base.crtc->enabled) {
+		status = intel_crt_load_detect(encoder->base.crtc, encoder);
 	} else {
 	} else {
-		crtc = intel_get_load_detect_pipe(intel_encoder, connector,
+		crtc = intel_get_load_detect_pipe(encoder, connector,
 						  NULL, &dpms_mode);
 						  NULL, &dpms_mode);
 		if (crtc) {
 		if (crtc) {
-			status = intel_crt_load_detect(crtc, intel_encoder);
-			intel_release_load_detect_pipe(intel_encoder,
+			if (intel_crt_detect_ddc(&encoder->base))
+				status = connector_status_connected;
+			else
+				status = intel_crt_load_detect(crtc, encoder);
+			intel_release_load_detect_pipe(encoder,
 						       connector, dpms_mode);
 						       connector, dpms_mode);
 		} else
 		} else
 			status = connector_status_unknown;
 			status = connector_status_unknown;
@@ -449,32 +482,18 @@ static void intel_crt_destroy(struct drm_connector *connector)
 
 
 static int intel_crt_get_modes(struct drm_connector *connector)
 static int intel_crt_get_modes(struct drm_connector *connector)
 {
 {
-	int ret;
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct i2c_adapter *ddc_bus;
 	struct drm_device *dev = connector->dev;
 	struct drm_device *dev = connector->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret;
 
 
-
-	ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
+	ret = intel_ddc_get_modes(connector,
+				 &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
 	if (ret || !IS_G4X(dev))
 	if (ret || !IS_G4X(dev))
-		goto end;
+		return ret;
 
 
 	/* Try to probe digital port for output in DVI-I -> VGA mode. */
 	/* Try to probe digital port for output in DVI-I -> VGA mode. */
-	ddc_bus = intel_i2c_create(connector->dev, GPIOD, "CRTDDC_D");
-
-	if (!ddc_bus) {
-		dev_printk(KERN_ERR, &connector->dev->pdev->dev,
-			   "DDC bus registration failed for CRTDDC_D.\n");
-		goto end;
-	}
-	/* Try to get modes by GPIOD port */
-	ret = intel_ddc_get_modes(connector, ddc_bus);
-	intel_i2c_destroy(ddc_bus);
-
-end:
-	return ret;
-
+	return intel_ddc_get_modes(connector,
+				   &dev_priv->gmbus[GMBUS_PORT_DPB].adapter);
 }
 }
 
 
 static int intel_crt_set_property(struct drm_connector *connector,
 static int intel_crt_set_property(struct drm_connector *connector,
@@ -507,7 +526,7 @@ static const struct drm_connector_funcs intel_crt_connector_funcs = {
 static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
 static const struct drm_connector_helper_funcs intel_crt_connector_helper_funcs = {
 	.mode_valid = intel_crt_mode_valid,
 	.mode_valid = intel_crt_mode_valid,
 	.get_modes = intel_crt_get_modes,
 	.get_modes = intel_crt_get_modes,
-	.best_encoder = intel_attached_encoder,
+	.best_encoder = intel_best_encoder,
 };
 };
 
 
 static const struct drm_encoder_funcs intel_crt_enc_funcs = {
 static const struct drm_encoder_funcs intel_crt_enc_funcs = {
@@ -520,7 +539,6 @@ void intel_crt_init(struct drm_device *dev)
 	struct intel_encoder *intel_encoder;
 	struct intel_encoder *intel_encoder;
 	struct intel_connector *intel_connector;
 	struct intel_connector *intel_connector;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 i2c_reg;
 
 
 	intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL);
 	intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL);
 	if (!intel_encoder)
 	if (!intel_encoder)
@@ -536,27 +554,10 @@ void intel_crt_init(struct drm_device *dev)
 	drm_connector_init(dev, &intel_connector->base,
 	drm_connector_init(dev, &intel_connector->base,
 			   &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
 			   &intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
 
 
-	drm_encoder_init(dev, &intel_encoder->enc, &intel_crt_enc_funcs,
+	drm_encoder_init(dev, &intel_encoder->base, &intel_crt_enc_funcs,
 			 DRM_MODE_ENCODER_DAC);
 			 DRM_MODE_ENCODER_DAC);
 
 
-	drm_mode_connector_attach_encoder(&intel_connector->base,
-					  &intel_encoder->enc);
-
-	/* Set up the DDC bus. */
-	if (HAS_PCH_SPLIT(dev))
-		i2c_reg = PCH_GPIOA;
-	else {
-		i2c_reg = GPIOA;
-		/* Use VBT information for CRT DDC if available */
-		if (dev_priv->crt_ddc_bus != 0)
-			i2c_reg = dev_priv->crt_ddc_bus;
-	}
-	intel_encoder->ddc_bus = intel_i2c_create(dev, i2c_reg, "CRTDDC_A");
-	if (!intel_encoder->ddc_bus) {
-		dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
-			   "failed.\n");
-		return;
-	}
+	intel_connector_attach_encoder(intel_connector, intel_encoder);
 
 
 	intel_encoder->type = INTEL_OUTPUT_ANALOG;
 	intel_encoder->type = INTEL_OUTPUT_ANALOG;
 	intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
 	intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
@@ -566,7 +567,7 @@ void intel_crt_init(struct drm_device *dev)
 	connector->interlace_allowed = 1;
 	connector->interlace_allowed = 1;
 	connector->doublescan_allowed = 0;
 	connector->doublescan_allowed = 0;
 
 
-	drm_encoder_helper_add(&intel_encoder->enc, &intel_crt_helper_funcs);
+	drm_encoder_helper_add(&intel_encoder->base, &intel_crt_helper_funcs);
 	drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
 	drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
 
 
 	drm_sysfs_connector_add(connector);
 	drm_sysfs_connector_add(connector);

File diff suppressed because it is too large
+ 760 - 654
drivers/gpu/drm/i915/intel_display.c


File diff suppressed because it is too large
+ 338 - 212
drivers/gpu/drm/i915/intel_dp.c


+ 103 - 57
drivers/gpu/drm/i915/intel_drv.h

@@ -26,14 +26,12 @@
 #define __INTEL_DRV_H__
 #define __INTEL_DRV_H__
 
 
 #include <linux/i2c.h>
 #include <linux/i2c.h>
-#include <linux/i2c-id.h>
-#include <linux/i2c-algo-bit.h>
 #include "i915_drv.h"
 #include "i915_drv.h"
 #include "drm_crtc.h"
 #include "drm_crtc.h"
-
 #include "drm_crtc_helper.h"
 #include "drm_crtc_helper.h"
+#include "drm_fb_helper.h"
 
 
-#define wait_for(COND, MS, W) ({ \
+#define _wait_for(COND, MS, W) ({ \
 	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
 	unsigned long timeout__ = jiffies + msecs_to_jiffies(MS);	\
 	int ret__ = 0;							\
 	int ret__ = 0;							\
 	while (! (COND)) {						\
 	while (! (COND)) {						\
@@ -41,11 +39,24 @@
 			ret__ = -ETIMEDOUT;				\
 			ret__ = -ETIMEDOUT;				\
 			break;						\
 			break;						\
 		}							\
 		}							\
-		if (W) msleep(W);					\
+		if (W && !in_dbg_master()) msleep(W);			\
 	}								\
 	}								\
 	ret__;								\
 	ret__;								\
 })
 })
 
 
+#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+#define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0)
+
+#define MSLEEP(x) do { \
+	if (in_dbg_master()) \
+	       	mdelay(x); \
+	else \
+		msleep(x); \
+} while(0)
+
+#define KHz(x) (1000*x)
+#define MHz(x) KHz(1000*x)
+
 /*
 /*
  * Display related stuff
  * Display related stuff
  */
  */
@@ -96,24 +107,39 @@
 #define INTEL_DVO_CHIP_TMDS 2
 #define INTEL_DVO_CHIP_TMDS 2
 #define INTEL_DVO_CHIP_TVOUT 4
 #define INTEL_DVO_CHIP_TVOUT 4
 
 
-struct intel_i2c_chan {
-	struct drm_device *drm_dev; /* for getting at dev. private (mmio etc.) */
-	u32 reg; /* GPIO reg */
-	struct i2c_adapter adapter;
-	struct i2c_algo_bit_data algo;
-};
+/* drm_display_mode->private_flags */
+#define INTEL_MODE_PIXEL_MULTIPLIER_SHIFT (0x0)
+#define INTEL_MODE_PIXEL_MULTIPLIER_MASK (0xf << INTEL_MODE_PIXEL_MULTIPLIER_SHIFT)
+
+static inline void
+intel_mode_set_pixel_multiplier(struct drm_display_mode *mode,
+				int multiplier)
+{
+	mode->clock *= multiplier;
+	mode->private_flags |= multiplier;
+}
+
+static inline int
+intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
+{
+	return (mode->private_flags & INTEL_MODE_PIXEL_MULTIPLIER_MASK) >> INTEL_MODE_PIXEL_MULTIPLIER_SHIFT;
+}
 
 
 struct intel_framebuffer {
 struct intel_framebuffer {
 	struct drm_framebuffer base;
 	struct drm_framebuffer base;
 	struct drm_gem_object *obj;
 	struct drm_gem_object *obj;
 };
 };
 
 
+struct intel_fbdev {
+	struct drm_fb_helper helper;
+	struct intel_framebuffer ifb;
+	struct list_head fbdev_list;
+	struct drm_display_mode *our_mode;
+};
 
 
 struct intel_encoder {
 struct intel_encoder {
-	struct drm_encoder enc;
+	struct drm_encoder base;
 	int type;
 	int type;
-	struct i2c_adapter *i2c_bus;
-	struct i2c_adapter *ddc_bus;
 	bool load_detect_temp;
 	bool load_detect_temp;
 	bool needs_tv_clock;
 	bool needs_tv_clock;
 	void (*hot_plug)(struct intel_encoder *);
 	void (*hot_plug)(struct intel_encoder *);
@@ -123,32 +149,7 @@ struct intel_encoder {
 
 
 struct intel_connector {
 struct intel_connector {
 	struct drm_connector base;
 	struct drm_connector base;
-};
-
-struct intel_crtc;
-struct intel_overlay {
-	struct drm_device *dev;
-	struct intel_crtc *crtc;
-	struct drm_i915_gem_object *vid_bo;
-	struct drm_i915_gem_object *old_vid_bo;
-	int active;
-	int pfit_active;
-	u32 pfit_vscale_ratio; /* shifted-point number, (1<<12) == 1.0 */
-	u32 color_key;
-	u32 brightness, contrast, saturation;
-	u32 old_xscale, old_yscale;
-	/* register access */
-	u32 flip_addr;
-	struct drm_i915_gem_object *reg_bo;
-	void *virt_addr;
-	/* flip handling */
-	uint32_t last_flip_req;
-	int hw_wedged;
-#define HW_WEDGED		1
-#define NEEDS_WAIT_FOR_FLIP	2
-#define RELEASE_OLD_VID		3
-#define SWITCH_OFF_STAGE_1	4
-#define SWITCH_OFF_STAGE_2	5
+	struct intel_encoder *encoder;
 };
 };
 
 
 struct intel_crtc {
 struct intel_crtc {
@@ -157,6 +158,7 @@ struct intel_crtc {
 	enum plane plane;
 	enum plane plane;
 	u8 lut_r[256], lut_g[256], lut_b[256];
 	u8 lut_r[256], lut_g[256], lut_b[256];
 	int dpms_mode;
 	int dpms_mode;
+	bool active; /* is the crtc on? independent of the dpms mode */
 	bool busy; /* is scanout buffer being updated frequently? */
 	bool busy; /* is scanout buffer being updated frequently? */
 	struct timer_list idle_timer;
 	struct timer_list idle_timer;
 	bool lowfreq_avail;
 	bool lowfreq_avail;
@@ -168,14 +170,53 @@ struct intel_crtc {
 	uint32_t cursor_addr;
 	uint32_t cursor_addr;
 	int16_t cursor_x, cursor_y;
 	int16_t cursor_x, cursor_y;
 	int16_t cursor_width, cursor_height;
 	int16_t cursor_width, cursor_height;
-	bool cursor_visible, cursor_on;
+	bool cursor_visible;
 };
 };
 
 
 #define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
 #define to_intel_crtc(x) container_of(x, struct intel_crtc, base)
 #define to_intel_connector(x) container_of(x, struct intel_connector, base)
 #define to_intel_connector(x) container_of(x, struct intel_connector, base)
-#define enc_to_intel_encoder(x) container_of(x, struct intel_encoder, enc)
+#define to_intel_encoder(x) container_of(x, struct intel_encoder, base)
 #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
 #define to_intel_framebuffer(x) container_of(x, struct intel_framebuffer, base)
 
 
+#define DIP_TYPE_AVI    0x82
+#define DIP_VERSION_AVI 0x2
+#define DIP_LEN_AVI     13
+
+struct dip_infoframe {
+	uint8_t type;		/* HB0 */
+	uint8_t ver;		/* HB1 */
+	uint8_t len;		/* HB2 - body len, not including checksum */
+	uint8_t ecc;		/* Header ECC */
+	uint8_t checksum;	/* PB0 */
+	union {
+		struct {
+			/* PB1 - Y 6:5, A 4:4, B 3:2, S 1:0 */
+			uint8_t Y_A_B_S;
+			/* PB2 - C 7:6, M 5:4, R 3:0 */
+			uint8_t C_M_R;
+			/* PB3 - ITC 7:7, EC 6:4, Q 3:2, SC 1:0 */
+			uint8_t ITC_EC_Q_SC;
+			/* PB4 - VIC 6:0 */
+			uint8_t VIC;
+			/* PB5 - PR 3:0 */
+			uint8_t PR;
+			/* PB6 to PB13 */
+			uint16_t top_bar_end;
+			uint16_t bottom_bar_start;
+			uint16_t left_bar_end;
+			uint16_t right_bar_start;
+		} avi;
+		uint8_t payload[27];
+	} __attribute__ ((packed)) body;
+} __attribute__((packed));
+
+static inline struct drm_crtc *
+intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	return dev_priv->pipe_to_crtc_mapping[pipe];
+}
+
 struct intel_unpin_work {
 struct intel_unpin_work {
 	struct work_struct work;
 	struct work_struct work;
 	struct drm_device *dev;
 	struct drm_device *dev;
@@ -186,16 +227,12 @@ struct intel_unpin_work {
 	bool enable_stall_check;
 	bool enable_stall_check;
 };
 };
 
 
-struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
-				     const char *name);
-void intel_i2c_destroy(struct i2c_adapter *adapter);
 int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
 int intel_ddc_get_modes(struct drm_connector *c, struct i2c_adapter *adapter);
-extern bool intel_ddc_probe(struct intel_encoder *intel_encoder);
-void intel_i2c_quirk_set(struct drm_device *dev, bool enable);
-void intel_i2c_reset_gmbus(struct drm_device *dev);
+extern bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus);
 
 
 extern void intel_crt_init(struct drm_device *dev);
 extern void intel_crt_init(struct drm_device *dev);
 extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
 extern void intel_hdmi_init(struct drm_device *dev, int sdvox_reg);
+void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
 extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
 extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
 extern void intel_dvo_init(struct drm_device *dev);
 extern void intel_dvo_init(struct drm_device *dev);
 extern void intel_tv_init(struct drm_device *dev);
 extern void intel_tv_init(struct drm_device *dev);
@@ -205,32 +242,41 @@ extern void intel_dp_init(struct drm_device *dev, int dp_reg);
 void
 void
 intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
 intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
 		 struct drm_display_mode *adjusted_mode);
 		 struct drm_display_mode *adjusted_mode);
-extern bool intel_pch_has_edp(struct drm_crtc *crtc);
 extern bool intel_dpd_is_edp(struct drm_device *dev);
 extern bool intel_dpd_is_edp(struct drm_device *dev);
 extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
 extern void intel_edp_link_config (struct intel_encoder *, int *, int *);
+extern bool intel_encoder_is_pch_edp(struct drm_encoder *encoder);
 
 
-
+/* intel_panel.c */
 extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
 extern void intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
 				   struct drm_display_mode *adjusted_mode);
 				   struct drm_display_mode *adjusted_mode);
 extern void intel_pch_panel_fitting(struct drm_device *dev,
 extern void intel_pch_panel_fitting(struct drm_device *dev,
 				    int fitting_mode,
 				    int fitting_mode,
 				    struct drm_display_mode *mode,
 				    struct drm_display_mode *mode,
 				    struct drm_display_mode *adjusted_mode);
 				    struct drm_display_mode *adjusted_mode);
+extern u32 intel_panel_get_max_backlight(struct drm_device *dev);
+extern u32 intel_panel_get_backlight(struct drm_device *dev);
+extern void intel_panel_set_backlight(struct drm_device *dev, u32 level);
 
 
-extern int intel_panel_fitter_pipe (struct drm_device *dev);
 extern void intel_crtc_load_lut(struct drm_crtc *crtc);
 extern void intel_crtc_load_lut(struct drm_crtc *crtc);
 extern void intel_encoder_prepare (struct drm_encoder *encoder);
 extern void intel_encoder_prepare (struct drm_encoder *encoder);
 extern void intel_encoder_commit (struct drm_encoder *encoder);
 extern void intel_encoder_commit (struct drm_encoder *encoder);
 extern void intel_encoder_destroy(struct drm_encoder *encoder);
 extern void intel_encoder_destroy(struct drm_encoder *encoder);
 
 
-extern struct drm_encoder *intel_attached_encoder(struct drm_connector *connector);
+static inline struct intel_encoder *intel_attached_encoder(struct drm_connector *connector)
+{
+	return to_intel_connector(connector)->encoder;
+}
+
+extern void intel_connector_attach_encoder(struct intel_connector *connector,
+					   struct intel_encoder *encoder);
+extern struct drm_encoder *intel_best_encoder(struct drm_connector *connector);
 
 
 extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
 extern struct drm_display_mode *intel_crtc_mode_get(struct drm_device *dev,
 						    struct drm_crtc *crtc);
 						    struct drm_crtc *crtc);
 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
 int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
 				struct drm_file *file_priv);
 extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
 extern void intel_wait_for_vblank(struct drm_device *dev, int pipe);
-extern struct drm_crtc *intel_get_crtc_from_pipe(struct drm_device *dev, int pipe);
+extern void intel_wait_for_pipe_off(struct drm_device *dev, int pipe);
 extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
 extern struct drm_crtc *intel_get_load_detect_pipe(struct intel_encoder *intel_encoder,
 						   struct drm_connector *connector,
 						   struct drm_connector *connector,
 						   struct drm_display_mode *mode,
 						   struct drm_display_mode *mode,
@@ -252,7 +298,8 @@ extern void ironlake_enable_drps(struct drm_device *dev);
 extern void ironlake_disable_drps(struct drm_device *dev);
 extern void ironlake_disable_drps(struct drm_device *dev);
 
 
 extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
 extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
-				      struct drm_gem_object *obj);
+				      struct drm_gem_object *obj,
+				      bool pipelined);
 
 
 extern int intel_framebuffer_init(struct drm_device *dev,
 extern int intel_framebuffer_init(struct drm_device *dev,
 				  struct intel_framebuffer *ifb,
 				  struct intel_framebuffer *ifb,
@@ -267,9 +314,8 @@ extern void intel_finish_page_flip_plane(struct drm_device *dev, int plane);
 
 
 extern void intel_setup_overlay(struct drm_device *dev);
 extern void intel_setup_overlay(struct drm_device *dev);
 extern void intel_cleanup_overlay(struct drm_device *dev);
 extern void intel_cleanup_overlay(struct drm_device *dev);
-extern int intel_overlay_switch_off(struct intel_overlay *overlay);
-extern int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
-						int interruptible);
+extern int intel_overlay_switch_off(struct intel_overlay *overlay,
+				    bool interruptible);
 extern int intel_overlay_put_image(struct drm_device *dev, void *data,
 extern int intel_overlay_put_image(struct drm_device *dev, void *data,
 				   struct drm_file *file_priv);
 				   struct drm_file *file_priv);
 extern int intel_overlay_attrs(struct drm_device *dev, void *data,
 extern int intel_overlay_attrs(struct drm_device *dev, void *data,

+ 28 - 41
drivers/gpu/drm/i915/intel_dvo.c

@@ -72,7 +72,7 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
 		.name = "ch7017",
 		.name = "ch7017",
 		.dvo_reg = DVOC,
 		.dvo_reg = DVOC,
 		.slave_addr = 0x75,
 		.slave_addr = 0x75,
-		.gpio = GPIOE,
+		.gpio = GMBUS_PORT_DPB,
 		.dev_ops = &ch7017_ops,
 		.dev_ops = &ch7017_ops,
 	}
 	}
 };
 };
@@ -88,7 +88,13 @@ struct intel_dvo {
 
 
 static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder)
 static struct intel_dvo *enc_to_intel_dvo(struct drm_encoder *encoder)
 {
 {
-	return container_of(enc_to_intel_encoder(encoder), struct intel_dvo, base);
+	return container_of(encoder, struct intel_dvo, base.base);
+}
+
+static struct intel_dvo *intel_attached_dvo(struct drm_connector *connector)
+{
+	return container_of(intel_attached_encoder(connector),
+			    struct intel_dvo, base);
 }
 }
 
 
 static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
 static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
@@ -112,8 +118,7 @@ static void intel_dvo_dpms(struct drm_encoder *encoder, int mode)
 static int intel_dvo_mode_valid(struct drm_connector *connector,
 static int intel_dvo_mode_valid(struct drm_connector *connector,
 				struct drm_display_mode *mode)
 				struct drm_display_mode *mode)
 {
 {
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
 
 
 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
 	if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
 		return MODE_NO_DBLESCAN;
 		return MODE_NO_DBLESCAN;
@@ -224,23 +229,22 @@ static void intel_dvo_mode_set(struct drm_encoder *encoder,
 static enum drm_connector_status
 static enum drm_connector_status
 intel_dvo_detect(struct drm_connector *connector, bool force)
 intel_dvo_detect(struct drm_connector *connector, bool force)
 {
 {
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
-
+	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
 	return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
 	return intel_dvo->dev.dev_ops->detect(&intel_dvo->dev);
 }
 }
 
 
 static int intel_dvo_get_modes(struct drm_connector *connector)
 static int intel_dvo_get_modes(struct drm_connector *connector)
 {
 {
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
 
 
 	/* We should probably have an i2c driver get_modes function for those
 	/* We should probably have an i2c driver get_modes function for those
 	 * devices which will have a fixed set of modes determined by the chip
 	 * devices which will have a fixed set of modes determined by the chip
 	 * (TV-out, for example), but for now with just TMDS and LVDS,
 	 * (TV-out, for example), but for now with just TMDS and LVDS,
 	 * that's not the case.
 	 * that's not the case.
 	 */
 	 */
-	intel_ddc_get_modes(connector, intel_dvo->base.ddc_bus);
+	intel_ddc_get_modes(connector,
+			    &dev_priv->gmbus[GMBUS_PORT_DPC].adapter);
 	if (!list_empty(&connector->probed_modes))
 	if (!list_empty(&connector->probed_modes))
 		return 1;
 		return 1;
 
 
@@ -281,7 +285,7 @@ static const struct drm_connector_funcs intel_dvo_connector_funcs = {
 static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
 static const struct drm_connector_helper_funcs intel_dvo_connector_helper_funcs = {
 	.mode_valid = intel_dvo_mode_valid,
 	.mode_valid = intel_dvo_mode_valid,
 	.get_modes = intel_dvo_get_modes,
 	.get_modes = intel_dvo_get_modes,
-	.best_encoder = intel_attached_encoder,
+	.best_encoder = intel_best_encoder,
 };
 };
 
 
 static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
 static void intel_dvo_enc_destroy(struct drm_encoder *encoder)
@@ -311,8 +315,7 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
 {
 {
 	struct drm_device *dev = connector->dev;
 	struct drm_device *dev = connector->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_dvo *intel_dvo = enc_to_intel_dvo(encoder);
+	struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
 	uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
 	uint32_t dvo_val = I915_READ(intel_dvo->dev.dvo_reg);
 	struct drm_display_mode *mode = NULL;
 	struct drm_display_mode *mode = NULL;
 
 
@@ -323,7 +326,7 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
 		struct drm_crtc *crtc;
 		struct drm_crtc *crtc;
 		int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0;
 		int pipe = (dvo_val & DVO_PIPE_B_SELECT) ? 1 : 0;
 
 
-		crtc = intel_get_crtc_from_pipe(dev, pipe);
+		crtc = intel_get_crtc_for_pipe(dev, pipe);
 		if (crtc) {
 		if (crtc) {
 			mode = intel_crtc_mode_get(dev, crtc);
 			mode = intel_crtc_mode_get(dev, crtc);
 			if (mode) {
 			if (mode) {
@@ -341,11 +344,10 @@ intel_dvo_get_current_mode(struct drm_connector *connector)
 
 
 void intel_dvo_init(struct drm_device *dev)
 void intel_dvo_init(struct drm_device *dev)
 {
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_encoder *intel_encoder;
 	struct intel_encoder *intel_encoder;
 	struct intel_dvo *intel_dvo;
 	struct intel_dvo *intel_dvo;
 	struct intel_connector *intel_connector;
 	struct intel_connector *intel_connector;
-	struct i2c_adapter *i2cbus = NULL;
-	int ret = 0;
 	int i;
 	int i;
 	int encoder_type = DRM_MODE_ENCODER_NONE;
 	int encoder_type = DRM_MODE_ENCODER_NONE;
 
 
@@ -360,16 +362,14 @@ void intel_dvo_init(struct drm_device *dev)
 	}
 	}
 
 
 	intel_encoder = &intel_dvo->base;
 	intel_encoder = &intel_dvo->base;
-
-	/* Set up the DDC bus */
-	intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "DVODDC_D");
-	if (!intel_encoder->ddc_bus)
-		goto free_intel;
+	drm_encoder_init(dev, &intel_encoder->base,
+			 &intel_dvo_enc_funcs, encoder_type);
 
 
 	/* Now, try to find a controller */
 	/* Now, try to find a controller */
 	for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
 	for (i = 0; i < ARRAY_SIZE(intel_dvo_devices); i++) {
 		struct drm_connector *connector = &intel_connector->base;
 		struct drm_connector *connector = &intel_connector->base;
 		const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
 		const struct intel_dvo_device *dvo = &intel_dvo_devices[i];
+		struct i2c_adapter *i2c;
 		int gpio;
 		int gpio;
 
 
 		/* Allow the I2C driver info to specify the GPIO to be used in
 		/* Allow the I2C driver info to specify the GPIO to be used in
@@ -379,24 +379,18 @@ void intel_dvo_init(struct drm_device *dev)
 		if (dvo->gpio != 0)
 		if (dvo->gpio != 0)
 			gpio = dvo->gpio;
 			gpio = dvo->gpio;
 		else if (dvo->type == INTEL_DVO_CHIP_LVDS)
 		else if (dvo->type == INTEL_DVO_CHIP_LVDS)
-			gpio = GPIOB;
+			gpio = GMBUS_PORT_SSC;
 		else
 		else
-			gpio = GPIOE;
+			gpio = GMBUS_PORT_DPB;
 
 
 		/* Set up the I2C bus necessary for the chip we're probing.
 		/* Set up the I2C bus necessary for the chip we're probing.
 		 * It appears that everything is on GPIOE except for panels
 		 * It appears that everything is on GPIOE except for panels
 		 * on i830 laptops, which are on GPIOB (DVOA).
 		 * on i830 laptops, which are on GPIOB (DVOA).
 		 */
 		 */
-		if (i2cbus != NULL)
-			intel_i2c_destroy(i2cbus);
-		if (!(i2cbus = intel_i2c_create(dev, gpio,
-			gpio == GPIOB ? "DVOI2C_B" : "DVOI2C_E"))) {
-			continue;
-		}
+		i2c = &dev_priv->gmbus[gpio].adapter;
 
 
 		intel_dvo->dev = *dvo;
 		intel_dvo->dev = *dvo;
-		ret = dvo->dev_ops->init(&intel_dvo->dev, i2cbus);
-		if (!ret)
+		if (!dvo->dev_ops->init(&intel_dvo->dev, i2c))
 			continue;
 			continue;
 
 
 		intel_encoder->type = INTEL_OUTPUT_DVO;
 		intel_encoder->type = INTEL_OUTPUT_DVO;
@@ -427,13 +421,10 @@ void intel_dvo_init(struct drm_device *dev)
 		connector->interlace_allowed = false;
 		connector->interlace_allowed = false;
 		connector->doublescan_allowed = false;
 		connector->doublescan_allowed = false;
 
 
-		drm_encoder_init(dev, &intel_encoder->enc,
-				 &intel_dvo_enc_funcs, encoder_type);
-		drm_encoder_helper_add(&intel_encoder->enc,
+		drm_encoder_helper_add(&intel_encoder->base,
 				       &intel_dvo_helper_funcs);
 				       &intel_dvo_helper_funcs);
 
 
-		drm_mode_connector_attach_encoder(&intel_connector->base,
-						  &intel_encoder->enc);
+		intel_connector_attach_encoder(intel_connector, intel_encoder);
 		if (dvo->type == INTEL_DVO_CHIP_LVDS) {
 		if (dvo->type == INTEL_DVO_CHIP_LVDS) {
 			/* For our LVDS chipsets, we should hopefully be able
 			/* For our LVDS chipsets, we should hopefully be able
 			 * to dig the fixed panel mode out of the BIOS data.
 			 * to dig the fixed panel mode out of the BIOS data.
@@ -451,11 +442,7 @@ void intel_dvo_init(struct drm_device *dev)
 		return;
 		return;
 	}
 	}
 
 
-	intel_i2c_destroy(intel_encoder->ddc_bus);
-	/* Didn't find a chip, so tear down. */
-	if (i2cbus != NULL)
-		intel_i2c_destroy(i2cbus);
-free_intel:
+	drm_encoder_cleanup(&intel_encoder->base);
 	kfree(intel_dvo);
 	kfree(intel_dvo);
 	kfree(intel_connector);
 	kfree(intel_connector);
 }
 }

+ 7 - 22
drivers/gpu/drm/i915/intel_fb.c

@@ -44,13 +44,6 @@
 #include "i915_drm.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
 #include "i915_drv.h"
 
 
-struct intel_fbdev {
-	struct drm_fb_helper helper;
-	struct intel_framebuffer ifb;
-	struct list_head fbdev_list;
-	struct drm_display_mode *our_mode;
-};
-
 static struct fb_ops intelfb_ops = {
 static struct fb_ops intelfb_ops = {
 	.owner = THIS_MODULE,
 	.owner = THIS_MODULE,
 	.fb_check_var = drm_fb_helper_check_var,
 	.fb_check_var = drm_fb_helper_check_var,
@@ -75,7 +68,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 	struct drm_gem_object *fbo = NULL;
 	struct drm_gem_object *fbo = NULL;
 	struct drm_i915_gem_object *obj_priv;
 	struct drm_i915_gem_object *obj_priv;
 	struct device *device = &dev->pdev->dev;
 	struct device *device = &dev->pdev->dev;
-	int size, ret, mmio_bar = IS_I9XX(dev) ? 0 : 1;
+	int size, ret, mmio_bar = IS_GEN2(dev) ? 1 : 0;
 
 
 	/* we don't do packed 24bpp */
 	/* we don't do packed 24bpp */
 	if (sizes->surface_bpp == 24)
 	if (sizes->surface_bpp == 24)
@@ -100,19 +93,13 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 
 
 	mutex_lock(&dev->struct_mutex);
 	mutex_lock(&dev->struct_mutex);
 
 
-	ret = intel_pin_and_fence_fb_obj(dev, fbo);
+	/* Flush everything out, we'll be doing GTT only from now on */
+	ret = intel_pin_and_fence_fb_obj(dev, fbo, false);
 	if (ret) {
 	if (ret) {
 		DRM_ERROR("failed to pin fb: %d\n", ret);
 		DRM_ERROR("failed to pin fb: %d\n", ret);
 		goto out_unref;
 		goto out_unref;
 	}
 	}
 
 
-	/* Flush everything out, we'll be doing GTT only from now on */
-	ret = i915_gem_object_set_to_gtt_domain(fbo, 1);
-	if (ret) {
-		DRM_ERROR("failed to bind fb: %d.\n", ret);
-		goto out_unpin;
-	}
-
 	info = framebuffer_alloc(0, device);
 	info = framebuffer_alloc(0, device);
 	if (!info) {
 	if (!info) {
 		ret = -ENOMEM;
 		ret = -ENOMEM;
@@ -142,7 +129,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
 		goto out_unpin;
 		goto out_unpin;
 	}
 	}
 	info->apertures->ranges[0].base = dev->mode_config.fb_base;
 	info->apertures->ranges[0].base = dev->mode_config.fb_base;
-	if (IS_I9XX(dev))
+	if (!IS_GEN2(dev))
 		info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2);
 		info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 2);
 	else
 	else
 		info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
 		info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
@@ -219,8 +206,8 @@ static struct drm_fb_helper_funcs intel_fb_helper_funcs = {
 	.fb_probe = intel_fb_find_or_create_single,
 	.fb_probe = intel_fb_find_or_create_single,
 };
 };
 
 
-int intel_fbdev_destroy(struct drm_device *dev,
-			struct intel_fbdev *ifbdev)
+static void intel_fbdev_destroy(struct drm_device *dev,
+				struct intel_fbdev *ifbdev)
 {
 {
 	struct fb_info *info;
 	struct fb_info *info;
 	struct intel_framebuffer *ifb = &ifbdev->ifb;
 	struct intel_framebuffer *ifb = &ifbdev->ifb;
@@ -238,11 +225,9 @@ int intel_fbdev_destroy(struct drm_device *dev,
 
 
 	drm_framebuffer_cleanup(&ifb->base);
 	drm_framebuffer_cleanup(&ifb->base);
 	if (ifb->obj) {
 	if (ifb->obj) {
-		drm_gem_object_unreference(ifb->obj);
+		drm_gem_object_unreference_unlocked(ifb->obj);
 		ifb->obj = NULL;
 		ifb->obj = NULL;
 	}
 	}
-
-	return 0;
 }
 }
 
 
 int intel_fbdev_init(struct drm_device *dev)
 int intel_fbdev_init(struct drm_device *dev)

+ 157 - 36
drivers/gpu/drm/i915/intel_hdmi.c

@@ -40,12 +40,76 @@
 struct intel_hdmi {
 struct intel_hdmi {
 	struct intel_encoder base;
 	struct intel_encoder base;
 	u32 sdvox_reg;
 	u32 sdvox_reg;
+	int ddc_bus;
 	bool has_hdmi_sink;
 	bool has_hdmi_sink;
+	bool has_audio;
+	int force_audio;
+	struct drm_property *force_audio_property;
 };
 };
 
 
 static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
 static struct intel_hdmi *enc_to_intel_hdmi(struct drm_encoder *encoder)
 {
 {
-	return container_of(enc_to_intel_encoder(encoder), struct intel_hdmi, base);
+	return container_of(encoder, struct intel_hdmi, base.base);
+}
+
+static struct intel_hdmi *intel_attached_hdmi(struct drm_connector *connector)
+{
+	return container_of(intel_attached_encoder(connector),
+			    struct intel_hdmi, base);
+}
+
+void intel_dip_infoframe_csum(struct dip_infoframe *avi_if)
+{
+	uint8_t *data = (uint8_t *)avi_if;
+	uint8_t sum = 0;
+	unsigned i;
+
+	avi_if->checksum = 0;
+	avi_if->ecc = 0;
+
+	for (i = 0; i < sizeof(*avi_if); i++)
+		sum += data[i];
+
+	avi_if->checksum = 0x100 - sum;
+}
+
+static void intel_hdmi_set_avi_infoframe(struct drm_encoder *encoder)
+{
+	struct dip_infoframe avi_if = {
+		.type = DIP_TYPE_AVI,
+		.ver = DIP_VERSION_AVI,
+		.len = DIP_LEN_AVI,
+	};
+	uint32_t *data = (uint32_t *)&avi_if;
+	struct drm_device *dev = encoder->dev;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+	u32 port;
+	unsigned i;
+
+	if (!intel_hdmi->has_hdmi_sink)
+		return;
+
+	/* XXX first guess at handling video port, is this corrent? */
+	if (intel_hdmi->sdvox_reg == SDVOB)
+		port = VIDEO_DIP_PORT_B;
+	else if (intel_hdmi->sdvox_reg == SDVOC)
+		port = VIDEO_DIP_PORT_C;
+	else
+		return;
+
+	I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port |
+		   VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC);
+
+	intel_dip_infoframe_csum(&avi_if);
+	for (i = 0; i < sizeof(avi_if); i += 4) {
+		I915_WRITE(VIDEO_DIP_DATA, *data);
+		data++;
+	}
+
+	I915_WRITE(VIDEO_DIP_CTL, VIDEO_DIP_ENABLE | port |
+		   VIDEO_DIP_SELECT_AVI | VIDEO_DIP_FREQ_VSYNC |
+		   VIDEO_DIP_ENABLE_AVI);
 }
 }
 
 
 static void intel_hdmi_mode_set(struct drm_encoder *encoder,
 static void intel_hdmi_mode_set(struct drm_encoder *encoder,
@@ -65,10 +129,13 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
 	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
 	if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
 		sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
 		sdvox |= SDVO_HSYNC_ACTIVE_HIGH;
 
 
-	if (intel_hdmi->has_hdmi_sink) {
+	/* Required on CPT */
+	if (intel_hdmi->has_hdmi_sink && HAS_PCH_CPT(dev))
+		sdvox |= HDMI_MODE_SELECT;
+
+	if (intel_hdmi->has_audio) {
 		sdvox |= SDVO_AUDIO_ENABLE;
 		sdvox |= SDVO_AUDIO_ENABLE;
-		if (HAS_PCH_CPT(dev))
-			sdvox |= HDMI_MODE_SELECT;
+		sdvox |= SDVO_NULL_PACKETS_DURING_VSYNC;
 	}
 	}
 
 
 	if (intel_crtc->pipe == 1) {
 	if (intel_crtc->pipe == 1) {
@@ -80,6 +147,8 @@ static void intel_hdmi_mode_set(struct drm_encoder *encoder,
 
 
 	I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
 	I915_WRITE(intel_hdmi->sdvox_reg, sdvox);
 	POSTING_READ(intel_hdmi->sdvox_reg);
 	POSTING_READ(intel_hdmi->sdvox_reg);
+
+	intel_hdmi_set_avi_infoframe(encoder);
 }
 }
 
 
 static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
 static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
@@ -141,36 +210,85 @@ static bool intel_hdmi_mode_fixup(struct drm_encoder *encoder,
 static enum drm_connector_status
 static enum drm_connector_status
 intel_hdmi_detect(struct drm_connector *connector, bool force)
 intel_hdmi_detect(struct drm_connector *connector, bool force)
 {
 {
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
-	struct edid *edid = NULL;
+	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
+	struct edid *edid;
 	enum drm_connector_status status = connector_status_disconnected;
 	enum drm_connector_status status = connector_status_disconnected;
 
 
 	intel_hdmi->has_hdmi_sink = false;
 	intel_hdmi->has_hdmi_sink = false;
-	edid = drm_get_edid(connector, intel_hdmi->base.ddc_bus);
+	intel_hdmi->has_audio = false;
+	edid = drm_get_edid(connector,
+			    &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
 
 
 	if (edid) {
 	if (edid) {
 		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
 		if (edid->input & DRM_EDID_INPUT_DIGITAL) {
 			status = connector_status_connected;
 			status = connector_status_connected;
 			intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
 			intel_hdmi->has_hdmi_sink = drm_detect_hdmi_monitor(edid);
+			intel_hdmi->has_audio = drm_detect_monitor_audio(edid);
 		}
 		}
 		connector->display_info.raw_edid = NULL;
 		connector->display_info.raw_edid = NULL;
 		kfree(edid);
 		kfree(edid);
 	}
 	}
 
 
+	if (status == connector_status_connected) {
+		if (intel_hdmi->force_audio)
+			intel_hdmi->has_audio = intel_hdmi->force_audio > 0;
+	}
+
 	return status;
 	return status;
 }
 }
 
 
 static int intel_hdmi_get_modes(struct drm_connector *connector)
 static int intel_hdmi_get_modes(struct drm_connector *connector)
 {
 {
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
+	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+	struct drm_i915_private *dev_priv = connector->dev->dev_private;
 
 
 	/* We should parse the EDID data and find out if it's an HDMI sink so
 	/* We should parse the EDID data and find out if it's an HDMI sink so
 	 * we can send audio to it.
 	 * we can send audio to it.
 	 */
 	 */
 
 
-	return intel_ddc_get_modes(connector, intel_hdmi->base.ddc_bus);
+	return intel_ddc_get_modes(connector,
+				   &dev_priv->gmbus[intel_hdmi->ddc_bus].adapter);
+}
+
+static int
+intel_hdmi_set_property(struct drm_connector *connector,
+		      struct drm_property *property,
+		      uint64_t val)
+{
+	struct intel_hdmi *intel_hdmi = intel_attached_hdmi(connector);
+	int ret;
+
+	ret = drm_connector_property_set_value(connector, property, val);
+	if (ret)
+		return ret;
+
+	if (property == intel_hdmi->force_audio_property) {
+		if (val == intel_hdmi->force_audio)
+			return 0;
+
+		intel_hdmi->force_audio = val;
+
+		if (val > 0 && intel_hdmi->has_audio)
+			return 0;
+		if (val < 0 && !intel_hdmi->has_audio)
+			return 0;
+
+		intel_hdmi->has_audio = val > 0;
+		goto done;
+	}
+
+	return -EINVAL;
+
+done:
+	if (intel_hdmi->base.base.crtc) {
+		struct drm_crtc *crtc = intel_hdmi->base.base.crtc;
+		drm_crtc_helper_set_mode(crtc, &crtc->mode,
+					 crtc->x, crtc->y,
+					 crtc->fb);
+	}
+
+	return 0;
 }
 }
 
 
 static void intel_hdmi_destroy(struct drm_connector *connector)
 static void intel_hdmi_destroy(struct drm_connector *connector)
@@ -192,19 +310,34 @@ static const struct drm_connector_funcs intel_hdmi_connector_funcs = {
 	.dpms = drm_helper_connector_dpms,
 	.dpms = drm_helper_connector_dpms,
 	.detect = intel_hdmi_detect,
 	.detect = intel_hdmi_detect,
 	.fill_modes = drm_helper_probe_single_connector_modes,
 	.fill_modes = drm_helper_probe_single_connector_modes,
+	.set_property = intel_hdmi_set_property,
 	.destroy = intel_hdmi_destroy,
 	.destroy = intel_hdmi_destroy,
 };
 };
 
 
 static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
 static const struct drm_connector_helper_funcs intel_hdmi_connector_helper_funcs = {
 	.get_modes = intel_hdmi_get_modes,
 	.get_modes = intel_hdmi_get_modes,
 	.mode_valid = intel_hdmi_mode_valid,
 	.mode_valid = intel_hdmi_mode_valid,
-	.best_encoder = intel_attached_encoder,
+	.best_encoder = intel_best_encoder,
 };
 };
 
 
 static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
 static const struct drm_encoder_funcs intel_hdmi_enc_funcs = {
 	.destroy = intel_encoder_destroy,
 	.destroy = intel_encoder_destroy,
 };
 };
 
 
+static void
+intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
+{
+	struct drm_device *dev = connector->dev;
+
+	intel_hdmi->force_audio_property =
+		drm_property_create(dev, DRM_MODE_PROP_RANGE, "force_audio", 2);
+	if (intel_hdmi->force_audio_property) {
+		intel_hdmi->force_audio_property->values[0] = -1;
+		intel_hdmi->force_audio_property->values[1] = 1;
+		drm_connector_attach_property(connector, intel_hdmi->force_audio_property, 0);
+	}
+}
+
 void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
 void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
 {
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
@@ -224,6 +357,9 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
 	}
 	}
 
 
 	intel_encoder = &intel_hdmi->base;
 	intel_encoder = &intel_hdmi->base;
+	drm_encoder_init(dev, &intel_encoder->base, &intel_hdmi_enc_funcs,
+			 DRM_MODE_ENCODER_TMDS);
+
 	connector = &intel_connector->base;
 	connector = &intel_connector->base;
 	drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
 	drm_connector_init(dev, connector, &intel_hdmi_connector_funcs,
 			   DRM_MODE_CONNECTOR_HDMIA);
 			   DRM_MODE_CONNECTOR_HDMIA);
@@ -239,39 +375,33 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
 	/* Set up the DDC bus. */
 	/* Set up the DDC bus. */
 	if (sdvox_reg == SDVOB) {
 	if (sdvox_reg == SDVOB) {
 		intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
 		intel_encoder->clone_mask = (1 << INTEL_HDMIB_CLONE_BIT);
-		intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOE, "HDMIB");
+		intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
 		dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
 		dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
 	} else if (sdvox_reg == SDVOC) {
 	} else if (sdvox_reg == SDVOC) {
 		intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
 		intel_encoder->clone_mask = (1 << INTEL_HDMIC_CLONE_BIT);
-		intel_encoder->ddc_bus = intel_i2c_create(dev, GPIOD, "HDMIC");
+		intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
 		dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
 		dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
 	} else if (sdvox_reg == HDMIB) {
 	} else if (sdvox_reg == HDMIB) {
 		intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
 		intel_encoder->clone_mask = (1 << INTEL_HDMID_CLONE_BIT);
-		intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOE,
-								"HDMIB");
+		intel_hdmi->ddc_bus = GMBUS_PORT_DPB;
 		dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
 		dev_priv->hotplug_supported_mask |= HDMIB_HOTPLUG_INT_STATUS;
 	} else if (sdvox_reg == HDMIC) {
 	} else if (sdvox_reg == HDMIC) {
 		intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
 		intel_encoder->clone_mask = (1 << INTEL_HDMIE_CLONE_BIT);
-		intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOD,
-								"HDMIC");
+		intel_hdmi->ddc_bus = GMBUS_PORT_DPC;
 		dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
 		dev_priv->hotplug_supported_mask |= HDMIC_HOTPLUG_INT_STATUS;
 	} else if (sdvox_reg == HDMID) {
 	} else if (sdvox_reg == HDMID) {
 		intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
 		intel_encoder->clone_mask = (1 << INTEL_HDMIF_CLONE_BIT);
-		intel_encoder->ddc_bus = intel_i2c_create(dev, PCH_GPIOF,
-								"HDMID");
+		intel_hdmi->ddc_bus = GMBUS_PORT_DPD;
 		dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
 		dev_priv->hotplug_supported_mask |= HDMID_HOTPLUG_INT_STATUS;
 	}
 	}
-	if (!intel_encoder->ddc_bus)
-		goto err_connector;
 
 
 	intel_hdmi->sdvox_reg = sdvox_reg;
 	intel_hdmi->sdvox_reg = sdvox_reg;
 
 
-	drm_encoder_init(dev, &intel_encoder->enc, &intel_hdmi_enc_funcs,
-			 DRM_MODE_ENCODER_TMDS);
-	drm_encoder_helper_add(&intel_encoder->enc, &intel_hdmi_helper_funcs);
+	drm_encoder_helper_add(&intel_encoder->base, &intel_hdmi_helper_funcs);
+
+	intel_hdmi_add_properties(intel_hdmi, connector);
 
 
-	drm_mode_connector_attach_encoder(&intel_connector->base,
-					  &intel_encoder->enc);
+	intel_connector_attach_encoder(intel_connector, intel_encoder);
 	drm_sysfs_connector_add(connector);
 	drm_sysfs_connector_add(connector);
 
 
 	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
 	/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
@@ -282,13 +412,4 @@ void intel_hdmi_init(struct drm_device *dev, int sdvox_reg)
 		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
 		u32 temp = I915_READ(PEG_BAND_GAP_DATA);
 		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
 		I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
 	}
 	}
-
-	return;
-
-err_connector:
-	drm_connector_cleanup(connector);
-	kfree(intel_hdmi);
-	kfree(intel_connector);
-
-	return;
 }
 }

+ 357 - 127
drivers/gpu/drm/i915/intel_i2c.c

@@ -1,6 +1,6 @@
 /*
 /*
  * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
  * Copyright (c) 2006 Dave Airlie <airlied@linux.ie>
- * Copyright © 2006-2008 Intel Corporation
+ * Copyright © 2006-2008,2010 Intel Corporation
  *   Jesse Barnes <jesse.barnes@intel.com>
  *   Jesse Barnes <jesse.barnes@intel.com>
  *
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -24,10 +24,9 @@
  *
  *
  * Authors:
  * Authors:
  *	Eric Anholt <eric@anholt.net>
  *	Eric Anholt <eric@anholt.net>
+ *	Chris Wilson <chris@chris-wilson.co.uk>
  */
  */
 #include <linux/i2c.h>
 #include <linux/i2c.h>
-#include <linux/slab.h>
-#include <linux/i2c-id.h>
 #include <linux/i2c-algo-bit.h>
 #include <linux/i2c-algo-bit.h>
 #include "drmP.h"
 #include "drmP.h"
 #include "drm.h"
 #include "drm.h"
@@ -35,79 +34,106 @@
 #include "i915_drm.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
 #include "i915_drv.h"
 
 
-void intel_i2c_quirk_set(struct drm_device *dev, bool enable)
+/* Intel GPIO access functions */
+
+#define I2C_RISEFALL_TIME 20
+
+static inline struct intel_gmbus *
+to_intel_gmbus(struct i2c_adapter *i2c)
+{
+	return container_of(i2c, struct intel_gmbus, adapter);
+}
+
+struct intel_gpio {
+	struct i2c_adapter adapter;
+	struct i2c_algo_bit_data algo;
+	struct drm_i915_private *dev_priv;
+	u32 reg;
+};
+
+void
+intel_i2c_reset(struct drm_device *dev)
 {
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	if (HAS_PCH_SPLIT(dev))
+		I915_WRITE(PCH_GMBUS0, 0);
+	else
+		I915_WRITE(GMBUS0, 0);
+}
+
+static void intel_i2c_quirk_set(struct drm_i915_private *dev_priv, bool enable)
+{
+	u32 val;
 
 
 	/* When using bit bashing for I2C, this bit needs to be set to 1 */
 	/* When using bit bashing for I2C, this bit needs to be set to 1 */
-	if (!IS_PINEVIEW(dev))
+	if (!IS_PINEVIEW(dev_priv->dev))
 		return;
 		return;
+
+	val = I915_READ(DSPCLK_GATE_D);
 	if (enable)
 	if (enable)
-		I915_WRITE(DSPCLK_GATE_D,
-			I915_READ(DSPCLK_GATE_D) | DPCUNIT_CLOCK_GATE_DISABLE);
+		val |= DPCUNIT_CLOCK_GATE_DISABLE;
 	else
 	else
-		I915_WRITE(DSPCLK_GATE_D,
-			I915_READ(DSPCLK_GATE_D) & (~DPCUNIT_CLOCK_GATE_DISABLE));
+		val &= ~DPCUNIT_CLOCK_GATE_DISABLE;
+	I915_WRITE(DSPCLK_GATE_D, val);
 }
 }
 
 
-/*
- * Intel GPIO access functions
- */
+static u32 get_reserved(struct intel_gpio *gpio)
+{
+	struct drm_i915_private *dev_priv = gpio->dev_priv;
+	struct drm_device *dev = dev_priv->dev;
+	u32 reserved = 0;
 
 
-#define I2C_RISEFALL_TIME 20
+	/* On most chips, these bits must be preserved in software. */
+	if (!IS_I830(dev) && !IS_845G(dev))
+		reserved = I915_READ(gpio->reg) & (GPIO_DATA_PULLUP_DISABLE |
+						   GPIO_CLOCK_PULLUP_DISABLE);
+
+	return reserved;
+}
 
 
 static int get_clock(void *data)
 static int get_clock(void *data)
 {
 {
-	struct intel_i2c_chan *chan = data;
-	struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
-	u32 val;
-
-	val = I915_READ(chan->reg);
-	return ((val & GPIO_CLOCK_VAL_IN) != 0);
+	struct intel_gpio *gpio = data;
+	struct drm_i915_private *dev_priv = gpio->dev_priv;
+	u32 reserved = get_reserved(gpio);
+	I915_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
+	I915_WRITE(gpio->reg, reserved);
+	return (I915_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
 }
 }
 
 
 static int get_data(void *data)
 static int get_data(void *data)
 {
 {
-	struct intel_i2c_chan *chan = data;
-	struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
-	u32 val;
-
-	val = I915_READ(chan->reg);
-	return ((val & GPIO_DATA_VAL_IN) != 0);
+	struct intel_gpio *gpio = data;
+	struct drm_i915_private *dev_priv = gpio->dev_priv;
+	u32 reserved = get_reserved(gpio);
+	I915_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
+	I915_WRITE(gpio->reg, reserved);
+	return (I915_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
 }
 }
 
 
 static void set_clock(void *data, int state_high)
 static void set_clock(void *data, int state_high)
 {
 {
-	struct intel_i2c_chan *chan = data;
-	struct drm_device *dev = chan->drm_dev;
-	struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
-	u32 reserved = 0, clock_bits;
-
-	/* On most chips, these bits must be preserved in software. */
-	if (!IS_I830(dev) && !IS_845G(dev))
-		reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
-						   GPIO_CLOCK_PULLUP_DISABLE);
+	struct intel_gpio *gpio = data;
+	struct drm_i915_private *dev_priv = gpio->dev_priv;
+	u32 reserved = get_reserved(gpio);
+	u32 clock_bits;
 
 
 	if (state_high)
 	if (state_high)
 		clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
 		clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK;
 	else
 	else
 		clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
 		clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
 			GPIO_CLOCK_VAL_MASK;
 			GPIO_CLOCK_VAL_MASK;
-	I915_WRITE(chan->reg, reserved | clock_bits);
-	udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
+
+	I915_WRITE(gpio->reg, reserved | clock_bits);
+	POSTING_READ(gpio->reg);
 }
 }
 
 
 static void set_data(void *data, int state_high)
 static void set_data(void *data, int state_high)
 {
 {
-	struct intel_i2c_chan *chan = data;
-	struct drm_device *dev = chan->drm_dev;
-	struct drm_i915_private *dev_priv = chan->drm_dev->dev_private;
-	u32 reserved = 0, data_bits;
-
-	/* On most chips, these bits must be preserved in software. */
-	if (!IS_I830(dev) && !IS_845G(dev))
-		reserved = I915_READ(chan->reg) & (GPIO_DATA_PULLUP_DISABLE |
-						   GPIO_CLOCK_PULLUP_DISABLE);
+	struct intel_gpio *gpio = data;
+	struct drm_i915_private *dev_priv = gpio->dev_priv;
+	u32 reserved = get_reserved(gpio);
+	u32 data_bits;
 
 
 	if (state_high)
 	if (state_high)
 		data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
 		data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK;
@@ -115,109 +141,313 @@ static void set_data(void *data, int state_high)
 		data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
 		data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
 			GPIO_DATA_VAL_MASK;
 			GPIO_DATA_VAL_MASK;
 
 
-	I915_WRITE(chan->reg, reserved | data_bits);
-	udelay(I2C_RISEFALL_TIME); /* wait for the line to change state */
+	I915_WRITE(gpio->reg, reserved | data_bits);
+	POSTING_READ(gpio->reg);
 }
 }
 
 
-/* Clears the GMBUS setup.  Our driver doesn't make use of the GMBUS I2C
- * engine, but if the BIOS leaves it enabled, then that can break our use
- * of the bit-banging I2C interfaces.  This is notably the case with the
- * Mac Mini in EFI mode.
- */
-void
-intel_i2c_reset_gmbus(struct drm_device *dev)
+static struct i2c_adapter *
+intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin)
 {
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
+	static const int map_pin_to_reg[] = {
+		0,
+		GPIOB,
+		GPIOA,
+		GPIOC,
+		GPIOD,
+		GPIOE,
+		0,
+		GPIOF,
+	};
+	struct intel_gpio *gpio;
 
 
-	if (HAS_PCH_SPLIT(dev)) {
-		I915_WRITE(PCH_GMBUS0, 0);
-	} else {
-		I915_WRITE(GMBUS0, 0);
+	if (pin < 1 || pin > 7)
+		return NULL;
+
+	gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL);
+	if (gpio == NULL)
+		return NULL;
+
+	gpio->reg = map_pin_to_reg[pin];
+	if (HAS_PCH_SPLIT(dev_priv->dev))
+		gpio->reg += PCH_GPIOA - GPIOA;
+	gpio->dev_priv = dev_priv;
+
+	snprintf(gpio->adapter.name, I2C_NAME_SIZE, "GPIO%c", "?BACDEF?"[pin]);
+	gpio->adapter.owner = THIS_MODULE;
+	gpio->adapter.algo_data	= &gpio->algo;
+	gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
+	gpio->algo.setsda = set_data;
+	gpio->algo.setscl = set_clock;
+	gpio->algo.getsda = get_data;
+	gpio->algo.getscl = get_clock;
+	gpio->algo.udelay = I2C_RISEFALL_TIME;
+	gpio->algo.timeout = usecs_to_jiffies(2200);
+	gpio->algo.data = gpio;
+
+	if (i2c_bit_add_bus(&gpio->adapter))
+		goto out_free;
+
+	return &gpio->adapter;
+
+out_free:
+	kfree(gpio);
+	return NULL;
+}
+
+static int
+intel_i2c_quirk_xfer(struct drm_i915_private *dev_priv,
+		     struct i2c_adapter *adapter,
+		     struct i2c_msg *msgs,
+		     int num)
+{
+	struct intel_gpio *gpio = container_of(adapter,
+					       struct intel_gpio,
+					       adapter);
+	int ret;
+
+	intel_i2c_reset(dev_priv->dev);
+
+	intel_i2c_quirk_set(dev_priv, true);
+	set_data(gpio, 1);
+	set_clock(gpio, 1);
+	udelay(I2C_RISEFALL_TIME);
+
+	ret = adapter->algo->master_xfer(adapter, msgs, num);
+
+	set_data(gpio, 1);
+	set_clock(gpio, 1);
+	intel_i2c_quirk_set(dev_priv, false);
+
+	return ret;
+}
+
+static int
+gmbus_xfer(struct i2c_adapter *adapter,
+	   struct i2c_msg *msgs,
+	   int num)
+{
+	struct intel_gmbus *bus = container_of(adapter,
+					       struct intel_gmbus,
+					       adapter);
+	struct drm_i915_private *dev_priv = adapter->algo_data;
+	int i, reg_offset;
+
+	if (bus->force_bit)
+		return intel_i2c_quirk_xfer(dev_priv,
+					    bus->force_bit, msgs, num);
+
+	reg_offset = HAS_PCH_SPLIT(dev_priv->dev) ? PCH_GMBUS0 - GMBUS0 : 0;
+
+	I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
+
+	for (i = 0; i < num; i++) {
+		u16 len = msgs[i].len;
+		u8 *buf = msgs[i].buf;
+
+		if (msgs[i].flags & I2C_M_RD) {
+			I915_WRITE(GMBUS1 + reg_offset,
+				   GMBUS_CYCLE_WAIT | (i + 1 == num ? GMBUS_CYCLE_STOP : 0) |
+				   (len << GMBUS_BYTE_COUNT_SHIFT) |
+				   (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
+				   GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+			POSTING_READ(GMBUS2+reg_offset);
+			do {
+				u32 val, loop = 0;
+
+				if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
+					goto timeout;
+				if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+					return 0;
+
+				val = I915_READ(GMBUS3 + reg_offset);
+				do {
+					*buf++ = val & 0xff;
+					val >>= 8;
+				} while (--len && ++loop < 4);
+			} while (len);
+		} else {
+			u32 val, loop;
+
+			val = loop = 0;
+			do {
+				val |= *buf++ << (8 * loop);
+			} while (--len && ++loop < 4);
+
+			I915_WRITE(GMBUS3 + reg_offset, val);
+			I915_WRITE(GMBUS1 + reg_offset,
+				   (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) |
+				   (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) |
+				   (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) |
+				   GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+			POSTING_READ(GMBUS2+reg_offset);
+
+			while (len) {
+				if (wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50))
+					goto timeout;
+				if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+					return 0;
+
+				val = loop = 0;
+				do {
+					val |= *buf++ << (8 * loop);
+				} while (--len && ++loop < 4);
+
+				I915_WRITE(GMBUS3 + reg_offset, val);
+				POSTING_READ(GMBUS2+reg_offset);
+			}
+		}
+
+		if (i + 1 < num && wait_for(I915_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50))
+			goto timeout;
+		if (I915_READ(GMBUS2 + reg_offset) & GMBUS_SATOER)
+			return 0;
 	}
+
+	return num;
+
+timeout:
+	DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n",
+		 bus->reg0 & 0xff, bus->adapter.name);
+	/* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */
+	bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff);
+	if (!bus->force_bit)
+		return -ENOMEM;
+
+	return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num);
 }

+static u32 gmbus_func(struct i2c_adapter *adapter)
+{
+	struct intel_gmbus *bus = container_of(adapter,
+					       struct intel_gmbus,
+					       adapter);
+
+	if (bus->force_bit)
+		bus->force_bit->algo->functionality(bus->force_bit);
+
+	return (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL |
+		/* I2C_FUNC_10BIT_ADDR | */
+		I2C_FUNC_SMBUS_READ_BLOCK_DATA |
+		I2C_FUNC_SMBUS_BLOCK_PROC_CALL);
+}
+
+static const struct i2c_algorithm gmbus_algorithm = {
+	.master_xfer	= gmbus_xfer,
+	.functionality	= gmbus_func
+};
+
 /**
- * intel_i2c_create - instantiate an Intel i2c bus using the specified GPIO reg
+ * intel_gmbus_setup - instantiate all Intel i2c GMBuses
  * @dev: DRM device
- * @output: driver specific output device
- * @reg: GPIO reg to use
- * @name: name for this bus
- * @slave_addr: slave address (if fixed)
- *
- * Creates and registers a new i2c bus with the Linux i2c layer, for use
- * in output probing and control (e.g. DDC or SDVO control functions).
- *
- * Possible values for @reg include:
- *   %GPIOA
- *   %GPIOB
- *   %GPIOC
- *   %GPIOD
- *   %GPIOE
- *   %GPIOF
- *   %GPIOG
- *   %GPIOH
- * see PRM for details on how these different busses are used.
  */
-struct i2c_adapter *intel_i2c_create(struct drm_device *dev, const u32 reg,
-				     const char *name)
+int intel_setup_gmbus(struct drm_device *dev)
 {
-	struct intel_i2c_chan *chan;
+	static const char *names[GMBUS_NUM_PORTS] = {
+		"disabled",
+		"ssc",
+		"vga",
+		"panel",
+		"dpc",
+		"dpb",
+		"reserved",
+		"dpd",
+	};
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int ret, i;
-	chan = kzalloc(sizeof(struct intel_i2c_chan), GFP_KERNEL);
-	if (!chan)
-		goto out_free;
+	dev_priv->gmbus = kcalloc(sizeof(struct intel_gmbus), GMBUS_NUM_PORTS,
+				  GFP_KERNEL);
+	if (dev_priv->gmbus == NULL)
+		return -ENOMEM;
-	chan->drm_dev = dev;
-	chan->reg = reg;
-	snprintf(chan->adapter.name, I2C_NAME_SIZE, "intel drm %s", name);
-	chan->adapter.owner = THIS_MODULE;
-	chan->adapter.algo_data	= &chan->algo;
-	chan->adapter.dev.parent = &dev->pdev->dev;
-	chan->algo.setsda = set_data;
-	chan->algo.setscl = set_clock;
-	chan->algo.getsda = get_data;
-	chan->algo.getscl = get_clock;
-	chan->algo.udelay = 20;
-	chan->algo.timeout = usecs_to_jiffies(2200);
-	chan->algo.data = chan;
-
-	i2c_set_adapdata(&chan->adapter, chan);
-
-	if(i2c_bit_add_bus(&chan->adapter))
-		goto out_free;
+	for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+		struct intel_gmbus *bus = &dev_priv->gmbus[i];
-	intel_i2c_reset_gmbus(dev);
+		bus->adapter.owner = THIS_MODULE;
+		bus->adapter.class = I2C_CLASS_DDC;
+		snprintf(bus->adapter.name,
+			 I2C_NAME_SIZE,
+			 "gmbus %s",
+			 names[i]);
-	/* JJJ:  raise SCL and SDA? */
-	intel_i2c_quirk_set(dev, true);
-	set_data(chan, 1);
-	set_clock(chan, 1);
-	intel_i2c_quirk_set(dev, false);
-	udelay(20);
+		bus->adapter.dev.parent = &dev->pdev->dev;
+		bus->adapter.algo_data	= dev_priv;
-	return &chan->adapter;
+		bus->adapter.algo = &gmbus_algorithm;
+		ret = i2c_add_adapter(&bus->adapter);
+		if (ret)
+			goto err;
-out_free:
-	kfree(chan);
-	return NULL;
+		/* By default use a conservative clock rate */
+		bus->reg0 = i | GMBUS_RATE_100KHZ;
+
+		/* XXX force bit banging until GMBUS is fully debugged */
+		bus->force_bit = intel_gpio_create(dev_priv, i);
+	}
+
+	intel_i2c_reset(dev_priv->dev);
+
+	return 0;
+
+err:
+	while (--i) {
+		struct intel_gmbus *bus = &dev_priv->gmbus[i];
+		i2c_del_adapter(&bus->adapter);
+	}
+	kfree(dev_priv->gmbus);
+	dev_priv->gmbus = NULL;
+	return ret;
 }

-/**
- * intel_i2c_destroy - unregister and free i2c bus resources
- * @output: channel to free
- *
- * Unregister the adapter from the i2c layer, then free the structure.
- */
-void intel_i2c_destroy(struct i2c_adapter *adapter)
+void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed)
+{
+	struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
+	/* speed:
+	 * 0x0 = 100 KHz
+	 * 0x1 = 50 KHz
+	 * 0x2 = 400 KHz
+	 * 0x3 = 1000 Khz
+	 */
+	bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | (speed << 8);
+}
+
+void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
+{
+	struct intel_gmbus *bus = to_intel_gmbus(adapter);
+
+	if (force_bit) {
+		if (bus->force_bit == NULL) {
+			struct drm_i915_private *dev_priv = adapter->algo_data;
+			bus->force_bit = intel_gpio_create(dev_priv,
+							   bus->reg0 & 0xff);
+		}
+	} else {
+		if (bus->force_bit) {
+			i2c_del_adapter(bus->force_bit);
+			kfree(bus->force_bit);
+			bus->force_bit = NULL;
+		}
+	}
+}
+
+void intel_teardown_gmbus(struct drm_device *dev)
 {
-	struct intel_i2c_chan *chan;
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
-	if (!adapter)
+	if (dev_priv->gmbus == NULL)
 		return;

-	chan = container_of(adapter,
-			    struct intel_i2c_chan,
-			    adapter);
-	i2c_del_adapter(&chan->adapter);
-	kfree(chan);
+	for (i = 0; i < GMBUS_NUM_PORTS; i++) {
+		struct intel_gmbus *bus = &dev_priv->gmbus[i];
+		if (bus->force_bit) {
+			i2c_del_adapter(bus->force_bit);
+			kfree(bus->force_bit);
+		}
+		i2c_del_adapter(&bus->adapter);
+	}
+
+	kfree(dev_priv->gmbus);
+	dev_priv->gmbus = NULL;
 }

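Aside on the gmbus_xfer() transfer loops above: the GMBUS data register (GMBUS3) moves up to four message bytes per 32-bit access, so both the read and the write path pack or unpack bytes with an inner do/while that stops after four bytes or when the message runs out. The following is a stand-alone, user-space sketch of just that packing step under stated assumptions: the register write is replaced by a printf and the payload is invented purely for illustration.

/* Sketch of the 4-bytes-per-dword packing used by the gmbus_xfer() write
 * path; only the loop logic is real, the "register write" is a printf. */
#include <stdio.h>

static unsigned int pack_dword(const unsigned char **buf, unsigned short *len)
{
	unsigned int val = 0, loop = 0;

	do {
		val |= (unsigned int)*(*buf)++ << (8 * loop);
	} while (--(*len) && ++loop < 4);

	return val;
}

int main(void)
{
	const unsigned char msg[] = "GMBUS";	/* made-up payload */
	const unsigned char *p = msg;
	unsigned short len = sizeof(msg) - 1;

	while (len)
		printf("GMBUS3 <= 0x%08x\n", pack_dword(&p, &len));
	return 0;
}

The read path runs the same loop in reverse, shifting bytes back out of each value read from GMBUS3, which keeps the byte order identical in both directions.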
+ 230 - 205
drivers/gpu/drm/i915/intel_lvds.c

@@ -43,102 +43,76 @@
 /* Private structure for the integrated LVDS support */
 struct intel_lvds {
 	struct intel_encoder base;
+
+	struct edid *edid;
+
 	int fitting_mode;
 	u32 pfit_control;
 	u32 pfit_pgm_ratios;
+	bool pfit_dirty;
+
+	struct drm_display_mode *fixed_mode;
 };

-static struct intel_lvds *enc_to_intel_lvds(struct drm_encoder *encoder)
+static struct intel_lvds *to_intel_lvds(struct drm_encoder *encoder)
 {
-	return container_of(enc_to_intel_encoder(encoder), struct intel_lvds, base);
-}
-
-/**
- * Sets the backlight level.
- *
- * \param level backlight level, from 0 to intel_lvds_get_max_backlight().
- */
-static void intel_lvds_set_backlight(struct drm_device *dev, int level)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 blc_pwm_ctl, reg;
-
-	if (HAS_PCH_SPLIT(dev))
-		reg = BLC_PWM_CPU_CTL;
-	else
-		reg = BLC_PWM_CTL;
-
-	blc_pwm_ctl = I915_READ(reg) & ~BACKLIGHT_DUTY_CYCLE_MASK;
-	I915_WRITE(reg, (blc_pwm_ctl |
-				 (level << BACKLIGHT_DUTY_CYCLE_SHIFT)));
+	return container_of(encoder, struct intel_lvds, base.base);
 }

-/**
- * Returns the maximum level of the backlight duty cycle field.
- */
-static u32 intel_lvds_get_max_backlight(struct drm_device *dev)
+static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 reg;
-
-	if (HAS_PCH_SPLIT(dev))
-		reg = BLC_PWM_PCH_CTL2;
-	else
-		reg = BLC_PWM_CTL;
-
-	return ((I915_READ(reg) & BACKLIGHT_MODULATION_FREQ_MASK) >>
-		BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
+	return container_of(intel_attached_encoder(connector),
+			    struct intel_lvds, base);
 }

 /**
  * Sets the power state for the panel.
  */
-static void intel_lvds_set_power(struct drm_device *dev, bool on)
+static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on)
 {
+	struct drm_device *dev = intel_lvds->base.base.dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 ctl_reg, status_reg, lvds_reg;
+	u32 ctl_reg, lvds_reg;

 	if (HAS_PCH_SPLIT(dev)) {
 		ctl_reg = PCH_PP_CONTROL;
-		status_reg = PCH_PP_STATUS;
 		lvds_reg = PCH_LVDS;
 	} else {
 		ctl_reg = PP_CONTROL;
-		status_reg = PP_STATUS;
 		lvds_reg = LVDS;
 	}

 	if (on) {
 		I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
-		POSTING_READ(lvds_reg);
-
-		I915_WRITE(ctl_reg, I915_READ(ctl_reg) |
-			   POWER_TARGET_ON);
-		if (wait_for(I915_READ(status_reg) & PP_ON, 1000, 0))
-			DRM_ERROR("timed out waiting to enable LVDS pipe");
-
-		intel_lvds_set_backlight(dev, dev_priv->backlight_duty_cycle);
+		I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
+		intel_panel_set_backlight(dev, dev_priv->backlight_level);
 	} else {
-		intel_lvds_set_backlight(dev, 0);
+		dev_priv->backlight_level = intel_panel_get_backlight(dev);
+
+		intel_panel_set_backlight(dev, 0);
+		I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
-		I915_WRITE(ctl_reg, I915_READ(ctl_reg) &
-			   ~POWER_TARGET_ON);
-		if (wait_for((I915_READ(status_reg) & PP_ON) == 0, 1000, 0))
-			DRM_ERROR("timed out waiting for LVDS pipe to turn off");
+		if (intel_lvds->pfit_control) {
+			if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
+				DRM_ERROR("timed out waiting for panel to power off\n");
+			I915_WRITE(PFIT_CONTROL, 0);
+			intel_lvds->pfit_control = 0;
+			intel_lvds->pfit_dirty = false;
+		}

 		I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
-		POSTING_READ(lvds_reg);
 	}
+	POSTING_READ(lvds_reg);
 }

 static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
 {
-	struct drm_device *dev = encoder->dev;
+	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);

 	if (mode == DRM_MODE_DPMS_ON)
-		intel_lvds_set_power(dev, true);
+		intel_lvds_set_power(intel_lvds, true);
 	else
-		intel_lvds_set_power(dev, false);
+		intel_lvds_set_power(intel_lvds, false);

 	/* XXX: We never power down the LVDS pairs. */
 }
@@ -146,16 +120,13 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
 static int intel_lvds_mode_valid(struct drm_connector *connector,
 				 struct drm_display_mode *mode)
 {
-	struct drm_device *dev = connector->dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_display_mode *fixed_mode = dev_priv->panel_fixed_mode;
+	struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
+	struct drm_display_mode *fixed_mode = intel_lvds->fixed_mode;
-	if (fixed_mode)	{
-		if (mode->hdisplay > fixed_mode->hdisplay)
-			return MODE_PANEL;
-		if (mode->vdisplay > fixed_mode->vdisplay)
-			return MODE_PANEL;
-	}
+	if (mode->hdisplay > fixed_mode->hdisplay)
+		return MODE_PANEL;
+	if (mode->vdisplay > fixed_mode->vdisplay)
+		return MODE_PANEL;

 	return MODE_OK;
 }
@@ -223,12 +194,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(encoder->crtc);
-	struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
+	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
 	struct drm_encoder *tmp_encoder;
 	u32 pfit_control = 0, pfit_pgm_ratios = 0, border = 0;

 	/* Should never happen!! */
-	if (!IS_I965G(dev) && intel_crtc->pipe == 0) {
+	if (INTEL_INFO(dev)->gen < 4 && intel_crtc->pipe == 0) {
 		DRM_ERROR("Can't support LVDS on pipe A\n");
 		DRM_ERROR("Can't support LVDS on pipe A\n");
 		return false;
 		return false;
 	}
 	}
@@ -241,9 +212,6 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 			return false;
 		}
 	}
-	/* If we don't have a panel mode, there is nothing we can do */
-	if (dev_priv->panel_fixed_mode == NULL)
-		return true;

 	/*
 	 * We have timings from the BIOS for the panel, put them in
@@ -251,7 +219,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 	 * with the panel scaling set up to source from the H/VDisplay
 	 * of the original mode.
 	 */
-	intel_fixed_panel_mode(dev_priv->panel_fixed_mode, adjusted_mode);
+	intel_fixed_panel_mode(intel_lvds->fixed_mode, adjusted_mode);

 	if (HAS_PCH_SPLIT(dev)) {
 		intel_pch_panel_fitting(dev, intel_lvds->fitting_mode,
@@ -260,8 +228,8 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 	}

 	/* Make sure pre-965s set dither correctly */
-	if (!IS_I965G(dev)) {
-		if (dev_priv->panel_wants_dither || dev_priv->lvds_dither)
+	if (INTEL_INFO(dev)->gen < 4) {
+		if (dev_priv->lvds_dither)
 			pfit_control |= PANEL_8TO6_DITHER_ENABLE;
 	}

@@ -271,7 +239,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 		goto out;

 	/* 965+ wants fuzzy fitting */
-	if (IS_I965G(dev))
+	if (INTEL_INFO(dev)->gen >= 4)
 		pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
 				 PFIT_FILTER_FUZZY);

@@ -297,7 +265,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,

 	case DRM_MODE_SCALE_ASPECT:
 		/* Scale but preserve the aspect ratio */
-		if (IS_I965G(dev)) {
+		if (INTEL_INFO(dev)->gen >= 4) {
 			u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
 			u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;

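Note on the DRM_MODE_SCALE_ASPECT branch above: scaled_width and scaled_height are the two cross-products of the panel and requested resolutions, so comparing them compares the two aspect ratios without any division. The sketch below is a stand-alone illustration with made-up resolutions; the pillarbox/letterbox labels follow the usual convention rather than the (not shown) remainder of the branch.

/* Cross-multiplication aspect test: compare panel_w*mode_h with
 * mode_w*panel_h instead of dividing the two aspect ratios. */
#include <stdio.h>

static const char *fit(int panel_w, int panel_h, int mode_w, int mode_h)
{
	long scaled_width  = (long)panel_w * mode_h;
	long scaled_height = (long)mode_w * panel_h;

	if (scaled_width > scaled_height)
		return "pillarbox (borders left/right)";
	if (scaled_width < scaled_height)
		return "letterbox (borders top/bottom)";
	return "same aspect, scale directly";
}

int main(void)
{
	printf("1024x768 on 1280x800 panel: %s\n", fit(1280, 800, 1024, 768));
	printf("1280x720 on 1280x800 panel: %s\n", fit(1280, 800, 1280, 720));
	printf("640x400  on 1280x800 panel: %s\n", fit(1280, 800, 640, 400));
	return 0;
}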
@@ -356,7 +324,7 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 		 * Fortunately this is all done for us in hw.
 		 */
 		pfit_control |= PFIT_ENABLE;
-		if (IS_I965G(dev))
+		if (INTEL_INFO(dev)->gen >= 4)
 			pfit_control |= PFIT_SCALING_AUTO;
 		else
 			pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
@@ -369,8 +337,12 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
 	}

 out:
-	intel_lvds->pfit_control = pfit_control;
-	intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
+	if (pfit_control != intel_lvds->pfit_control ||
+	    pfit_pgm_ratios != intel_lvds->pfit_pgm_ratios) {
+		intel_lvds->pfit_control = pfit_control;
+		intel_lvds->pfit_pgm_ratios = pfit_pgm_ratios;
+		intel_lvds->pfit_dirty = true;
+	}
 	dev_priv->lvds_border_bits = border;

 	/*
@@ -386,30 +358,60 @@ static void intel_lvds_prepare(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	u32 reg;
-
-	if (HAS_PCH_SPLIT(dev))
-		reg = BLC_PWM_CPU_CTL;
-	else
-		reg = BLC_PWM_CTL;
-
-	dev_priv->saveBLC_PWM_CTL = I915_READ(reg);
-	dev_priv->backlight_duty_cycle = (dev_priv->saveBLC_PWM_CTL &
-				       BACKLIGHT_DUTY_CYCLE_MASK);
+	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
+
+	dev_priv->backlight_level = intel_panel_get_backlight(dev);
+
+	/* We try to do the minimum that is necessary in order to unlock
+	 * the registers for mode setting.
+	 *
+	 * On Ironlake, this is quite simple as we just set the unlock key
+	 * and ignore all subtleties. (This may cause some issues...)
+	 *
+	 * Prior to Ironlake, we must disable the pipe if we want to adjust
+	 * the panel fitter. However at all other times we can just reset
+	 * the registers regardless.
+	 */
-	intel_lvds_set_power(dev, false);
+	if (HAS_PCH_SPLIT(dev)) {
+		I915_WRITE(PCH_PP_CONTROL,
+			   I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
+	} else if (intel_lvds->pfit_dirty) {
+		I915_WRITE(PP_CONTROL,
+			   (I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS)
+			   & ~POWER_TARGET_ON);
+	} else {
+		I915_WRITE(PP_CONTROL,
+			   I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
+	}
 }

-static void intel_lvds_commit( struct drm_encoder *encoder)
+static void intel_lvds_commit(struct drm_encoder *encoder)
 {
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-	if (dev_priv->backlight_duty_cycle == 0)
-		dev_priv->backlight_duty_cycle =
-			intel_lvds_get_max_backlight(dev);
+	if (dev_priv->backlight_level == 0)
+		dev_priv->backlight_level = intel_panel_get_max_backlight(dev);
+
+	/* Undo any unlocking done in prepare to prevent accidental
+	 * adjustment of the registers.
+	 */
+	if (HAS_PCH_SPLIT(dev)) {
+		u32 val = I915_READ(PCH_PP_CONTROL);
+		if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
+			I915_WRITE(PCH_PP_CONTROL, val & 0x3);
+	} else {
+		u32 val = I915_READ(PP_CONTROL);
+		if ((val & PANEL_UNLOCK_REGS) == PANEL_UNLOCK_REGS)
+			I915_WRITE(PP_CONTROL, val & 0x3);
+	}
-	intel_lvds_set_power(dev, true);
+	/* Always do a full power on as we do not know what state
+	 * we were left in.
+	 */
+	intel_lvds_set_power(intel_lvds, true);
 }

 static void intel_lvds_mode_set(struct drm_encoder *encoder,
@@ -418,7 +420,7 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
 {
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
+	struct intel_lvds *intel_lvds = to_intel_lvds(encoder);

 	/*
 	 * The LVDS pin pair will already have been turned on in the
@@ -429,13 +431,23 @@ static void intel_lvds_mode_set(struct drm_encoder *encoder,
 	if (HAS_PCH_SPLIT(dev))
 		return;

+	if (!intel_lvds->pfit_dirty)
+		return;
+
 	/*
 	 * Enable automatic panel scaling so that non-native modes fill the
 	 * screen.  Should be enabled before the pipe is enabled, according to
 	 * register description and PRM.
 	 */
+	DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
+		      intel_lvds->pfit_control,
+		      intel_lvds->pfit_pgm_ratios);
+	if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
+		DRM_ERROR("timed out waiting for panel to power off\n");
+
 	I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
 	I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
+	intel_lvds->pfit_dirty = false;
 }

 /**
@@ -465,38 +477,22 @@ intel_lvds_detect(struct drm_connector *connector, bool force)
  */
 static int intel_lvds_get_modes(struct drm_connector *connector)
 {
+	struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
 	struct drm_device *dev = connector->dev;
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_encoder *intel_encoder = enc_to_intel_encoder(encoder);
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int ret = 0;
-
-	if (dev_priv->lvds_edid_good) {
-		ret = intel_ddc_get_modes(connector, intel_encoder->ddc_bus);
+	struct drm_display_mode *mode;
-		if (ret)
-			return ret;
+	if (intel_lvds->edid) {
+		drm_mode_connector_update_edid_property(connector,
+							intel_lvds->edid);
+		return drm_add_edid_modes(connector, intel_lvds->edid);
 	}

-	/* Didn't get an EDID, so
-	 * Set wide sync ranges so we get all modes
-	 * handed to valid_mode for checking
-	 */
-	connector->display_info.min_vfreq = 0;
-	connector->display_info.max_vfreq = 200;
-	connector->display_info.min_hfreq = 0;
-	connector->display_info.max_hfreq = 200;
-
-	if (dev_priv->panel_fixed_mode != NULL) {
-		struct drm_display_mode *mode;
-
-		mode = drm_mode_duplicate(dev, dev_priv->panel_fixed_mode);
-		drm_mode_probed_add(connector, mode);
-
-		return 1;
-	}
+	mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
+	if (mode == 0)
+		return 0;
-	return 0;
+	drm_mode_probed_add(connector, mode);
+	return 1;
 }

 static int intel_no_modeset_on_lid_dmi_callback(const struct dmi_system_id *id)
@@ -587,18 +583,17 @@ static int intel_lvds_set_property(struct drm_connector *connector,
 				   struct drm_property *property,
 				   uint64_t value)
 {
+	struct intel_lvds *intel_lvds = intel_attached_lvds(connector);
 	struct drm_device *dev = connector->dev;

-	if (property == dev->mode_config.scaling_mode_property &&
-				connector->encoder) {
-		struct drm_crtc *crtc = connector->encoder->crtc;
-		struct drm_encoder *encoder = connector->encoder;
-		struct intel_lvds *intel_lvds = enc_to_intel_lvds(encoder);
+	if (property == dev->mode_config.scaling_mode_property) {
+		struct drm_crtc *crtc = intel_lvds->base.base.crtc;

 		if (value == DRM_MODE_SCALE_NONE) {
 			DRM_DEBUG_KMS("no scaling not supported\n");
-			return 0;
+			return -EINVAL;
 		}
+
 		if (intel_lvds->fitting_mode == value) {
 			/* the LVDS scaling property is not changed */
 			return 0;
@@ -628,7 +623,7 @@ static const struct drm_encoder_helper_funcs intel_lvds_helper_funcs = {
 static const struct drm_connector_helper_funcs intel_lvds_connector_helper_funcs = {
 	.get_modes = intel_lvds_get_modes,
 	.mode_valid = intel_lvds_mode_valid,
-	.best_encoder = intel_attached_encoder,
+	.best_encoder = intel_best_encoder,
 };

 static const struct drm_connector_funcs intel_lvds_connector_funcs = {
@@ -726,16 +721,14 @@ static const struct dmi_system_id intel_no_lvds[] = {
  * Find the reduced downclock for LVDS in EDID.
  * Find the reduced downclock for LVDS in EDID.
  */
  */
 static void intel_find_lvds_downclock(struct drm_device *dev,
 static void intel_find_lvds_downclock(struct drm_device *dev,
-				struct drm_connector *connector)
+				      struct drm_display_mode *fixed_mode,
+				      struct drm_connector *connector)
 {
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct drm_display_mode *scan, *panel_fixed_mode;
+	struct drm_display_mode *scan;
 	int temp_downclock;
 	int temp_downclock;
 
 
-	panel_fixed_mode = dev_priv->panel_fixed_mode;
-	temp_downclock = panel_fixed_mode->clock;
-
-	mutex_lock(&dev->mode_config.mutex);
+	temp_downclock = fixed_mode->clock;
 	list_for_each_entry(scan, &connector->probed_modes, head) {
 	list_for_each_entry(scan, &connector->probed_modes, head) {
 		/*
 		/*
 		 * If one mode has the same resolution with the fixed_panel
 		 * If one mode has the same resolution with the fixed_panel
@@ -744,14 +737,14 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
 		 * case we can set the different FPx0/1 to dynamically select
 		 * case we can set the different FPx0/1 to dynamically select
 		 * between low and high frequency.
 		 * between low and high frequency.
 		 */
 		 */
-		if (scan->hdisplay == panel_fixed_mode->hdisplay &&
-			scan->hsync_start == panel_fixed_mode->hsync_start &&
-			scan->hsync_end == panel_fixed_mode->hsync_end &&
-			scan->htotal == panel_fixed_mode->htotal &&
-			scan->vdisplay == panel_fixed_mode->vdisplay &&
-			scan->vsync_start == panel_fixed_mode->vsync_start &&
-			scan->vsync_end == panel_fixed_mode->vsync_end &&
-			scan->vtotal == panel_fixed_mode->vtotal) {
+		if (scan->hdisplay == fixed_mode->hdisplay &&
+		    scan->hsync_start == fixed_mode->hsync_start &&
+		    scan->hsync_end == fixed_mode->hsync_end &&
+		    scan->htotal == fixed_mode->htotal &&
+		    scan->vdisplay == fixed_mode->vdisplay &&
+		    scan->vsync_start == fixed_mode->vsync_start &&
+		    scan->vsync_end == fixed_mode->vsync_end &&
+		    scan->vtotal == fixed_mode->vtotal) {
 			if (scan->clock < temp_downclock) {
 			if (scan->clock < temp_downclock) {
 				/*
 				/*
 				 * The downclock is already found. But we
 				 * The downclock is already found. But we
@@ -761,17 +754,14 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
 			}
 			}
 		}
 		}
 	}
 	}
-	mutex_unlock(&dev->mode_config.mutex);
-	if (temp_downclock < panel_fixed_mode->clock &&
-	    i915_lvds_downclock) {
+	if (temp_downclock < fixed_mode->clock && i915_lvds_downclock) {
 		/* We found the downclock for LVDS. */
 		/* We found the downclock for LVDS. */
 		dev_priv->lvds_downclock_avail = 1;
 		dev_priv->lvds_downclock_avail = 1;
 		dev_priv->lvds_downclock = temp_downclock;
 		dev_priv->lvds_downclock = temp_downclock;
 		DRM_DEBUG_KMS("LVDS downclock is found in EDID. "
 		DRM_DEBUG_KMS("LVDS downclock is found in EDID. "
-				"Normal clock %dKhz, downclock %dKhz\n",
-				panel_fixed_mode->clock, temp_downclock);
+			      "Normal clock %dKhz, downclock %dKhz\n",
+			      fixed_mode->clock, temp_downclock);
 	}
 	}
-	return;
 }
 }
 
 
 /*
 /*
@@ -780,38 +770,67 @@ static void intel_find_lvds_downclock(struct drm_device *dev,
  * If it is present, return 1.
  * If it is present, return 1.
  * If it is not present, return false.
  * If it is not present, return false.
  * If no child dev is parsed from VBT, it assumes that the LVDS is present.
  * If no child dev is parsed from VBT, it assumes that the LVDS is present.
- * Note: The addin_offset should also be checked for LVDS panel.
- * Only when it is non-zero, it is assumed that it is present.
  */
  */
-static int lvds_is_present_in_vbt(struct drm_device *dev)
+static bool lvds_is_present_in_vbt(struct drm_device *dev,
+				   u8 *i2c_pin)
 {
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct child_device_config *p_child;
-	int i, ret;
+	int i;
 
 
 	if (!dev_priv->child_dev_num)
 	if (!dev_priv->child_dev_num)
-		return 1;
+		return true;
 
 
-	ret = 0;
 	for (i = 0; i < dev_priv->child_dev_num; i++) {
 	for (i = 0; i < dev_priv->child_dev_num; i++) {
-		p_child = dev_priv->child_dev + i;
-		/*
-		 * If the device type is not LFP, continue.
-		 * If the device type is 0x22, it is also regarded as LFP.
+		struct child_device_config *child = dev_priv->child_dev + i;
+
+		/* If the device type is not LFP, continue.
+		 * We have to check both the new identifiers as well as the
+		 * old for compatibility with some BIOSes.
 		 */
 		 */
-		if (p_child->device_type != DEVICE_TYPE_INT_LFP &&
-			p_child->device_type != DEVICE_TYPE_LFP)
+		if (child->device_type != DEVICE_TYPE_INT_LFP &&
+		    child->device_type != DEVICE_TYPE_LFP)
 			continue;
 			continue;
 
 
-		/* The addin_offset should be checked. Only when it is
-		 * non-zero, it is regarded as present.
+		if (child->i2c_pin)
+		    *i2c_pin = child->i2c_pin;
+
+		/* However, we cannot trust the BIOS writers to populate
+		 * the VBT correctly.  Since LVDS requires additional
+		 * information from AIM blocks, a non-zero addin offset is
+		 * a good indicator that the LVDS is actually present.
 		 */
 		 */
-		if (p_child->addin_offset) {
-			ret = 1;
-			break;
-		}
+		if (child->addin_offset)
+			return true;
+
+		/* But even then some BIOS writers perform some black magic
+		 * and instantiate the device without reference to any
+		 * additional data.  Trust that if the VBT was written into
+		 * the OpRegion then they have validated the LVDS's existence.
+		 */
+		if (dev_priv->opregion.vbt)
+			return true;
 	}
 	}
-	return ret;
+
+	return false;
+}
+
+static bool intel_lvds_ddc_probe(struct drm_device *dev, u8 pin)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u8 buf = 0;
+	struct i2c_msg msgs[] = {
+		{
+			.addr = 0xA0,
+			.flags = 0,
+			.len = 1,
+			.buf = &buf,
+		},
+	};
+	struct i2c_adapter *i2c = &dev_priv->gmbus[pin].adapter;
+	/* XXX this only appears to work when using GMBUS */
+	if (intel_gmbus_is_forced_bit(i2c))
+		return true;
+	return i2c_transfer(i2c, msgs, 1) == 1;
 }
 }
 
 
 /**
 /**
@@ -832,13 +851,15 @@ void intel_lvds_init(struct drm_device *dev)
 	struct drm_display_mode *scan; /* *modes, *bios_mode; */
 	struct drm_display_mode *scan; /* *modes, *bios_mode; */
 	struct drm_crtc *crtc;
 	struct drm_crtc *crtc;
 	u32 lvds;
 	u32 lvds;
-	int pipe, gpio = GPIOC;
+	int pipe;
+	u8 pin;
 
 
 	/* Skip init on machines we know falsely report LVDS */
 	/* Skip init on machines we know falsely report LVDS */
 	if (dmi_check_system(intel_no_lvds))
 	if (dmi_check_system(intel_no_lvds))
 		return;
 		return;
 
 
-	if (!lvds_is_present_in_vbt(dev)) {
+	pin = GMBUS_PORT_PANEL;
+	if (!lvds_is_present_in_vbt(dev, &pin)) {
 		DRM_DEBUG_KMS("LVDS is not present in VBT\n");
 		DRM_DEBUG_KMS("LVDS is not present in VBT\n");
 		return;
 		return;
 	}
 	}
@@ -846,11 +867,15 @@ void intel_lvds_init(struct drm_device *dev)
 	if (HAS_PCH_SPLIT(dev)) {
 	if (HAS_PCH_SPLIT(dev)) {
 		if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
 		if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
 			return;
 			return;
-		if (dev_priv->edp_support) {
+		if (dev_priv->edp.support) {
 			DRM_DEBUG_KMS("disable LVDS for eDP support\n");
 			DRM_DEBUG_KMS("disable LVDS for eDP support\n");
 			return;
 			return;
 		}
 		}
-		gpio = PCH_GPIOC;
+	}
+
+	if (!intel_lvds_ddc_probe(dev, pin)) {
+		DRM_DEBUG_KMS("LVDS did not respond to DDC probe\n");
+		return;
 	}
 	}
 
 
 	intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
 	intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
@@ -864,16 +889,20 @@ void intel_lvds_init(struct drm_device *dev)
 		return;
 		return;
 	}
 	}
 
 
+	if (!HAS_PCH_SPLIT(dev)) {
+		intel_lvds->pfit_control = I915_READ(PFIT_CONTROL);
+	}
+
 	intel_encoder = &intel_lvds->base;
 	intel_encoder = &intel_lvds->base;
-	encoder = &intel_encoder->enc;
+	encoder = &intel_encoder->base;
 	connector = &intel_connector->base;
 	connector = &intel_connector->base;
 	drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
 	drm_connector_init(dev, &intel_connector->base, &intel_lvds_connector_funcs,
 			   DRM_MODE_CONNECTOR_LVDS);
 			   DRM_MODE_CONNECTOR_LVDS);
 
 
-	drm_encoder_init(dev, &intel_encoder->enc, &intel_lvds_enc_funcs,
+	drm_encoder_init(dev, &intel_encoder->base, &intel_lvds_enc_funcs,
 			 DRM_MODE_ENCODER_LVDS);
 			 DRM_MODE_ENCODER_LVDS);
 
 
-	drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
+	intel_connector_attach_encoder(intel_connector, intel_encoder);
 	intel_encoder->type = INTEL_OUTPUT_LVDS;
 	intel_encoder->type = INTEL_OUTPUT_LVDS;
 
 
 	intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
 	intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
@@ -904,43 +933,41 @@ void intel_lvds_init(struct drm_device *dev)
 	 *    if closed, act like it's not there for now
 	 *    if closed, act like it's not there for now
 	 */
 	 */
 
 
-	/* Set up the DDC bus. */
-	intel_encoder->ddc_bus = intel_i2c_create(dev, gpio, "LVDSDDC_C");
-	if (!intel_encoder->ddc_bus) {
-		dev_printk(KERN_ERR, &dev->pdev->dev, "DDC bus registration "
-			   "failed.\n");
-		goto failed;
-	}
-
 	/*
 	/*
 	 * Attempt to get the fixed panel mode from DDC.  Assume that the
 	 * Attempt to get the fixed panel mode from DDC.  Assume that the
 	 * preferred mode is the right one.
 	 * preferred mode is the right one.
 	 */
 	 */
-	dev_priv->lvds_edid_good = true;
+	intel_lvds->edid = drm_get_edid(connector,
+					&dev_priv->gmbus[pin].adapter);
 
 
-	if (!intel_ddc_get_modes(connector, intel_encoder->ddc_bus))
-		dev_priv->lvds_edid_good = false;
+	if (!intel_lvds->edid) {
+		/* Didn't get an EDID, so
+		 * Set wide sync ranges so we get all modes
+		 * handed to valid_mode for checking
+		 */
+		connector->display_info.min_vfreq = 0;
+		connector->display_info.max_vfreq = 200;
+		connector->display_info.min_hfreq = 0;
+		connector->display_info.max_hfreq = 200;
+	}
 
 
 	list_for_each_entry(scan, &connector->probed_modes, head) {
 	list_for_each_entry(scan, &connector->probed_modes, head) {
-		mutex_lock(&dev->mode_config.mutex);
 		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
 		if (scan->type & DRM_MODE_TYPE_PREFERRED) {
-			dev_priv->panel_fixed_mode =
+			intel_lvds->fixed_mode =
 				drm_mode_duplicate(dev, scan);
 				drm_mode_duplicate(dev, scan);
-			mutex_unlock(&dev->mode_config.mutex);
-			intel_find_lvds_downclock(dev, connector);
+			intel_find_lvds_downclock(dev,
+						  intel_lvds->fixed_mode,
+						  connector);
 			goto out;
 			goto out;
 		}
 		}
-		mutex_unlock(&dev->mode_config.mutex);
 	}
 	}
 
 
 	/* Failed to get EDID, what about VBT? */
 	/* Failed to get EDID, what about VBT? */
 	if (dev_priv->lfp_lvds_vbt_mode) {
 	if (dev_priv->lfp_lvds_vbt_mode) {
-		mutex_lock(&dev->mode_config.mutex);
-		dev_priv->panel_fixed_mode =
+		intel_lvds->fixed_mode =
 			drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
 			drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
-		mutex_unlock(&dev->mode_config.mutex);
-		if (dev_priv->panel_fixed_mode) {
-			dev_priv->panel_fixed_mode->type |=
+		if (intel_lvds->fixed_mode) {
+			intel_lvds->fixed_mode->type |=
 				DRM_MODE_TYPE_PREFERRED;
 				DRM_MODE_TYPE_PREFERRED;
 			goto out;
 			goto out;
 		}
 		}
@@ -958,19 +985,19 @@ void intel_lvds_init(struct drm_device *dev)
 
 
 	lvds = I915_READ(LVDS);
 	lvds = I915_READ(LVDS);
 	pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
 	pipe = (lvds & LVDS_PIPEB_SELECT) ? 1 : 0;
-	crtc = intel_get_crtc_from_pipe(dev, pipe);
+	crtc = intel_get_crtc_for_pipe(dev, pipe);
 
 
 	if (crtc && (lvds & LVDS_PORT_EN)) {
 	if (crtc && (lvds & LVDS_PORT_EN)) {
-		dev_priv->panel_fixed_mode = intel_crtc_mode_get(dev, crtc);
-		if (dev_priv->panel_fixed_mode) {
-			dev_priv->panel_fixed_mode->type |=
+		intel_lvds->fixed_mode = intel_crtc_mode_get(dev, crtc);
+		if (intel_lvds->fixed_mode) {
+			intel_lvds->fixed_mode->type |=
 				DRM_MODE_TYPE_PREFERRED;
 				DRM_MODE_TYPE_PREFERRED;
 			goto out;
 			goto out;
 		}
 		}
 	}
 	}
 
 
 	/* If we still don't have a mode after all that, give up. */
 	/* If we still don't have a mode after all that, give up. */
-	if (!dev_priv->panel_fixed_mode)
+	if (!intel_lvds->fixed_mode)
 		goto failed;
 		goto failed;
 
 
 out:
 out:
@@ -997,8 +1024,6 @@ out:
 
 
 failed:
 failed:
 	DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
 	DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
-	if (intel_encoder->ddc_bus)
-		intel_i2c_destroy(intel_encoder->ddc_bus);
 	drm_connector_cleanup(connector);
 	drm_connector_cleanup(connector);
 	drm_encoder_cleanup(encoder);
 	drm_encoder_cleanup(encoder);
 	kfree(intel_lvds);
 	kfree(intel_lvds);

+ 4 - 12
drivers/gpu/drm/i915/intel_modes.c

@@ -1,6 +1,6 @@
 /*
 /*
  * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
  * Copyright (c) 2007 Dave Airlie <airlied@linux.ie>
- * Copyright (c) 2007 Intel Corporation
+ * Copyright (c) 2007, 2010 Intel Corporation
  *   Jesse Barnes <jesse.barnes@intel.com>
  *   Jesse Barnes <jesse.barnes@intel.com>
  *
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  * Permission is hereby granted, free of charge, to any person obtaining a
@@ -34,11 +34,11 @@
  * intel_ddc_probe
  * intel_ddc_probe
  *
  *
  */
  */
-bool intel_ddc_probe(struct intel_encoder *intel_encoder)
+bool intel_ddc_probe(struct intel_encoder *intel_encoder, int ddc_bus)
 {
 {
+	struct drm_i915_private *dev_priv = intel_encoder->base.dev->dev_private;
 	u8 out_buf[] = { 0x0, 0x0};
 	u8 out_buf[] = { 0x0, 0x0};
 	u8 buf[2];
 	u8 buf[2];
-	int ret;
 	struct i2c_msg msgs[] = {
 	struct i2c_msg msgs[] = {
 		{
 		{
 			.addr = 0x50,
 			.addr = 0x50,
@@ -54,13 +54,7 @@ bool intel_ddc_probe(struct intel_encoder *intel_encoder)
 		}
 		}
 	};
 	};
 
 
-	intel_i2c_quirk_set(intel_encoder->enc.dev, true);
-	ret = i2c_transfer(intel_encoder->ddc_bus, msgs, 2);
-	intel_i2c_quirk_set(intel_encoder->enc.dev, false);
-	if (ret == 2)
-		return true;
-
-	return false;
+	return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 2) == 2;
 }
 }
 
 
 /**
 /**
@@ -76,9 +70,7 @@ int intel_ddc_get_modes(struct drm_connector *connector,
 	struct edid *edid;
 	struct edid *edid;
 	int ret = 0;
 	int ret = 0;
 
 
-	intel_i2c_quirk_set(connector->dev, true);
 	edid = drm_get_edid(connector, adapter);
 	edid = drm_get_edid(connector, adapter);
-	intel_i2c_quirk_set(connector->dev, false);
 	if (edid) {
 	if (edid) {
 		drm_mode_connector_update_edid_property(connector, edid);
 		drm_mode_connector_update_edid_property(connector, edid);
 		ret = drm_add_edid_modes(connector, edid);
 		ret = drm_add_edid_modes(connector, edid);

+ 68 - 113
drivers/gpu/drm/i915/i915_opregion.c → drivers/gpu/drm/i915/intel_opregion.c

@@ -31,17 +31,16 @@
 #include "drmP.h"
 #include "drmP.h"
 #include "i915_drm.h"
 #include "i915_drm.h"
 #include "i915_drv.h"
 #include "i915_drv.h"
+#include "intel_drv.h"
 
 
 #define PCI_ASLE 0xe4
 #define PCI_ASLE 0xe4
-#define PCI_LBPC 0xf4
 #define PCI_ASLS 0xfc
 #define PCI_ASLS 0xfc
 
 
-#define OPREGION_SZ            (8*1024)
 #define OPREGION_HEADER_OFFSET 0
 #define OPREGION_HEADER_OFFSET 0
 #define OPREGION_ACPI_OFFSET   0x100
 #define OPREGION_ACPI_OFFSET   0x100
 #define OPREGION_SWSCI_OFFSET  0x200
 #define OPREGION_SWSCI_OFFSET  0x200
 #define OPREGION_ASLE_OFFSET   0x300
 #define OPREGION_ASLE_OFFSET   0x300
-#define OPREGION_VBT_OFFSET    0x1000
+#define OPREGION_VBT_OFFSET    0x400
 
 
 #define OPREGION_SIGNATURE "IntelGraphicsMem"
 #define OPREGION_SIGNATURE "IntelGraphicsMem"
 #define MBOX_ACPI      (1<<0)
 #define MBOX_ACPI      (1<<0)
@@ -143,40 +142,22 @@ struct opregion_asle {
 #define ACPI_DIGITAL_OUTPUT (3<<8)
 #define ACPI_DIGITAL_OUTPUT (3<<8)
 #define ACPI_LVDS_OUTPUT (4<<8)
 #define ACPI_LVDS_OUTPUT (4<<8)
 
 
+#ifdef CONFIG_ACPI
 static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 {
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct opregion_asle *asle = dev_priv->opregion.asle;
 	struct opregion_asle *asle = dev_priv->opregion.asle;
-	u32 blc_pwm_ctl, blc_pwm_ctl2;
-	u32 max_backlight, level, shift;
+	u32 max;
 
 
 	if (!(bclp & ASLE_BCLP_VALID))
 	if (!(bclp & ASLE_BCLP_VALID))
 		return ASLE_BACKLIGHT_FAILED;
 		return ASLE_BACKLIGHT_FAILED;
 
 
 	bclp &= ASLE_BCLP_MSK;
 	bclp &= ASLE_BCLP_MSK;
-	if (bclp < 0 || bclp > 255)
+	if (bclp > 255)
 		return ASLE_BACKLIGHT_FAILED;
 		return ASLE_BACKLIGHT_FAILED;
 
 
-	blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
-	blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2);
-
-	if (IS_I965G(dev) && (blc_pwm_ctl2 & BLM_COMBINATION_MODE))
-		pci_write_config_dword(dev->pdev, PCI_LBPC, bclp);
-	else {
-		if (IS_PINEVIEW(dev)) {
-			blc_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
-			max_backlight = (blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >> 
-					BACKLIGHT_MODULATION_FREQ_SHIFT;
-			shift = BACKLIGHT_DUTY_CYCLE_SHIFT + 1;
-		} else {
-			blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
-			max_backlight = ((blc_pwm_ctl & BACKLIGHT_MODULATION_FREQ_MASK) >> 
-					BACKLIGHT_MODULATION_FREQ_SHIFT) * 2;
-			shift = BACKLIGHT_DUTY_CYCLE_SHIFT;
-		}
-		level = (bclp * max_backlight) / 255;
-		I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | (level << shift));
-	}
+	max = intel_panel_get_max_backlight(dev);
+	intel_panel_set_backlight(dev, bclp * max / 255);
 	asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
 	asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
 
 
 	return 0;
 	return 0;
@@ -211,7 +192,7 @@ static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
 	return 0;
 	return 0;
 }
 }
 
 
-void opregion_asle_intr(struct drm_device *dev)
+void intel_opregion_asle_intr(struct drm_device *dev)
 {
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct opregion_asle *asle = dev_priv->opregion.asle;
 	struct opregion_asle *asle = dev_priv->opregion.asle;
@@ -243,37 +224,8 @@ void opregion_asle_intr(struct drm_device *dev)
 	asle->aslc = asle_stat;
 	asle->aslc = asle_stat;
 }
 }
 
 
-static u32 asle_set_backlight_ironlake(struct drm_device *dev, u32 bclp)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct opregion_asle *asle = dev_priv->opregion.asle;
-	u32 cpu_pwm_ctl, pch_pwm_ctl2;
-	u32 max_backlight, level;
-
-	if (!(bclp & ASLE_BCLP_VALID))
-		return ASLE_BACKLIGHT_FAILED;
-
-	bclp &= ASLE_BCLP_MSK;
-	if (bclp < 0 || bclp > 255)
-		return ASLE_BACKLIGHT_FAILED;
-
-	cpu_pwm_ctl = I915_READ(BLC_PWM_CPU_CTL);
-	pch_pwm_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
-	/* get the max PWM frequency */
-	max_backlight = (pch_pwm_ctl2 >> 16) & BACKLIGHT_DUTY_CYCLE_MASK;
-	/* calculate the expected PMW frequency */
-	level = (bclp * max_backlight) / 255;
-	/* reserve the high 16 bits */
-	cpu_pwm_ctl &= ~(BACKLIGHT_DUTY_CYCLE_MASK);
-	/* write the updated PWM frequency */
-	I915_WRITE(BLC_PWM_CPU_CTL, cpu_pwm_ctl | level);
-
-	asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
-
-	return 0;
-}
-
-void ironlake_opregion_gse_intr(struct drm_device *dev)
+/* Only present on Ironlake+ */
+void intel_opregion_gse_intr(struct drm_device *dev)
 {
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct opregion_asle *asle = dev_priv->opregion.asle;
 	struct opregion_asle *asle = dev_priv->opregion.asle;
@@ -296,7 +248,7 @@ void ironlake_opregion_gse_intr(struct drm_device *dev)
 	}
 	}
 
 
 	if (asle_req & ASLE_SET_BACKLIGHT)
 	if (asle_req & ASLE_SET_BACKLIGHT)
-		asle_stat |= asle_set_backlight_ironlake(dev, asle->bclp);
+		asle_stat |= asle_set_backlight(dev, asle->bclp);
 
 
 	if (asle_req & ASLE_SET_PFIT) {
 	if (asle_req & ASLE_SET_PFIT) {
 		DRM_DEBUG_DRIVER("Pfit is not supported\n");
 		DRM_DEBUG_DRIVER("Pfit is not supported\n");
@@ -315,7 +267,7 @@ void ironlake_opregion_gse_intr(struct drm_device *dev)
 #define ASLE_PFIT_EN   (1<<2)
 #define ASLE_PFIT_EN   (1<<2)
 #define ASLE_PFMB_EN   (1<<3)
 #define ASLE_PFMB_EN   (1<<3)
 
 
-void opregion_enable_asle(struct drm_device *dev)
+void intel_opregion_enable_asle(struct drm_device *dev)
 {
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct opregion_asle *asle = dev_priv->opregion.asle;
 	struct opregion_asle *asle = dev_priv->opregion.asle;
@@ -464,7 +416,58 @@ blind_set:
 	goto end;
 	goto end;
 }
 }
 
 
-int intel_opregion_init(struct drm_device *dev, int resume)
+void intel_opregion_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_opregion *opregion = &dev_priv->opregion;
+
+	if (!opregion->header)
+		return;
+
+	if (opregion->acpi) {
+		if (drm_core_check_feature(dev, DRIVER_MODESET))
+			intel_didl_outputs(dev);
+
+		/* Notify BIOS we are ready to handle ACPI video ext notifs.
+		 * Right now, all the events are handled by the ACPI video module.
+		 * We don't actually need to do anything with them. */
+		opregion->acpi->csts = 0;
+		opregion->acpi->drdy = 1;
+
+		system_opregion = opregion;
+		register_acpi_notifier(&intel_opregion_notifier);
+	}
+
+	if (opregion->asle)
+		intel_opregion_enable_asle(dev);
+}
+
+void intel_opregion_fini(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_opregion *opregion = &dev_priv->opregion;
+
+	if (!opregion->header)
+		return;
+
+	if (opregion->acpi) {
+		opregion->acpi->drdy = 0;
+
+		system_opregion = NULL;
+		unregister_acpi_notifier(&intel_opregion_notifier);
+	}
+
+	/* just clear all opregion memory pointers now */
+	iounmap(opregion->header);
+	opregion->header = NULL;
+	opregion->acpi = NULL;
+	opregion->swsci = NULL;
+	opregion->asle = NULL;
+	opregion->vbt = NULL;
+}
+#endif
+
+int intel_opregion_setup(struct drm_device *dev)
 {
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_opregion *opregion = &dev_priv->opregion;
 	struct intel_opregion *opregion = &dev_priv->opregion;
@@ -479,29 +482,23 @@ int intel_opregion_init(struct drm_device *dev, int resume)
 		return -ENOTSUPP;
 		return -ENOTSUPP;
 	}
 	}
 
 
-	base = ioremap(asls, OPREGION_SZ);
+	base = ioremap(asls, OPREGION_SIZE);
 	if (!base)
 	if (!base)
 		return -ENOMEM;
 		return -ENOMEM;
 
 
-	opregion->header = base;
-	if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) {
+	if (memcmp(base, OPREGION_SIGNATURE, 16)) {
 		DRM_DEBUG_DRIVER("opregion signature mismatch\n");
 		DRM_DEBUG_DRIVER("opregion signature mismatch\n");
 		err = -EINVAL;
 		err = -EINVAL;
 		goto err_out;
 		goto err_out;
 	}
 	}
+	opregion->header = base;
+	opregion->vbt = base + OPREGION_VBT_OFFSET;
 
 
 	mboxes = opregion->header->mboxes;
 	mboxes = opregion->header->mboxes;
 	if (mboxes & MBOX_ACPI) {
 	if (mboxes & MBOX_ACPI) {
 		DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
 		DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
 		opregion->acpi = base + OPREGION_ACPI_OFFSET;
 		opregion->acpi = base + OPREGION_ACPI_OFFSET;
-		if (drm_core_check_feature(dev, DRIVER_MODESET))
-			intel_didl_outputs(dev);
-	} else {
-		DRM_DEBUG_DRIVER("Public ACPI methods not supported\n");
-		err = -ENOTSUPP;
-		goto err_out;
 	}
 	}
-	opregion->enabled = 1;
 
 
 	if (mboxes & MBOX_SWSCI) {
 	if (mboxes & MBOX_SWSCI) {
 		DRM_DEBUG_DRIVER("SWSCI supported\n");
 		DRM_DEBUG_DRIVER("SWSCI supported\n");
@@ -510,53 +507,11 @@ int intel_opregion_init(struct drm_device *dev, int resume)
 	if (mboxes & MBOX_ASLE) {
 	if (mboxes & MBOX_ASLE) {
 		DRM_DEBUG_DRIVER("ASLE supported\n");
 		DRM_DEBUG_DRIVER("ASLE supported\n");
 		opregion->asle = base + OPREGION_ASLE_OFFSET;
 		opregion->asle = base + OPREGION_ASLE_OFFSET;
-		opregion_enable_asle(dev);
 	}
 	}
 
 
-	if (!resume)
-		acpi_video_register();
-
-
-	/* Notify BIOS we are ready to handle ACPI video ext notifs.
-	 * Right now, all the events are handled by the ACPI video module.
-	 * We don't actually need to do anything with them. */
-	opregion->acpi->csts = 0;
-	opregion->acpi->drdy = 1;
-
-	system_opregion = opregion;
-	register_acpi_notifier(&intel_opregion_notifier);
-
 	return 0;
 	return 0;
 
 
 err_out:
 err_out:
 	iounmap(opregion->header);
 	iounmap(opregion->header);
-	opregion->header = NULL;
-	acpi_video_register();
 	return err;
 	return err;
 }
 }
-
-void intel_opregion_free(struct drm_device *dev, int suspend)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	struct intel_opregion *opregion = &dev_priv->opregion;
-
-	if (!opregion->enabled)
-		return;
-
-	if (!suspend)
-		acpi_video_unregister();
-
-	opregion->acpi->drdy = 0;
-
-	system_opregion = NULL;
-	unregister_acpi_notifier(&intel_opregion_notifier);
-
-	/* just clear all opregion memory pointers now */
-	iounmap(opregion->header);
-	opregion->header = NULL;
-	opregion->acpi = NULL;
-	opregion->swsci = NULL;
-	opregion->asle = NULL;
-
-	opregion->enabled = 0;
-}
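Note on the simplified asle_set_backlight() earlier in this file's diff: the ACPI request bclp (0-255) is now rescaled onto whatever maximum intel_panel_get_max_backlight() reports, and echoed back to the firmware as a 0-100 value in cblv. The sketch below is a stand-alone arithmetic illustration only; the maximum PWM value is invented, not read from hardware.

/* asle_set_backlight() arithmetic: level = bclp*max/255, cblv = bclp*0x64/0xff */
#include <stdio.h>

int main(void)
{
	unsigned int max = 4648;	/* hypothetical max PWM */
	unsigned int bclp;

	for (bclp = 0; bclp <= 255; bclp += 85)
		printf("bclp %3u -> level %4u, cblv %3u%%\n",
		       bclp, bclp * max / 255, bclp * 0x64 / 0xff);
	return 0;
}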

File diff suppressed because it is too large
+ 373 - 364
drivers/gpu/drm/i915/intel_overlay.c


+ 109 - 0
drivers/gpu/drm/i915/intel_panel.c

@@ -30,6 +30,8 @@
 
 
 #include "intel_drv.h"
 #include "intel_drv.h"
 
 
+#define PCI_LBPC 0xf4 /* legacy/combination backlight modes */
+
 void
 void
 intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
 intel_fixed_panel_mode(struct drm_display_mode *fixed_mode,
 		       struct drm_display_mode *adjusted_mode)
 		       struct drm_display_mode *adjusted_mode)
@@ -109,3 +111,110 @@ done:
 	dev_priv->pch_pf_pos = (x << 16) | y;
 	dev_priv->pch_pf_pos = (x << 16) | y;
 	dev_priv->pch_pf_size = (width << 16) | height;
 	dev_priv->pch_pf_size = (width << 16) | height;
 }
 }
+
+static int is_backlight_combination_mode(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (INTEL_INFO(dev)->gen >= 4)
+		return I915_READ(BLC_PWM_CTL2) & BLM_COMBINATION_MODE;
+
+	if (IS_GEN2(dev))
+		return I915_READ(BLC_PWM_CTL) & BLM_LEGACY_MODE;
+
+	return 0;
+}
+
+u32 intel_panel_get_max_backlight(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 max;
+
+	if (HAS_PCH_SPLIT(dev)) {
+		max = I915_READ(BLC_PWM_PCH_CTL2) >> 16;
+	} else {
+		max = I915_READ(BLC_PWM_CTL);
+		if (IS_PINEVIEW(dev)) {
+			max >>= 17;
+		} else {
+			max >>= 16;
+			if (INTEL_INFO(dev)->gen < 4)
+				max &= ~1;
+		}
+
+		if (is_backlight_combination_mode(dev))
+			max *= 0xff;
+	}
+
+	if (max == 0) {
+		/* XXX add code here to query mode clock or hardware clock
+		 * and program max PWM appropriately.
+		 */
+		DRM_ERROR("fixme: max PWM is zero.\n");
+		max = 1;
+	}
+
+	DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
+	return max;
+}
+
+u32 intel_panel_get_backlight(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val;
+
+	if (HAS_PCH_SPLIT(dev)) {
+		val = I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+	} else {
+		val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+		if (IS_PINEVIEW(dev))
+			val >>= 1;
+
+		if (is_backlight_combination_mode(dev)){
+			u8 lbpc;
+
+			val &= ~1;
+			pci_read_config_byte(dev->pdev, PCI_LBPC, &lbpc);
+			val *= lbpc;
+			val >>= 1;
+		}
+	}
+
+	DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
+	return val;
+}
+
+static void intel_pch_panel_set_backlight(struct drm_device *dev, u32 level)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 val = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+	I915_WRITE(BLC_PWM_CPU_CTL, val | level);
+}
+
+void intel_panel_set_backlight(struct drm_device *dev, u32 level)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	u32 tmp;
+
+	DRM_DEBUG_DRIVER("set backlight PWM = %d\n", level);
+
+	if (HAS_PCH_SPLIT(dev))
+		return intel_pch_panel_set_backlight(dev, level);
+
+	if (is_backlight_combination_mode(dev)){
+		u32 max = intel_panel_get_max_backlight(dev);
+		u8 lpbc;
+
+		lpbc = level * 0xfe / max + 1;
+		level /= lpbc;
+		pci_write_config_byte(dev->pdev, PCI_LBPC, lpbc);
+	}
+
+	tmp = I915_READ(BLC_PWM_CTL);
+	if (IS_PINEVIEW(dev)) {
+		tmp &= ~(BACKLIGHT_DUTY_CYCLE_MASK - 1);
+		level <<= 1;
+	} else
+		tmp &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+	I915_WRITE(BLC_PWM_CTL, tmp | level);
+}

+ 248 - 209
drivers/gpu/drm/i915/intel_ringbuffer.c

@@ -32,6 +32,7 @@
 #include "i915_drv.h"
 #include "i915_drv.h"
 #include "i915_drm.h"
 #include "i915_drm.h"
 #include "i915_trace.h"
 #include "i915_trace.h"
+#include "intel_drv.h"
 
 
 static u32 i915_gem_get_seqno(struct drm_device *dev)
 static u32 i915_gem_get_seqno(struct drm_device *dev)
 {
 {
@@ -49,9 +50,9 @@ static u32 i915_gem_get_seqno(struct drm_device *dev)
 
 
 static void
 static void
 render_ring_flush(struct drm_device *dev,
 render_ring_flush(struct drm_device *dev,
-		struct intel_ring_buffer *ring,
-		u32	invalidate_domains,
-		u32	flush_domains)
+		  struct intel_ring_buffer *ring,
+		  u32	invalidate_domains,
+		  u32	flush_domains)
 {
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 cmd;
 	u32 cmd;
@@ -97,7 +98,7 @@ render_ring_flush(struct drm_device *dev,
 		if ((invalidate_domains|flush_domains) &
 		if ((invalidate_domains|flush_domains) &
 		    I915_GEM_DOMAIN_RENDER)
 		    I915_GEM_DOMAIN_RENDER)
 			cmd &= ~MI_NO_WRITE_FLUSH;
 			cmd &= ~MI_NO_WRITE_FLUSH;
-		if (!IS_I965G(dev)) {
+		if (INTEL_INFO(dev)->gen < 4) {
 			/*
 			/*
 			 * On the 965, the sampler cache always gets flushed
 			 * On the 965, the sampler cache always gets flushed
 			 * and this bit is reserved.
 			 * and this bit is reserved.
@@ -118,38 +119,26 @@ render_ring_flush(struct drm_device *dev,
 	}
 	}
 }
 }
 
 
-static unsigned int render_ring_get_head(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	return I915_READ(PRB0_HEAD) & HEAD_ADDR;
-}
-
-static unsigned int render_ring_get_tail(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
+static void ring_write_tail(struct drm_device *dev,
+			    struct intel_ring_buffer *ring,
+			    u32 value)
 {
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	return I915_READ(PRB0_TAIL) & TAIL_ADDR;
+	I915_WRITE_TAIL(ring, value);
 }
 }
 
 
-static unsigned int render_ring_get_active_head(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
+u32 intel_ring_get_active_head(struct drm_device *dev,
+			       struct intel_ring_buffer *ring)
 {
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
+	u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ?
+			RING_ACTHD(ring->mmio_base) : ACTHD;
 
 
 	return I915_READ(acthd_reg);
 	return I915_READ(acthd_reg);
 }
 }
 
 
-static void render_ring_advance_ring(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	I915_WRITE(PRB0_TAIL, ring->tail);
-}
-
 static int init_ring_common(struct drm_device *dev,
 static int init_ring_common(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
+			    struct intel_ring_buffer *ring)
 {
 {
 	u32 head;
 	u32 head;
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -157,57 +146,57 @@ static int init_ring_common(struct drm_device *dev,
 	obj_priv = to_intel_bo(ring->gem_object);
 	obj_priv = to_intel_bo(ring->gem_object);
 
 
 	/* Stop the ring if it's running. */
 	/* Stop the ring if it's running. */
-	I915_WRITE(ring->regs.ctl, 0);
-	I915_WRITE(ring->regs.head, 0);
-	I915_WRITE(ring->regs.tail, 0);
+	I915_WRITE_CTL(ring, 0);
+	I915_WRITE_HEAD(ring, 0);
+	ring->write_tail(dev, ring, 0);
 
 
 	/* Initialize the ring. */
 	/* Initialize the ring. */
-	I915_WRITE(ring->regs.start, obj_priv->gtt_offset);
-	head = ring->get_head(dev, ring);
+	I915_WRITE_START(ring, obj_priv->gtt_offset);
+	head = I915_READ_HEAD(ring) & HEAD_ADDR;
 
 	/* G45 ring initialization fails to reset head to zero */
 	if (head != 0) {
 		DRM_ERROR("%s head not reset to zero "
 				"ctl %08x head %08x tail %08x start %08x\n",
 				ring->name,
-				I915_READ(ring->regs.ctl),
-				I915_READ(ring->regs.head),
-				I915_READ(ring->regs.tail),
-				I915_READ(ring->regs.start));
+				I915_READ_CTL(ring),
+				I915_READ_HEAD(ring),
+				I915_READ_TAIL(ring),
+				I915_READ_START(ring));
 
-		I915_WRITE(ring->regs.head, 0);
+		I915_WRITE_HEAD(ring, 0);
 
 		DRM_ERROR("%s head forced to zero "
 				"ctl %08x head %08x tail %08x start %08x\n",
 				ring->name,
-				I915_READ(ring->regs.ctl),
-				I915_READ(ring->regs.head),
-				I915_READ(ring->regs.tail),
-				I915_READ(ring->regs.start));
+				I915_READ_CTL(ring),
+				I915_READ_HEAD(ring),
+				I915_READ_TAIL(ring),
+				I915_READ_START(ring));
 	}
 
-	I915_WRITE(ring->regs.ctl,
+	I915_WRITE_CTL(ring,
 			((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
 			| RING_NO_REPORT | RING_VALID);
 
-	head = I915_READ(ring->regs.head) & HEAD_ADDR;
+	head = I915_READ_HEAD(ring) & HEAD_ADDR;
 	/* If the head is still not zero, the ring is dead */
 	if (head != 0) {
 		DRM_ERROR("%s initialization failed "
 				"ctl %08x head %08x tail %08x start %08x\n",
 				ring->name,
-				I915_READ(ring->regs.ctl),
-				I915_READ(ring->regs.head),
-				I915_READ(ring->regs.tail),
-				I915_READ(ring->regs.start));
+				I915_READ_CTL(ring),
+				I915_READ_HEAD(ring),
+				I915_READ_TAIL(ring),
+				I915_READ_START(ring));
 		return -EIO;
 	}
 
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		i915_kernel_lost_context(dev);
 	else {
-		ring->head = ring->get_head(dev, ring);
-		ring->tail = ring->get_tail(dev, ring);
+		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
 		ring->space = ring->head - (ring->tail + 8);
 		if (ring->space < 0)
 			ring->space += ring->size;
@@ -216,13 +205,13 @@ static int init_ring_common(struct drm_device *dev,
 }
 
 static int init_render_ring(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
+			    struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int ret = init_ring_common(dev, ring);
 	int mode;
 
-	if (IS_I9XX(dev) && !IS_GEN3(dev)) {
+	if (INTEL_INFO(dev)->gen > 3) {
 		mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
 		if (IS_GEN6(dev))
 			mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
@@ -250,9 +239,8 @@ do {									\
  */
 static u32
 render_ring_add_request(struct drm_device *dev,
-		struct intel_ring_buffer *ring,
-		struct drm_file *file_priv,
-		u32 flush_domains)
+			struct intel_ring_buffer *ring,
+			u32 flush_domains)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	u32 seqno;
@@ -315,8 +303,8 @@ render_ring_add_request(struct drm_device *dev,
 }
 
 static u32
-render_ring_get_gem_seqno(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
+render_ring_get_seqno(struct drm_device *dev,
+		      struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	if (HAS_PIPE_CONTROL(dev))
@@ -327,7 +315,7 @@ render_ring_get_gem_seqno(struct drm_device *dev,
 
 static void
 render_ring_get_user_irq(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
+			 struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
@@ -344,7 +332,7 @@ render_ring_get_user_irq(struct drm_device *dev,
 
 static void
 render_ring_put_user_irq(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
+			 struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 	unsigned long irqflags;
@@ -360,21 +348,23 @@ render_ring_put_user_irq(struct drm_device *dev,
 	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 }
 
-static void render_setup_status_page(struct drm_device *dev,
-	struct	intel_ring_buffer *ring)
+void intel_ring_setup_status_page(struct drm_device *dev,
+				  struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	if (IS_GEN6(dev)) {
-		I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr);
-		I915_READ(HWS_PGA_GEN6); /* posting read */
+		I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base),
+			   ring->status_page.gfx_addr);
+		I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); /* posting read */
 	} else {
-		I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
-		I915_READ(HWS_PGA); /* posting read */
+		I915_WRITE(RING_HWS_PGA(ring->mmio_base),
+			   ring->status_page.gfx_addr);
+		I915_READ(RING_HWS_PGA(ring->mmio_base)); /* posting read */
 	}
 
 }
 
-void
+static void
 bsd_ring_flush(struct drm_device *dev,
 		struct intel_ring_buffer *ring,
 		u32     invalidate_domains,
@@ -386,45 +376,16 @@ bsd_ring_flush(struct drm_device *dev,
 	intel_ring_advance(dev, ring);
 }
 
-static inline unsigned int bsd_ring_get_head(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	return I915_READ(BSD_RING_HEAD) & HEAD_ADDR;
-}
-
-static inline unsigned int bsd_ring_get_tail(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	return I915_READ(BSD_RING_TAIL) & TAIL_ADDR;
-}
-
-static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	return I915_READ(BSD_RING_ACTHD);
-}
-
-static inline void bsd_ring_advance_ring(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	I915_WRITE(BSD_RING_TAIL, ring->tail);
-}
-
 static int init_bsd_ring(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
+			 struct intel_ring_buffer *ring)
 {
 	return init_ring_common(dev, ring);
 }
 
 static u32
-bsd_ring_add_request(struct drm_device *dev,
-		struct intel_ring_buffer *ring,
-		struct drm_file *file_priv,
-		u32 flush_domains)
+ring_add_request(struct drm_device *dev,
+		 struct intel_ring_buffer *ring,
+		 u32 flush_domains)
 {
 	u32 seqno;
 
@@ -443,40 +404,32 @@ bsd_ring_add_request(struct drm_device *dev,
 	return seqno;
 }
 
-static void bsd_setup_status_page(struct drm_device *dev,
-		struct  intel_ring_buffer *ring)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
-	I915_READ(BSD_HWS_PGA);
-}
-
 static void
 bsd_ring_get_user_irq(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
+		      struct intel_ring_buffer *ring)
 {
 	/* do nothing */
 }
 static void
 bsd_ring_put_user_irq(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
+		      struct intel_ring_buffer *ring)
 {
 	/* do nothing */
 }
 
 static u32
-bsd_ring_get_gem_seqno(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
+ring_status_page_get_seqno(struct drm_device *dev,
+			   struct intel_ring_buffer *ring)
 {
 	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 }
 
 static int
-bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
-		struct intel_ring_buffer *ring,
-		struct drm_i915_gem_execbuffer2 *exec,
-		struct drm_clip_rect *cliprects,
-		uint64_t exec_offset)
+ring_dispatch_gem_execbuffer(struct drm_device *dev,
+			     struct intel_ring_buffer *ring,
+			     struct drm_i915_gem_execbuffer2 *exec,
+			     struct drm_clip_rect *cliprects,
+			     uint64_t exec_offset)
 {
 	uint32_t exec_start;
 	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
@@ -488,13 +441,12 @@ bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
 	return 0;
 }
 
-
 static int
 render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
-		struct intel_ring_buffer *ring,
-		struct drm_i915_gem_execbuffer2 *exec,
-		struct drm_clip_rect *cliprects,
-		uint64_t exec_offset)
+				    struct intel_ring_buffer *ring,
+				    struct drm_i915_gem_execbuffer2 *exec,
+				    struct drm_clip_rect *cliprects,
+				    uint64_t exec_offset)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	int nbox = exec->num_cliprects;
@@ -523,8 +475,8 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
 			intel_ring_emit(dev, ring, exec_start + exec_len - 4);
 			intel_ring_emit(dev, ring, 0);
 		} else {
-			intel_ring_begin(dev, ring, 4);
-			if (IS_I965G(dev)) {
+			intel_ring_begin(dev, ring, 2);
+			if (INTEL_INFO(dev)->gen >= 4) {
 				intel_ring_emit(dev, ring,
 						MI_BATCH_BUFFER_START | (2 << 6)
 						| MI_BATCH_NON_SECURE_I965);
@@ -539,7 +491,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
 		intel_ring_advance(dev, ring);
 	}
 
-	if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
+	if (IS_G4X(dev) || IS_GEN5(dev)) {
 		intel_ring_begin(dev, ring, 2);
 		intel_ring_emit(dev, ring, MI_FLUSH |
 				MI_NO_WRITE_FLUSH |
@@ -553,7 +505,7 @@ render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
 }
 
 static void cleanup_status_page(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
+				struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_gem_object *obj;
@@ -573,7 +525,7 @@ static void cleanup_status_page(struct drm_device *dev,
 }
 
 static int init_status_page(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
+			    struct intel_ring_buffer *ring)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_gem_object *obj;
@@ -603,7 +555,7 @@ static int init_status_page(struct drm_device *dev,
 	ring->status_page.obj = obj;
 	memset(ring->status_page.page_addr, 0, PAGE_SIZE);
 
-	ring->setup_status_page(dev, ring);
+	intel_ring_setup_status_page(dev, ring);
 	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
 			ring->name, ring->status_page.gfx_addr);
 
@@ -617,15 +569,18 @@ err:
 	return ret;
 }
 
-
 int intel_init_ring_buffer(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
+			   struct intel_ring_buffer *ring)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_object *obj_priv;
 	struct drm_gem_object *obj;
 	int ret;
 
 	ring->dev = dev;
+	INIT_LIST_HEAD(&ring->active_list);
+	INIT_LIST_HEAD(&ring->request_list);
+	INIT_LIST_HEAD(&ring->gpu_write_list);
 
 	if (I915_NEED_GFX_HWS(dev)) {
 		ret = init_status_page(dev, ring);
@@ -642,7 +597,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
 
 	ring->gem_object = obj;
 
-	ret = i915_gem_object_pin(obj, ring->alignment);
+	ret = i915_gem_object_pin(obj, PAGE_SIZE);
 	if (ret)
 		goto err_unref;
 
@@ -668,14 +623,12 @@ int intel_init_ring_buffer(struct drm_device *dev,
 	if (!drm_core_check_feature(dev, DRIVER_MODESET))
 		i915_kernel_lost_context(dev);
 	else {
-		ring->head = ring->get_head(dev, ring);
-		ring->tail = ring->get_tail(dev, ring);
+		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
 		ring->space = ring->head - (ring->tail + 8);
 		if (ring->space < 0)
 			ring->space += ring->size;
 	}
-	INIT_LIST_HEAD(&ring->active_list);
-	INIT_LIST_HEAD(&ring->request_list);
 	return ret;
 
 err_unmap:
@@ -691,7 +644,7 @@ err_hws:
 }
 
 void intel_cleanup_ring_buffer(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
+			       struct intel_ring_buffer *ring)
 {
 	if (ring->gem_object == NULL)
 		return;
@@ -704,8 +657,8 @@ void intel_cleanup_ring_buffer(struct drm_device *dev,
 	cleanup_status_page(dev, ring);
 }
 
-int intel_wrap_ring_buffer(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
+static int intel_wrap_ring_buffer(struct drm_device *dev,
+				  struct intel_ring_buffer *ring)
 {
 	unsigned int *virt;
 	int rem;
@@ -731,14 +684,15 @@ int intel_wrap_ring_buffer(struct drm_device *dev,
 }
 
 int intel_wait_ring_buffer(struct drm_device *dev,
-		struct intel_ring_buffer *ring, int n)
+			   struct intel_ring_buffer *ring, int n)
 {
 	unsigned long end;
+	drm_i915_private_t *dev_priv = dev->dev_private;
 
 	trace_i915_ring_wait_begin (dev);
 	end = jiffies + 3 * HZ;
 	do {
-		ring->head = ring->get_head(dev, ring);
+		ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
 		ring->space = ring->head - (ring->tail + 8);
 		if (ring->space < 0)
 			ring->space += ring->size;
@@ -753,14 +707,15 @@ int intel_wait_ring_buffer(struct drm_device *dev,
 				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 		}
 
-		yield();
+		msleep(1);
 	} while (!time_after(jiffies, end));
 	trace_i915_ring_wait_end (dev);
 	return -EBUSY;
 }
 
 void intel_ring_begin(struct drm_device *dev,
-		struct intel_ring_buffer *ring, int num_dwords)
+		      struct intel_ring_buffer *ring,
+		      int num_dwords)
 {
 	int n = 4*num_dwords;
 	if (unlikely(ring->tail + n > ring->size))
@@ -772,97 +727,181 @@ void intel_ring_begin(struct drm_device *dev,
 }
 
 void intel_ring_advance(struct drm_device *dev,
-		struct intel_ring_buffer *ring)
+			struct intel_ring_buffer *ring)
 {
 	ring->tail &= ring->size - 1;
-	ring->advance_ring(dev, ring);
-}
-
-void intel_fill_struct(struct drm_device *dev,
-		struct intel_ring_buffer *ring,
-		void *data,
-		unsigned int len)
-{
-	unsigned int *virt = ring->virtual_start + ring->tail;
-	BUG_ON((len&~(4-1)) != 0);
-	intel_ring_begin(dev, ring, len/4);
-	memcpy(virt, data, len);
-	ring->tail += len;
-	ring->tail &= ring->size - 1;
-	ring->space -= len;
-	intel_ring_advance(dev, ring);
+	ring->write_tail(dev, ring, ring->tail);
 }
 
-struct intel_ring_buffer render_ring = {
+static const struct intel_ring_buffer render_ring = {
 	.name			= "render ring",
-	.regs                   = {
-		.ctl = PRB0_CTL,
-		.head = PRB0_HEAD,
-		.tail = PRB0_TAIL,
-		.start = PRB0_START
-	},
-	.ring_flag		= I915_EXEC_RENDER,
+	.id			= RING_RENDER,
+	.mmio_base		= RENDER_RING_BASE,
 	.size			= 32 * PAGE_SIZE,
-	.alignment		= PAGE_SIZE,
-	.virtual_start		= NULL,
-	.dev			= NULL,
-	.gem_object		= NULL,
-	.head			= 0,
-	.tail			= 0,
-	.space			= 0,
-	.user_irq_refcount	= 0,
-	.irq_gem_seqno		= 0,
-	.waiting_gem_seqno	= 0,
-	.setup_status_page	= render_setup_status_page,
 	.init			= init_render_ring,
-	.get_head		= render_ring_get_head,
-	.get_tail		= render_ring_get_tail,
-	.get_active_head	= render_ring_get_active_head,
-	.advance_ring		= render_ring_advance_ring,
+	.write_tail		= ring_write_tail,
 	.flush			= render_ring_flush,
 	.add_request		= render_ring_add_request,
-	.get_gem_seqno		= render_ring_get_gem_seqno,
+	.get_seqno		= render_ring_get_seqno,
 	.user_irq_get		= render_ring_get_user_irq,
 	.user_irq_put		= render_ring_put_user_irq,
 	.dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
-	.status_page		= {NULL, 0, NULL},
-	.map			= {0,}
 };
 
 /* ring buffer for bit-stream decoder */
 
-struct intel_ring_buffer bsd_ring = {
+static const struct intel_ring_buffer bsd_ring = {
 	.name                   = "bsd ring",
-	.regs			= {
-		.ctl = BSD_RING_CTL,
-		.head = BSD_RING_HEAD,
-		.tail = BSD_RING_TAIL,
-		.start = BSD_RING_START
-	},
-	.ring_flag		= I915_EXEC_BSD,
+	.id			= RING_BSD,
+	.mmio_base		= BSD_RING_BASE,
 	.size			= 32 * PAGE_SIZE,
-	.alignment		= PAGE_SIZE,
-	.virtual_start		= NULL,
-	.dev			= NULL,
-	.gem_object		= NULL,
-	.head			= 0,
-	.tail			= 0,
-	.space			= 0,
-	.user_irq_refcount	= 0,
-	.irq_gem_seqno		= 0,
-	.waiting_gem_seqno	= 0,
-	.setup_status_page	= bsd_setup_status_page,
 	.init			= init_bsd_ring,
-	.get_head		= bsd_ring_get_head,
-	.get_tail		= bsd_ring_get_tail,
-	.get_active_head	= bsd_ring_get_active_head,
-	.advance_ring		= bsd_ring_advance_ring,
+	.write_tail		= ring_write_tail,
 	.flush			= bsd_ring_flush,
-	.add_request		= bsd_ring_add_request,
-	.get_gem_seqno		= bsd_ring_get_gem_seqno,
+	.add_request		= ring_add_request,
+	.get_seqno		= ring_status_page_get_seqno,
 	.user_irq_get		= bsd_ring_get_user_irq,
 	.user_irq_put		= bsd_ring_put_user_irq,
-	.dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
-	.status_page		= {NULL, 0, NULL},
-	.map			= {0,}
+	.dispatch_gem_execbuffer = ring_dispatch_gem_execbuffer,
 };
+
+
+static void gen6_bsd_ring_write_tail(struct drm_device *dev,
+				     struct intel_ring_buffer *ring,
+				     u32 value)
+{
+       drm_i915_private_t *dev_priv = dev->dev_private;
+
+       /* Every tail move must follow the sequence below */
+       I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
+	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
+       I915_WRITE(GEN6_BSD_RNCID, 0x0);
+
+       if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
+                               GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
+                       50))
+               DRM_ERROR("timed out waiting for IDLE Indicator\n");
+
+       I915_WRITE_TAIL(ring, value);
+       I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
+	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
+	       GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
+}
+
+static void gen6_ring_flush(struct drm_device *dev,
+			    struct intel_ring_buffer *ring,
+			    u32 invalidate_domains,
+			    u32 flush_domains)
+{
+       intel_ring_begin(dev, ring, 4);
+       intel_ring_emit(dev, ring, MI_FLUSH_DW);
+       intel_ring_emit(dev, ring, 0);
+       intel_ring_emit(dev, ring, 0);
+       intel_ring_emit(dev, ring, 0);
+       intel_ring_advance(dev, ring);
+}
+
+static int
+gen6_ring_dispatch_gem_execbuffer(struct drm_device *dev,
+				  struct intel_ring_buffer *ring,
+				  struct drm_i915_gem_execbuffer2 *exec,
+				  struct drm_clip_rect *cliprects,
+				  uint64_t exec_offset)
+{
+       uint32_t exec_start;
+
+       exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+
+       intel_ring_begin(dev, ring, 2);
+       intel_ring_emit(dev, ring,
+		       MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
+       /* bit0-7 is the length on GEN6+ */
+       intel_ring_emit(dev, ring, exec_start);
+       intel_ring_advance(dev, ring);
+
+       return 0;
+}
+
+/* ring buffer for Video Codec for Gen6+ */
+static const struct intel_ring_buffer gen6_bsd_ring = {
+       .name			= "gen6 bsd ring",
+       .id			= RING_BSD,
+       .mmio_base		= GEN6_BSD_RING_BASE,
+       .size			= 32 * PAGE_SIZE,
+       .init			= init_bsd_ring,
+       .write_tail		= gen6_bsd_ring_write_tail,
+       .flush			= gen6_ring_flush,
+       .add_request		= ring_add_request,
+       .get_seqno		= ring_status_page_get_seqno,
+       .user_irq_get		= bsd_ring_get_user_irq,
+       .user_irq_put		= bsd_ring_put_user_irq,
+       .dispatch_gem_execbuffer	= gen6_ring_dispatch_gem_execbuffer,
+};
+
+/* Blitter support (SandyBridge+) */
+
+static void
+blt_ring_get_user_irq(struct drm_device *dev,
+		      struct intel_ring_buffer *ring)
+{
+	/* do nothing */
+}
+static void
+blt_ring_put_user_irq(struct drm_device *dev,
+		      struct intel_ring_buffer *ring)
+{
+	/* do nothing */
+}
+
+static const struct intel_ring_buffer gen6_blt_ring = {
+       .name			= "blt ring",
+       .id			= RING_BLT,
+       .mmio_base		= BLT_RING_BASE,
+       .size			= 32 * PAGE_SIZE,
+       .init			= init_ring_common,
+       .write_tail		= ring_write_tail,
+       .flush			= gen6_ring_flush,
+       .add_request		= ring_add_request,
+       .get_seqno		= ring_status_page_get_seqno,
+       .user_irq_get		= blt_ring_get_user_irq,
+       .user_irq_put		= blt_ring_put_user_irq,
+       .dispatch_gem_execbuffer	= gen6_ring_dispatch_gem_execbuffer,
+};
+
+int intel_init_render_ring_buffer(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	dev_priv->render_ring = render_ring;
+
+	if (!I915_NEED_GFX_HWS(dev)) {
+		dev_priv->render_ring.status_page.page_addr
+			= dev_priv->status_page_dmah->vaddr;
+		memset(dev_priv->render_ring.status_page.page_addr,
+				0, PAGE_SIZE);
+	}
+
+	return intel_init_ring_buffer(dev, &dev_priv->render_ring);
+}
+
+int intel_init_bsd_ring_buffer(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (IS_GEN6(dev))
+		dev_priv->bsd_ring = gen6_bsd_ring;
+	else
+		dev_priv->bsd_ring = bsd_ring;
+
+	return intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
+}
+
+int intel_init_blt_ring_buffer(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	dev_priv->blt_ring = gen6_blt_ring;
+
+	return intel_init_ring_buffer(dev, &dev_priv->blt_ring);
+}

+ 47 - 34
drivers/gpu/drm/i915/intel_ringbuffer.h

@@ -7,25 +7,32 @@ struct  intel_hw_status_page {
 	struct		drm_gem_object *obj;
 };
 
+#define I915_READ_TAIL(ring) I915_READ(RING_TAIL(ring->mmio_base))
+#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val)
+#define I915_READ_START(ring) I915_READ(RING_START(ring->mmio_base))
+#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val)
+#define I915_READ_HEAD(ring) I915_READ(RING_HEAD(ring->mmio_base))
+#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val)
+#define I915_READ_CTL(ring) I915_READ(RING_CTL(ring->mmio_base))
+#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)
+
 struct drm_i915_gem_execbuffer2;
 struct  intel_ring_buffer {
 	const char	*name;
-	struct		ring_regs {
-			u32 ctl;
-			u32 head;
-			u32 tail;
-			u32 start;
-	} regs;
-	unsigned int	ring_flag;
+	enum intel_ring_id {
+		RING_RENDER = 0x1,
+		RING_BSD = 0x2,
+		RING_BLT = 0x4,
+	} id;
+	u32		mmio_base;
 	unsigned long	size;
-	unsigned int	alignment;
 	void		*virtual_start;
 	struct		drm_device *dev;
 	struct		drm_gem_object *gem_object;
 
 	unsigned int	head;
 	unsigned int	tail;
-	unsigned int	space;
+	int		space;
 	struct intel_hw_status_page status_page;
 
 	u32		irq_gem_seqno;		/* last seq seem at irq time */
@@ -35,30 +42,22 @@ struct  intel_ring_buffer {
 			struct intel_ring_buffer *ring);
 	void		(*user_irq_put)(struct drm_device *dev,
 			struct intel_ring_buffer *ring);
-	void		(*setup_status_page)(struct drm_device *dev,
-			struct	intel_ring_buffer *ring);
 
 	int		(*init)(struct drm_device *dev,
 			struct intel_ring_buffer *ring);
 
-	unsigned int	(*get_head)(struct drm_device *dev,
-			struct intel_ring_buffer *ring);
-	unsigned int	(*get_tail)(struct drm_device *dev,
-			struct intel_ring_buffer *ring);
-	unsigned int	(*get_active_head)(struct drm_device *dev,
-			struct intel_ring_buffer *ring);
-	void		(*advance_ring)(struct drm_device *dev,
-			struct intel_ring_buffer *ring);
+	void		(*write_tail)(struct drm_device *dev,
+				      struct intel_ring_buffer *ring,
+				      u32 value);
 	void		(*flush)(struct drm_device *dev,
 			struct intel_ring_buffer *ring,
 			u32	invalidate_domains,
 			u32	flush_domains);
 	u32		(*add_request)(struct drm_device *dev,
 			struct intel_ring_buffer *ring,
-			struct drm_file *file_priv,
 			u32 flush_domains);
-	u32		(*get_gem_seqno)(struct drm_device *dev,
-			struct intel_ring_buffer *ring);
+	u32		(*get_seqno)(struct drm_device *dev,
+				     struct intel_ring_buffer *ring);
 	int		(*dispatch_gem_execbuffer)(struct drm_device *dev,
 			struct intel_ring_buffer *ring,
 			struct drm_i915_gem_execbuffer2 *exec,
@@ -83,6 +82,20 @@ struct  intel_ring_buffer {
 	 */
 	struct list_head request_list;
 
+	/**
+	 * List of objects currently pending a GPU write flush.
+	 *
+	 * All elements on this list will belong to either the
+	 * active_list or flushing_list, last_rendering_seqno can
+	 * be used to differentiate between the two elements.
+	 */
+	struct list_head gpu_write_list;
+
+	/**
+	 * Do we have some not yet emitted requests outstanding?
+	 */
+	bool outstanding_lazy_request;
+
 	wait_queue_head_t irq_queue;
 	drm_local_map_t map;
 };
@@ -96,15 +109,13 @@ intel_read_status_page(struct intel_ring_buffer *ring,
 }
 
 int intel_init_ring_buffer(struct drm_device *dev,
-		struct intel_ring_buffer *ring);
+			   struct intel_ring_buffer *ring);
 void intel_cleanup_ring_buffer(struct drm_device *dev,
-		struct intel_ring_buffer *ring);
+			       struct intel_ring_buffer *ring);
 int intel_wait_ring_buffer(struct drm_device *dev,
-		struct intel_ring_buffer *ring, int n);
-int intel_wrap_ring_buffer(struct drm_device *dev,
-		struct intel_ring_buffer *ring);
+			   struct intel_ring_buffer *ring, int n);
 void intel_ring_begin(struct drm_device *dev,
-		struct intel_ring_buffer *ring, int n);
+		      struct intel_ring_buffer *ring, int n);
 
 static inline void intel_ring_emit(struct drm_device *dev,
 				   struct intel_ring_buffer *ring,
@@ -115,17 +126,19 @@ static inline void intel_ring_emit(struct drm_device *dev,
 	ring->tail += 4;
 }
 
-void intel_fill_struct(struct drm_device *dev,
-		struct intel_ring_buffer *ring,
-		void *data,
-		unsigned int len);
 void intel_ring_advance(struct drm_device *dev,
 		struct intel_ring_buffer *ring);
 
 u32 intel_ring_get_seqno(struct drm_device *dev,
 		struct intel_ring_buffer *ring);
 
-extern struct intel_ring_buffer render_ring;
-extern struct intel_ring_buffer bsd_ring;
+int intel_init_render_ring_buffer(struct drm_device *dev);
+int intel_init_bsd_ring_buffer(struct drm_device *dev);
+int intel_init_blt_ring_buffer(struct drm_device *dev);
+
+u32 intel_ring_get_active_head(struct drm_device *dev,
+			       struct intel_ring_buffer *ring);
+void intel_ring_setup_status_page(struct drm_device *dev,
+				  struct intel_ring_buffer *ring);
 
 #endif /* _INTEL_RINGBUFFER_H_ */

File diff suppressed because it is too large
+ 294 - 377
drivers/gpu/drm/i915/intel_sdvo.c


+ 80 - 85
drivers/gpu/drm/i915/intel_tv.c

@@ -48,7 +48,7 @@ struct intel_tv {
 	struct intel_encoder base;
 
 	int type;
-	char *tv_format;
+	const char *tv_format;
 	int margin[4];
 	u32 save_TV_H_CTL_1;
 	u32 save_TV_H_CTL_2;
@@ -350,7 +350,7 @@ static const struct video_levels component_levels = {
 
 
 struct tv_mode {
-	char *name;
+	const char *name;
 	int clock;
 	int refresh; /* in millihertz (for precision) */
 	u32 oversample;
@@ -900,7 +900,14 @@ static const struct tv_mode tv_modes[] = {
 
 
 static struct intel_tv *enc_to_intel_tv(struct drm_encoder *encoder)
 {
-	return container_of(enc_to_intel_encoder(encoder), struct intel_tv, base);
+	return container_of(encoder, struct intel_tv, base.base);
+}
+
+static struct intel_tv *intel_attached_tv(struct drm_connector *connector)
+{
+	return container_of(intel_attached_encoder(connector),
+			    struct intel_tv,
+			    base);
 }
 
 static void
@@ -922,7 +929,7 @@ intel_tv_dpms(struct drm_encoder *encoder, int mode)
 }
 
 static const struct tv_mode *
-intel_tv_mode_lookup (char *tv_format)
+intel_tv_mode_lookup(const char *tv_format)
 {
 	int i;
 
@@ -936,22 +943,23 @@ intel_tv_mode_lookup (char *tv_format)
 }
 
 static const struct tv_mode *
-intel_tv_mode_find (struct intel_tv *intel_tv)
+intel_tv_mode_find(struct intel_tv *intel_tv)
 {
 	return intel_tv_mode_lookup(intel_tv->tv_format);
 }
 
 static enum drm_mode_status
-intel_tv_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
+intel_tv_mode_valid(struct drm_connector *connector,
+		    struct drm_display_mode *mode)
 {
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+	struct intel_tv *intel_tv = intel_attached_tv(connector);
 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
 
 	/* Ensure TV refresh is close to desired refresh */
 	if (tv_mode && abs(tv_mode->refresh - drm_mode_vrefresh(mode) * 1000)
 				< 1000)
 		return MODE_OK;
+
 	return MODE_CLOCK_RANGE;
 }
 
@@ -1131,7 +1139,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 			   color_conversion->av);
 	}
 
-	if (IS_I965G(dev))
+	if (INTEL_INFO(dev)->gen >= 4)
 		I915_WRITE(TV_CLR_KNOBS, 0x00404000);
 	else
 		I915_WRITE(TV_CLR_KNOBS, 0x00606000);
@@ -1157,12 +1165,12 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		I915_WRITE(dspbase_reg, I915_READ(dspbase_reg));
 
 		/* Wait for vblank for the disable to take effect */
-		if (!IS_I9XX(dev))
+		if (IS_GEN2(dev))
 			intel_wait_for_vblank(dev, intel_crtc->pipe);
 
-		I915_WRITE(pipeconf_reg, pipeconf & ~PIPEACONF_ENABLE);
+		I915_WRITE(pipeconf_reg, pipeconf & ~PIPECONF_ENABLE);
 		/* Wait for vblank for the disable to take effect. */
-		intel_wait_for_vblank(dev, intel_crtc->pipe);
+		intel_wait_for_pipe_off(dev, intel_crtc->pipe);
 
 		/* Filter ctl must be set before TV_WIN_SIZE */
 		I915_WRITE(TV_FILTER_CTL_1, TV_AUTO_SCALE);
@@ -1196,7 +1204,7 @@ intel_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
 		I915_WRITE(TV_V_LUMA_0 + (i<<2), tv_mode->filter_table[j++]);
 	for (i = 0; i < 43; i++)
 		I915_WRITE(TV_V_CHROMA_0 + (i<<2), tv_mode->filter_table[j++]);
-	I915_WRITE(TV_DAC, 0);
+	I915_WRITE(TV_DAC, I915_READ(TV_DAC) & TV_DAC_SAVE);
 	I915_WRITE(TV_CTL, tv_ctl);
 }
 
@@ -1228,15 +1236,13 @@ static const struct drm_display_mode reported_modes[] = {
 static int
 intel_tv_detect_type (struct intel_tv *intel_tv)
 {
-	struct drm_encoder *encoder = &intel_tv->base.enc;
+	struct drm_encoder *encoder = &intel_tv->base.base;
 	struct drm_device *dev = encoder->dev;
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	unsigned long irqflags;
 	u32 tv_ctl, save_tv_ctl;
 	u32 tv_dac, save_tv_dac;
-	int type = DRM_MODE_CONNECTOR_Unknown;
-
-	tv_dac = I915_READ(TV_DAC);
+	int type;
 
 	/* Disable TV interrupts around load detect or we'll recurse */
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
@@ -1244,19 +1250,14 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
 			      PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
 	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
 
-	/*
-	 * Detect TV by polling)
-	 */
-	save_tv_dac = tv_dac;
-	tv_ctl = I915_READ(TV_CTL);
-	save_tv_ctl = tv_ctl;
-	tv_ctl &= ~TV_ENC_ENABLE;
-	tv_ctl &= ~TV_TEST_MODE_MASK;
+	save_tv_dac = tv_dac = I915_READ(TV_DAC);
+	save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
+
+	/* Poll for TV detection */
+	tv_ctl &= ~(TV_ENC_ENABLE | TV_TEST_MODE_MASK);
 	tv_ctl |= TV_TEST_MODE_MONITOR_DETECT;
-	tv_dac &= ~TVDAC_SENSE_MASK;
-	tv_dac &= ~DAC_A_MASK;
-	tv_dac &= ~DAC_B_MASK;
-	tv_dac &= ~DAC_C_MASK;
+
+	tv_dac &= ~(TVDAC_SENSE_MASK | DAC_A_MASK | DAC_B_MASK | DAC_C_MASK);
 	tv_dac |= (TVDAC_STATE_CHG_EN |
 		   TVDAC_A_SENSE_CTL |
 		   TVDAC_B_SENSE_CTL |
@@ -1265,37 +1266,40 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
 		   DAC_A_0_7_V |
 		   DAC_B_0_7_V |
 		   DAC_C_0_7_V);
+
 	I915_WRITE(TV_CTL, tv_ctl);
 	I915_WRITE(TV_DAC, tv_dac);
 	POSTING_READ(TV_DAC);
-	msleep(20);
 
 
-	tv_dac = I915_READ(TV_DAC);
-	I915_WRITE(TV_DAC, save_tv_dac);
-	I915_WRITE(TV_CTL, save_tv_ctl);
-	POSTING_READ(TV_CTL);
-	msleep(20);
+	intel_wait_for_vblank(intel_tv->base.base.dev,
+			      to_intel_crtc(intel_tv->base.base.crtc)->pipe);
 
 
-	/*
-	 *  A B C
-	 *  0 1 1 Composite
-	 *  1 0 X svideo
-	 *  0 0 0 Component
-	 */
-	if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
-		DRM_DEBUG_KMS("Detected Composite TV connection\n");
-		type = DRM_MODE_CONNECTOR_Composite;
-	} else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
-		DRM_DEBUG_KMS("Detected S-Video TV connection\n");
-		type = DRM_MODE_CONNECTOR_SVIDEO;
-	} else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
-		DRM_DEBUG_KMS("Detected Component TV connection\n");
-		type = DRM_MODE_CONNECTOR_Component;
-	} else {
-		DRM_DEBUG_KMS("No TV connection detected\n");
-		type = -1;
+	type = -1;
+	if (wait_for((tv_dac = I915_READ(TV_DAC)) & TVDAC_STATE_CHG, 20) == 0) {
+		DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac);
+		/*
+		 *  A B C
+		 *  0 1 1 Composite
+		 *  1 0 X svideo
+		 *  0 0 0 Component
+		 */
+		if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
+			DRM_DEBUG_KMS("Detected Composite TV connection\n");
+			type = DRM_MODE_CONNECTOR_Composite;
+		} else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
+			DRM_DEBUG_KMS("Detected S-Video TV connection\n");
+			type = DRM_MODE_CONNECTOR_SVIDEO;
+		} else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
+			DRM_DEBUG_KMS("Detected Component TV connection\n");
+			type = DRM_MODE_CONNECTOR_Component;
+		} else {
+			DRM_DEBUG_KMS("Unrecognised TV connection\n");
+		}
 	}
 
+	I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+	I915_WRITE(TV_CTL, save_tv_ctl);
+
 	/* Restore interrupt config */
 	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
 	i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
@@ -1311,8 +1315,7 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
  */
 static void intel_tv_find_better_format(struct drm_connector *connector)
 {
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+	struct intel_tv *intel_tv = intel_attached_tv(connector);
 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
 	int i;
 
@@ -1344,14 +1347,13 @@ static enum drm_connector_status
 intel_tv_detect(struct drm_connector *connector, bool force)
 {
 	struct drm_display_mode mode;
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+	struct intel_tv *intel_tv = intel_attached_tv(connector);
 	int type;
 
 	mode = reported_modes[0];
 	drm_mode_set_crtcinfo(&mode, CRTC_INTERLACE_HALVE_V);
 
-	if (encoder->crtc && encoder->crtc->enabled) {
+	if (intel_tv->base.base.crtc && intel_tv->base.base.crtc->enabled) {
 		type = intel_tv_detect_type(intel_tv);
 	} else if (force) {
 		struct drm_crtc *crtc;
@@ -1375,11 +1377,10 @@ intel_tv_detect(struct drm_connector *connector, bool force)
 	return connector_status_connected;
 }
 
-static struct input_res {
-	char *name;
+static const struct input_res {
+	const char *name;
 	int w, h;
-} input_res_table[] =
-{
+} input_res_table[] = {
 	{"640x480", 640, 480},
 	{"800x600", 800, 600},
 	{"1024x768", 1024, 768},
@@ -1396,8 +1397,7 @@ static void
 intel_tv_chose_preferred_modes(struct drm_connector *connector,
 			       struct drm_display_mode *mode_ptr)
 {
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+	struct intel_tv *intel_tv = intel_attached_tv(connector);
 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
 
 	if (tv_mode->nbr_end < 480 && mode_ptr->vdisplay == 480)
@@ -1422,15 +1422,14 @@ static int
 intel_tv_get_modes(struct drm_connector *connector)
 {
 	struct drm_display_mode *mode_ptr;
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
+	struct intel_tv *intel_tv = intel_attached_tv(connector);
 	const struct tv_mode *tv_mode = intel_tv_mode_find(intel_tv);
 	int j, count = 0;
 	u64 tmp;
 
 	for (j = 0; j < ARRAY_SIZE(input_res_table);
 	     j++) {
-		struct input_res *input = &input_res_table[j];
+		const struct input_res *input = &input_res_table[j];
 		unsigned int hactive_s = input->w;
 		unsigned int vactive_s = input->h;
 
@@ -1488,9 +1487,8 @@ intel_tv_set_property(struct drm_connector *connector, struct drm_property *prop
 		      uint64_t val)
 {
 	struct drm_device *dev = connector->dev;
-	struct drm_encoder *encoder = intel_attached_encoder(connector);
-	struct intel_tv *intel_tv = enc_to_intel_tv(encoder);
-	struct drm_crtc *crtc = encoder->crtc;
+	struct intel_tv *intel_tv = intel_attached_tv(connector);
+	struct drm_crtc *crtc = intel_tv->base.base.crtc;
 	int ret = 0;
 	bool changed = false;
 
@@ -1555,7 +1553,7 @@ static const struct drm_connector_funcs intel_tv_connector_funcs = {
 static const struct drm_connector_helper_funcs intel_tv_connector_helper_funcs = {
 	.mode_valid = intel_tv_mode_valid,
 	.get_modes = intel_tv_get_modes,
-	.best_encoder = intel_attached_encoder,
+	.best_encoder = intel_best_encoder,
 };
 
 static const struct drm_encoder_funcs intel_tv_enc_funcs = {
@@ -1607,7 +1605,7 @@ intel_tv_init(struct drm_device *dev)
 	struct intel_encoder *intel_encoder;
 	struct intel_connector *intel_connector;
 	u32 tv_dac_on, tv_dac_off, save_tv_dac;
-	char **tv_format_names;
+	char *tv_format_names[ARRAY_SIZE(tv_modes)];
 	int i, initial_mode = 0;
 
 	if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
@@ -1661,15 +1659,15 @@ intel_tv_init(struct drm_device *dev)
 	drm_connector_init(dev, connector, &intel_tv_connector_funcs,
 			   DRM_MODE_CONNECTOR_SVIDEO);
 
-	drm_encoder_init(dev, &intel_encoder->enc, &intel_tv_enc_funcs,
+	drm_encoder_init(dev, &intel_encoder->base, &intel_tv_enc_funcs,
 			 DRM_MODE_ENCODER_TVDAC);
 
-	drm_mode_connector_attach_encoder(&intel_connector->base, &intel_encoder->enc);
+	intel_connector_attach_encoder(intel_connector, intel_encoder);
 	intel_encoder->type = INTEL_OUTPUT_TVOUT;
 	intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
 	intel_encoder->clone_mask = (1 << INTEL_TV_CLONE_BIT);
-	intel_encoder->enc.possible_crtcs = ((1 << 0) | (1 << 1));
-	intel_encoder->enc.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
+	intel_encoder->base.possible_crtcs = ((1 << 0) | (1 << 1));
+	intel_encoder->base.possible_clones = (1 << INTEL_OUTPUT_TVOUT);
 	intel_tv->type = DRM_MODE_CONNECTOR_Unknown;
 
 	/* BIOS margin values */
@@ -1678,21 +1676,19 @@ intel_tv_init(struct drm_device *dev)
 	intel_tv->margin[TV_MARGIN_RIGHT] = 46;
 	intel_tv->margin[TV_MARGIN_BOTTOM] = 37;
 
-	intel_tv->tv_format = kstrdup(tv_modes[initial_mode].name, GFP_KERNEL);
+	intel_tv->tv_format = tv_modes[initial_mode].name;
 
-	drm_encoder_helper_add(&intel_encoder->enc, &intel_tv_helper_funcs);
+	drm_encoder_helper_add(&intel_encoder->base, &intel_tv_helper_funcs);
 	drm_connector_helper_add(connector, &intel_tv_connector_helper_funcs);
 	connector->interlace_allowed = false;
 	connector->doublescan_allowed = false;
 
 	/* Create TV properties then attach current values */
-	tv_format_names = kmalloc(sizeof(char *) * ARRAY_SIZE(tv_modes),
-				  GFP_KERNEL);
-	if (!tv_format_names)
-		goto out;
 	for (i = 0; i < ARRAY_SIZE(tv_modes); i++)
-		tv_format_names[i] = tv_modes[i].name;
-	drm_mode_create_tv_properties(dev, ARRAY_SIZE(tv_modes), tv_format_names);
+		tv_format_names[i] = (char *)tv_modes[i].name;
+	drm_mode_create_tv_properties(dev,
+				      ARRAY_SIZE(tv_modes),
+				      tv_format_names);
 
 	drm_connector_attach_property(connector, dev->mode_config.tv_mode_property,
 				   initial_mode);
@@ -1708,6 +1704,5 @@ intel_tv_init(struct drm_device *dev)
 	drm_connector_attach_property(connector,
 				   dev->mode_config.tv_bottom_margin_property,
 				   intel_tv->margin[TV_MARGIN_BOTTOM]);
-out:
 	drm_sysfs_connector_add(connector);
 }

+ 0 - 2
drivers/gpu/drm/mga/mga_drv.c

@@ -60,8 +60,6 @@ static struct drm_driver driver = {
 	.irq_uninstall = mga_driver_irq_uninstall,
 	.irq_handler = mga_driver_irq_handler,
 	.reclaim_buffers = drm_core_reclaim_buffers,
-	.get_map_ofs = drm_core_get_map_ofs,
-	.get_reg_ofs = drm_core_get_reg_ofs,
 	.ioctls = mga_ioctls,
 	.dma_ioctl = mga_dma_buffers,
 	.fops = {

+ 1 - 0
drivers/gpu/drm/nouveau/Kconfig

@@ -10,6 +10,7 @@ config DRM_NOUVEAU
 	select FB
 	select FB
 	select FRAMEBUFFER_CONSOLE if !EMBEDDED
 	select FB_BACKLIGHT if DRM_NOUVEAU_BACKLIGHT
 	help
 	help
 	  Choose this option for open-source nVidia support.
 
+ 4 - 2
drivers/gpu/drm/nouveau/Makefile

@@ -9,7 +9,8 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
              nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
              nouveau_bo.o nouveau_fence.o nouveau_gem.o nouveau_ttm.o \
              nouveau_hw.o nouveau_calc.o nouveau_bios.o nouveau_i2c.o \
              nouveau_display.o nouveau_connector.o nouveau_fbcon.o \
+             nouveau_dp.o nouveau_ramht.o \
+	     nouveau_pm.o nouveau_volt.o nouveau_perf.o nouveau_temp.o \
              nv04_timer.o \
              nv04_timer.o \
              nv04_mc.o nv40_mc.o nv50_mc.o \
              nv04_fb.o nv10_fb.o nv30_fb.o nv40_fb.o nv50_fb.o nvc0_fb.o \
              nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
              nv04_dac.o nv04_dfp.o nv04_tv.o nv17_tv.o nv17_tv_modes.o \
              nv04_crtc.o nv04_display.o nv04_cursor.o nv04_fbcon.o \
              nv10_gpio.o nv50_gpio.o \
+	     nv50_calc.o \
+	     nv04_pm.o nv50_pm.o nva3_pm.o
 
 
 nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
 nouveau-$(CONFIG_DRM_NOUVEAU_DEBUG) += nouveau_debugfs.o
 nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o
 nouveau-$(CONFIG_COMPAT) += nouveau_ioc32.o

+ 1 - 1
drivers/gpu/drm/nouveau/nouveau_acpi.c

@@ -292,6 +292,6 @@ nouveau_acpi_edid(struct drm_device *dev, struct drm_connector *connector)
 	if (ret < 0)
 	if (ret < 0)
 		return ret;
 
+	nv_connector->edid = kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
 	return 0;
 	return 0;
 }
+ 220 - 146
drivers/gpu/drm/nouveau/nouveau_bios.c

@@ -43,9 +43,6 @@
 #define BIOSLOG(sip, fmt, arg...) NV_DEBUG(sip->dev, fmt, ##arg)
 #define LOG_OLD_VALUE(x)
 
-#define ROM16(x) le16_to_cpu(*(uint16_t *)&(x))
-#define ROM32(x) le32_to_cpu(*(uint32_t *)&(x))
-
 struct init_exec {
 	bool execute;
 	bool execute;
 	bool repeat;
 	bool repeat;
@@ -272,12 +269,6 @@ struct init_tbl_entry {
 	int (*handler)(struct nvbios *, uint16_t, struct init_exec *);
 	int (*handler)(struct nvbios *, uint16_t, struct init_exec *);
 };
 };
 
 
-struct bit_entry {
-	uint8_t id[2];
-	uint16_t length;
-	uint16_t offset;
-};
-
 static int parse_init_table(struct nvbios *, unsigned int, struct init_exec *);
 static int parse_init_table(struct nvbios *, unsigned int, struct init_exec *);
 
 
 #define MACRO_INDEX_SIZE	2
 #define MACRO_INDEX_SIZE	2
@@ -1231,7 +1222,7 @@ init_dp_condition(struct nvbios *bios, uint16_t offset, struct init_exec *iexec)
 			return 3;
 			return 3;
 		}
 		}
 
 
-		if (cond & 1)
+		if (!(cond & 1))
 			iexec->execute = false;
 			iexec->execute = false;
 	}
 	}
 		break;
 		break;
@@ -4675,6 +4666,92 @@ int run_tmds_table(struct drm_device *dev, struct dcb_entry *dcbent, int head, i
 	return 0;
 	return 0;
 }
 }
 
 
+struct pll_mapping {
+	u8  type;
+	u32 reg;
+};
+
+static struct pll_mapping nv04_pll_mapping[] = {
+	{ PLL_CORE  , NV_PRAMDAC_NVPLL_COEFF },
+	{ PLL_MEMORY, NV_PRAMDAC_MPLL_COEFF },
+	{ PLL_VPLL0 , NV_PRAMDAC_VPLL_COEFF },
+	{ PLL_VPLL1 , NV_RAMDAC_VPLL2 },
+	{}
+};
+
+static struct pll_mapping nv40_pll_mapping[] = {
+	{ PLL_CORE  , 0x004000 },
+	{ PLL_MEMORY, 0x004020 },
+	{ PLL_VPLL0 , NV_PRAMDAC_VPLL_COEFF },
+	{ PLL_VPLL1 , NV_RAMDAC_VPLL2 },
+	{}
+};
+
+static struct pll_mapping nv50_pll_mapping[] = {
+	{ PLL_CORE  , 0x004028 },
+	{ PLL_SHADER, 0x004020 },
+	{ PLL_UNK03 , 0x004000 },
+	{ PLL_MEMORY, 0x004008 },
+	{ PLL_UNK40 , 0x00e810 },
+	{ PLL_UNK41 , 0x00e818 },
+	{ PLL_UNK42 , 0x00e824 },
+	{ PLL_VPLL0 , 0x614100 },
+	{ PLL_VPLL1 , 0x614900 },
+	{}
+};
+
+static struct pll_mapping nv84_pll_mapping[] = {
+	{ PLL_CORE  , 0x004028 },
+	{ PLL_SHADER, 0x004020 },
+	{ PLL_MEMORY, 0x004008 },
+	{ PLL_UNK05 , 0x004030 },
+	{ PLL_UNK41 , 0x00e818 },
+	{ PLL_VPLL0 , 0x614100 },
+	{ PLL_VPLL1 , 0x614900 },
+	{}
+};
+
+u32
+get_pll_register(struct drm_device *dev, enum pll_types type)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvbios *bios = &dev_priv->vbios;
+	struct pll_mapping *map;
+	int i;
+
+	if (dev_priv->card_type < NV_40)
+		map = nv04_pll_mapping;
+	else
+	if (dev_priv->card_type < NV_50)
+		map = nv40_pll_mapping;
+	else {
+		u8 *plim = &bios->data[bios->pll_limit_tbl_ptr];
+
+		if (plim[0] >= 0x30) {
+			u8 *entry = plim + plim[1];
+			for (i = 0; i < plim[3]; i++, entry += plim[2]) {
+				if (entry[0] == type)
+					return ROM32(entry[3]);
+			}
+
+			return 0;
+		}
+
+		if (dev_priv->chipset == 0x50)
+			map = nv50_pll_mapping;
+		else
+			map = nv84_pll_mapping;
+	}
+
+	while (map->reg) {
+		if (map->type == type)
+			return map->reg;
+		map++;
+	}
+
+	return 0;
+}
+
 int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims *pll_lim)
 {
 	/*
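
get_pll_register() resolves a symbolic PLL type to a register address, either by scanning a version-0x30+ PLL limits table or by walking one of the static per-generation maps added above. A standalone sketch of that zero-terminated map walk, with made-up register values rather than any particular chipset's:

	#include <stdint.h>
	#include <stdio.h>

	struct pll_map { uint8_t type; uint32_t reg; };

	/* terminated by an all-zero entry, like the nv04/nv40/nv50 maps;
	 * the register values here are placeholders */
	static const struct pll_map example_map[] = {
		{ 0x01 /* core   */, 0x004028 },
		{ 0x04 /* memory */, 0x004008 },
		{ 0x80 /* vpll0  */, 0x614100 },
		{ }
	};

	static uint32_t lookup_pll_reg(const struct pll_map *map, uint8_t type)
	{
		for (; map->reg; map++)
			if (map->type == type)
				return map->reg;
		return 0;	/* unknown type */
	}

	int main(void)
	{
		printf("0x%06x\n", lookup_pll_reg(example_map, 0x04));
		return 0;
	}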
@@ -4750,6 +4827,17 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
 	/* initialize all members to zero */
 	memset(pll_lim, 0, sizeof(struct pll_lims));
 
+	/* if we were passed a type rather than a register, figure
+	 * out the register and store it
+	 */
+	if (limit_match > PLL_MAX)
+		pll_lim->reg = limit_match;
+	else {
+		pll_lim->reg = get_pll_register(dev, limit_match);
+		if (!pll_lim->reg)
+			return -ENOENT;
+	}
+
 	if (pll_lim_ver == 0x10 || pll_lim_ver == 0x11) {
 		uint8_t *pll_rec = &bios->data[bios->pll_limit_tbl_ptr + headerlen + recordlen * pllindex];
 
@@ -4785,7 +4873,6 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
 		pll_lim->max_usable_log2p = 0x6;
 	} else if (pll_lim_ver == 0x20 || pll_lim_ver == 0x21) {
 		uint16_t plloffs = bios->pll_limit_tbl_ptr + headerlen;
-		uint32_t reg = 0; /* default match */
 		uint8_t *pll_rec;
 		int i;
 
@@ -4797,37 +4884,22 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
 			NV_WARN(dev, "Default PLL limit entry has non-zero "
 				       "register field\n");
 
-		if (limit_match > MAX_PLL_TYPES)
-			/* we've been passed a reg as the match */
-			reg = limit_match;
-		else /* limit match is a pll type */
-			for (i = 1; i < entries && !reg; i++) {
-				uint32_t cmpreg = ROM32(bios->data[plloffs + recordlen * i]);
-
-				if (limit_match == NVPLL &&
-				    (cmpreg == NV_PRAMDAC_NVPLL_COEFF || cmpreg == 0x4000))
-					reg = cmpreg;
-				if (limit_match == MPLL &&
-				    (cmpreg == NV_PRAMDAC_MPLL_COEFF || cmpreg == 0x4020))
-					reg = cmpreg;
-				if (limit_match == VPLL1 &&
-				    (cmpreg == NV_PRAMDAC_VPLL_COEFF || cmpreg == 0x4010))
-					reg = cmpreg;
-				if (limit_match == VPLL2 &&
-				    (cmpreg == NV_RAMDAC_VPLL2 || cmpreg == 0x4018))
-					reg = cmpreg;
-			}
-
 		for (i = 1; i < entries; i++)
-			if (ROM32(bios->data[plloffs + recordlen * i]) == reg) {
+			if (ROM32(bios->data[plloffs + recordlen * i]) == pll_lim->reg) {
 				pllindex = i;
 				break;
 			}
 
+		if ((dev_priv->card_type >= NV_50) && (pllindex == 0)) {
+			NV_ERROR(dev, "Register 0x%08x not found in PLL "
+				 "limits table", pll_lim->reg);
+			return -ENOENT;
+		}
+
 		pll_rec = &bios->data[plloffs + recordlen * pllindex];
 		BIOSLOG(bios, "Loading PLL limits for reg 0x%08x\n",
-			pllindex ? reg : 0);
+			pllindex ? pll_lim->reg : 0);

 		/*
 		 * Frequencies are stored in tables in MHz, kHz are more
@@ -4877,8 +4949,8 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
 		if (cv == 0x51 && !pll_lim->refclk) {
 			uint32_t sel_clk = bios_rd32(bios, NV_PRAMDAC_SEL_CLK);
 
-			if (((limit_match == NV_PRAMDAC_VPLL_COEFF || limit_match == VPLL1) && sel_clk & 0x20) ||
-			    ((limit_match == NV_RAMDAC_VPLL2 || limit_match == VPLL2) && sel_clk & 0x80)) {
+			if ((pll_lim->reg == NV_PRAMDAC_VPLL_COEFF && sel_clk & 0x20) ||
+			    (pll_lim->reg == NV_RAMDAC_VPLL2 && sel_clk & 0x80)) {
 				if (bios_idxprt_rd(bios, NV_CIO_CRX__COLOR, NV_CIO_CRE_CHIP_ID_INDEX) < 0xa3)
 					pll_lim->refclk = 200000;
 				else
@@ -4891,10 +4963,10 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
 		int i;
 
 		BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n",
-			limit_match);
+			pll_lim->reg);

 		for (i = 0; i < entries; i++, entry += recordlen) {
-			if (ROM32(entry[3]) == limit_match) {
+			if (ROM32(entry[3]) == pll_lim->reg) {
 				record = &bios->data[ROM16(entry[1])];
 				break;
 			}
@@ -4902,7 +4974,7 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims

 		if (!record) {
 			NV_ERROR(dev, "Register 0x%08x not found in PLL "
-				 "limits table", limit_match);
+				 "limits table", pll_lim->reg);
 			return -ENOENT;
 		}
 
@@ -4931,10 +5003,10 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims
 		int i;
 
 		BIOSLOG(bios, "Loading PLL limits for register 0x%08x\n",
-			limit_match);
+			pll_lim->reg);

 		for (i = 0; i < entries; i++, entry += recordlen) {
-			if (ROM32(entry[3]) == limit_match) {
+			if (ROM32(entry[3]) == pll_lim->reg) {
 				record = &bios->data[ROM16(entry[1])];
 				break;
 			}
@@ -4942,7 +5014,7 @@ int get_pll_limits(struct drm_device *dev, uint32_t limit_match, struct pll_lims

 		if (!record) {
 			NV_ERROR(dev, "Register 0x%08x not found in PLL "
-				 "limits table", limit_match);
+				 "limits table", pll_lim->reg);
 			return -ENOENT;
 		}
 
@@ -5293,7 +5365,7 @@ parse_bit_M_tbl_entry(struct drm_device *dev, struct nvbios *bios,
 	if (bitentry->length < 0x5)
 		return 0;
 
-	if (bitentry->id[1] < 2) {
+	if (bitentry->version < 2) {
 		bios->ram_restrict_group_count = bios->data[bitentry->offset + 2];
 		bios->ram_restrict_tbl_ptr = ROM16(bios->data[bitentry->offset + 3]);
 	} else {
@@ -5403,27 +5475,40 @@ struct bit_table {

 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
 
+int
+bit_table(struct drm_device *dev, u8 id, struct bit_entry *bit)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvbios *bios = &dev_priv->vbios;
+	u8 entries, *entry;
+
+	entries = bios->data[bios->offset + 10];
+	entry   = &bios->data[bios->offset + 12];
+	while (entries--) {
+		if (entry[0] == id) {
+			bit->id = entry[0];
+			bit->version = entry[1];
+			bit->length = ROM16(entry[2]);
+			bit->offset = ROM16(entry[4]);
+			bit->data = ROMPTR(bios, entry[4]);
+			return 0;
+		}
+
+		entry += bios->data[bios->offset + 9];
+	}
+
+	return -ENOENT;
+}
+
 static int
 parse_bit_table(struct nvbios *bios, const uint16_t bitoffset,
 		struct bit_table *table)
 {
 	struct drm_device *dev = bios->dev;
-	uint8_t maxentries = bios->data[bitoffset + 4];
-	int i, offset;
 	struct bit_entry bitentry;
 
-	for (i = 0, offset = bitoffset + 6; i < maxentries; i++, offset += 6) {
-		bitentry.id[0] = bios->data[offset];
-
-		if (bitentry.id[0] != table->id)
-			continue;
-
-		bitentry.id[1] = bios->data[offset + 1];
-		bitentry.length = ROM16(bios->data[offset + 2]);
-		bitentry.offset = ROM16(bios->data[offset + 4]);
-
+	if (bit_table(dev, table->id, &bitentry) == 0)
 		return table->parse_fn(dev, bios, &bitentry);
-	}

 	NV_INFO(dev, "BIT table '%c' not found\n", table->id);
 	return -ENOSYS;
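
bit_table() walks the BIT structure directly: the entry stride sits at offset +9 from the BIT header, the entry count at +10, and the first entry at +12, each entry starting with an id byte and a version byte followed by 16-bit length and offset fields. A hedged standalone sketch of the same fixed-stride scan over a raw byte array (offsets mirror the function above; rd16 is a little-endian helper written for this example, not a kernel API):

	#include <stdint.h>

	struct bit_entry_ex {
		uint8_t  id;
		uint8_t  version;
		uint16_t length;
		uint16_t offset;
	};

	static uint16_t rd16(const uint8_t *p) { return p[0] | (uint16_t)p[1] << 8; }

	/* 'bit' points at the BIT header: stride at +9, count at +10,
	 * first entry at +12 -- the layout bit_table() relies on. */
	static int find_bit_entry(const uint8_t *bit, uint8_t id, struct bit_entry_ex *e)
	{
		uint8_t stride = bit[9], entries = bit[10];
		const uint8_t *entry = bit + 12;

		while (entries--) {
			if (entry[0] == id) {
				e->id = entry[0];
				e->version = entry[1];
				e->length = rd16(&entry[2]);
				e->offset = rd16(&entry[4]);
				return 0;
			}
			entry += stride;
		}
		return -1;	/* not found */
	}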
@@ -5683,8 +5768,14 @@ static uint16_t findstr(uint8_t *data, int n, const uint8_t *str, int len)
 static struct dcb_gpio_entry *
 new_gpio_entry(struct nvbios *bios)
 {
+	struct drm_device *dev = bios->dev;
 	struct dcb_gpio_table *gpio = &bios->dcb.gpio;
 
+	if (gpio->entries >= DCB_MAX_NUM_GPIO_ENTRIES) {
+		NV_ERROR(dev, "exceeded maximum number of gpio entries!!\n");
+		return NULL;
+	}
+
 	return &gpio->entry[gpio->entries++];
 }
 
@@ -5705,114 +5796,91 @@ nouveau_bios_gpio_entry(struct drm_device *dev, enum dcb_gpio_tag tag)
 	return NULL;
 }
 
-static void
-parse_dcb30_gpio_entry(struct nvbios *bios, uint16_t offset)
-{
-	struct dcb_gpio_entry *gpio;
-	uint16_t ent = ROM16(bios->data[offset]);
-	uint8_t line = ent & 0x1f,
-		tag = ent >> 5 & 0x3f,
-		flags = ent >> 11 & 0x1f;
-
-	if (tag == 0x3f)
-		return;
-
-	gpio = new_gpio_entry(bios);
-
-	gpio->tag = tag;
-	gpio->line = line;
-	gpio->invert = flags != 4;
-	gpio->entry = ent;
-}
-
-static void
-parse_dcb40_gpio_entry(struct nvbios *bios, uint16_t offset)
-{
-	uint32_t entry = ROM32(bios->data[offset]);
-	struct dcb_gpio_entry *gpio;
-
-	if ((entry & 0x0000ff00) == 0x0000ff00)
-		return;
-
-	gpio = new_gpio_entry(bios);
-	gpio->tag = (entry & 0x0000ff00) >> 8;
-	gpio->line = (entry & 0x0000001f) >> 0;
-	gpio->state_default = (entry & 0x01000000) >> 24;
-	gpio->state[0] = (entry & 0x18000000) >> 27;
-	gpio->state[1] = (entry & 0x60000000) >> 29;
-	gpio->entry = entry;
-}
-
 static void
 parse_dcb_gpio_table(struct nvbios *bios)
 {
 	struct drm_device *dev = bios->dev;
-	uint16_t gpio_table_ptr = bios->dcb.gpio_table_ptr;
-	uint8_t *gpio_table = &bios->data[gpio_table_ptr];
-	int header_len = gpio_table[1],
-	    entries = gpio_table[2],
-	    entry_len = gpio_table[3];
-	void (*parse_entry)(struct nvbios *, uint16_t) = NULL;
+	struct dcb_gpio_entry *e;
+	u8 headerlen, entries, recordlen;
+	u8 *dcb, *gpio = NULL, *entry;
 	int i;
 
-	if (bios->dcb.version >= 0x40) {
-		if (gpio_table_ptr && entry_len != 4) {
-			NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
-			return;
-		}
+	dcb = ROMPTR(bios, bios->data[0x36]);
+	if (dcb[0] >= 0x30) {
+		gpio = ROMPTR(bios, dcb[10]);
+		if (!gpio)
+			goto no_table;

-		parse_entry = parse_dcb40_gpio_entry;
+		headerlen = gpio[1];
+		entries   = gpio[2];
+		recordlen = gpio[3];
+	} else
+	if (dcb[0] >= 0x22 && dcb[-1] >= 0x13) {
+		gpio = ROMPTR(bios, dcb[-15]);
+		if (!gpio)
+			goto no_table;
+
+		headerlen = 3;
+		entries   = gpio[2];
+		recordlen = gpio[1];
+	} else
+	if (dcb[0] >= 0x22) {
+		/* No GPIO table present, parse the TVDAC GPIO data. */
+		uint8_t *tvdac_gpio = &dcb[-5];

-	} else if (bios->dcb.version >= 0x30) {
-		if (gpio_table_ptr && entry_len != 2) {
-			NV_WARN(dev, "Invalid DCB GPIO table entry length.\n");
-			return;
+		if (tvdac_gpio[0] & 1) {
+			e = new_gpio_entry(bios);
+			e->tag = DCB_GPIO_TVDAC0;
+			e->line = tvdac_gpio[1] >> 4;
+			e->invert = tvdac_gpio[0] & 2;
 		}
 
-		parse_entry = parse_dcb30_gpio_entry;
-
-	} else if (bios->dcb.version >= 0x22) {
-		/*
-		 * DCBs older than v3.0 don't really have a GPIO
-		 * table, instead they keep some GPIO info at fixed
-		 * locations.
-		 */
-		uint16_t dcbptr = ROM16(bios->data[0x36]);
-		uint8_t *tvdac_gpio = &bios->data[dcbptr - 5];
+		goto no_table;
+	} else {
+		NV_DEBUG(dev, "no/unknown gpio table on DCB 0x%02x\n", dcb[0]);
+		goto no_table;
+	}

-		if (tvdac_gpio[0] & 1) {
-			struct dcb_gpio_entry *gpio = new_gpio_entry(bios);
+	entry = gpio + headerlen;
+	for (i = 0; i < entries; i++, entry += recordlen) {
+		e = new_gpio_entry(bios);
+		if (!e)
+			break;
 
 
-			gpio->tag = DCB_GPIO_TVDAC0;
-			gpio->line = tvdac_gpio[1] >> 4;
-			gpio->invert = tvdac_gpio[0] & 2;
-		}
-	} else {
-		/*
-		 * No systematic way to store GPIO info on pre-v2.2
-		 * DCBs, try to match the PCI device IDs.
-		 */
+		if (gpio[0] < 0x40) {
+			e->entry = ROM16(entry[0]);
+			e->tag = (e->entry & 0x07e0) >> 5;
+			if (e->tag == 0x3f) {
+				bios->dcb.gpio.entries--;
+				continue;
+			}

-		/* Apple iMac G4 NV18 */
-		if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
-			struct dcb_gpio_entry *gpio = new_gpio_entry(bios);
+			e->line = (e->entry & 0x001f);
+			e->invert = ((e->entry & 0xf800) >> 11) != 4;
+		} else {
+			e->entry = ROM32(entry[0]);
+			e->tag = (e->entry & 0x0000ff00) >> 8;
+			if (e->tag == 0xff) {
+				bios->dcb.gpio.entries--;
+				continue;
+			}

-			gpio->tag = DCB_GPIO_TVDAC0;
-			gpio->line = 4;
+			e->line = (e->entry & 0x0000001f) >> 0;
+			e->state_default = (e->entry & 0x01000000) >> 24;
+			e->state[0] = (e->entry & 0x18000000) >> 27;
+			e->state[1] = (e->entry & 0x60000000) >> 29;
 		}
-
 	}
 
-	if (!gpio_table_ptr)
-		return;
-
-	if (entries > DCB_MAX_NUM_GPIO_ENTRIES) {
-		NV_WARN(dev, "Too many entries in the DCB GPIO table.\n");
-		entries = DCB_MAX_NUM_GPIO_ENTRIES;
+no_table:
+	/* Apple iMac G4 NV18 */
+	if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
+		e = new_gpio_entry(bios);
+		if (e) {
+			e->tag = DCB_GPIO_TVDAC0;
+			e->line = 4;
+		}
 	}
-
-	for (i = 0; i < entries; i++)
-		parse_entry(bios, gpio_table_ptr + header_len + entry_len * i);
 }
 struct dcb_connector_table_entry *
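
The rewritten parse_dcb_gpio_table() decodes both GPIO entry layouts in one loop: pre-4.0 DCBs use 16-bit entries (tag in bits 10:5, line in 4:0, flags in 15:11), while 4.0+ DCBs use 32-bit entries (tag in bits 15:8, line in 4:0, default and target states in the high byte). A standalone sketch of just those two decodes, mirroring the masks used above:

	#include <stdint.h>

	struct gpio_entry {
		uint8_t tag, line, invert;
		uint8_t state_default, state[2];
	};

	/* DCB 3.0: 16-bit entry; tag 0x3f marks an unused slot */
	static int decode_gpio30(uint16_t ent, struct gpio_entry *e)
	{
		e->tag = (ent & 0x07e0) >> 5;
		if (e->tag == 0x3f)
			return -1;
		e->line   = ent & 0x001f;
		e->invert = ((ent & 0xf800) >> 11) != 4;
		return 0;
	}

	/* DCB 4.0: 32-bit entry; tag 0xff marks an unused slot */
	static int decode_gpio40(uint32_t ent, struct gpio_entry *e)
	{
		e->tag = (ent & 0x0000ff00) >> 8;
		if (e->tag == 0xff)
			return -1;
		e->line          = ent & 0x0000001f;
		e->state_default = (ent & 0x01000000) >> 24;
		e->state[0]      = (ent & 0x18000000) >> 27;
		e->state[1]      = (ent & 0x60000000) >> 29;
		return 0;
	}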
@@ -6680,6 +6748,8 @@ static int nouveau_parse_vbios_struct(struct drm_device *dev)
 					bit_signature, sizeof(bit_signature));
 	if (offset) {
 		NV_TRACE(dev, "BIT BIOS found\n");
+		bios->type = NVBIOS_BIT;
+		bios->offset = offset;
 		return parse_bit_structure(bios, offset + 6);
 	}
 
@@ -6687,6 +6757,8 @@ static int nouveau_parse_vbios_struct(struct drm_device *dev)
 					bmp_signature, sizeof(bmp_signature));
 	if (offset) {
 		NV_TRACE(dev, "BMP BIOS found\n");
+		bios->type = NVBIOS_BMP;
+		bios->offset = offset;
 		return parse_bmp_structure(dev, bios, offset);
 	}
 
@@ -6806,6 +6878,8 @@ nouveau_bios_init(struct drm_device *dev)
 			"running VBIOS init tables.\n");
 			"running VBIOS init tables.\n");
 		bios->execute = true;
 		bios->execute = true;
 	}
 	}
+	if (nouveau_force_post)
+		bios->execute = true;

 	ret = nouveau_run_vbios_init(dev);
 	if (ret)

+ 37 - 6
drivers/gpu/drm/nouveau/nouveau_bios.h

@@ -34,6 +34,20 @@

 #define DCB_LOC_ON_CHIP 0
 
+#define ROM16(x) le16_to_cpu(*(uint16_t *)&(x))
+#define ROM32(x) le32_to_cpu(*(uint32_t *)&(x))
+#define ROMPTR(bios, x) (ROM16(x) ? &(bios)->data[ROM16(x)] : NULL)
+
+struct bit_entry {
+	uint8_t  id;
+	uint8_t  version;
+	uint16_t length;
+	uint16_t offset;
+	uint8_t *data;
+};
+
+int bit_table(struct drm_device *, u8 id, struct bit_entry *);
+
 struct dcb_i2c_entry {
 	uint32_t entry;
 	uint8_t port_type;
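
ROM16 and ROM32 read little-endian values straight out of the BIOS image, and the new ROMPTR macro turns a stored 16-bit offset into a pointer into bios->data, with a zero offset meaning "no table here". A small endianness-safe sketch of the same helpers for illustration (byte-wise reads instead of the kernel's unaligned casts):

	#include <stdint.h>
	#include <stddef.h>

	static uint16_t rom16(const uint8_t *p) { return p[0] | (uint16_t)p[1] << 8; }

	static uint32_t rom32(const uint8_t *p)
	{
		return p[0] | (uint32_t)p[1] << 8 |
		       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
	}

	/* ROMPTR-style lookup: a zero offset means the table is absent */
	static const uint8_t *romptr(const uint8_t *data, const uint8_t *at)
	{
		uint16_t off = rom16(at);
		return off ? data + off : NULL;
	}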
@@ -170,16 +184,28 @@ enum LVDS_script {
 	LVDS_PANEL_OFF
 };
 
-/* changing these requires matching changes to reg tables in nv_get_clock */
-#define MAX_PLL_TYPES	4
+/* these match types in pll limits table version 0x40,
+ * nouveau uses them on all chipsets internally where a
+ * specific pll needs to be referenced, but the exact
+ * register isn't known.
+ */
 enum pll_types {
-	NVPLL,
-	MPLL,
-	VPLL1,
-	VPLL2
+	PLL_CORE   = 0x01,
+	PLL_SHADER = 0x02,
+	PLL_UNK03  = 0x03,
+	PLL_MEMORY = 0x04,
+	PLL_UNK05  = 0x05,
+	PLL_UNK40  = 0x40,
+	PLL_UNK41  = 0x41,
+	PLL_UNK42  = 0x42,
+	PLL_VPLL0  = 0x80,
+	PLL_VPLL1  = 0x81,
+	PLL_MAX    = 0xff
 };
 struct pll_lims {
+	u32 reg;
+
 	struct {
 		int minfreq;
 		int maxfreq;
@@ -212,6 +238,11 @@ struct pll_lims {

 struct nvbios {
 	struct drm_device *dev;
+	enum {
+		NVBIOS_BMP,
+		NVBIOS_BIT
+	} type;
+	uint16_t offset;

 	uint8_t chip_version;
 

+ 194 - 96
drivers/gpu/drm/nouveau/nouveau_bo.c

@@ -36,21 +36,6 @@
 #include <linux/log2.h>
 #include <linux/slab.h>
 
-int
-nouveau_bo_sync_gpu(struct nouveau_bo *nvbo, struct nouveau_channel *chan)
-{
-	struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
-	int ret;
-
-	if (!prev_fence || nouveau_fence_channel(prev_fence) == chan)
-		return 0;
-
-	spin_lock(&nvbo->bo.lock);
-	ret = ttm_bo_wait(&nvbo->bo, false, false, false);
-	spin_unlock(&nvbo->bo.lock);
-	return ret;
-}
-
 static void
 nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 {
@@ -58,8 +43,6 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
 	struct drm_device *dev = dev_priv->dev;
 	struct nouveau_bo *nvbo = nouveau_bo(bo);
 
-	ttm_bo_kunmap(&nvbo->kmap);
-
 	if (unlikely(nvbo->gem))
 		DRM_ERROR("bo %p still attached to GEM object\n", bo);
 
@@ -164,8 +147,6 @@ nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
 	nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
 	align >>= PAGE_SHIFT;
 
-	nvbo->placement.fpfn = 0;
-	nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
 	nouveau_bo_placement_set(nvbo, flags, 0);
 	nvbo->channel = chan;
@@ -305,7 +286,8 @@ nouveau_bo_map(struct nouveau_bo *nvbo)
 void
 nouveau_bo_unmap(struct nouveau_bo *nvbo)
 {
-	ttm_bo_kunmap(&nvbo->kmap);
+	if (nvbo)
+		ttm_bo_kunmap(&nvbo->kmap);
 }
 u16
@@ -399,14 +381,19 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
 		man->default_caching = TTM_PL_FLAG_CACHED;
 		break;
 	case TTM_PL_VRAM:
+		man->func = &ttm_bo_manager_func;
 		man->flags = TTM_MEMTYPE_FLAG_FIXED |
 			     TTM_MEMTYPE_FLAG_MAPPABLE;
 		man->available_caching = TTM_PL_FLAG_UNCACHED |
 					 TTM_PL_FLAG_WC;
 		man->default_caching = TTM_PL_FLAG_WC;
-		man->gpu_offset = dev_priv->vm_vram_base;
+		if (dev_priv->card_type == NV_50)
+			man->gpu_offset = 0x40000000;
+		else
+			man->gpu_offset = 0;
 		break;
 	case TTM_PL_TT:
+		man->func = &ttm_bo_manager_func;
 		switch (dev_priv->gart_info.type) {
 		case NOUVEAU_GART_AGP:
 			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
@@ -469,19 +456,26 @@ nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
 	if (ret)
 		return ret;
 
-	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
-					evict || (nvbo->channel &&
-						  nvbo->channel != chan),
+	if (nvbo->channel) {
+		ret = nouveau_fence_sync(fence, nvbo->channel);
+		if (ret)
+			goto out;
+	}
+
+	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
 					no_wait_reserve, no_wait_gpu, new_mem);
 					no_wait_reserve, no_wait_gpu, new_mem);
+out:
 	nouveau_fence_unref((void *)&fence);
 	return ret;
 }
 
 static inline uint32_t
-nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
-		      struct ttm_mem_reg *mem)
+nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
+		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
 {
-	if (chan == nouveau_bdev(nvbo->bo.bdev)->channel) {
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+	if (nvbo->no_vm) {
 		if (mem->mem_type == TTM_PL_TT)
 			return NvDmaGART;
 		return NvDmaVRAM;
@@ -493,86 +487,181 @@ nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
 }
 static int
-nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
-		     bool no_wait_reserve, bool no_wait_gpu,
-		     struct ttm_mem_reg *new_mem)
+nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
 {
-	struct nouveau_bo *nvbo = nouveau_bo(bo);
 	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
-	struct ttm_mem_reg *old_mem = &bo->mem;
-	struct nouveau_channel *chan;
-	uint64_t src_offset, dst_offset;
-	uint32_t page_count;
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	u64 length = (new_mem->num_pages << PAGE_SHIFT);
+	u64 src_offset, dst_offset;
 	int ret;
 
-	chan = nvbo->channel;
-	if (!chan || nvbo->tile_flags || nvbo->no_vm)
-		chan = dev_priv->channel;
-
-	src_offset = old_mem->mm_node->start << PAGE_SHIFT;
-	dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
-	if (chan != dev_priv->channel) {
-		if (old_mem->mem_type == TTM_PL_TT)
-			src_offset += dev_priv->vm_gart_base;
-		else
+	src_offset = old_mem->start << PAGE_SHIFT;
+	dst_offset = new_mem->start << PAGE_SHIFT;
+	if (!nvbo->no_vm) {
+		if (old_mem->mem_type == TTM_PL_VRAM)
 			src_offset += dev_priv->vm_vram_base;
-
-		if (new_mem->mem_type == TTM_PL_TT)
-			dst_offset += dev_priv->vm_gart_base;
 		else
+			src_offset += dev_priv->vm_gart_base;
+
+		if (new_mem->mem_type == TTM_PL_VRAM)
 			dst_offset += dev_priv->vm_vram_base;
+		else
+			dst_offset += dev_priv->vm_gart_base;
 	}
 
 	ret = RING_SPACE(chan, 3);
 	if (ret)
 		return ret;
-	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
-	OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, old_mem));
-	OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, new_mem));
 
 
-	if (dev_priv->card_type >= NV_50) {
-		ret = RING_SPACE(chan, 4);
+	BEGIN_RING(chan, NvSubM2MF, 0x0184, 2);
+	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
+	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
+
+	while (length) {
+		u32 amount, stride, height;
+
+		amount  = min(length, (u64)(4 * 1024 * 1024));
+		stride  = 16 * 4;
+		height  = amount / stride;
+
+		if (new_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) {
+			ret = RING_SPACE(chan, 8);
+			if (ret)
+				return ret;
+
+			BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
+			OUT_RING  (chan, 0);
+			OUT_RING  (chan, 0);
+			OUT_RING  (chan, stride);
+			OUT_RING  (chan, height);
+			OUT_RING  (chan, 1);
+			OUT_RING  (chan, 0);
+			OUT_RING  (chan, 0);
+		} else {
+			ret = RING_SPACE(chan, 2);
+			if (ret)
+				return ret;
+
+			BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
+			OUT_RING  (chan, 1);
+		}
+		if (old_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) {
+			ret = RING_SPACE(chan, 8);
+			if (ret)
+				return ret;
+
+			BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
+			OUT_RING  (chan, 0);
+			OUT_RING  (chan, 0);
+			OUT_RING  (chan, stride);
+			OUT_RING  (chan, height);
+			OUT_RING  (chan, 1);
+			OUT_RING  (chan, 0);
+			OUT_RING  (chan, 0);
+		} else {
+			ret = RING_SPACE(chan, 2);
+			if (ret)
+				return ret;
+
+			BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
+			OUT_RING  (chan, 1);
+		}
+
+		ret = RING_SPACE(chan, 14);
 		if (ret)
 			return ret;
-		BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
-		OUT_RING(chan, 1);
-		BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
-		OUT_RING(chan, 1);
+
+		BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
+		OUT_RING  (chan, upper_32_bits(src_offset));
+		OUT_RING  (chan, upper_32_bits(dst_offset));
+		BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
+		OUT_RING  (chan, lower_32_bits(src_offset));
+		OUT_RING  (chan, lower_32_bits(dst_offset));
+		OUT_RING  (chan, stride);
+		OUT_RING  (chan, stride);
+		OUT_RING  (chan, stride);
+		OUT_RING  (chan, height);
+		OUT_RING  (chan, 0x00000101);
+		OUT_RING  (chan, 0x00000000);
+		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
+		OUT_RING  (chan, 0);
+
+		length -= amount;
+		src_offset += amount;
+		dst_offset += amount;
 	}
 
+	return 0;
+}
+
+static int
+nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
+		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
+{
+	u32 src_offset = old_mem->start << PAGE_SHIFT;
+	u32 dst_offset = new_mem->start << PAGE_SHIFT;
+	u32 page_count = new_mem->num_pages;
+	int ret;
+
+	ret = RING_SPACE(chan, 3);
+	if (ret)
+		return ret;
+
+	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
+	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
+	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));
+
 	page_count = new_mem->num_pages;
 	while (page_count) {
 		int line_count = (page_count > 2047) ? 2047 : page_count;
 
-		if (dev_priv->card_type >= NV_50) {
-			ret = RING_SPACE(chan, 3);
-			if (ret)
-				return ret;
-			BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
-			OUT_RING(chan, upper_32_bits(src_offset));
-			OUT_RING(chan, upper_32_bits(dst_offset));
-		}
 		ret = RING_SPACE(chan, 11);
 		ret = RING_SPACE(chan, 11);
 		if (ret)
 		if (ret)
 			return ret;
 			return ret;
+
 		BEGIN_RING(chan, NvSubM2MF,
 		BEGIN_RING(chan, NvSubM2MF,
 				 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
 				 NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
-		OUT_RING(chan, lower_32_bits(src_offset));
-		OUT_RING(chan, lower_32_bits(dst_offset));
-		OUT_RING(chan, PAGE_SIZE); /* src_pitch */
-		OUT_RING(chan, PAGE_SIZE); /* dst_pitch */
-		OUT_RING(chan, PAGE_SIZE); /* line_length */
-		OUT_RING(chan, line_count);
-		OUT_RING(chan, (1<<8)|(1<<0));
-		OUT_RING(chan, 0);
+		OUT_RING  (chan, src_offset);
+		OUT_RING  (chan, dst_offset);
+		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
+		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
+		OUT_RING  (chan, PAGE_SIZE); /* line_length */
+		OUT_RING  (chan, line_count);
+		OUT_RING  (chan, 0x00000101);
+		OUT_RING  (chan, 0x00000000);
 		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
-		OUT_RING(chan, 0);
+		OUT_RING  (chan, 0);

 		page_count -= line_count;
 		src_offset += (PAGE_SIZE * line_count);
 		dst_offset += (PAGE_SIZE * line_count);
 	}
 	}
 
 
+	return 0;
+}
+
+static int
+nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
+		     bool no_wait_reserve, bool no_wait_gpu,
+		     struct ttm_mem_reg *new_mem)
+{
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+	struct nouveau_channel *chan;
+	int ret;
+
+	chan = nvbo->channel;
+	if (!chan || nvbo->no_vm)
+		chan = dev_priv->channel;
+
+	if (dev_priv->card_type < NV_50)
+		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
+	else
+		ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
+	if (ret)
+		return ret;
+
 	return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 }
 
@@ -606,12 +695,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,

 	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
 out:
-	if (tmp_mem.mm_node) {
-		spin_lock(&bo->bdev->glob->lru_lock);
-		drm_mm_put_block(tmp_mem.mm_node);
-		spin_unlock(&bo->bdev->glob->lru_lock);
-	}
-
+	ttm_bo_mem_put(bo, &tmp_mem);
 	return ret;
 }
 
@@ -644,12 +728,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
 		goto out;
 out:
-	if (tmp_mem.mm_node) {
-		spin_lock(&bo->bdev->glob->lru_lock);
-		drm_mm_put_block(tmp_mem.mm_node);
-		spin_unlock(&bo->bdev->glob->lru_lock);
-	}
-
+	ttm_bo_mem_put(bo, &tmp_mem);
 	return ret;
 }
 
@@ -669,7 +748,7 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
 		return 0;
 	}
 
-	offset = new_mem->mm_node->start << PAGE_SHIFT;
+	offset = new_mem->start << PAGE_SHIFT;

 	if (dev_priv->card_type == NV_50) {
 		ret = nv50_mem_vm_bind_linear(dev,
@@ -719,12 +798,6 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 	if (ret)
 		return ret;
 
-	/* Software copy if the card isn't up and running yet. */
-	if (!dev_priv->channel) {
-		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
-		goto out;
-	}
-
 	/* Fake bo copy. */
 	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
 		BUG_ON(bo->mem.mm_node != NULL);
@@ -733,6 +806,12 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
 		goto out;
 	}
 
+	/* Software copy if the card isn't up and running yet. */
+	if (!dev_priv->channel) {
+		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
+		goto out;
+	}
+
 	/* Hardware assisted copy. */
 	if (new_mem->mem_type == TTM_PL_SYSTEM)
 		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
@@ -783,14 +862,14 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 	case TTM_PL_TT:
 #if __OS_HAS_AGP
 		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
-			mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+			mem->bus.offset = mem->start << PAGE_SHIFT;
 			mem->bus.base = dev_priv->gart_info.aper_base;
 			mem->bus.is_iomem = true;
 		}
 #endif
 		break;
 	case TTM_PL_VRAM:
-		mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
+		mem->bus.offset = mem->start << PAGE_SHIFT;
 		mem->bus.base = pci_resource_start(dev->pdev, 1);
 		mem->bus.is_iomem = true;
 		break;
@@ -808,7 +887,26 @@ nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 static int
 nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
 {
-	return 0;
+	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+	struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+	/* as long as the bo isn't in vram, and isn't tiled, we've got
+	 * nothing to do here.
+	 */
+	if (bo->mem.mem_type != TTM_PL_VRAM) {
+		if (dev_priv->card_type < NV_50 || !nvbo->tile_flags)
+			return 0;
+	}
+
+	/* make sure bo is in mappable vram */
+	if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
+		return 0;
+
+
+	nvbo->placement.fpfn = 0;
+	nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
+	nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
+	return ttm_bo_validate(bo, &nvbo->placement, false, true, false);
 }
 struct ttm_bo_driver nouveau_bo_driver = {
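
On pre-NV50 hardware the buffer move now goes through nv04_bo_move_m2mf(), which feeds the copy to M2MF in chunks of at most 2047 PAGE_SIZE-sized lines per submission. A standalone sketch of that chunking loop, assuming a 4 KiB page and with the ring submission reduced to a stub:

	#include <stdio.h>

	#define PAGE_SIZE 4096u
	#define MAX_LINES 2047u	/* per-M2MF transfer limit used above */

	static void submit_copy(unsigned src, unsigned dst, unsigned lines)
	{
		/* stand-in for the BEGIN_RING/OUT_RING sequence */
		printf("copy %u pages: 0x%08x -> 0x%08x\n", lines, src, dst);
	}

	static void copy_pages(unsigned src, unsigned dst, unsigned page_count)
	{
		while (page_count) {
			unsigned line_count = page_count > MAX_LINES ?
					      MAX_LINES : page_count;

			submit_copy(src, dst, line_count);
			page_count -= line_count;
			src += PAGE_SIZE * line_count;
			dst += PAGE_SIZE * line_count;
		}
	}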

+ 5 - 5
drivers/gpu/drm/nouveau/nouveau_calc.c

@@ -198,8 +198,8 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nv_fifo_info fifo_data;
 	struct nv_sim_state sim_data;
-	int MClk = nouveau_hw_get_clock(dev, MPLL);
-	int NVClk = nouveau_hw_get_clock(dev, NVPLL);
+	int MClk = nouveau_hw_get_clock(dev, PLL_MEMORY);
+	int NVClk = nouveau_hw_get_clock(dev, PLL_CORE);
 	uint32_t cfg1 = nvReadFB(dev, NV04_PFB_CFG1);
 	sim_data.pclk_khz = VClk;
@@ -234,7 +234,7 @@ nv04_update_arb(struct drm_device *dev, int VClk, int bpp,
 }
 static void
-nv30_update_arb(int *burst, int *lwm)
+nv20_update_arb(int *burst, int *lwm)
 {
 	unsigned int fifo_size, burst_size, graphics_lwm;
 
@@ -251,14 +251,14 @@ nouveau_calc_arb(struct drm_device *dev, int vclk, int bpp, int *burst, int *lwm
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
-	if (dev_priv->card_type < NV_30)
+	if (dev_priv->card_type < NV_20)
 		nv04_update_arb(dev, vclk, bpp, burst, lwm);
 	else if ((dev->pci_device & 0xfff0) == 0x0240 /*CHIPSET_C51*/ ||
 		 (dev->pci_device & 0xfff0) == 0x03d0 /*CHIPSET_C512*/) {
 		*burst = 128;
 		*lwm = 0x0480;
 	} else
-		nv30_update_arb(burst, lwm);
+		nv20_update_arb(burst, lwm);
 }
 static int

+ 9 - 14
drivers/gpu/drm/nouveau/nouveau_channel.c

@@ -48,14 +48,14 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
 						  dev_priv->gart_info.aper_size,
 						  NV_DMA_ACCESS_RO, &pushbuf,
 						  NULL);
-		chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
+		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
 	} else
 	if (dev_priv->card_type != NV_04) {
 		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
 					     dev_priv->fb_available_size,
 					     NV_DMA_ACCESS_RO,
 					     NV_DMA_TARGET_VIDMEM, &pushbuf);
-		chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
+		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
 	} else {
 		/* NV04 cmdbuf hack, from original ddx.. not sure of it's
 		 * exact reason for existing :)  PCI access to cmdbuf in
@@ -67,17 +67,11 @@ nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
 					     dev_priv->fb_available_size,
 					     NV_DMA_ACCESS_RO,
 					     NV_DMA_TARGET_PCI, &pushbuf);
-		chan->pushbuf_base = pb->bo.mem.mm_node->start << PAGE_SHIFT;
-	}
-
-	ret = nouveau_gpuobj_ref_add(dev, chan, 0, pushbuf, &chan->pushbuf);
-	if (ret) {
-		NV_ERROR(dev, "Error referencing pushbuf ctxdma: %d\n", ret);
-		if (pushbuf != dev_priv->gart_info.sg_ctxdma)
-			nouveau_gpuobj_del(dev, &pushbuf);
-		return ret;
+		chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
 	}
 
+	nouveau_gpuobj_ref(pushbuf, &chan->pushbuf);
+	nouveau_gpuobj_ref(NULL, &pushbuf);
 	return 0;
 }
 
@@ -229,7 +223,7 @@ nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret,

 	ret = nouveau_dma_init(chan);
 	if (!ret)
-		ret = nouveau_fence_init(chan);
+		ret = nouveau_fence_channel_init(chan);
 	if (ret) {
 		nouveau_channel_free(chan);
 		return ret;
@@ -276,7 +270,7 @@ nouveau_channel_free(struct nouveau_channel *chan)
 	 * above attempts at idling were OK, but if we failed this'll tell TTM
 	 * we're done with the buffers.
 	 */
-	nouveau_fence_fini(chan);
+	nouveau_fence_channel_fini(chan);

 	/* This will prevent pfifo from switching channels. */
 	pfifo->reassign(dev, false);
@@ -308,8 +302,9 @@ nouveau_channel_free(struct nouveau_channel *chan)
 	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 	/* Release the channel's resources */
-	nouveau_gpuobj_ref_del(dev, &chan->pushbuf);
+	nouveau_gpuobj_ref(NULL, &chan->pushbuf);
 	if (chan->pushbuf_bo) {
+		nouveau_bo_unmap(chan->pushbuf_bo);
 		nouveau_bo_unpin(chan->pushbuf_bo);
 		nouveau_bo_ref(NULL, &chan->pushbuf_bo);
 	}

+ 53 - 1
drivers/gpu/drm/nouveau/nouveau_connector.c

@@ -76,6 +76,22 @@ nouveau_encoder_connector_get(struct nouveau_encoder *encoder)
 	return NULL;
 }
 
+/*TODO: This could use improvement, and learn to handle the fixed
+ *      BIOS tables etc.  It's fine currently, for its only user.
+ */
+int
+nouveau_connector_bpp(struct drm_connector *connector)
+{
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+
+	if (nv_connector->edid && nv_connector->edid->revision >= 4) {
+		u8 bpc = ((nv_connector->edid->input & 0x70) >> 3) + 4;
+		if (bpc > 4)
+			return bpc;
+	}
+
+	return 18;
+}

 static void
 nouveau_connector_destroy(struct drm_connector *drm_connector)
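
The new nouveau_connector_bpp() helper keys off byte 0x14 (the video input definition) of an EDID 1.4 block, where bits 6:4 encode the colour depth for digital sinks; anything older or undefined falls back to 18 bpp. A small sketch of just that decode, following EDID 1.4 semantics (0 is undefined, 1..6 map to 6..16 bits per colour):

	#include <stdint.h>

	/* EDID 1.4 byte 0x14, digital input: bits 6:4 = colour bit depth.
	 * 0 = undefined, 1..6 = 6, 8, 10, 12, 14, 16 bits per colour. */
	static int edid14_bpc(uint8_t video_input)
	{
		unsigned depth = (video_input >> 4) & 0x7;

		if (depth == 0 || depth == 7)
			return 0;	/* undefined / reserved */
		return 2 * depth + 4;
	}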
@@ -130,6 +146,36 @@ nouveau_connector_ddc_detect(struct drm_connector *connector,
 	return NULL;
 }
 
+static struct nouveau_encoder *
+nouveau_connector_of_detect(struct drm_connector *connector)
+{
+#ifdef __powerpc__
+	struct drm_device *dev = connector->dev;
+	struct nouveau_connector *nv_connector = nouveau_connector(connector);
+	struct nouveau_encoder *nv_encoder;
+	struct device_node *cn, *dn = pci_device_to_OF_node(dev->pdev);
+
+	if (!dn ||
+	    !((nv_encoder = find_encoder_by_type(connector, OUTPUT_TMDS)) ||
+	      (nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG))))
+		return NULL;
+
+	for_each_child_of_node(dn, cn) {
+		const char *name = of_get_property(cn, "name", NULL);
+		const void *edid = of_get_property(cn, "EDID", NULL);
+		int idx = name ? name[strlen(name) - 1] - 'A' : 0;
+
+		if (nv_encoder->dcb->i2c_index == idx && edid) {
+			nv_connector->edid =
+				kmemdup(edid, EDID_LENGTH, GFP_KERNEL);
+			of_node_put(cn);
+			return nv_encoder;
+		}
+	}
+#endif
+	return NULL;
+}
+
 static void
 nouveau_connector_set_encoder(struct drm_connector *connector,
 			      struct nouveau_encoder *nv_encoder)
@@ -225,6 +271,12 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
 		return connector_status_connected;
 	}
 
+	nv_encoder = nouveau_connector_of_detect(connector);
+	if (nv_encoder) {
+		nouveau_connector_set_encoder(connector, nv_encoder);
+		return connector_status_connected;
+	}
+
 detect_analog:
 	nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
 	if (!nv_encoder && !nouveau_tv_disable)
@@ -630,7 +682,7 @@ nouveau_connector_mode_valid(struct drm_connector *connector,
 		else
 			max_clock = nv_encoder->dp.link_nr * 162000;
 
-		clock *= 3;
+		clock = clock * nouveau_connector_bpp(connector) / 8;
 		break;
 	default:
 		BUG_ON(1);

+ 3 - 0
drivers/gpu/drm/nouveau/nouveau_connector.h

@@ -55,4 +55,7 @@ nouveau_connector_create(struct drm_device *, int index);
 void
 nouveau_connector_set_polling(struct drm_connector *);
 
+int
+nouveau_connector_bpp(struct drm_connector *);
+
 #endif /* __NOUVEAU_CONNECTOR_H__ */

+ 16 - 0
drivers/gpu/drm/nouveau/nouveau_debugfs.c

@@ -157,7 +157,23 @@ nouveau_debugfs_vbios_image(struct seq_file *m, void *data)
 	return 0;
 }
 
+static int
+nouveau_debugfs_evict_vram(struct seq_file *m, void *data)
+{
+	struct drm_info_node *node = (struct drm_info_node *) m->private;
+	struct drm_nouveau_private *dev_priv = node->minor->dev->dev_private;
+	int ret;
+
+	ret = ttm_bo_evict_mm(&dev_priv->ttm.bdev, TTM_PL_VRAM);
+	if (ret)
+		seq_printf(m, "failed: %d", ret);
+	else
+		seq_printf(m, "succeeded\n");
+	return 0;
+}
+
 static struct drm_info_list nouveau_debugfs_list[] = {
+	{ "evict_vram", nouveau_debugfs_evict_vram, 0, NULL },
 	{ "chipset", nouveau_debugfs_chipset_info, 0, NULL },
 	{ "memory", nouveau_debugfs_memory_info, 0, NULL },
 	{ "vbios.rom", nouveau_debugfs_vbios_image, 0, NULL },

+ 6 - 26
drivers/gpu/drm/nouveau/nouveau_dma.c

@@ -28,6 +28,7 @@
 #include "drm.h"
 #include "nouveau_drv.h"
 #include "nouveau_dma.h"
+#include "nouveau_ramht.h"

 void
 nouveau_dma_pre_init(struct nouveau_channel *chan)
@@ -58,26 +59,17 @@ nouveau_dma_init(struct nouveau_channel *chan)
 {
 	struct drm_device *dev = chan->dev;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct nouveau_gpuobj *m2mf = NULL;
-	struct nouveau_gpuobj *nvsw = NULL;
+	struct nouveau_gpuobj *obj = NULL;
 	int ret, i;

 	/* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
 	ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ?
-				    0x0039 : 0x5039, &m2mf);
+				    0x0039 : 0x5039, &obj);
 	if (ret)
 		return ret;
 
-	ret = nouveau_gpuobj_ref_add(dev, chan, NvM2MF, m2mf, NULL);
-	if (ret)
-		return ret;
-
-	/* Create an NV_SW object for various sync purposes */
-	ret = nouveau_gpuobj_sw_new(chan, NV_SW, &nvsw);
-	if (ret)
-		return ret;
-
-	ret = nouveau_gpuobj_ref_add(dev, chan, NvSw, nvsw, NULL);
+	ret = nouveau_ramht_insert(chan, NvM2MF, obj);
+	nouveau_gpuobj_ref(NULL, &obj);
 	if (ret)
 		return ret;
 
@@ -91,11 +83,6 @@ nouveau_dma_init(struct nouveau_channel *chan)
 	if (ret)
 		return ret;
 
-	/* Map M2MF notifier object - fbcon. */
-	ret = nouveau_bo_map(chan->notifier_bo);
-	if (ret)
-		return ret;
-
 	/* Insert NOPS for NOUVEAU_DMA_SKIPS */
 	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
 	if (ret)
@@ -113,13 +100,6 @@ nouveau_dma_init(struct nouveau_channel *chan)
 	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
 	OUT_RING(chan, NvNotify0);
 
-	/* Initialise NV_SW */
-	ret = RING_SPACE(chan, 2);
-	if (ret)
-		return ret;
-	BEGIN_RING(chan, NvSubSw, 0, 1);
-	OUT_RING(chan, NvSw);
-
 	/* Sit back and pray the channel works.. */
 	FIRE_RING(chan);
 
@@ -217,7 +197,7 @@ nv50_dma_push_wait(struct nouveau_channel *chan, int count)

 		chan->dma.ib_free = get - chan->dma.ib_put;
 		if (chan->dma.ib_free <= 0)
-			chan->dma.ib_free += chan->dma.ib_max + 1;
+			chan->dma.ib_free += chan->dma.ib_max;
 	}
 	return 0;
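
The one-line change in nv50_dma_push_wait() corrects the wrap-around term for the indirect-buffer ring: when get - put goes non-positive the ring has wrapped, and the count to add back is ib_max, not ib_max + 1. A tiny sketch of that free-space arithmetic as an illustration of the usual ring convention (one slot kept unused so put == get means "empty"), not the driver's exact bookkeeping:

	/* Free slots in a ring of 'max' usable entries. */
	static int ring_free(int get, int put, int max)
	{
		int free = get - put;

		if (free <= 0)
			free += max;	/* wrapped: add max, not max + 1 */
		return free;
	}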

+ 1 - 0
drivers/gpu/drm/nouveau/nouveau_dma.h

@@ -72,6 +72,7 @@ enum {
 	NvGdiRect	= 0x8000000c,
 	NvGdiRect	= 0x8000000c,
 	NvImageBlit	= 0x8000000d,
 	NvImageBlit	= 0x8000000d,
 	NvSw		= 0x8000000e,
 	NvSw		= 0x8000000e,
+	NvSema		= 0x8000000f,
 
 
 	/* G80+ display objects */
 	/* G80+ display objects */
 	NvEvoVRAM	= 0x01000000,
 	NvEvoVRAM	= 0x01000000,

+ 7 - 3
drivers/gpu/drm/nouveau/nouveau_dp.c

@@ -317,7 +317,8 @@ train:
 		return false;
 		return false;
 
 
 	config[0] = nv_encoder->dp.link_nr;
 	config[0] = nv_encoder->dp.link_nr;
-	if (nv_encoder->dp.dpcd_version >= 0x11)
+	if (nv_encoder->dp.dpcd_version >= 0x11 &&
+	    nv_encoder->dp.enhanced_frame)
 		config[0] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
 		config[0] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
 
 
 	ret = nouveau_dp_lane_count_set(encoder, config[0]);
 	ret = nouveau_dp_lane_count_set(encoder, config[0]);
@@ -468,10 +469,12 @@ nouveau_dp_detect(struct drm_encoder *encoder)
 	    !nv_encoder->dcb->dpconf.link_bw)
 	    !nv_encoder->dcb->dpconf.link_bw)
 		nv_encoder->dp.link_bw = DP_LINK_BW_1_62;
 		nv_encoder->dp.link_bw = DP_LINK_BW_1_62;
 
 
-	nv_encoder->dp.link_nr = dpcd[2] & 0xf;
+	nv_encoder->dp.link_nr = dpcd[2] & DP_MAX_LANE_COUNT_MASK;
 	if (nv_encoder->dp.link_nr > nv_encoder->dcb->dpconf.link_nr)
 	if (nv_encoder->dp.link_nr > nv_encoder->dcb->dpconf.link_nr)
 		nv_encoder->dp.link_nr = nv_encoder->dcb->dpconf.link_nr;
 		nv_encoder->dp.link_nr = nv_encoder->dcb->dpconf.link_nr;
 
 
+	nv_encoder->dp.enhanced_frame = (dpcd[2] & DP_ENHANCED_FRAME_CAP);
+
 	return true;
 	return true;
 }
 }
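
nouveau_dp_detect() now masks the lane count with DP_MAX_LANE_COUNT_MASK and records DP_ENHANCED_FRAME_CAP; both live in DPCD receiver-capability byte 2, where bits 4:0 hold the sink's maximum lane count and bit 7 advertises enhanced-framing support. A short sketch of that decode using the standard DisplayPort helper values:

	#include <stdint.h>

	#define DP_MAX_LANE_COUNT_MASK  0x1f
	#define DP_ENHANCED_FRAME_CAP   (1 << 7)

	struct dp_caps {
		uint8_t max_lanes;
		uint8_t enhanced_frame;
	};

	/* dpcd[2] = MAX_LANE_COUNT register from the receiver capability block */
	static void parse_dpcd_lanes(const uint8_t *dpcd, struct dp_caps *caps)
	{
		caps->max_lanes      = dpcd[2] & DP_MAX_LANE_COUNT_MASK;
		caps->enhanced_frame = !!(dpcd[2] & DP_ENHANCED_FRAME_CAP);
	}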
 
 
@@ -524,7 +527,8 @@ nouveau_dp_auxch(struct nouveau_i2c_chan *auxch, int cmd, int addr,
 		nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x80000000);
 		nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x80000000);
 		nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl);
 		nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl);
 		nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x00010000);
 		nv_wr32(dev, NV50_AUXCH_CTRL(index), ctrl | 0x00010000);
-		if (!nv_wait(NV50_AUXCH_CTRL(index), 0x00010000, 0x00000000)) {
+		if (!nv_wait(dev, NV50_AUXCH_CTRL(index),
+			     0x00010000, 0x00000000)) {
 			NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n",
 			NV_ERROR(dev, "expected bit 16 == 0, got 0x%08x\n",
 				 nv_rd32(dev, NV50_AUXCH_CTRL(index)));
 				 nv_rd32(dev, NV50_AUXCH_CTRL(index)));
 			ret = -EBUSY;
 			ret = -EBUSY;

+ 18 - 5
drivers/gpu/drm/nouveau/nouveau_drv.c

@@ -31,13 +31,14 @@
 #include "nouveau_hw.h"
 #include "nouveau_hw.h"
 #include "nouveau_fb.h"
 #include "nouveau_fb.h"
 #include "nouveau_fbcon.h"
 #include "nouveau_fbcon.h"
+#include "nouveau_pm.h"
 #include "nv50_display.h"
 #include "nv50_display.h"
 
 
 #include "drm_pciids.h"
 #include "drm_pciids.h"
 
 
-MODULE_PARM_DESC(noagp, "Disable AGP");
-int nouveau_noagp;
-module_param_named(noagp, nouveau_noagp, int, 0400);
+MODULE_PARM_DESC(agpmode, "AGP mode (0 to disable AGP)");
+int nouveau_agpmode = -1;
+module_param_named(agpmode, nouveau_agpmode, int, 0400);
 
 
 MODULE_PARM_DESC(modeset, "Enable kernel modesetting");
 MODULE_PARM_DESC(modeset, "Enable kernel modesetting");
 static int nouveau_modeset = -1; /* kms */
 static int nouveau_modeset = -1; /* kms */
@@ -79,6 +80,10 @@ MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
 int nouveau_nofbaccel = 0;
 int nouveau_nofbaccel = 0;
 module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
 module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
 
 
+MODULE_PARM_DESC(force_post, "Force POST");
+int nouveau_force_post = 0;
+module_param_named(force_post, nouveau_force_post, int, 0400);
+
 MODULE_PARM_DESC(override_conntype, "Ignore DCB connector type");
 MODULE_PARM_DESC(override_conntype, "Ignore DCB connector type");
 int nouveau_override_conntype = 0;
 int nouveau_override_conntype = 0;
 module_param_named(override_conntype, nouveau_override_conntype, int, 0400);
 module_param_named(override_conntype, nouveau_override_conntype, int, 0400);
@@ -102,6 +107,14 @@ MODULE_PARM_DESC(reg_debug, "Register access debug bitmask:\n"
 int nouveau_reg_debug;
 int nouveau_reg_debug;
 module_param_named(reg_debug, nouveau_reg_debug, int, 0600);
 module_param_named(reg_debug, nouveau_reg_debug, int, 0600);
 
 
+MODULE_PARM_DESC(perflvl, "Performance level (default: boot)\n");
+char *nouveau_perflvl;
+module_param_named(perflvl, nouveau_perflvl, charp, 0400);
+
+MODULE_PARM_DESC(perflvl_wr, "Allow perflvl changes (warning: dangerous!)\n");
+int nouveau_perflvl_wr;
+module_param_named(perflvl_wr, nouveau_perflvl_wr, int, 0400);
+
 int nouveau_fbpercrtc;
 int nouveau_fbpercrtc;
 #if 0
 #if 0
 module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
 module_param_named(fbpercrtc, nouveau_fbpercrtc, int, 0400);
@@ -271,6 +284,8 @@ nouveau_pci_resume(struct pci_dev *pdev)
 	if (ret)
 	if (ret)
 		return ret;
 		return ret;
 
 
+	nouveau_pm_resume(dev);
+
 	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
 	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
 		ret = nouveau_mem_init_agp(dev);
 		ret = nouveau_mem_init_agp(dev);
 		if (ret) {
 		if (ret) {
@@ -379,8 +394,6 @@ static struct drm_driver driver = {
 	.irq_uninstall = nouveau_irq_uninstall,
 	.irq_uninstall = nouveau_irq_uninstall,
 	.irq_handler = nouveau_irq_handler,
 	.irq_handler = nouveau_irq_handler,
 	.reclaim_buffers = drm_core_reclaim_buffers,
 	.reclaim_buffers = drm_core_reclaim_buffers,
-	.get_map_ofs = drm_core_get_map_ofs,
-	.get_reg_ofs = drm_core_get_reg_ofs,
 	.ioctls = nouveau_ioctls,
 	.ioctls = nouveau_ioctls,
 	.fops = {
 	.fops = {
 		.owner = THIS_MODULE,
 		.owner = THIS_MODULE,

+ 162 - 91
drivers/gpu/drm/nouveau/nouveau_drv.h

@@ -133,22 +133,24 @@ enum nouveau_flags {
 #define NVOBJ_ENGINE_DISPLAY	2
 #define NVOBJ_ENGINE_DISPLAY	2
 #define NVOBJ_ENGINE_INT	0xdeadbeef
 #define NVOBJ_ENGINE_INT	0xdeadbeef
 
 
-#define NVOBJ_FLAG_ALLOW_NO_REFS	(1 << 0)
 #define NVOBJ_FLAG_ZERO_ALLOC		(1 << 1)
 #define NVOBJ_FLAG_ZERO_ALLOC		(1 << 1)
 #define NVOBJ_FLAG_ZERO_FREE		(1 << 2)
 #define NVOBJ_FLAG_ZERO_FREE		(1 << 2)
-#define NVOBJ_FLAG_FAKE			(1 << 3)
 struct nouveau_gpuobj {
 struct nouveau_gpuobj {
+	struct drm_device *dev;
+	struct kref refcount;
 	struct list_head list;
 	struct list_head list;
 
 
-	struct nouveau_channel *im_channel;
 	struct drm_mm_node *im_pramin;
 	struct drm_mm_node *im_pramin;
 	struct nouveau_bo *im_backing;
 	struct nouveau_bo *im_backing;
-	uint32_t im_backing_start;
 	uint32_t *im_backing_suspend;
 	uint32_t *im_backing_suspend;
 	int im_bound;
 	int im_bound;
 
 
 	uint32_t flags;
 	uint32_t flags;
-	int refcount;
+
+	u32 size;
+	u32 pinst;
+	u32 cinst;
+	u64 vinst;
 
 
 	uint32_t engine;
 	uint32_t engine;
 	uint32_t class;
 	uint32_t class;
@@ -157,16 +159,6 @@ struct nouveau_gpuobj {
 	void *priv;
 	void *priv;
 };
 };
 
 
-struct nouveau_gpuobj_ref {
-	struct list_head list;
-
-	struct nouveau_gpuobj *gpuobj;
-	uint32_t instance;
-
-	struct nouveau_channel *channel;
-	int handle;
-};
-
 struct nouveau_channel {
 struct nouveau_channel {
 	struct drm_device *dev;
 	struct drm_device *dev;
 	int id;
 	int id;
@@ -192,33 +184,32 @@ struct nouveau_channel {
 	} fence;
 	} fence;
 
 
 	/* DMA push buffer */
 	/* DMA push buffer */
-	struct nouveau_gpuobj_ref *pushbuf;
-	struct nouveau_bo         *pushbuf_bo;
-	uint32_t                   pushbuf_base;
+	struct nouveau_gpuobj *pushbuf;
+	struct nouveau_bo     *pushbuf_bo;
+	uint32_t               pushbuf_base;
 
 
 	/* Notifier memory */
 	/* Notifier memory */
 	struct nouveau_bo *notifier_bo;
 	struct nouveau_bo *notifier_bo;
 	struct drm_mm notifier_heap;
 	struct drm_mm notifier_heap;
 
 
 	/* PFIFO context */
 	/* PFIFO context */
-	struct nouveau_gpuobj_ref *ramfc;
-	struct nouveau_gpuobj_ref *cache;
+	struct nouveau_gpuobj *ramfc;
+	struct nouveau_gpuobj *cache;
 
 
 	/* PGRAPH context */
 	/* PGRAPH context */
 	/* XXX may be merge 2 pointers as private data ??? */
 	/* XXX may be merge 2 pointers as private data ??? */
-	struct nouveau_gpuobj_ref *ramin_grctx;
+	struct nouveau_gpuobj *ramin_grctx;
 	void *pgraph_ctx;
 	void *pgraph_ctx;
 
 
 	/* NV50 VM */
 	/* NV50 VM */
-	struct nouveau_gpuobj     *vm_pd;
-	struct nouveau_gpuobj_ref *vm_gart_pt;
-	struct nouveau_gpuobj_ref *vm_vram_pt[NV50_VM_VRAM_NR];
+	struct nouveau_gpuobj *vm_pd;
+	struct nouveau_gpuobj *vm_gart_pt;
+	struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
 
 
 	/* Objects */
 	/* Objects */
-	struct nouveau_gpuobj_ref *ramin; /* Private instmem */
-	struct drm_mm              ramin_heap; /* Private PRAMIN heap */
-	struct nouveau_gpuobj_ref *ramht; /* Hash table */
-	struct list_head           ramht_refs; /* Objects referenced by RAMHT */
+	struct nouveau_gpuobj *ramin; /* Private instmem */
+	struct drm_mm          ramin_heap; /* Private PRAMIN heap */
+	struct nouveau_ramht  *ramht; /* Hash table */
 
 
 	/* GPU object info for stuff used in-kernel (mm_enabled) */
 	/* GPU object info for stuff used in-kernel (mm_enabled) */
 	uint32_t m2mf_ntfy;
 	uint32_t m2mf_ntfy;
@@ -296,7 +287,7 @@ struct nouveau_fb_engine {
 struct nouveau_fifo_engine {
 struct nouveau_fifo_engine {
 	int  channels;
 	int  channels;
 
 
-	struct nouveau_gpuobj_ref *playlist[2];
+	struct nouveau_gpuobj *playlist[2];
 	int cur_playlist;
 	int cur_playlist;
 
 
 	int  (*init)(struct drm_device *);
 	int  (*init)(struct drm_device *);
@@ -305,7 +296,6 @@ struct nouveau_fifo_engine {
 	void (*disable)(struct drm_device *);
 	void (*disable)(struct drm_device *);
 	void (*enable)(struct drm_device *);
 	void (*enable)(struct drm_device *);
 	bool (*reassign)(struct drm_device *, bool enable);
 	bool (*reassign)(struct drm_device *, bool enable);
-	bool (*cache_flush)(struct drm_device *dev);
 	bool (*cache_pull)(struct drm_device *dev, bool enable);
 	bool (*cache_pull)(struct drm_device *dev, bool enable);
 
 
 	int  (*channel_id)(struct drm_device *);
 	int  (*channel_id)(struct drm_device *);
@@ -334,7 +324,7 @@ struct nouveau_pgraph_engine {
 	int grctx_size;
 	int grctx_size;
 
 
 	/* NV2x/NV3x context table (0x400780) */
 	/* NV2x/NV3x context table (0x400780) */
-	struct nouveau_gpuobj_ref *ctx_table;
+	struct nouveau_gpuobj *ctx_table;
 
 
 	int  (*init)(struct drm_device *);
 	int  (*init)(struct drm_device *);
 	void (*takedown)(struct drm_device *);
 	void (*takedown)(struct drm_device *);
@@ -369,6 +359,91 @@ struct nouveau_gpio_engine {
 	void (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on);
 	void (*irq_enable)(struct drm_device *, enum dcb_gpio_tag, bool on);
 };
 };
 
 
+struct nouveau_pm_voltage_level {
+	u8 voltage;
+	u8 vid;
+};
+
+struct nouveau_pm_voltage {
+	bool supported;
+	u8 vid_mask;
+
+	struct nouveau_pm_voltage_level *level;
+	int nr_level;
+};
+
+#define NOUVEAU_PM_MAX_LEVEL 8
+struct nouveau_pm_level {
+	struct device_attribute dev_attr;
+	char name[32];
+	int id;
+
+	u32 core;
+	u32 memory;
+	u32 shader;
+	u32 unk05;
+
+	u8 voltage;
+	u8 fanspeed;
+
+	u16 memscript;
+};
+
+struct nouveau_pm_temp_sensor_constants {
+	u16 offset_constant;
+	s16 offset_mult;
+	u16 offset_div;
+	u16 slope_mult;
+	u16 slope_div;
+};
+
+struct nouveau_pm_threshold_temp {
+	s16 critical;
+	s16 down_clock;
+	s16 fan_boost;
+};
+
+struct nouveau_pm_memtiming {
+	u32 reg_100220;
+	u32 reg_100224;
+	u32 reg_100228;
+	u32 reg_10022c;
+	u32 reg_100230;
+	u32 reg_100234;
+	u32 reg_100238;
+	u32 reg_10023c;
+};
+
+struct nouveau_pm_memtimings {
+	bool supported;
+	struct nouveau_pm_memtiming *timing;
+	int nr_timing;
+};
+
+struct nouveau_pm_engine {
+	struct nouveau_pm_voltage voltage;
+	struct nouveau_pm_level perflvl[NOUVEAU_PM_MAX_LEVEL];
+	int nr_perflvl;
+	struct nouveau_pm_memtimings memtimings;
+	struct nouveau_pm_temp_sensor_constants sensor_constants;
+	struct nouveau_pm_threshold_temp threshold_temp;
+
+	struct nouveau_pm_level boot;
+	struct nouveau_pm_level *cur;
+
+	struct device *hwmon;
+
+	int (*clock_get)(struct drm_device *, u32 id);
+	void *(*clock_pre)(struct drm_device *, struct nouveau_pm_level *,
+			   u32 id, int khz);
+	void (*clock_set)(struct drm_device *, void *);
+	int (*voltage_get)(struct drm_device *);
+	int (*voltage_set)(struct drm_device *, int voltage);
+	int (*fanspeed_get)(struct drm_device *);
+	int (*fanspeed_set)(struct drm_device *, int fanspeed);
+	int (*temp_get)(struct drm_device *);
+};
+
 struct nouveau_engine {
 struct nouveau_engine {
 	struct nouveau_instmem_engine instmem;
 	struct nouveau_instmem_engine instmem;
 	struct nouveau_mc_engine      mc;
 	struct nouveau_mc_engine      mc;
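The nouveau_pm_* structures added above give the driver a table of performance levels plus function pointers for clock, voltage, fan and temperature access. A minimal usage sketch (not part of the patch; the helper name and log format are illustrative, and the clock fields hold whatever the BIOS-table parser stored in them):

/* Illustrative sketch only: helper name and message format are not part
 * of this commit.  Walks the perf levels parsed into the new pm engine. */
static void
example_dump_perflvls(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
	int i;

	for (i = 0; i < pm->nr_perflvl; i++) {
		struct nouveau_pm_level *lvl = &pm->perflvl[i];

		NV_INFO(dev, "perflvl %d: core %u memory %u shader %u\n",
			lvl->id, lvl->core, lvl->memory, lvl->shader);
	}

	if (pm->voltage.supported && pm->voltage_get)
		NV_INFO(dev, "voltage (raw): %d\n", pm->voltage_get(dev));
}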
@@ -378,6 +453,7 @@ struct nouveau_engine {
 	struct nouveau_fifo_engine    fifo;
 	struct nouveau_fifo_engine    fifo;
 	struct nouveau_display_engine display;
 	struct nouveau_display_engine display;
 	struct nouveau_gpio_engine    gpio;
 	struct nouveau_gpio_engine    gpio;
+	struct nouveau_pm_engine      pm;
 };
 };
 
 
 struct nouveau_pll_vals {
 struct nouveau_pll_vals {
@@ -522,8 +598,14 @@ struct drm_nouveau_private {
 	int flags;
 	int flags;
 
 
 	void __iomem *mmio;
 	void __iomem *mmio;
+
+	spinlock_t ramin_lock;
 	void __iomem *ramin;
 	void __iomem *ramin;
-	uint32_t ramin_size;
+	u32 ramin_size;
+	u32 ramin_base;
+	bool ramin_available;
+	struct drm_mm ramin_heap;
+	struct list_head gpuobj_list;
 
 
 	struct nouveau_bo *vga_ram;
 	struct nouveau_bo *vga_ram;
 
 
@@ -540,6 +622,12 @@ struct drm_nouveau_private {
 		atomic_t validate_sequence;
 		atomic_t validate_sequence;
 	} ttm;
 	} ttm;
 
 
+	struct {
+		spinlock_t lock;
+		struct drm_mm heap;
+		struct nouveau_bo *bo;
+	} fence;
+
 	int fifo_alloc_count;
 	int fifo_alloc_count;
 	struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
 	struct nouveau_channel *fifos[NOUVEAU_MAX_CHANNEL_NR];
 
 
@@ -550,15 +638,11 @@ struct drm_nouveau_private {
 	spinlock_t context_switch_lock;
 	spinlock_t context_switch_lock;
 
 
 	/* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
 	/* RAMIN configuration, RAMFC, RAMHT and RAMRO offsets */
-	struct nouveau_gpuobj *ramht;
+	struct nouveau_ramht  *ramht;
+	struct nouveau_gpuobj *ramfc;
+	struct nouveau_gpuobj *ramro;
+
 	uint32_t ramin_rsvd_vram;
 	uint32_t ramin_rsvd_vram;
-	uint32_t ramht_offset;
-	uint32_t ramht_size;
-	uint32_t ramht_bits;
-	uint32_t ramfc_offset;
-	uint32_t ramfc_size;
-	uint32_t ramro_offset;
-	uint32_t ramro_size;
 
 
 	struct {
 	struct {
 		enum {
 		enum {
@@ -576,14 +660,12 @@ struct drm_nouveau_private {
 	} gart_info;
 	} gart_info;
 
 
 	/* nv10-nv40 tiling regions */
 	/* nv10-nv40 tiling regions */
-	struct {
-		struct nouveau_tile_reg reg[NOUVEAU_MAX_TILE_NR];
-		spinlock_t lock;
-	} tile;
+	struct nouveau_tile_reg tile[NOUVEAU_MAX_TILE_NR];
 
 
 	/* VRAM/fb configuration */
 	/* VRAM/fb configuration */
 	uint64_t vram_size;
 	uint64_t vram_size;
 	uint64_t vram_sys_base;
 	uint64_t vram_sys_base;
+	u32 vram_rblock_size;
 
 
 	uint64_t fb_phys;
 	uint64_t fb_phys;
 	uint64_t fb_available_size;
 	uint64_t fb_available_size;
@@ -600,10 +682,6 @@ struct drm_nouveau_private {
 	struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
 	struct nouveau_gpuobj *vm_vram_pt[NV50_VM_VRAM_NR];
 	int vm_vram_pt_nr;
 	int vm_vram_pt_nr;
 
 
-	struct drm_mm ramin_heap;
-
-	struct list_head gpuobj_list;
-
 	struct nvbios vbios;
 	struct nvbios vbios;
 
 
 	struct nv04_mode_state mode_reg;
 	struct nv04_mode_state mode_reg;
@@ -633,6 +711,12 @@ struct drm_nouveau_private {
 	struct apertures_struct *apertures;
 	struct apertures_struct *apertures;
 };
 };
 
 
+static inline struct drm_nouveau_private *
+nouveau_private(struct drm_device *dev)
+{
+	return dev->dev_private;
+}
+
 static inline struct drm_nouveau_private *
 static inline struct drm_nouveau_private *
 nouveau_bdev(struct ttm_bo_device *bd)
 nouveau_bdev(struct ttm_bo_device *bd)
 {
 {
@@ -669,7 +753,7 @@ nouveau_bo_ref(struct nouveau_bo *ref, struct nouveau_bo **pnvbo)
 } while (0)
 } while (0)
 
 
 /* nouveau_drv.c */
 /* nouveau_drv.c */
-extern int nouveau_noagp;
+extern int nouveau_agpmode;
 extern int nouveau_duallink;
 extern int nouveau_duallink;
 extern int nouveau_uscript_lvds;
 extern int nouveau_uscript_lvds;
 extern int nouveau_uscript_tmds;
 extern int nouveau_uscript_tmds;
@@ -683,7 +767,10 @@ extern char *nouveau_vbios;
 extern int nouveau_ignorelid;
 extern int nouveau_ignorelid;
 extern int nouveau_nofbaccel;
 extern int nouveau_nofbaccel;
 extern int nouveau_noaccel;
 extern int nouveau_noaccel;
+extern int nouveau_force_post;
 extern int nouveau_override_conntype;
 extern int nouveau_override_conntype;
+extern char *nouveau_perflvl;
+extern int nouveau_perflvl_wr;
 
 
 extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
 extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
 extern int nouveau_pci_resume(struct pci_dev *pdev);
 extern int nouveau_pci_resume(struct pci_dev *pdev);
@@ -704,8 +791,10 @@ extern bool nouveau_wait_for_idle(struct drm_device *);
 extern int  nouveau_card_init(struct drm_device *);
 extern int  nouveau_card_init(struct drm_device *);
 
 
 /* nouveau_mem.c */
 /* nouveau_mem.c */
-extern int  nouveau_mem_detect(struct drm_device *dev);
-extern int  nouveau_mem_init(struct drm_device *);
+extern int  nouveau_mem_vram_init(struct drm_device *);
+extern void nouveau_mem_vram_fini(struct drm_device *);
+extern int  nouveau_mem_gart_init(struct drm_device *);
+extern void nouveau_mem_gart_fini(struct drm_device *);
 extern int  nouveau_mem_init_agp(struct drm_device *);
 extern int  nouveau_mem_init_agp(struct drm_device *);
 extern int  nouveau_mem_reset_agp(struct drm_device *);
 extern int  nouveau_mem_reset_agp(struct drm_device *);
 extern void nouveau_mem_close(struct drm_device *);
 extern void nouveau_mem_close(struct drm_device *);
@@ -749,7 +838,6 @@ extern void nouveau_channel_free(struct nouveau_channel *);
 extern int  nouveau_gpuobj_early_init(struct drm_device *);
 extern int  nouveau_gpuobj_early_init(struct drm_device *);
 extern int  nouveau_gpuobj_init(struct drm_device *);
 extern int  nouveau_gpuobj_init(struct drm_device *);
 extern void nouveau_gpuobj_takedown(struct drm_device *);
 extern void nouveau_gpuobj_takedown(struct drm_device *);
-extern void nouveau_gpuobj_late_takedown(struct drm_device *);
 extern int  nouveau_gpuobj_suspend(struct drm_device *dev);
 extern int  nouveau_gpuobj_suspend(struct drm_device *dev);
 extern void nouveau_gpuobj_suspend_cleanup(struct drm_device *dev);
 extern void nouveau_gpuobj_suspend_cleanup(struct drm_device *dev);
 extern void nouveau_gpuobj_resume(struct drm_device *dev);
 extern void nouveau_gpuobj_resume(struct drm_device *dev);
@@ -759,24 +847,11 @@ extern void nouveau_gpuobj_channel_takedown(struct nouveau_channel *);
 extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *,
 extern int nouveau_gpuobj_new(struct drm_device *, struct nouveau_channel *,
 			      uint32_t size, int align, uint32_t flags,
 			      uint32_t size, int align, uint32_t flags,
 			      struct nouveau_gpuobj **);
 			      struct nouveau_gpuobj **);
-extern int nouveau_gpuobj_del(struct drm_device *, struct nouveau_gpuobj **);
-extern int nouveau_gpuobj_ref_add(struct drm_device *, struct nouveau_channel *,
-				  uint32_t handle, struct nouveau_gpuobj *,
-				  struct nouveau_gpuobj_ref **);
-extern int nouveau_gpuobj_ref_del(struct drm_device *,
-				  struct nouveau_gpuobj_ref **);
-extern int nouveau_gpuobj_ref_find(struct nouveau_channel *, uint32_t handle,
-				   struct nouveau_gpuobj_ref **ref_ret);
-extern int nouveau_gpuobj_new_ref(struct drm_device *,
-				  struct nouveau_channel *alloc_chan,
-				  struct nouveau_channel *ref_chan,
-				  uint32_t handle, uint32_t size, int align,
-				  uint32_t flags, struct nouveau_gpuobj_ref **);
-extern int nouveau_gpuobj_new_fake(struct drm_device *,
-				   uint32_t p_offset, uint32_t b_offset,
-				   uint32_t size, uint32_t flags,
-				   struct nouveau_gpuobj **,
-				   struct nouveau_gpuobj_ref**);
+extern void nouveau_gpuobj_ref(struct nouveau_gpuobj *,
+			       struct nouveau_gpuobj **);
+extern int nouveau_gpuobj_new_fake(struct drm_device *, u32 pinst, u64 vinst,
+				   u32 size, u32 flags,
+				   struct nouveau_gpuobj **);
 extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
 extern int nouveau_gpuobj_dma_new(struct nouveau_channel *, int class,
 				  uint64_t offset, uint64_t size, int access,
 				  uint64_t offset, uint64_t size, int access,
 				  int target, struct nouveau_gpuobj **);
 				  int target, struct nouveau_gpuobj **);
@@ -879,6 +954,7 @@ extern struct dcb_gpio_entry *nouveau_bios_gpio_entry(struct drm_device *,
 						      enum dcb_gpio_tag);
 						      enum dcb_gpio_tag);
 extern struct dcb_connector_table_entry *
 extern struct dcb_connector_table_entry *
 nouveau_bios_connector_entry(struct drm_device *, int index);
 nouveau_bios_connector_entry(struct drm_device *, int index);
+extern u32 get_pll_register(struct drm_device *, enum pll_types);
 extern int get_pll_limits(struct drm_device *, uint32_t limit_match,
 extern int get_pll_limits(struct drm_device *, uint32_t limit_match,
 			  struct pll_lims *);
 			  struct pll_lims *);
 extern int nouveau_bios_run_display_table(struct drm_device *,
 extern int nouveau_bios_run_display_table(struct drm_device *,
@@ -925,10 +1001,10 @@ extern int  nv40_fb_init(struct drm_device *);
 extern void nv40_fb_takedown(struct drm_device *);
 extern void nv40_fb_takedown(struct drm_device *);
 extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
 extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
 				      uint32_t, uint32_t);
 				      uint32_t, uint32_t);
-
 /* nv50_fb.c */
 /* nv50_fb.c */
 extern int  nv50_fb_init(struct drm_device *);
 extern int  nv50_fb_init(struct drm_device *);
 extern void nv50_fb_takedown(struct drm_device *);
 extern void nv50_fb_takedown(struct drm_device *);
+extern void nv50_fb_vm_trap(struct drm_device *, int display, const char *);
 
 
 /* nvc0_fb.c */
 /* nvc0_fb.c */
 extern int  nvc0_fb_init(struct drm_device *);
 extern int  nvc0_fb_init(struct drm_device *);
@@ -939,7 +1015,6 @@ extern int  nv04_fifo_init(struct drm_device *);
 extern void nv04_fifo_disable(struct drm_device *);
 extern void nv04_fifo_disable(struct drm_device *);
 extern void nv04_fifo_enable(struct drm_device *);
 extern void nv04_fifo_enable(struct drm_device *);
 extern bool nv04_fifo_reassign(struct drm_device *, bool);
 extern bool nv04_fifo_reassign(struct drm_device *, bool);
-extern bool nv04_fifo_cache_flush(struct drm_device *);
 extern bool nv04_fifo_cache_pull(struct drm_device *, bool);
 extern bool nv04_fifo_cache_pull(struct drm_device *, bool);
 extern int  nv04_fifo_channel_id(struct drm_device *);
 extern int  nv04_fifo_channel_id(struct drm_device *);
 extern int  nv04_fifo_create_context(struct nouveau_channel *);
 extern int  nv04_fifo_create_context(struct nouveau_channel *);
@@ -977,7 +1052,6 @@ extern void nvc0_fifo_takedown(struct drm_device *);
 extern void nvc0_fifo_disable(struct drm_device *);
 extern void nvc0_fifo_disable(struct drm_device *);
 extern void nvc0_fifo_enable(struct drm_device *);
 extern void nvc0_fifo_enable(struct drm_device *);
 extern bool nvc0_fifo_reassign(struct drm_device *, bool);
 extern bool nvc0_fifo_reassign(struct drm_device *, bool);
-extern bool nvc0_fifo_cache_flush(struct drm_device *);
 extern bool nvc0_fifo_cache_pull(struct drm_device *, bool);
 extern bool nvc0_fifo_cache_pull(struct drm_device *, bool);
 extern int  nvc0_fifo_channel_id(struct drm_device *);
 extern int  nvc0_fifo_channel_id(struct drm_device *);
 extern int  nvc0_fifo_create_context(struct nouveau_channel *);
 extern int  nvc0_fifo_create_context(struct nouveau_channel *);
@@ -1169,15 +1243,21 @@ extern int nouveau_bo_sync_gpu(struct nouveau_bo *, struct nouveau_channel *);
 
 
 /* nouveau_fence.c */
 /* nouveau_fence.c */
 struct nouveau_fence;
 struct nouveau_fence;
-extern int nouveau_fence_init(struct nouveau_channel *);
-extern void nouveau_fence_fini(struct nouveau_channel *);
+extern int nouveau_fence_init(struct drm_device *);
+extern void nouveau_fence_fini(struct drm_device *);
+extern int nouveau_fence_channel_init(struct nouveau_channel *);
+extern void nouveau_fence_channel_fini(struct nouveau_channel *);
 extern void nouveau_fence_update(struct nouveau_channel *);
 extern void nouveau_fence_update(struct nouveau_channel *);
 extern int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **,
 extern int nouveau_fence_new(struct nouveau_channel *, struct nouveau_fence **,
 			     bool emit);
 			     bool emit);
 extern int nouveau_fence_emit(struct nouveau_fence *);
 extern int nouveau_fence_emit(struct nouveau_fence *);
+extern void nouveau_fence_work(struct nouveau_fence *fence,
+			       void (*work)(void *priv, bool signalled),
+			       void *priv);
 struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *);
 struct nouveau_channel *nouveau_fence_channel(struct nouveau_fence *);
 extern bool nouveau_fence_signalled(void *obj, void *arg);
 extern bool nouveau_fence_signalled(void *obj, void *arg);
 extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
 extern int nouveau_fence_wait(void *obj, void *arg, bool lazy, bool intr);
+extern int nouveau_fence_sync(struct nouveau_fence *, struct nouveau_channel *);
 extern int nouveau_fence_flush(void *obj, void *arg);
 extern int nouveau_fence_flush(void *obj, void *arg);
 extern void nouveau_fence_unref(void **obj);
 extern void nouveau_fence_unref(void **obj);
 extern void *nouveau_fence_ref(void *obj);
 extern void *nouveau_fence_ref(void *obj);
@@ -1255,12 +1335,11 @@ static inline void nv_wr32(struct drm_device *dev, unsigned reg, u32 val)
 	iowrite32_native(val, dev_priv->mmio + reg);
 	iowrite32_native(val, dev_priv->mmio + reg);
 }
 }
 
 
-static inline void nv_mask(struct drm_device *dev, u32 reg, u32 mask, u32 val)
+static inline u32 nv_mask(struct drm_device *dev, u32 reg, u32 mask, u32 val)
 {
 {
 	u32 tmp = nv_rd32(dev, reg);
 	u32 tmp = nv_rd32(dev, reg);
-	tmp &= ~mask;
-	tmp |= val;
-	nv_wr32(dev, reg, tmp);
+	nv_wr32(dev, reg, (tmp & ~mask) | val);
+	return tmp;
 }
 }
 
 
 static inline u8 nv_rd08(struct drm_device *dev, unsigned reg)
 static inline u8 nv_rd08(struct drm_device *dev, unsigned reg)
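nv_mask() now returns the pre-modification register value, which makes save/modify/restore sequences one call each. A hedged sketch (the register offset is a placeholder, not a specific PMC register):

/* Sketch only: 0x002140 is a placeholder offset.  The return value of
 * the new nv_mask() is the register contents before the write. */
static void
example_force_bit_on(struct drm_device *dev)
{
	u32 saved;

	saved = nv_mask(dev, 0x002140, 0x00000001, 0x00000001);

	/* ... critical section with bit 0 forced on ... */

	/* restore whatever bit 0 was before */
	nv_mask(dev, 0x002140, 0x00000001, saved & 0x00000001);
}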
@@ -1275,7 +1354,7 @@ static inline void nv_wr08(struct drm_device *dev, unsigned reg, u8 val)
 	iowrite8(val, dev_priv->mmio + reg);
 	iowrite8(val, dev_priv->mmio + reg);
 }
 }
 
 
-#define nv_wait(reg, mask, val) \
+#define nv_wait(dev, reg, mask, val) \
 	nouveau_wait_until(dev, 2000000000ULL, (reg), (mask), (val))
 	nouveau_wait_until(dev, 2000000000ULL, (reg), (mask), (val))
 
 
 /* PRAMIN access */
 /* PRAMIN access */
@@ -1292,17 +1371,8 @@ static inline void nv_wi32(struct drm_device *dev, unsigned offset, u32 val)
 }
 }
 
 
 /* object access */
 /* object access */
-static inline u32 nv_ro32(struct drm_device *dev, struct nouveau_gpuobj *obj,
-				unsigned index)
-{
-	return nv_ri32(dev, obj->im_pramin->start + index * 4);
-}
-
-static inline void nv_wo32(struct drm_device *dev, struct nouveau_gpuobj *obj,
-				unsigned index, u32 val)
-{
-	nv_wi32(dev, obj->im_pramin->start + index * 4, val);
-}
+extern u32 nv_ro32(struct nouveau_gpuobj *, u32 offset);
+extern void nv_wo32(struct nouveau_gpuobj *, u32 offset, u32 val);
 
 
 /*
 /*
  * Logging
  * Logging
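The object accessors move from (dev, obj, word index) to (obj, byte offset), matching the later nouveau_grctx.h change where the index is multiplied by 4 at the call site. A small hedged helper showing the new convention (the function itself is illustrative):

/* Illustrative helper, not part of the patch: copy one 32-bit word
 * between gpuobjs using the new byte-offset accessors. */
static void
example_copy_word(struct nouveau_gpuobj *dst, struct nouveau_gpuobj *src,
		  u32 word)
{
	nv_wo32(dst, word * 4, nv_ro32(src, word * 4));
}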
@@ -1403,6 +1473,7 @@ nv_match_device(struct drm_device *dev, unsigned device,
 #define NV_SW_SEMAPHORE_OFFSET                                       0x00000064
 #define NV_SW_SEMAPHORE_OFFSET                                       0x00000064
 #define NV_SW_SEMAPHORE_ACQUIRE                                      0x00000068
 #define NV_SW_SEMAPHORE_ACQUIRE                                      0x00000068
 #define NV_SW_SEMAPHORE_RELEASE                                      0x0000006c
 #define NV_SW_SEMAPHORE_RELEASE                                      0x0000006c
+#define NV_SW_YIELD                                                  0x00000080
 #define NV_SW_DMA_VBLSEM                                             0x0000018c
 #define NV_SW_DMA_VBLSEM                                             0x0000018c
 #define NV_SW_VBLSEM_OFFSET                                          0x00000400
 #define NV_SW_VBLSEM_OFFSET                                          0x00000400
 #define NV_SW_VBLSEM_RELEASE_VALUE                                   0x00000404
 #define NV_SW_VBLSEM_RELEASE_VALUE                                   0x00000404

+ 1 - 0
drivers/gpu/drm/nouveau/nouveau_encoder.h

@@ -55,6 +55,7 @@ struct nouveau_encoder {
 			int dpcd_version;
 			int dpcd_version;
 			int link_nr;
 			int link_nr;
 			int link_bw;
 			int link_bw;
+			bool enhanced_frame;
 		} dp;
 		} dp;
 	};
 	};
 };
 };

+ 6 - 0
drivers/gpu/drm/nouveau/nouveau_fbcon.c

@@ -104,6 +104,8 @@ static struct fb_ops nouveau_fbcon_ops = {
 	.fb_pan_display = drm_fb_helper_pan_display,
 	.fb_pan_display = drm_fb_helper_pan_display,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcmap = drm_fb_helper_setcmap,
 	.fb_setcmap = drm_fb_helper_setcmap,
+	.fb_debug_enter = drm_fb_helper_debug_enter,
+	.fb_debug_leave = drm_fb_helper_debug_leave,
 };
 };
 
 
 static struct fb_ops nv04_fbcon_ops = {
 static struct fb_ops nv04_fbcon_ops = {
@@ -117,6 +119,8 @@ static struct fb_ops nv04_fbcon_ops = {
 	.fb_pan_display = drm_fb_helper_pan_display,
 	.fb_pan_display = drm_fb_helper_pan_display,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcmap = drm_fb_helper_setcmap,
 	.fb_setcmap = drm_fb_helper_setcmap,
+	.fb_debug_enter = drm_fb_helper_debug_enter,
+	.fb_debug_leave = drm_fb_helper_debug_leave,
 };
 };
 
 
 static struct fb_ops nv50_fbcon_ops = {
 static struct fb_ops nv50_fbcon_ops = {
@@ -130,6 +134,8 @@ static struct fb_ops nv50_fbcon_ops = {
 	.fb_pan_display = drm_fb_helper_pan_display,
 	.fb_pan_display = drm_fb_helper_pan_display,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_blank = drm_fb_helper_blank,
 	.fb_setcmap = drm_fb_helper_setcmap,
 	.fb_setcmap = drm_fb_helper_setcmap,
+	.fb_debug_enter = drm_fb_helper_debug_enter,
+	.fb_debug_leave = drm_fb_helper_debug_leave,
 };
 };
 
 
 static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
 static void nouveau_fbcon_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,

+ 301 - 17
drivers/gpu/drm/nouveau/nouveau_fence.c

@@ -28,9 +28,11 @@
 #include "drm.h"
 #include "drm.h"
 
 
 #include "nouveau_drv.h"
 #include "nouveau_drv.h"
+#include "nouveau_ramht.h"
 #include "nouveau_dma.h"
 #include "nouveau_dma.h"
 
 
-#define USE_REFCNT (dev_priv->card_type >= NV_10)
+#define USE_REFCNT(dev) (nouveau_private(dev)->chipset >= 0x10)
+#define USE_SEMA(dev) (nouveau_private(dev)->chipset >= 0x17)
 
 
 struct nouveau_fence {
 struct nouveau_fence {
 	struct nouveau_channel *channel;
 	struct nouveau_channel *channel;
@@ -39,6 +41,15 @@ struct nouveau_fence {
 
 
 	uint32_t sequence;
 	uint32_t sequence;
 	bool signalled;
 	bool signalled;
+
+	void (*work)(void *priv, bool signalled);
+	void *priv;
+};
+
+struct nouveau_semaphore {
+	struct kref ref;
+	struct drm_device *dev;
+	struct drm_mm_node *mem;
 };
 };
 
 
 static inline struct nouveau_fence *
 static inline struct nouveau_fence *
@@ -59,14 +70,13 @@ nouveau_fence_del(struct kref *ref)
 void
 void
 nouveau_fence_update(struct nouveau_channel *chan)
 nouveau_fence_update(struct nouveau_channel *chan)
 {
 {
-	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
-	struct list_head *entry, *tmp;
-	struct nouveau_fence *fence;
+	struct drm_device *dev = chan->dev;
+	struct nouveau_fence *tmp, *fence;
 	uint32_t sequence;
 	uint32_t sequence;
 
 
 	spin_lock(&chan->fence.lock);
 	spin_lock(&chan->fence.lock);
 
 
-	if (USE_REFCNT)
+	if (USE_REFCNT(dev))
 		sequence = nvchan_rd32(chan, 0x48);
 		sequence = nvchan_rd32(chan, 0x48);
 	else
 	else
 		sequence = atomic_read(&chan->fence.last_sequence_irq);
 		sequence = atomic_read(&chan->fence.last_sequence_irq);
@@ -75,12 +85,14 @@ nouveau_fence_update(struct nouveau_channel *chan)
 		goto out;
 		goto out;
 	chan->fence.sequence_ack = sequence;
 	chan->fence.sequence_ack = sequence;
 
 
-	list_for_each_safe(entry, tmp, &chan->fence.pending) {
-		fence = list_entry(entry, struct nouveau_fence, entry);
-
+	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
 		sequence = fence->sequence;
 		sequence = fence->sequence;
 		fence->signalled = true;
 		fence->signalled = true;
 		list_del(&fence->entry);
 		list_del(&fence->entry);
+
+		if (unlikely(fence->work))
+			fence->work(fence->priv, true);
+
 		kref_put(&fence->refcount, nouveau_fence_del);
 		kref_put(&fence->refcount, nouveau_fence_del);
 
 
 		if (sequence == chan->fence.sequence_ack)
 		if (sequence == chan->fence.sequence_ack)
@@ -121,8 +133,8 @@ nouveau_fence_channel(struct nouveau_fence *fence)
 int
 int
 nouveau_fence_emit(struct nouveau_fence *fence)
 nouveau_fence_emit(struct nouveau_fence *fence)
 {
 {
-	struct drm_nouveau_private *dev_priv = fence->channel->dev->dev_private;
 	struct nouveau_channel *chan = fence->channel;
 	struct nouveau_channel *chan = fence->channel;
+	struct drm_device *dev = chan->dev;
 	int ret;
 	int ret;
 
 
 	ret = RING_SPACE(chan, 2);
 	ret = RING_SPACE(chan, 2);
@@ -143,13 +155,32 @@ nouveau_fence_emit(struct nouveau_fence *fence)
 	list_add_tail(&fence->entry, &chan->fence.pending);
 	list_add_tail(&fence->entry, &chan->fence.pending);
 	spin_unlock(&chan->fence.lock);
 	spin_unlock(&chan->fence.lock);
 
 
-	BEGIN_RING(chan, NvSubSw, USE_REFCNT ? 0x0050 : 0x0150, 1);
+	BEGIN_RING(chan, NvSubSw, USE_REFCNT(dev) ? 0x0050 : 0x0150, 1);
 	OUT_RING(chan, fence->sequence);
 	OUT_RING(chan, fence->sequence);
 	FIRE_RING(chan);
 	FIRE_RING(chan);
 
 
 	return 0;
 	return 0;
 }
 }
 
 
+void
+nouveau_fence_work(struct nouveau_fence *fence,
+		   void (*work)(void *priv, bool signalled),
+		   void *priv)
+{
+	BUG_ON(fence->work);
+
+	spin_lock(&fence->channel->fence.lock);
+
+	if (fence->signalled) {
+		work(priv, true);
+	} else {
+		fence->work = work;
+		fence->priv = priv;
+	}
+
+	spin_unlock(&fence->channel->fence.lock);
+}
+
 void
 void
 nouveau_fence_unref(void **sync_obj)
 nouveau_fence_unref(void **sync_obj)
 {
 {
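nouveau_fence_work() added above lets a caller schedule a callback for when the fence signals, or for when the channel is torn down before that happens. A hedged sketch of deferring a kfree() until the GPU is done; the helper names and flow are illustrative:

/* Illustrative only: free a temporary buffer once the fence fires.
 * The callback runs with signalled == false if the channel dies first. */
static void
example_free_when_done(void *priv, bool signalled)
{
	kfree(priv);
}

static int
example_defer_free(struct nouveau_channel *chan, void *tmp)
{
	struct nouveau_fence *fence;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	nouveau_fence_work(fence, example_free_when_done, tmp);
	nouveau_fence_unref((void *)&fence);
	return 0;
}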
@@ -213,6 +244,162 @@ nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
 	return ret;
 	return ret;
 }
 }
 
 
+static struct nouveau_semaphore *
+alloc_semaphore(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_semaphore *sema;
+
+	if (!USE_SEMA(dev))
+		return NULL;
+
+	sema = kmalloc(sizeof(*sema), GFP_KERNEL);
+	if (!sema)
+		goto fail;
+
+	spin_lock(&dev_priv->fence.lock);
+	sema->mem = drm_mm_search_free(&dev_priv->fence.heap, 4, 0, 0);
+	if (sema->mem)
+		sema->mem = drm_mm_get_block(sema->mem, 4, 0);
+	spin_unlock(&dev_priv->fence.lock);
+
+	if (!sema->mem)
+		goto fail;
+
+	kref_init(&sema->ref);
+	sema->dev = dev;
+	nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 0);
+
+	return sema;
+fail:
+	kfree(sema);
+	return NULL;
+}
+
+static void
+free_semaphore(struct kref *ref)
+{
+	struct nouveau_semaphore *sema =
+		container_of(ref, struct nouveau_semaphore, ref);
+	struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
+
+	spin_lock(&dev_priv->fence.lock);
+	drm_mm_put_block(sema->mem);
+	spin_unlock(&dev_priv->fence.lock);
+
+	kfree(sema);
+}
+
+static void
+semaphore_work(void *priv, bool signalled)
+{
+	struct nouveau_semaphore *sema = priv;
+	struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
+
+	if (unlikely(!signalled))
+		nouveau_bo_wr32(dev_priv->fence.bo, sema->mem->start / 4, 1);
+
+	kref_put(&sema->ref, free_semaphore);
+}
+
+static int
+emit_semaphore(struct nouveau_channel *chan, int method,
+	       struct nouveau_semaphore *sema)
+{
+	struct drm_nouveau_private *dev_priv = sema->dev->dev_private;
+	struct nouveau_fence *fence;
+	bool smart = (dev_priv->card_type >= NV_50);
+	int ret;
+
+	ret = RING_SPACE(chan, smart ? 8 : 4);
+	if (ret)
+		return ret;
+
+	if (smart) {
+		BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
+		OUT_RING(chan, NvSema);
+	}
+	BEGIN_RING(chan, NvSubSw, NV_SW_SEMAPHORE_OFFSET, 1);
+	OUT_RING(chan, sema->mem->start);
+
+	if (smart && method == NV_SW_SEMAPHORE_ACQUIRE) {
+		/*
+		 * NV50 tries to be too smart and context-switch
+		 * between semaphores instead of doing a "first come,
+		 * first served" strategy like previous cards
+		 * do.
+		 *
+		 * That's bad because the ACQUIRE latency can get as
+		 * large as the PFIFO context time slice in the
+		 * typical DRI2 case where you have several
+		 * outstanding semaphores at the same moment.
+		 *
+		 * If we're going to ACQUIRE, force the card to
+		 * context switch before, just in case the matching
+		 * RELEASE is already scheduled to be executed in
+		 * another channel.
+		 */
+		BEGIN_RING(chan, NvSubSw, NV_SW_YIELD, 1);
+		OUT_RING(chan, 0);
+	}
+
+	BEGIN_RING(chan, NvSubSw, method, 1);
+	OUT_RING(chan, 1);
+
+	if (smart && method == NV_SW_SEMAPHORE_RELEASE) {
+		/*
+		 * Force the card to context switch, there may be
+		 * another channel waiting for the semaphore we just
+		 * released.
+		 */
+		BEGIN_RING(chan, NvSubSw, NV_SW_YIELD, 1);
+		OUT_RING(chan, 0);
+	}
+
+	/* Delay semaphore destruction until its work is done */
+	ret = nouveau_fence_new(chan, &fence, true);
+	if (ret)
+		return ret;
+
+	kref_get(&sema->ref);
+	nouveau_fence_work(fence, semaphore_work, sema);
+	nouveau_fence_unref((void *)&fence);
+
+	return 0;
+}
+
+int
+nouveau_fence_sync(struct nouveau_fence *fence,
+		   struct nouveau_channel *wchan)
+{
+	struct nouveau_channel *chan = nouveau_fence_channel(fence);
+	struct drm_device *dev = wchan->dev;
+	struct nouveau_semaphore *sema;
+	int ret;
+
+	if (likely(!fence || chan == wchan ||
+		   nouveau_fence_signalled(fence, NULL)))
+		return 0;
+
+	sema = alloc_semaphore(dev);
+	if (!sema) {
+		/* Early card or broken userspace, fall back to
+		 * software sync. */
+		return nouveau_fence_wait(fence, NULL, false, false);
+	}
+
+	/* Make wchan wait until it gets signalled */
+	ret = emit_semaphore(wchan, NV_SW_SEMAPHORE_ACQUIRE, sema);
+	if (ret)
+		goto out;
+
+	/* Signal the semaphore from chan */
+	ret = emit_semaphore(chan, NV_SW_SEMAPHORE_RELEASE, sema);
+out:
+	kref_put(&sema->ref, free_semaphore);
+	return ret;
+}
+
 int
 int
 nouveau_fence_flush(void *sync_obj, void *sync_arg)
 nouveau_fence_flush(void *sync_obj, void *sync_arg)
 {
 {
@@ -220,26 +407,123 @@ nouveau_fence_flush(void *sync_obj, void *sync_arg)
 }
 }
 
 
 int
 int
-nouveau_fence_init(struct nouveau_channel *chan)
+nouveau_fence_channel_init(struct nouveau_channel *chan)
 {
 {
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_gpuobj *obj = NULL;
+	int ret;
+
+	/* Create an NV_SW object for various sync purposes */
+	ret = nouveau_gpuobj_sw_new(chan, NV_SW, &obj);
+	if (ret)
+		return ret;
+
+	ret = nouveau_ramht_insert(chan, NvSw, obj);
+	nouveau_gpuobj_ref(NULL, &obj);
+	if (ret)
+		return ret;
+
+	ret = RING_SPACE(chan, 2);
+	if (ret)
+		return ret;
+	BEGIN_RING(chan, NvSubSw, 0, 1);
+	OUT_RING(chan, NvSw);
+
+	/* Create a DMA object for the shared cross-channel sync area. */
+	if (USE_SEMA(dev)) {
+		struct drm_mm_node *mem = dev_priv->fence.bo->bo.mem.mm_node;
+
+		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
+					     mem->start << PAGE_SHIFT,
+					     mem->size << PAGE_SHIFT,
+					     NV_DMA_ACCESS_RW,
+					     NV_DMA_TARGET_VIDMEM, &obj);
+		if (ret)
+			return ret;
+
+		ret = nouveau_ramht_insert(chan, NvSema, obj);
+		nouveau_gpuobj_ref(NULL, &obj);
+		if (ret)
+			return ret;
+
+		ret = RING_SPACE(chan, 2);
+		if (ret)
+			return ret;
+		BEGIN_RING(chan, NvSubSw, NV_SW_DMA_SEMAPHORE, 1);
+		OUT_RING(chan, NvSema);
+	}
+
+	FIRE_RING(chan);
+
 	INIT_LIST_HEAD(&chan->fence.pending);
 	INIT_LIST_HEAD(&chan->fence.pending);
 	spin_lock_init(&chan->fence.lock);
 	spin_lock_init(&chan->fence.lock);
 	atomic_set(&chan->fence.last_sequence_irq, 0);
 	atomic_set(&chan->fence.last_sequence_irq, 0);
+
 	return 0;
 	return 0;
 }
 }
 
 
 void
 void
-nouveau_fence_fini(struct nouveau_channel *chan)
+nouveau_fence_channel_fini(struct nouveau_channel *chan)
 {
 {
-	struct list_head *entry, *tmp;
-	struct nouveau_fence *fence;
-
-	list_for_each_safe(entry, tmp, &chan->fence.pending) {
-		fence = list_entry(entry, struct nouveau_fence, entry);
+	struct nouveau_fence *tmp, *fence;
 
 
+	list_for_each_entry_safe(fence, tmp, &chan->fence.pending, entry) {
 		fence->signalled = true;
 		fence->signalled = true;
 		list_del(&fence->entry);
 		list_del(&fence->entry);
+
+		if (unlikely(fence->work))
+			fence->work(fence->priv, false);
+
 		kref_put(&fence->refcount, nouveau_fence_del);
 		kref_put(&fence->refcount, nouveau_fence_del);
 	}
 	}
 }
 }
 
 
+int
+nouveau_fence_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int ret;
+
+	/* Create a shared VRAM heap for cross-channel sync. */
+	if (USE_SEMA(dev)) {
+		ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM,
+				     0, 0, false, true, &dev_priv->fence.bo);
+		if (ret)
+			return ret;
+
+		ret = nouveau_bo_pin(dev_priv->fence.bo, TTM_PL_FLAG_VRAM);
+		if (ret)
+			goto fail;
+
+		ret = nouveau_bo_map(dev_priv->fence.bo);
+		if (ret)
+			goto fail;
+
+		ret = drm_mm_init(&dev_priv->fence.heap, 0,
+				  dev_priv->fence.bo->bo.mem.size);
+		if (ret)
+			goto fail;
+
+		spin_lock_init(&dev_priv->fence.lock);
+	}
+
+	return 0;
+fail:
+	nouveau_bo_unmap(dev_priv->fence.bo);
+	nouveau_bo_ref(NULL, &dev_priv->fence.bo);
+	return ret;
+}
+
+void
+nouveau_fence_fini(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	if (USE_SEMA(dev)) {
+		drm_mm_takedown(&dev_priv->fence.heap);
+		nouveau_bo_unmap(dev_priv->fence.bo);
+		nouveau_bo_unpin(dev_priv->fence.bo);
+		nouveau_bo_ref(NULL, &dev_priv->fence.bo);
+	}
+}

+ 2 - 2
drivers/gpu/drm/nouveau/nouveau_gem.c

@@ -362,7 +362,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 	list_for_each_entry(nvbo, list, entry) {
 	list_for_each_entry(nvbo, list, entry) {
 		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
 		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
 
 
-		ret = nouveau_bo_sync_gpu(nvbo, chan);
+		ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
 		if (unlikely(ret)) {
 		if (unlikely(ret)) {
 			NV_ERROR(dev, "fail pre-validate sync\n");
 			NV_ERROR(dev, "fail pre-validate sync\n");
 			return ret;
 			return ret;
@@ -385,7 +385,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 			return ret;
 			return ret;
 		}
 		}
 
 
-		ret = nouveau_bo_sync_gpu(nvbo, chan);
+		ret = nouveau_fence_sync(nvbo->bo.sync_obj, chan);
 		if (unlikely(ret)) {
 		if (unlikely(ret)) {
 			NV_ERROR(dev, "fail post-validate sync\n");
 			NV_ERROR(dev, "fail post-validate sync\n");
 			return ret;
 			return ret;

+ 1 - 1
drivers/gpu/drm/nouveau/nouveau_grctx.h

@@ -126,7 +126,7 @@ gr_def(struct nouveau_grctx *ctx, uint32_t reg, uint32_t val)
 	reg = (reg - 0x00400000) / 4;
 	reg = (reg - 0x00400000) / 4;
 	reg = (reg - ctx->ctxprog_reg) + ctx->ctxvals_base;
 	reg = (reg - ctx->ctxprog_reg) + ctx->ctxvals_base;
 
 
-	nv_wo32(ctx->dev, ctx->data, reg, val);
+	nv_wo32(ctx->data, reg * 4, val);
 }
 }
 #endif
 #endif
 
 

+ 21 - 24
drivers/gpu/drm/nouveau/nouveau_hw.c

@@ -305,7 +305,7 @@ setPLL_double_lowregs(struct drm_device *dev, uint32_t NMNMreg,
 	bool mpll = Preg == 0x4020;
 	bool mpll = Preg == 0x4020;
 	uint32_t oldPval = nvReadMC(dev, Preg);
 	uint32_t oldPval = nvReadMC(dev, Preg);
 	uint32_t NMNM = pv->NM2 << 16 | pv->NM1;
 	uint32_t NMNM = pv->NM2 << 16 | pv->NM1;
-	uint32_t Pval = (oldPval & (mpll ? ~(0x11 << 16) : ~(1 << 16))) |
+	uint32_t Pval = (oldPval & (mpll ? ~(0x77 << 16) : ~(7 << 16))) |
 			0xc << 28 | pv->log2P << 16;
 			0xc << 28 | pv->log2P << 16;
 	uint32_t saved4600 = 0;
 	uint32_t saved4600 = 0;
 	/* some cards have different maskc040s */
 	/* some cards have different maskc040s */
@@ -427,22 +427,12 @@ nouveau_hw_get_pllvals(struct drm_device *dev, enum pll_types plltype,
 		       struct nouveau_pll_vals *pllvals)
 		       struct nouveau_pll_vals *pllvals)
 {
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	const uint32_t nv04_regs[MAX_PLL_TYPES] = { NV_PRAMDAC_NVPLL_COEFF,
-						    NV_PRAMDAC_MPLL_COEFF,
-						    NV_PRAMDAC_VPLL_COEFF,
-						    NV_RAMDAC_VPLL2 };
-	const uint32_t nv40_regs[MAX_PLL_TYPES] = { 0x4000,
-						    0x4020,
-						    NV_PRAMDAC_VPLL_COEFF,
-						    NV_RAMDAC_VPLL2 };
-	uint32_t reg1, pll1, pll2 = 0;
+	uint32_t reg1 = get_pll_register(dev, plltype), pll1, pll2 = 0;
 	struct pll_lims pll_lim;
 	struct pll_lims pll_lim;
 	int ret;
 	int ret;
 
 
-	if (dev_priv->card_type < NV_40)
-		reg1 = nv04_regs[plltype];
-	else
-		reg1 = nv40_regs[plltype];
+	if (reg1 == 0)
+		return -ENOENT;
 
 
 	pll1 = nvReadMC(dev, reg1);
 	pll1 = nvReadMC(dev, reg1);
 
 
@@ -491,8 +481,10 @@ int
 nouveau_hw_get_clock(struct drm_device *dev, enum pll_types plltype)
 nouveau_hw_get_clock(struct drm_device *dev, enum pll_types plltype)
 {
 {
 	struct nouveau_pll_vals pllvals;
 	struct nouveau_pll_vals pllvals;
+	int ret;
 
 
-	if (plltype == MPLL && (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) {
+	if (plltype == PLL_MEMORY &&
+	    (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) {
 		uint32_t mpllP;
 		uint32_t mpllP;
 
 
 		pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
 		pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
@@ -501,14 +493,17 @@ nouveau_hw_get_clock(struct drm_device *dev, enum pll_types plltype)
 
 
 		return 400000 / mpllP;
 		return 400000 / mpllP;
 	} else
 	} else
-	if (plltype == MPLL && (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) {
+	if (plltype == PLL_MEMORY &&
+	    (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) {
 		uint32_t clock;
 		uint32_t clock;
 
 
 		pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
 		pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
 		return clock;
 		return clock;
 	}
 	}
 
 
-	nouveau_hw_get_pllvals(dev, plltype, &pllvals);
+	ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals);
+	if (ret)
+		return ret;
 
 
 	return nouveau_hw_pllvals_to_clk(&pllvals);
 	return nouveau_hw_pllvals_to_clk(&pllvals);
 }
 }
@@ -526,9 +521,9 @@ nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
 	struct nouveau_pll_vals pv;
 	struct nouveau_pll_vals pv;
 	uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
 	uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
 
 
-	if (get_pll_limits(dev, head ? VPLL2 : VPLL1, &pll_lim))
+	if (get_pll_limits(dev, pllreg, &pll_lim))
 		return;
 		return;
-	nouveau_hw_get_pllvals(dev, head ? VPLL2 : VPLL1, &pv);
+	nouveau_hw_get_pllvals(dev, pllreg, &pv);
 
 
 	if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m &&
 	if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m &&
 	    pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n &&
 	    pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n &&
@@ -661,7 +656,7 @@ nv_save_state_ramdac(struct drm_device *dev, int head,
 	if (dev_priv->card_type >= NV_10)
 	if (dev_priv->card_type >= NV_10)
 		regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC);
 		regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC);
 
 
-	nouveau_hw_get_pllvals(dev, head ? VPLL2 : VPLL1, &regp->pllvals);
+	nouveau_hw_get_pllvals(dev, head ? PLL_VPLL1 : PLL_VPLL0, &regp->pllvals);
 	state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT);
 	state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT);
 	if (nv_two_heads(dev))
 	if (nv_two_heads(dev))
 		state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
 		state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
@@ -866,10 +861,11 @@ nv_save_state_ext(struct drm_device *dev, int head,
 	rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
 	rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
 	rd_cio_state(dev, head, regp, NV_CIO_CRE_21);
 	rd_cio_state(dev, head, regp, NV_CIO_CRE_21);
 
 
-	if (dev_priv->card_type >= NV_30) {
+	if (dev_priv->card_type >= NV_20)
 		rd_cio_state(dev, head, regp, NV_CIO_CRE_47);
 		rd_cio_state(dev, head, regp, NV_CIO_CRE_47);
+
+	if (dev_priv->card_type >= NV_30)
 		rd_cio_state(dev, head, regp, 0x9f);
 		rd_cio_state(dev, head, regp, 0x9f);
-	}
 
 
 	rd_cio_state(dev, head, regp, NV_CIO_CRE_49);
 	rd_cio_state(dev, head, regp, NV_CIO_CRE_49);
 	rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
 	rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
@@ -976,10 +972,11 @@ nv_load_state_ext(struct drm_device *dev, int head,
 	wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
 	wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
 	wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
 	wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
 
 
-	if (dev_priv->card_type >= NV_30) {
+	if (dev_priv->card_type >= NV_20)
 		wr_cio_state(dev, head, regp, NV_CIO_CRE_47);
 		wr_cio_state(dev, head, regp, NV_CIO_CRE_47);
+
+	if (dev_priv->card_type >= NV_30)
 		wr_cio_state(dev, head, regp, 0x9f);
 		wr_cio_state(dev, head, regp, 0x9f);
-	}
 
 
 	wr_cio_state(dev, head, regp, NV_CIO_CRE_49);
 	wr_cio_state(dev, head, regp, NV_CIO_CRE_49);
 	wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
 	wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
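The PLL helpers above now resolve the coefficient register through the BIOS (get_pll_register()) and propagate failure instead of indexing fixed per-generation tables, so callers must check the return value. A hedged sketch of the updated query pattern (PLL_MEMORY is just an example type):

/* Sketch of the new error-checked PLL readback; illustrative only. */
static int
example_memory_clock(struct drm_device *dev)
{
	struct nouveau_pll_vals pv;
	int ret;

	ret = nouveau_hw_get_pllvals(dev, PLL_MEMORY, &pv);
	if (ret)
		return ret;	/* -ENOENT if the BIOS lists no such PLL */

	return nouveau_hw_pllvals_to_clk(&pv);
}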

+ 6 - 2
drivers/gpu/drm/nouveau/nouveau_i2c.c

@@ -299,7 +299,10 @@ nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr)
 
 
 int
 int
 nouveau_i2c_identify(struct drm_device *dev, const char *what,
 nouveau_i2c_identify(struct drm_device *dev, const char *what,
-		     struct i2c_board_info *info, int index)
+		     struct i2c_board_info *info,
+		     bool (*match)(struct nouveau_i2c_chan *,
+				   struct i2c_board_info *),
+		     int index)
 {
 {
 	struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, index);
 	struct nouveau_i2c_chan *i2c = nouveau_i2c_find(dev, index);
 	int i;
 	int i;
@@ -307,7 +310,8 @@ nouveau_i2c_identify(struct drm_device *dev, const char *what,
 	NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index);
 	NV_DEBUG(dev, "Probing %ss on I2C bus: %d\n", what, index);
 
 
 	for (i = 0; info[i].addr; i++) {
 	for (i = 0; info[i].addr; i++) {
-		if (nouveau_probe_i2c_addr(i2c, info[i].addr)) {
+		if (nouveau_probe_i2c_addr(i2c, info[i].addr) &&
+		    (!match || match(i2c, &info[i]))) {
 			NV_INFO(dev, "Detected %s: %s\n", what, info[i].type);
 			NV_INFO(dev, "Detected %s: %s\n", what, info[i].type);
 			return i;
 			return i;
 		}
 		}

+ 4 - 1
drivers/gpu/drm/nouveau/nouveau_i2c.h

@@ -43,7 +43,10 @@ void nouveau_i2c_fini(struct drm_device *, struct dcb_i2c_entry *);
 struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, int index);
 struct nouveau_i2c_chan *nouveau_i2c_find(struct drm_device *, int index);
 bool nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr);
 bool nouveau_probe_i2c_addr(struct nouveau_i2c_chan *i2c, int addr);
 int nouveau_i2c_identify(struct drm_device *dev, const char *what,
 int nouveau_i2c_identify(struct drm_device *dev, const char *what,
-			 struct i2c_board_info *info, int index);
+			 struct i2c_board_info *info,
+			 bool (*match)(struct nouveau_i2c_chan *,
+				       struct i2c_board_info *),
+			 int index);
 
 
 extern const struct i2c_algorithm nouveau_dp_i2c_algo;
 extern const struct i2c_algorithm nouveau_dp_i2c_algo;
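nouveau_i2c_identify() now takes an optional match() callback, so a probe can require more than a bare address ACK. A hedged sketch of wiring one up; the board-info table, the address, and the always-true match body are placeholders:

/* Placeholder table and callback, purely illustrative. */
static struct i2c_board_info example_encoders[] = {
	{ I2C_BOARD_INFO("ch7006", 0x75) },
	{ }
};

static bool
example_match(struct nouveau_i2c_chan *i2c, struct i2c_board_info *info)
{
	/* a real callback would read an ID register and compare it */
	return true;
}

static int
example_probe(struct drm_device *dev, int bus_index)
{
	return nouveau_i2c_identify(dev, "TV encoder", example_encoders,
				    example_match, bus_index);
}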
 
 

+ 64 - 59
drivers/gpu/drm/nouveau/nouveau_irq.c

@@ -35,6 +35,7 @@
 #include "nouveau_drm.h"
 #include "nouveau_drm.h"
 #include "nouveau_drv.h"
 #include "nouveau_drv.h"
 #include "nouveau_reg.h"
 #include "nouveau_reg.h"
+#include "nouveau_ramht.h"
 #include <linux/ratelimit.h>
 #include <linux/ratelimit.h>
 
 
 /* needed for hotplug irq */
 /* needed for hotplug irq */
@@ -106,15 +107,16 @@ nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data)
 	const int mthd = addr & 0x1ffc;
 	const int mthd = addr & 0x1ffc;
 
 
 	if (mthd == 0x0000) {
 	if (mthd == 0x0000) {
-		struct nouveau_gpuobj_ref *ref = NULL;
+		struct nouveau_gpuobj *gpuobj;
 
 
-		if (nouveau_gpuobj_ref_find(chan, data, &ref))
+		gpuobj = nouveau_ramht_find(chan, data);
+		if (!gpuobj)
 			return false;
 			return false;
 
 
-		if (ref->gpuobj->engine != NVOBJ_ENGINE_SW)
+		if (gpuobj->engine != NVOBJ_ENGINE_SW)
 			return false;
 			return false;
 
 
-		chan->sw_subchannel[subc] = ref->gpuobj->class;
+		chan->sw_subchannel[subc] = gpuobj->class;
 		nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev,
 		nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev,
 			NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4));
 			NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4));
 		return true;
 		return true;
@@ -200,16 +202,45 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
 		}
 		}
 
 
 		if (status & NV_PFIFO_INTR_DMA_PUSHER) {
 		if (status & NV_PFIFO_INTR_DMA_PUSHER) {
-			NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d\n", chid);
+			u32 get = nv_rd32(dev, 0x003244);
+			u32 put = nv_rd32(dev, 0x003240);
+			u32 push = nv_rd32(dev, 0x003220);
+			u32 state = nv_rd32(dev, 0x003228);
+
+			if (dev_priv->card_type == NV_50) {
+				u32 ho_get = nv_rd32(dev, 0x003328);
+				u32 ho_put = nv_rd32(dev, 0x003320);
+				u32 ib_get = nv_rd32(dev, 0x003334);
+				u32 ib_put = nv_rd32(dev, 0x003330);
+
+				NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
+					     "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
+					     "State 0x%08x Push 0x%08x\n",
+					chid, ho_get, get, ho_put, put, ib_get, ib_put,
+					state, push);
+
+				/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
+				nv_wr32(dev, 0x003364, 0x00000000);
+				if (get != put || ho_get != ho_put) {
+					nv_wr32(dev, 0x003244, put);
+					nv_wr32(dev, 0x003328, ho_put);
+				} else
+				if (ib_get != ib_put) {
+					nv_wr32(dev, 0x003334, ib_put);
+				}
+			} else {
+				NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
+					     "Put 0x%08x State 0x%08x Push 0x%08x\n",
+					chid, get, put, state, push);
 
 
-			status &= ~NV_PFIFO_INTR_DMA_PUSHER;
-			nv_wr32(dev, NV03_PFIFO_INTR_0,
-						NV_PFIFO_INTR_DMA_PUSHER);
+				if (get != put)
+					nv_wr32(dev, 0x003244, put);
+			}
 
 
-			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000);
-			if (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT) != get)
-				nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET,
-								get + 4);
+			nv_wr32(dev, 0x003228, 0x00000000);
+			nv_wr32(dev, 0x003220, 0x00000001);
+			nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
+			status &= ~NV_PFIFO_INTR_DMA_PUSHER;
 		}
 		}
 
 
 		if (status & NV_PFIFO_INTR_SEMAPHORE) {
 		if (status & NV_PFIFO_INTR_SEMAPHORE) {
@@ -226,6 +257,14 @@ nouveau_fifo_irq_handler(struct drm_device *dev)
 			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
 			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
 		}
 		}
 
 
+		if (dev_priv->card_type == NV_50) {
+			if (status & 0x00000010) {
+				nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
+				status &= ~0x00000010;
+				nv_wr32(dev, 0x002100, 0x00000010);
+			}
+		}
+
 		if (status) {
 		if (status) {
 			NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
 			NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
 				status, chid);
 				status, chid);
@@ -357,7 +396,7 @@ nouveau_graph_chid_from_grctx(struct drm_device *dev)
 			if (!chan || !chan->ramin_grctx)
 			if (!chan || !chan->ramin_grctx)
 				continue;
 				continue;
 
 
-			if (inst == chan->ramin_grctx->instance)
+			if (inst == chan->ramin_grctx->pinst)
 				break;
 				break;
 		}
 		}
 	} else {
 	} else {
@@ -369,7 +408,7 @@ nouveau_graph_chid_from_grctx(struct drm_device *dev)
 			if (!chan || !chan->ramin)
 			if (!chan || !chan->ramin)
 				continue;
 				continue;
 
 
-			if (inst == chan->ramin->instance)
+			if (inst == chan->ramin->vinst)
 				break;
 				break;
 		}
 		}
 	}
 	}
@@ -605,40 +644,6 @@ nouveau_pgraph_irq_handler(struct drm_device *dev)
 	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
 	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
 }
 }
 
 
-static void
-nv50_pfb_vm_trap(struct drm_device *dev, int display, const char *name)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t trap[6];
-	int i, ch;
-	uint32_t idx = nv_rd32(dev, 0x100c90);
-	if (idx & 0x80000000) {
-		idx &= 0xffffff;
-		if (display) {
-			for (i = 0; i < 6; i++) {
-				nv_wr32(dev, 0x100c90, idx | i << 24);
-				trap[i] = nv_rd32(dev, 0x100c94);
-			}
-			for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
-				struct nouveau_channel *chan = dev_priv->fifos[ch];
-
-				if (!chan || !chan->ramin)
-					continue;
-
-				if (trap[1] == chan->ramin->instance >> 12)
-					break;
-			}
-			NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x %08x channel %d\n",
-					name, (trap[5]&0x100?"read":"write"),
-					trap[5]&0xff, trap[4]&0xffff,
-					trap[3]&0xffff, trap[0], trap[2], ch);
-		}
-		nv_wr32(dev, 0x100c90, idx | 0x80000000);
-	} else if (display) {
-		NV_INFO(dev, "%s - no VM fault?\n", name);
-	}
-}
-
 static struct nouveau_enum_names nv50_mp_exec_error_names[] =
 static struct nouveau_enum_names nv50_mp_exec_error_names[] =
 {
 {
 	{ 3, "STACK_UNDERFLOW" },
 	{ 3, "STACK_UNDERFLOW" },
@@ -711,7 +716,7 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
 		tps++;
 		tps++;
 		switch (type) {
 		switch (type) {
 		case 6: /* texture error... unknown for now */
 		case 6: /* texture error... unknown for now */
-			nv50_pfb_vm_trap(dev, display, name);
+			nv50_fb_vm_trap(dev, display, name);
 			if (display) {
 			if (display) {
 				NV_ERROR(dev, "magic set %d:\n", i);
 				NV_ERROR(dev, "magic set %d:\n", i);
 				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
 				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
@@ -734,7 +739,7 @@ nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
 			uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
 			uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
 			uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
 			uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
 			uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
 			uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
-			nv50_pfb_vm_trap(dev, display, name);
+			nv50_fb_vm_trap(dev, display, name);
 			/* 2d engine destination */
 			/* 2d engine destination */
 			if (ustatus & 0x00000010) {
 			if (ustatus & 0x00000010) {
 				if (display) {
 				if (display) {
@@ -817,7 +822,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
 
 
 		/* Known to be triggered by screwed up NOTIFY and COND... */
 		/* Known to be triggered by screwed up NOTIFY and COND... */
 		if (ustatus & 0x00000001) {
 		if (ustatus & 0x00000001) {
-			nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
+			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
 			nv_wr32(dev, 0x400500, 0);
 			nv_wr32(dev, 0x400500, 0);
 			if (nv_rd32(dev, 0x400808) & 0x80000000) {
 			if (nv_rd32(dev, 0x400808) & 0x80000000) {
 				if (display) {
 				if (display) {
@@ -842,7 +847,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
 			ustatus &= ~0x00000001;
 			ustatus &= ~0x00000001;
 		}
 		}
 		if (ustatus & 0x00000002) {
 		if (ustatus & 0x00000002) {
-			nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
+			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
 			nv_wr32(dev, 0x400500, 0);
 			nv_wr32(dev, 0x400500, 0);
 			if (nv_rd32(dev, 0x40084c) & 0x80000000) {
 			if (nv_rd32(dev, 0x40084c) & 0x80000000) {
 				if (display) {
 				if (display) {
@@ -884,15 +889,15 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
 			NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n");
 			NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n");
 		}
 		}
 		if (ustatus & 0x00000001) {
 		if (ustatus & 0x00000001) {
-			nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
+			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
 			ustatus &= ~0x00000001;
 			ustatus &= ~0x00000001;
 		}
 		}
 		if (ustatus & 0x00000002) {
 		if (ustatus & 0x00000002) {
-			nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
+			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
 			ustatus &= ~0x00000002;
 			ustatus &= ~0x00000002;
 		}
 		}
 		if (ustatus & 0x00000004) {
 		if (ustatus & 0x00000004) {
-			nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
+			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
 			ustatus &= ~0x00000004;
 			ustatus &= ~0x00000004;
 		}
 		}
 		NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n",
 		NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n",
@@ -917,7 +922,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
 			NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n");
 			NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n");
 		}
 		}
 		if (ustatus & 0x00000001) {
 		if (ustatus & 0x00000001) {
-			nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
+			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
 			NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n",
 			NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n",
 					nv_rd32(dev, 0x400c00),
 					nv_rd32(dev, 0x400c00),
 					nv_rd32(dev, 0x400c08),
 					nv_rd32(dev, 0x400c08),
@@ -939,7 +944,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
 			NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n");
 			NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n");
 		}
 		}
 		if (ustatus & 0x00000001) {
 		if (ustatus & 0x00000001) {
-			nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
+			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
 			NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n",
 			NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n",
 					nv_rd32(dev, 0x401804),
 					nv_rd32(dev, 0x401804),
 					nv_rd32(dev, 0x401808),
 					nv_rd32(dev, 0x401808),
@@ -964,7 +969,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
 			NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n");
 			NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n");
 		}
 		}
 		if (ustatus & 0x00000001) {
 		if (ustatus & 0x00000001) {
-			nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
+			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
 			NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n",
 			NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n",
 					nv_rd32(dev, 0x405800),
 					nv_rd32(dev, 0x405800),
 					nv_rd32(dev, 0x405804),
 					nv_rd32(dev, 0x405804),
@@ -986,7 +991,7 @@ nv50_pgraph_trap_handler(struct drm_device *dev)
 	 * remaining, so try to handle it anyway. Perhaps related to that
 	 * remaining, so try to handle it anyway. Perhaps related to that
 	 * unknown DMA slot on tesla? */
 	 * unknown DMA slot on tesla? */
 	if (status & 0x20) {
 	if (status & 0x20) {
-		nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
+		nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
 		ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
 		ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
 		if (display)
 		if (display)
 			NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus);
 			NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus);

+ 313 - 50
drivers/gpu/drm/nouveau/nouveau_mem.c

@@ -35,6 +35,8 @@
 #include "drm_sarea.h"
 #include "drm_sarea.h"
 #include "nouveau_drv.h"
 #include "nouveau_drv.h"
 
 
+#define MIN(a,b) a < b ? a : b
+
 /*
 /*
  * NV10-NV40 tiling helpers
  * NV10-NV40 tiling helpers
  */
  */
@@ -47,18 +49,14 @@ nv10_mem_set_region_tiling(struct drm_device *dev, int i, uint32_t addr,
 	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
 	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
 	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
 	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
 	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
-	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];
+	struct nouveau_tile_reg *tile = &dev_priv->tile[i];
 
 
 	tile->addr = addr;
 	tile->addr = addr;
 	tile->size = size;
 	tile->size = size;
 	tile->used = !!pitch;
 	tile->used = !!pitch;
 	nouveau_fence_unref((void **)&tile->fence);
 	nouveau_fence_unref((void **)&tile->fence);
 
 
-	if (!pfifo->cache_flush(dev))
-		return;
-
 	pfifo->reassign(dev, false);
 	pfifo->reassign(dev, false);
-	pfifo->cache_flush(dev);
 	pfifo->cache_pull(dev, false);
 	pfifo->cache_pull(dev, false);
 
 
 	nouveau_wait_for_idle(dev);
 	nouveau_wait_for_idle(dev);
@@ -76,34 +74,36 @@ nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
 {
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
 	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
-	struct nouveau_tile_reg *tile = dev_priv->tile.reg, *found = NULL;
-	int i;
+	struct nouveau_tile_reg *found = NULL;
+	unsigned long i, flags;
 
 
-	spin_lock(&dev_priv->tile.lock);
+	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
 
 
 	for (i = 0; i < pfb->num_tiles; i++) {
 	for (i = 0; i < pfb->num_tiles; i++) {
-		if (tile[i].used)
+		struct nouveau_tile_reg *tile = &dev_priv->tile[i];
+
+		if (tile->used)
 			/* Tile region in use. */
 			/* Tile region in use. */
 			continue;
 			continue;
 
 
-		if (tile[i].fence &&
-		    !nouveau_fence_signalled(tile[i].fence, NULL))
+		if (tile->fence &&
+		    !nouveau_fence_signalled(tile->fence, NULL))
 			/* Pending tile region. */
 			/* Pending tile region. */
 			continue;
 			continue;
 
 
-		if (max(tile[i].addr, addr) <
-		    min(tile[i].addr + tile[i].size, addr + size))
+		if (max(tile->addr, addr) <
+		    min(tile->addr + tile->size, addr + size))
 			/* Kill an intersecting tile region. */
 			/* Kill an intersecting tile region. */
 			nv10_mem_set_region_tiling(dev, i, 0, 0, 0);
 			nv10_mem_set_region_tiling(dev, i, 0, 0, 0);
 
 
 		if (pitch && !found) {
 		if (pitch && !found) {
 			/* Free tile region. */
 			/* Free tile region. */
 			nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
 			nv10_mem_set_region_tiling(dev, i, addr, size, pitch);
-			found = &tile[i];
+			found = tile;
 		}
 		}
 	}
 	}
 
 
-	spin_unlock(&dev_priv->tile.lock);
+	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
 
 
 	return found;
 	return found;
 }
 }
@@ -169,8 +169,9 @@ nv50_mem_vm_bind_linear(struct drm_device *dev, uint64_t virt, uint32_t size,
 			virt  += (end - pte);
 
 			while (pte < end) {
-				nv_wo32(dev, pgt, pte++, offset_l);
-				nv_wo32(dev, pgt, pte++, offset_h);
+				nv_wo32(pgt, (pte * 4) + 0, offset_l);
+				nv_wo32(pgt, (pte * 4) + 4, offset_h);
+				pte += 2;
 			}
 		}
 	}
@@ -203,8 +204,10 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
 		pages -= (end - pte);
 		virt  += (end - pte) << 15;
 
-		while (pte < end)
-			nv_wo32(dev, pgt, pte++, 0);
+		while (pte < end) {
+			nv_wo32(pgt, (pte * 4), 0);
+			pte++;
+		}
 	}
 	dev_priv->engine.instmem.flush(dev);
 
@@ -218,7 +221,7 @@ nv50_mem_vm_unbind(struct drm_device *dev, uint64_t virt, uint32_t size)
  * Cleanup everything
  */
 void
-nouveau_mem_close(struct drm_device *dev)
+nouveau_mem_vram_fini(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 
@@ -229,6 +232,19 @@ nouveau_mem_close(struct drm_device *dev)
 
 	nouveau_ttm_global_release(dev_priv);
 
+	if (dev_priv->fb_mtrr >= 0) {
+		drm_mtrr_del(dev_priv->fb_mtrr,
+			     pci_resource_start(dev->pdev, 1),
+			     pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
+		dev_priv->fb_mtrr = -1;
+	}
+}
+
+void
+nouveau_mem_gart_fini(struct drm_device *dev)
+{
+	nouveau_sgdma_takedown(dev);
+
 	if (drm_core_has_AGP(dev) && dev->agp) {
 		struct drm_agp_mem *entry, *tempe;
 
@@ -248,13 +264,6 @@ nouveau_mem_close(struct drm_device *dev)
 		dev->agp->acquired = 0;
 		dev->agp->enabled = 0;
 	}
-
-	if (dev_priv->fb_mtrr) {
-		drm_mtrr_del(dev_priv->fb_mtrr,
-			     pci_resource_start(dev->pdev, 1),
-			     pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
-		dev_priv->fb_mtrr = -1;
-	}
 }
 
 static uint32_t
@@ -305,8 +314,62 @@ nouveau_mem_detect_nforce(struct drm_device *dev)
 	return 0;
 }
 
-/* returns the amount of FB ram in bytes */
-int
+static void
+nv50_vram_preinit(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	int i, parts, colbits, rowbitsa, rowbitsb, banks;
+	u64 rowsize, predicted;
+	u32 r0, r4, rt, ru;
+
+	r0 = nv_rd32(dev, 0x100200);
+	r4 = nv_rd32(dev, 0x100204);
+	rt = nv_rd32(dev, 0x100250);
+	ru = nv_rd32(dev, 0x001540);
+	NV_DEBUG(dev, "memcfg 0x%08x 0x%08x 0x%08x 0x%08x\n", r0, r4, rt, ru);
+
+	for (i = 0, parts = 0; i < 8; i++) {
+		if (ru & (0x00010000 << i))
+			parts++;
+	}
+
+	colbits  =  (r4 & 0x0000f000) >> 12;
+	rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
+	rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
+	banks    = ((r4 & 0x01000000) ? 8 : 4);
+
+	rowsize = parts * banks * (1 << colbits) * 8;
+	predicted = rowsize << rowbitsa;
+	if (r0 & 0x00000004)
+		predicted += rowsize << rowbitsb;
+
+	if (predicted != dev_priv->vram_size) {
+		NV_WARN(dev, "memory controller reports %dMiB VRAM\n",
+			(u32)(dev_priv->vram_size >> 20));
+		NV_WARN(dev, "we calculated %dMiB VRAM\n",
+			(u32)(predicted >> 20));
+	}
+
+	dev_priv->vram_rblock_size = rowsize >> 12;
+	if (rt & 1)
+		dev_priv->vram_rblock_size *= 3;
+
+	NV_DEBUG(dev, "rblock %lld bytes\n",
+		 (u64)dev_priv->vram_rblock_size << 12);
+}
+
+static void
+nvaa_vram_preinit(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	/* To our knowledge, there's no large scale reordering of pages
+	 * that occurs on IGP chipsets.
+	 */
+	dev_priv->vram_rblock_size = 1;
+}
+
+static int
 nouveau_mem_detect(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
@@ -325,9 +388,18 @@ nouveau_mem_detect(struct drm_device *dev)
 		dev_priv->vram_size = nv_rd32(dev, NV04_PFB_FIFO_DATA);
 		dev_priv->vram_size |= (dev_priv->vram_size & 0xff) << 32;
 		dev_priv->vram_size &= 0xffffffff00ll;
-		if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac) {
+
+		switch (dev_priv->chipset) {
+		case 0xaa:
+		case 0xac:
+		case 0xaf:
 			dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10);
 			dev_priv->vram_sys_base <<= 12;
+			nvaa_vram_preinit(dev);
+			break;
+		default:
+			nv50_vram_preinit(dev);
+			break;
 		}
 	} else {
 		dev_priv->vram_size  = nv_rd32(dev, 0x10f20c) << 20;
@@ -345,6 +417,33 @@ nouveau_mem_detect(struct drm_device *dev)
 	return -ENOMEM;
 }
 
+#if __OS_HAS_AGP
+static unsigned long
+get_agp_mode(struct drm_device *dev, unsigned long mode)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+	/*
+	 * FW seems to be broken on nv18, it makes the card lock up
+	 * randomly.
+	 */
+	if (dev_priv->chipset == 0x18)
+		mode &= ~PCI_AGP_COMMAND_FW;
+
+	/*
+	 * AGP mode set in the command line.
+	 */
+	if (nouveau_agpmode > 0) {
+		bool agpv3 = mode & 0x8;
+		int rate = agpv3 ? nouveau_agpmode / 4 : nouveau_agpmode;
+
+		mode = (mode & ~0x7) | (rate & 0x7);
+	}
+
+	return mode;
+}
+#endif
+
 int
 nouveau_mem_reset_agp(struct drm_device *dev)
 {
@@ -355,7 +454,8 @@ nouveau_mem_reset_agp(struct drm_device *dev)
 	/* First of all, disable fast writes, otherwise if it's
 	 * already enabled in the AGP bridge and we disable the card's
 	 * AGP controller we might be locking ourselves out of it. */
-	if (nv_rd32(dev, NV04_PBUS_PCI_NV_19) & PCI_AGP_COMMAND_FW) {
+	if ((nv_rd32(dev, NV04_PBUS_PCI_NV_19) |
+	     dev->agp->mode) & PCI_AGP_COMMAND_FW) {
 		struct drm_agp_info info;
 		struct drm_agp_mode mode;
 
@@ -363,7 +463,7 @@ nouveau_mem_reset_agp(struct drm_device *dev)
 		if (ret)
 			return ret;
 
-		mode.mode = info.mode & ~PCI_AGP_COMMAND_FW;
+		mode.mode = get_agp_mode(dev, info.mode) & ~PCI_AGP_COMMAND_FW;
 		ret = drm_agp_enable(dev, mode);
 		if (ret)
 			return ret;
@@ -418,7 +518,7 @@ nouveau_mem_init_agp(struct drm_device *dev)
 	}
 
 	/* see agp.h for the AGPSTAT_* modes available */
-	mode.mode = info.mode;
+	mode.mode = get_agp_mode(dev, info.mode);
 	ret = drm_agp_enable(dev, mode);
 	if (ret) {
 		NV_ERROR(dev, "Unable to enable AGP: %d\n", ret);
@@ -433,24 +533,27 @@ nouveau_mem_init_agp(struct drm_device *dev)
 }
 
 int
-nouveau_mem_init(struct drm_device *dev)
+nouveau_mem_vram_init(struct drm_device *dev)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
-	int ret, dma_bits = 32;
-
-	dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);
-	dev_priv->gart_info.type = NOUVEAU_GART_NONE;
+	int ret, dma_bits;
 
 	if (dev_priv->card_type >= NV_50 &&
 	    pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
 		dma_bits = 40;
+	else
+		dma_bits = 32;
 
 	ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
-	if (ret) {
-		NV_ERROR(dev, "Error setting DMA mask: %d\n", ret);
+	if (ret)
 		return ret;
-	}
+
+	ret = nouveau_mem_detect(dev);
+	if (ret)
+		return ret;
+
+	dev_priv->fb_phys = pci_resource_start(dev->pdev, 1);
 
 	ret = nouveau_ttm_global_init(dev_priv);
 	if (ret)
@@ -465,8 +568,6 @@ nouveau_mem_init(struct drm_device *dev)
 		return ret;
 	}
 
-	spin_lock_init(&dev_priv->tile.lock);
-
 	dev_priv->fb_available_size = dev_priv->vram_size;
 	dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
 	if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
@@ -474,7 +575,16 @@ nouveau_mem_init(struct drm_device *dev)
 			pci_resource_len(dev->pdev, 1);
 	dev_priv->fb_mappable_pages >>= PAGE_SHIFT;
 
-	/* remove reserved space at end of vram from available amount */
+	/* reserve space at end of VRAM for PRAMIN */
+	if (dev_priv->chipset == 0x40 || dev_priv->chipset == 0x47 ||
+	    dev_priv->chipset == 0x49 || dev_priv->chipset == 0x4b)
+		dev_priv->ramin_rsvd_vram = (2 * 1024 * 1024);
+	else
+	if (dev_priv->card_type >= NV_40)
+		dev_priv->ramin_rsvd_vram = (1 * 1024 * 1024);
+	else
+		dev_priv->ramin_rsvd_vram = (512 * 1024);
+
 	dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
 	dev_priv->fb_aper_free = dev_priv->fb_available_size;
 
@@ -495,9 +605,23 @@ nouveau_mem_init(struct drm_device *dev)
 		nouveau_bo_ref(NULL, &dev_priv->vga_ram);
 	}
 
-	/* GART */
+	dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
+					 pci_resource_len(dev->pdev, 1),
+					 DRM_MTRR_WC);
+	return 0;
+}
+
+int
+nouveau_mem_gart_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
+	int ret;
+
+	dev_priv->gart_info.type = NOUVEAU_GART_NONE;
+
 #if !defined(__powerpc__) && !defined(__ia64__)
-	if (drm_device_is_agp(dev) && dev->agp && !nouveau_noagp) {
+	if (drm_device_is_agp(dev) && dev->agp && nouveau_agpmode) {
 		ret = nouveau_mem_init_agp(dev);
 		if (ret)
 			NV_ERROR(dev, "Error initialising AGP: %d\n", ret);
@@ -523,11 +647,150 @@ nouveau_mem_init(struct drm_device *dev)
 		return ret;
 	}
 
-	dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
-					 pci_resource_len(dev->pdev, 1),
-					 DRM_MTRR_WC);
-
 	return 0;
 }
 
+void
+nouveau_mem_timing_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nouveau_pm_memtimings *memtimings = &pm->memtimings;
+	struct nvbios *bios = &dev_priv->vbios;
+	struct bit_entry P;
+	u8 tUNK_0, tUNK_1, tUNK_2;
+	u8 tRP;		/* Byte 3 */
+	u8 tRAS;	/* Byte 5 */
+	u8 tRFC;	/* Byte 7 */
+	u8 tRC;		/* Byte 9 */
+	u8 tUNK_10, tUNK_11, tUNK_12, tUNK_13, tUNK_14;
+	u8 tUNK_18, tUNK_19, tUNK_20, tUNK_21;
+	u8 *mem = NULL, *entry;
+	int i, recordlen, entries;
+
+	if (bios->type == NVBIOS_BIT) {
+		if (bit_table(dev, 'P', &P))
+			return;
+
+		if (P.version == 1)
+			mem = ROMPTR(bios, P.data[4]);
+		else
+		if (P.version == 2)
+			mem = ROMPTR(bios, P.data[8]);
+		else {
+			NV_WARN(dev, "unknown mem for BIT P %d\n", P.version);
+		}
+	} else {
+		NV_DEBUG(dev, "BMP version too old for memory\n");
+		return;
+	}
+
+	if (!mem) {
+		NV_DEBUG(dev, "memory timing table pointer invalid\n");
+		return;
+	}
 
+	if (mem[0] != 0x10) {
+		NV_WARN(dev, "memory timing table 0x%02x unknown\n", mem[0]);
+		return;
+	}
+
+	/* validate record length */
+	entries   = mem[2];
+	recordlen = mem[3];
+	if (recordlen < 15) {
+		NV_ERROR(dev, "mem timing table length unknown: %d\n", mem[3]);
+		return;
+	}
+
+	/* parse vbios entries into common format */
+	memtimings->timing =
+		kcalloc(entries, sizeof(*memtimings->timing), GFP_KERNEL);
+	if (!memtimings->timing)
+		return;
+
+	entry = mem + mem[1];
+	for (i = 0; i < entries; i++, entry += recordlen) {
+		struct nouveau_pm_memtiming *timing = &pm->memtimings.timing[i];
+		if (entry[0] == 0)
+			continue;
+
+		tUNK_18 = 1;
+		tUNK_19 = 1;
+		tUNK_20 = 0;
+		tUNK_21 = 0;
+		switch (MIN(recordlen,21)) {
+		case 21:
+			tUNK_21 = entry[21];
+		case 20:
+			tUNK_20 = entry[20];
+		case 19:
+			tUNK_19 = entry[19];
+		case 18:
+			tUNK_18 = entry[18];
+		default:
+			tUNK_0  = entry[0];
+			tUNK_1  = entry[1];
+			tUNK_2  = entry[2];
+			tRP     = entry[3];
+			tRAS    = entry[5];
+			tRFC    = entry[7];
+			tRC     = entry[9];
+			tUNK_10 = entry[10];
+			tUNK_11 = entry[11];
+			tUNK_12 = entry[12];
+			tUNK_13 = entry[13];
+			tUNK_14 = entry[14];
+			break;
+		}
+
+		timing->reg_100220 = (tRC << 24 | tRFC << 16 | tRAS << 8 | tRP);
+
+		/* XXX: I don't trust the -1's and +1's... they must come
+		 *      from somewhere! */
+		timing->reg_100224 = ((tUNK_0 + tUNK_19 + 1) << 24 |
+				      tUNK_18 << 16 |
+				      (tUNK_1 + tUNK_19 + 1) << 8 |
+				      (tUNK_2 - 1));
+
+		timing->reg_100228 = (tUNK_12 << 16 | tUNK_11 << 8 | tUNK_10);
+		if(recordlen > 19) {
+			timing->reg_100228 += (tUNK_19 - 1) << 24;
+		} else {
+			timing->reg_100228 += tUNK_12 << 24;
+		}
+
+		/* XXX: reg_10022c */
+
+		timing->reg_100230 = (tUNK_20 << 24 | tUNK_21 << 16 |
+				      tUNK_13 << 8  | tUNK_13);
+
+		/* XXX: +6? */
+		timing->reg_100234 = (tRAS << 24 | (tUNK_19 + 6) << 8 | tRC);
+		if(tUNK_10 > tUNK_11) {
+			timing->reg_100234 += tUNK_10 << 16;
+		} else {
+			timing->reg_100234 += tUNK_11 << 16;
+		}
+
+		/* XXX; reg_100238, reg_10023c */
+		NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", i,
+			 timing->reg_100220, timing->reg_100224,
+			 timing->reg_100228, timing->reg_10022c);
+		NV_DEBUG(dev, "         230: %08x %08x %08x %08x\n",
+			 timing->reg_100230, timing->reg_100234,
+			 timing->reg_100238, timing->reg_10023c);
+	}
+
+	memtimings->nr_timing  = entries;
+	memtimings->supported = true;
+}
+
+void
+nouveau_mem_timing_fini(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pm_memtimings *mem = &dev_priv->engine.pm.memtimings;
+
+	kfree(mem->timing);
+}
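
The nv50_vram_preinit() added above cross-checks the VRAM size reported by the memory controller against a prediction derived from its configuration registers (partition mask in 0x001540, column/row/bank fields in 0x100204, second-row flag in 0x100200). The standalone sketch below reproduces only that arithmetic with invented register values, purely to make the field layout concrete; it is an illustration, not driver code.

/* Userspace sketch of the nv50_vram_preinit() size prediction.
 * All register values below are hypothetical examples. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t r0 = 0x00000004;   /* hypothetical 0x100200: second row populated */
	uint32_t r4 = 0x01449000;   /* hypothetical 0x100204: 9 col bits, 12/12 row bits, 8 banks */
	uint32_t rt = 0x00000000;   /* hypothetical 0x100250 */
	uint32_t ru = 0x000f0000;   /* hypothetical 0x001540: 4 partitions enabled */
	int i, parts = 0;
	int colbits, rowbitsa, rowbitsb, banks;
	uint64_t rowsize, predicted, rblock;

	for (i = 0; i < 8; i++)
		if (ru & (0x00010000 << i))
			parts++;

	colbits  =  (r4 & 0x0000f000) >> 12;
	rowbitsa = ((r4 & 0x000f0000) >> 16) + 8;
	rowbitsb = ((r4 & 0x00f00000) >> 20) + 8;
	banks    = ((r4 & 0x01000000) ? 8 : 4);

	/* bytes per row across all partitions, then scale by row count */
	rowsize = (uint64_t)parts * banks * (1 << colbits) * 8;
	predicted = rowsize << rowbitsa;
	if (r0 & 0x00000004)
		predicted += rowsize << rowbitsb;

	rblock = rowsize >> 12;
	if (rt & 1)
		rblock *= 3;

	printf("parts=%d banks=%d colbits=%d rowbits=%d/%d\n",
	       parts, banks, colbits, rowbitsa, rowbitsb);
	printf("predicted %llu MiB of VRAM, rblock %llu bytes\n",
	       (unsigned long long)(predicted >> 20),
	       (unsigned long long)(rblock << 12));
	return 0;
}

With these sample values the prediction comes to 1024 MiB and a 131072-byte rblock; the driver only warns when the prediction disagrees with dev_priv->vram_size.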

+ 5 - 4
drivers/gpu/drm/nouveau/nouveau_notifier.c

@@ -28,6 +28,7 @@
 #include "drmP.h"
 #include "drm.h"
 #include "nouveau_drv.h"
+#include "nouveau_ramht.h"
 
 int
 nouveau_notifier_init_channel(struct nouveau_channel *chan)
@@ -112,7 +113,7 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
 		return -ENOMEM;
 	}
 
-	offset = chan->notifier_bo->bo.mem.mm_node->start << PAGE_SHIFT;
+	offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
 	if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) {
 		target = NV_DMA_TARGET_VIDMEM;
 	} else
@@ -146,11 +147,11 @@ nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
 	nobj->dtor = nouveau_notifier_gpuobj_dtor;
 	nobj->priv = mem;
 
-	ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL);
+	ret = nouveau_ramht_insert(chan, handle, nobj);
+	nouveau_gpuobj_ref(NULL, &nobj);
 	if (ret) {
-		nouveau_gpuobj_del(dev, &nobj);
 		drm_mm_put_block(mem);
-		NV_ERROR(dev, "Error referencing notifier ctxdma: %d\n", ret);
+		NV_ERROR(dev, "Error adding notifier to ramht: %d\n", ret);
 		return ret;
 	}
 

File diff suppressed because it is too large
+ 198 - 513
drivers/gpu/drm/nouveau/nouveau_object.c


+ 205 - 0
drivers/gpu/drm/nouveau/nouveau_perf.c

@@ -0,0 +1,205 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_pm.h"
+
+static void
+legacy_perf_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nvbios *bios = &dev_priv->vbios;
+	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	char *perf, *entry, *bmp = &bios->data[bios->offset];
+	int headerlen, use_straps;
+
+	if (bmp[5] < 0x5 || bmp[6] < 0x14) {
+		NV_DEBUG(dev, "BMP version too old for perf\n");
+		return;
+	}
+
+	perf = ROMPTR(bios, bmp[0x73]);
+	if (!perf) {
+		NV_DEBUG(dev, "No memclock table pointer found.\n");
+		return;
+	}
+
+	switch (perf[0]) {
+	case 0x12:
+	case 0x14:
+	case 0x18:
+		use_straps = 0;
+		headerlen = 1;
+		break;
+	case 0x01:
+		use_straps = perf[1] & 1;
+		headerlen = (use_straps ? 8 : 2);
+		break;
+	default:
+		NV_WARN(dev, "Unknown memclock table version %x.\n", perf[0]);
+		return;
+	}
+
+	entry = perf + headerlen;
+	if (use_straps)
+		entry += (nv_rd32(dev, NV_PEXTDEV_BOOT_0) & 0x3c) >> 1;
+
+	sprintf(pm->perflvl[0].name, "performance_level_0");
+	pm->perflvl[0].memory = ROM16(entry[0]) * 20;
+	pm->nr_perflvl = 1;
+}
+
+void
+nouveau_perf_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nvbios *bios = &dev_priv->vbios;
+	struct bit_entry P;
+	u8 version, headerlen, recordlen, entries;
+	u8 *perf, *entry;
+	int vid, i;
+
+	if (bios->type == NVBIOS_BIT) {
+		if (bit_table(dev, 'P', &P))
+			return;
+
+		if (P.version != 1 && P.version != 2) {
+			NV_WARN(dev, "unknown perf for BIT P %d\n", P.version);
+			return;
+		}
+
+		perf = ROMPTR(bios, P.data[0]);
+		version   = perf[0];
+		headerlen = perf[1];
+		if (version < 0x40) {
+			recordlen = perf[3] + (perf[4] * perf[5]);
+			entries   = perf[2];
+		} else {
+			recordlen = perf[2] + (perf[3] * perf[4]);
+			entries   = perf[5];
+		}
+	} else {
+		if (bios->data[bios->offset + 6] < 0x25) {
+			legacy_perf_init(dev);
+			return;
+		}
+
+		perf = ROMPTR(bios, bios->data[bios->offset + 0x94]);
+		if (!perf) {
+			NV_DEBUG(dev, "perf table pointer invalid\n");
+			return;
+		}
+
+		version   = perf[1];
+		headerlen = perf[0];
+		recordlen = perf[3];
+		entries   = perf[2];
+	}
+
+	entry = perf + headerlen;
+	for (i = 0; i < entries; i++) {
+		struct nouveau_pm_level *perflvl = &pm->perflvl[pm->nr_perflvl];
+
+		if (entry[0] == 0xff) {
+			entry += recordlen;
+			continue;
+		}
+
+		switch (version) {
+		case 0x12:
+		case 0x13:
+		case 0x15:
+			perflvl->fanspeed = entry[55];
+			perflvl->voltage = entry[56];
+			perflvl->core = ROM32(entry[1]) * 10;
+			perflvl->memory = ROM32(entry[5]) * 20;
+			break;
+		case 0x21:
+		case 0x23:
+		case 0x24:
+			perflvl->fanspeed = entry[4];
+			perflvl->voltage = entry[5];
+			perflvl->core = ROM16(entry[6]) * 1000;
+
+			if (dev_priv->chipset == 0x49 ||
+			    dev_priv->chipset == 0x4b)
+				perflvl->memory = ROM16(entry[11]) * 1000;
+			else
+				perflvl->memory = ROM16(entry[11]) * 2000;
+
+			break;
+		case 0x25:
+			perflvl->fanspeed = entry[4];
+			perflvl->voltage = entry[5];
+			perflvl->core = ROM16(entry[6]) * 1000;
+			perflvl->shader = ROM16(entry[10]) * 1000;
+			perflvl->memory = ROM16(entry[12]) * 1000;
+			break;
+		case 0x30:
+			perflvl->memscript = ROM16(entry[2]);
+		case 0x35:
+			perflvl->fanspeed = entry[6];
+			perflvl->voltage = entry[7];
+			perflvl->core = ROM16(entry[8]) * 1000;
+			perflvl->shader = ROM16(entry[10]) * 1000;
+			perflvl->memory = ROM16(entry[12]) * 1000;
+			/*XXX: confirm on 0x35 */
+			perflvl->unk05 = ROM16(entry[16]) * 1000;
+			break;
+		case 0x40:
+#define subent(n) entry[perf[2] + ((n) * perf[3])]
+			perflvl->fanspeed = 0; /*XXX*/
+			perflvl->voltage = entry[2];
+			perflvl->core = (ROM16(subent(0)) & 0xfff) * 1000;
+			perflvl->shader = (ROM16(subent(1)) & 0xfff) * 1000;
+			perflvl->memory = (ROM16(subent(2)) & 0xfff) * 1000;
+			break;
+		}
+
+		/* make sure vid is valid */
+		if (pm->voltage.supported && perflvl->voltage) {
+			vid = nouveau_volt_vid_lookup(dev, perflvl->voltage);
+			if (vid < 0) {
+				NV_DEBUG(dev, "drop perflvl %d, bad vid\n", i);
+				entry += recordlen;
+				continue;
+			}
+		}
+
+		snprintf(perflvl->name, sizeof(perflvl->name),
+			 "performance_level_%d", i);
+		perflvl->id = i;
+		pm->nr_perflvl++;
+
+		entry += recordlen;
+	}
+}
+
+void
+nouveau_perf_fini(struct drm_device *dev)
+{
+}
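
nouveau_perf_init() above has to cope with several performance-table layouts: before version 0x40 the entry count sits in byte 2 and the record length is byte 3 plus byte 4 times byte 5, while from 0x40 onwards the fields move (base length in byte 2, sub-entry size and count in bytes 3 and 4, entry count in byte 5, with the subent() macro indexing the sub-entries). A minimal sketch of just that header decode, run over two invented headers, looks like this; the byte layout follows the hunk above, the sample bytes are hypothetical.

/* Sketch of the version-dependent perf-table header decode from
 * nouveau_perf_init(). Sample headers are invented for illustration. */
#include <stdio.h>
#include <stdint.h>

static void decode_header(const uint8_t *perf)
{
	uint8_t version   = perf[0];
	uint8_t headerlen = perf[1];
	uint8_t recordlen, entries;

	if (version < 0x40) {
		recordlen = perf[3] + (perf[4] * perf[5]);
		entries   = perf[2];
	} else {
		recordlen = perf[2] + (perf[3] * perf[4]);
		entries   = perf[5];
	}

	printf("version 0x%02x: headerlen=%u recordlen=%u entries=%u\n",
	       version, headerlen, recordlen, entries);
}

int main(void)
{
	/* hypothetical 0x25 header: 4 entries, 24-byte records, no sub-entries */
	const uint8_t v25[6] = { 0x25, 0x06, 0x04, 0x18, 0x00, 0x00 };
	/* hypothetical 0x40 header: 3 entries, 4 base bytes + 4 sub-entries of 4 bytes */
	const uint8_t v40[6] = { 0x40, 0x06, 0x04, 0x04, 0x04, 0x03 };

	decode_header(v25);
	decode_header(v40);
	return 0;
}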

+ 518 - 0
drivers/gpu/drm/nouveau/nouveau_pm.c

@@ -0,0 +1,518 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_pm.h"
+
+#include <linux/hwmon.h>
+#include <linux/hwmon-sysfs.h>
+
+static int
+nouveau_pm_clock_set(struct drm_device *dev, struct nouveau_pm_level *perflvl,
+		     u8 id, u32 khz)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	void *pre_state;
+
+	if (khz == 0)
+		return 0;
+
+	pre_state = pm->clock_pre(dev, perflvl, id, khz);
+	if (IS_ERR(pre_state))
+		return PTR_ERR(pre_state);
+
+	if (pre_state)
+		pm->clock_set(dev, pre_state);
+	return 0;
+}
+
+static int
+nouveau_pm_perflvl_set(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	int ret;
+
+	if (perflvl == pm->cur)
+		return 0;
+
+	if (pm->voltage.supported && pm->voltage_set && perflvl->voltage) {
+		ret = pm->voltage_set(dev, perflvl->voltage);
+		if (ret) {
+			NV_ERROR(dev, "voltage_set %d failed: %d\n",
+				 perflvl->voltage, ret);
+		}
+	}
+
+	nouveau_pm_clock_set(dev, perflvl, PLL_CORE, perflvl->core);
+	nouveau_pm_clock_set(dev, perflvl, PLL_SHADER, perflvl->shader);
+	nouveau_pm_clock_set(dev, perflvl, PLL_MEMORY, perflvl->memory);
+	nouveau_pm_clock_set(dev, perflvl, PLL_UNK05, perflvl->unk05);
+
+	pm->cur = perflvl;
+	return 0;
+}
+
+static int
+nouveau_pm_profile_set(struct drm_device *dev, const char *profile)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nouveau_pm_level *perflvl = NULL;
+
+	/* safety precaution, for now */
+	if (nouveau_perflvl_wr != 7777)
+		return -EPERM;
+
+	if (!pm->clock_set)
+		return -EINVAL;
+
+	if (!strncmp(profile, "boot", 4))
+		perflvl = &pm->boot;
+	else {
+		int pl = simple_strtol(profile, NULL, 10);
+		int i;
+
+		for (i = 0; i < pm->nr_perflvl; i++) {
+			if (pm->perflvl[i].id == pl) {
+				perflvl = &pm->perflvl[i];
+				break;
+			}
+		}
+
+		if (!perflvl)
+			return -EINVAL;
+	}
+
+	NV_INFO(dev, "setting performance level: %s\n", profile);
+	return nouveau_pm_perflvl_set(dev, perflvl);
+}
+
+static int
+nouveau_pm_perflvl_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	int ret;
+
+	if (!pm->clock_get)
+		return -EINVAL;
+
+	memset(perflvl, 0, sizeof(*perflvl));
+
+	ret = pm->clock_get(dev, PLL_CORE);
+	if (ret > 0)
+		perflvl->core = ret;
+
+	ret = pm->clock_get(dev, PLL_MEMORY);
+	if (ret > 0)
+		perflvl->memory = ret;
+
+	ret = pm->clock_get(dev, PLL_SHADER);
+	if (ret > 0)
+		perflvl->shader = ret;
+
+	ret = pm->clock_get(dev, PLL_UNK05);
+	if (ret > 0)
+		perflvl->unk05 = ret;
+
+	if (pm->voltage.supported && pm->voltage_get) {
+		ret = pm->voltage_get(dev);
+		if (ret > 0)
+			perflvl->voltage = ret;
+	}
+
+	return 0;
+}
+
+static void
+nouveau_pm_perflvl_info(struct nouveau_pm_level *perflvl, char *ptr, int len)
+{
+	char c[16], s[16], v[16], f[16];
+
+	c[0] = '\0';
+	if (perflvl->core)
+		snprintf(c, sizeof(c), " core %dMHz", perflvl->core / 1000);
+
+	s[0] = '\0';
+	if (perflvl->shader)
+		snprintf(s, sizeof(s), " shader %dMHz", perflvl->shader / 1000);
+
+	v[0] = '\0';
+	if (perflvl->voltage)
+		snprintf(v, sizeof(v), " voltage %dmV", perflvl->voltage * 10);
+
+	f[0] = '\0';
+	if (perflvl->fanspeed)
+		snprintf(f, sizeof(f), " fanspeed %d%%", perflvl->fanspeed);
+
+	snprintf(ptr, len, "memory %dMHz%s%s%s%s\n", perflvl->memory / 1000,
+		 c, s, v, f);
+}
+
+static ssize_t
+nouveau_pm_get_perflvl_info(struct device *d,
+			    struct device_attribute *a, char *buf)
+{
+	struct nouveau_pm_level *perflvl = (struct nouveau_pm_level *)a;
+	char *ptr = buf;
+	int len = PAGE_SIZE;
+
+	snprintf(ptr, len, "%d: ", perflvl->id);
+	ptr += strlen(buf);
+	len -= strlen(buf);
+
+	nouveau_pm_perflvl_info(perflvl, ptr, len);
+	return strlen(buf);
+}
+
+static ssize_t
+nouveau_pm_get_perflvl(struct device *d, struct device_attribute *a, char *buf)
+{
+	struct drm_device *dev = pci_get_drvdata(to_pci_dev(d));
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nouveau_pm_level cur;
+	int len = PAGE_SIZE, ret;
+	char *ptr = buf;
+
+	if (!pm->cur)
+		snprintf(ptr, len, "setting: boot\n");
+	else if (pm->cur == &pm->boot)
+		snprintf(ptr, len, "setting: boot\nc: ");
+	else
+		snprintf(ptr, len, "setting: static %d\nc: ", pm->cur->id);
+	ptr += strlen(buf);
+	len -= strlen(buf);
+
+	ret = nouveau_pm_perflvl_get(dev, &cur);
+	if (ret == 0)
+		nouveau_pm_perflvl_info(&cur, ptr, len);
+	return strlen(buf);
+}
+
+static ssize_t
+nouveau_pm_set_perflvl(struct device *d, struct device_attribute *a,
+		       const char *buf, size_t count)
+{
+	struct drm_device *dev = pci_get_drvdata(to_pci_dev(d));
+	int ret;
+
+	ret = nouveau_pm_profile_set(dev, buf);
+	if (ret)
+		return ret;
+	return strlen(buf);
+}
+
+static DEVICE_ATTR(performance_level, S_IRUGO | S_IWUSR,
+		   nouveau_pm_get_perflvl, nouveau_pm_set_perflvl);
+
+static int
+nouveau_sysfs_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct device *d = &dev->pdev->dev;
+	int ret, i;
+
+	ret = device_create_file(d, &dev_attr_performance_level);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < pm->nr_perflvl; i++) {
+		struct nouveau_pm_level *perflvl = &pm->perflvl[i];
+
+		perflvl->dev_attr.attr.name = perflvl->name;
+		perflvl->dev_attr.attr.mode = S_IRUGO;
+		perflvl->dev_attr.show = nouveau_pm_get_perflvl_info;
+		perflvl->dev_attr.store = NULL;
+		sysfs_attr_init(&perflvl->dev_attr.attr);
+
+		ret = device_create_file(d, &perflvl->dev_attr);
+		if (ret) {
+			NV_ERROR(dev, "failed pervlvl %d sysfs: %d\n",
+				 perflvl->id, i);
+			perflvl->dev_attr.attr.name = NULL;
+			nouveau_pm_fini(dev);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static void
+nouveau_sysfs_fini(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct device *d = &dev->pdev->dev;
+	int i;
+
+	device_remove_file(d, &dev_attr_performance_level);
+	for (i = 0; i < pm->nr_perflvl; i++) {
+		struct nouveau_pm_level *pl = &pm->perflvl[i];
+
+		if (!pl->dev_attr.attr.name)
+			break;
+
+		device_remove_file(d, &pl->dev_attr);
+	}
+}
+
+static ssize_t
+nouveau_hwmon_show_temp(struct device *d, struct device_attribute *a, char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", pm->temp_get(dev)*1000);
+}
+static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, nouveau_hwmon_show_temp,
+						  NULL, 0);
+
+static ssize_t
+nouveau_hwmon_max_temp(struct device *d, struct device_attribute *a, char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", temp->down_clock*1000);
+}
+static ssize_t
+nouveau_hwmon_set_max_temp(struct device *d, struct device_attribute *a,
+						const char *buf, size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
+	long value;
+
+	if (strict_strtol(buf, 10, &value) == -EINVAL)
+		return count;
+
+	temp->down_clock = value/1000;
+
+	nouveau_temp_safety_checks(dev);
+
+	return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_max, S_IRUGO | S_IWUSR, nouveau_hwmon_max_temp,
+						  nouveau_hwmon_set_max_temp,
+						  0);
+
+static ssize_t
+nouveau_hwmon_critical_temp(struct device *d, struct device_attribute *a,
+							char *buf)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
+
+	return snprintf(buf, PAGE_SIZE, "%d\n", temp->critical*1000);
+}
+static ssize_t
+nouveau_hwmon_set_critical_temp(struct device *d, struct device_attribute *a,
+							    const char *buf,
+								size_t count)
+{
+	struct drm_device *dev = dev_get_drvdata(d);
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nouveau_pm_threshold_temp *temp = &pm->threshold_temp;
+	long value;
+
+	if (strict_strtol(buf, 10, &value) == -EINVAL)
+		return count;
+
+	temp->critical = value/1000;
+
+	nouveau_temp_safety_checks(dev);
+
+	return count;
+}
+static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO | S_IWUSR,
+						nouveau_hwmon_critical_temp,
+						nouveau_hwmon_set_critical_temp,
+						0);
+
+static ssize_t nouveau_hwmon_show_name(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	return sprintf(buf, "nouveau\n");
+}
+static SENSOR_DEVICE_ATTR(name, S_IRUGO, nouveau_hwmon_show_name, NULL, 0);
+
+static ssize_t nouveau_hwmon_show_update_rate(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	return sprintf(buf, "1000\n");
+}
+static SENSOR_DEVICE_ATTR(update_rate, S_IRUGO,
+						nouveau_hwmon_show_update_rate,
+						NULL, 0);
+
+static struct attribute *hwmon_attributes[] = {
+	&sensor_dev_attr_temp1_input.dev_attr.attr,
+	&sensor_dev_attr_temp1_max.dev_attr.attr,
+	&sensor_dev_attr_temp1_crit.dev_attr.attr,
+	&sensor_dev_attr_name.dev_attr.attr,
+	&sensor_dev_attr_update_rate.dev_attr.attr,
+	NULL
+};
+
+static const struct attribute_group hwmon_attrgroup = {
+	.attrs = hwmon_attributes,
+};
+
+static int
+nouveau_hwmon_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct device *hwmon_dev;
+	int ret;
+
+	if (!pm->temp_get)
+		return -ENODEV;
+
+	hwmon_dev = hwmon_device_register(&dev->pdev->dev);
+	if (IS_ERR(hwmon_dev)) {
+		ret = PTR_ERR(hwmon_dev);
+		NV_ERROR(dev,
+			"Unable to register hwmon device: %d\n", ret);
+		return ret;
+	}
+	dev_set_drvdata(hwmon_dev, dev);
+	ret = sysfs_create_group(&hwmon_dev->kobj,
+					&hwmon_attrgroup);
+	if (ret) {
+		NV_ERROR(dev,
+			"Unable to create hwmon sysfs file: %d\n", ret);
+		hwmon_device_unregister(hwmon_dev);
+		return ret;
+	}
+
+	pm->hwmon = hwmon_dev;
+
+	return 0;
+}
+
+static void
+nouveau_hwmon_fini(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+
+	if (pm->hwmon) {
+		sysfs_remove_group(&pm->hwmon->kobj, &hwmon_attrgroup);
+		hwmon_device_unregister(pm->hwmon);
+	}
+}
+
+int
+nouveau_pm_init(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	char info[256];
+	int ret, i;
+
+	nouveau_volt_init(dev);
+	nouveau_perf_init(dev);
+	nouveau_temp_init(dev);
+	nouveau_mem_timing_init(dev);
+
+	NV_INFO(dev, "%d available performance level(s)\n", pm->nr_perflvl);
+	for (i = 0; i < pm->nr_perflvl; i++) {
+		nouveau_pm_perflvl_info(&pm->perflvl[i], info, sizeof(info));
+		NV_INFO(dev, "%d: %s", pm->perflvl[i].id, info);
+	}
+
+	/* determine current ("boot") performance level */
+	ret = nouveau_pm_perflvl_get(dev, &pm->boot);
+	if (ret == 0) {
+		pm->cur = &pm->boot;
+
+		nouveau_pm_perflvl_info(&pm->boot, info, sizeof(info));
+		NV_INFO(dev, "c: %s", info);
+	}
+
+	/* switch performance levels now if requested */
+	if (nouveau_perflvl != NULL) {
+		ret = nouveau_pm_profile_set(dev, nouveau_perflvl);
+		if (ret) {
+			NV_ERROR(dev, "error setting perflvl \"%s\": %d\n",
+				 nouveau_perflvl, ret);
+		}
+	}
+
+	nouveau_sysfs_init(dev);
+	nouveau_hwmon_init(dev);
+
+	return 0;
+}
+
+void
+nouveau_pm_fini(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+
+	if (pm->cur != &pm->boot)
+		nouveau_pm_perflvl_set(dev, &pm->boot);
+
+	nouveau_mem_timing_fini(dev);
+	nouveau_temp_fini(dev);
+	nouveau_perf_fini(dev);
+	nouveau_volt_fini(dev);
+
+	nouveau_hwmon_fini(dev);
+	nouveau_sysfs_fini(dev);
+}
+
+void
+nouveau_pm_resume(struct drm_device *dev)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
+	struct nouveau_pm_level *perflvl;
+
+	if (pm->cur == &pm->boot)
+		return;
+
+	perflvl = pm->cur;
+	pm->cur = &pm->boot;
+	nouveau_pm_perflvl_set(dev, perflvl);
+}
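
The new performance_level sysfs file feeds nouveau_pm_profile_set() above, which accepts either the literal string "boot" or a numeric level id and refuses anything that does not match a parsed table entry. A small userspace-style sketch of that string handling follows, using standard strtol in place of the kernel's simple_strtol and a made-up level table; it only illustrates the lookup, not the clock or voltage programming.

/* Sketch of the profile-string parsing in nouveau_pm_profile_set().
 * The level table and ids below are invented for illustration. */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct fake_perflvl { int id; int core_khz; };

static const struct fake_perflvl levels[] = {
	{ 0, 169000 }, { 1, 275000 }, { 2, 575000 },
};

static const struct fake_perflvl *lookup(const char *profile)
{
	long pl;
	size_t i;

	if (!strncmp(profile, "boot", 4))
		return &levels[0];		/* stand-in for pm->boot */

	pl = strtol(profile, NULL, 10);
	for (i = 0; i < sizeof(levels) / sizeof(levels[0]); i++) {
		if (levels[i].id == pl)
			return &levels[i];
	}

	return NULL;				/* driver returns -EINVAL here */
}

int main(void)
{
	const char *inputs[3] = { "boot", "2", "7" };
	size_t i;

	for (i = 0; i < 3; i++) {
		const struct fake_perflvl *lvl = lookup(inputs[i]);

		if (lvl)
			printf("\"%s\" -> level %d (core %d kHz)\n",
			       inputs[i], lvl->id, lvl->core_khz);
		else
			printf("\"%s\" -> invalid\n", inputs[i]);
	}
	return 0;
}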

+ 74 - 0
drivers/gpu/drm/nouveau/nouveau_pm.h

@@ -0,0 +1,74 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_PM_H__
+#define __NOUVEAU_PM_H__
+
+/* nouveau_pm.c */
+int  nouveau_pm_init(struct drm_device *dev);
+void nouveau_pm_fini(struct drm_device *dev);
+void nouveau_pm_resume(struct drm_device *dev);
+
+/* nouveau_volt.c */
+void nouveau_volt_init(struct drm_device *);
+void nouveau_volt_fini(struct drm_device *);
+int  nouveau_volt_vid_lookup(struct drm_device *, int voltage);
+int  nouveau_volt_lvl_lookup(struct drm_device *, int vid);
+int  nouveau_voltage_gpio_get(struct drm_device *);
+int  nouveau_voltage_gpio_set(struct drm_device *, int voltage);
+
+/* nouveau_perf.c */
+void nouveau_perf_init(struct drm_device *);
+void nouveau_perf_fini(struct drm_device *);
+
+/* nouveau_mem.c */
+void nouveau_mem_timing_init(struct drm_device *);
+void nouveau_mem_timing_fini(struct drm_device *);
+
+/* nv04_pm.c */
+int nv04_pm_clock_get(struct drm_device *, u32 id);
+void *nv04_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
+			u32 id, int khz);
+void nv04_pm_clock_set(struct drm_device *, void *);
+
+/* nv50_pm.c */
+int nv50_pm_clock_get(struct drm_device *, u32 id);
+void *nv50_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
+			u32 id, int khz);
+void nv50_pm_clock_set(struct drm_device *, void *);
+
+/* nva3_pm.c */
+int nva3_pm_clock_get(struct drm_device *, u32 id);
+void *nva3_pm_clock_pre(struct drm_device *, struct nouveau_pm_level *,
+			u32 id, int khz);
+void nva3_pm_clock_set(struct drm_device *, void *);
+
+/* nouveau_temp.c */
+void nouveau_temp_init(struct drm_device *dev);
+void nouveau_temp_fini(struct drm_device *dev);
+void nouveau_temp_safety_checks(struct drm_device *dev);
+int nv40_temp_get(struct drm_device *dev);
+int nv84_temp_get(struct drm_device *dev);
+
+#endif

+ 289 - 0
drivers/gpu/drm/nouveau/nouveau_ramht.c

@@ -0,0 +1,289 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "drmP.h"
+
+#include "nouveau_drv.h"
+#include "nouveau_ramht.h"
+
+static u32
+nouveau_ramht_hash_handle(struct nouveau_channel *chan, u32 handle)
+{
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_ramht *ramht = chan->ramht;
+	u32 hash = 0;
+	int i;
+
+	NV_DEBUG(dev, "ch%d handle=0x%08x\n", chan->id, handle);
+
+	for (i = 32; i > 0; i -= ramht->bits) {
+		hash ^= (handle & ((1 << ramht->bits) - 1));
+		handle >>= ramht->bits;
+	}
+
+	if (dev_priv->card_type < NV_50)
+		hash ^= chan->id << (ramht->bits - 4);
+	hash <<= 3;
+
+	NV_DEBUG(dev, "hash=0x%08x\n", hash);
+	return hash;
+}
+
+static int
+nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
+			  u32 offset)
+{
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	u32 ctx = nv_ro32(ramht, offset + 4);
+
+	if (dev_priv->card_type < NV_40)
+		return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
+	return (ctx != 0);
+}
+
+static int
+nouveau_ramht_entry_same_channel(struct nouveau_channel *chan,
+				 struct nouveau_gpuobj *ramht, u32 offset)
+{
+	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
+	u32 ctx = nv_ro32(ramht, offset + 4);
+
+	if (dev_priv->card_type >= NV_50)
+		return true;
+	else if (dev_priv->card_type >= NV_40)
+		return chan->id ==
+			((ctx >> NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f);
+	else
+		return chan->id ==
+			((ctx >> NV_RAMHT_CONTEXT_CHANNEL_SHIFT) & 0x1f);
+}
+
+int
+nouveau_ramht_insert(struct nouveau_channel *chan, u32 handle,
+		     struct nouveau_gpuobj *gpuobj)
+{
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
+	struct nouveau_ramht_entry *entry;
+	struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
+	unsigned long flags;
+	u32 ctx, co, ho;
+
+	if (nouveau_ramht_find(chan, handle))
+		return -EEXIST;
+
+	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+	if (!entry)
+		return -ENOMEM;
+	entry->channel = chan;
+	entry->gpuobj = NULL;
+	entry->handle = handle;
+	nouveau_gpuobj_ref(gpuobj, &entry->gpuobj);
+
+	if (dev_priv->card_type < NV_40) {
+		ctx = NV_RAMHT_CONTEXT_VALID | (gpuobj->cinst >> 4) |
+		      (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
+		      (gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
+	} else
+	if (dev_priv->card_type < NV_50) {
+		ctx = (gpuobj->cinst >> 4) |
+		      (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
+		      (gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
+	} else {
+		if (gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
+			ctx = (gpuobj->cinst << 10) | 2;
+		} else {
+			ctx = (gpuobj->cinst >> 4) |
+			      ((gpuobj->engine <<
+				NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
+		}
+	}
+
+	spin_lock_irqsave(&chan->ramht->lock, flags);
+	list_add(&entry->head, &chan->ramht->entries);
+
+	co = ho = nouveau_ramht_hash_handle(chan, handle);
+	do {
+		if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
+			NV_DEBUG(dev,
+				 "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
+				 chan->id, co, handle, ctx);
+			nv_wo32(ramht, co + 0, handle);
+			nv_wo32(ramht, co + 4, ctx);
+
+			spin_unlock_irqrestore(&chan->ramht->lock, flags);
+			instmem->flush(dev);
+			return 0;
+		}
+		NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
+			 chan->id, co, nv_ro32(ramht, co));
+
+		co += 8;
+		if (co >= ramht->size)
+			co = 0;
+	} while (co != ho);
+
+	NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
+	list_del(&entry->head);
+	spin_unlock_irqrestore(&chan->ramht->lock, flags);
+	kfree(entry);
+	return -ENOMEM;
+}
+
+static void
+nouveau_ramht_remove_locked(struct nouveau_channel *chan, u32 handle)
+{
+	struct drm_device *dev = chan->dev;
+	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
+	struct nouveau_gpuobj *ramht = chan->ramht->gpuobj;
+	struct nouveau_ramht_entry *entry, *tmp;
+	u32 co, ho;
+
+	list_for_each_entry_safe(entry, tmp, &chan->ramht->entries, head) {
+		if (entry->channel != chan || entry->handle != handle)
+			continue;
+
+		nouveau_gpuobj_ref(NULL, &entry->gpuobj);
+		list_del(&entry->head);
+		kfree(entry);
+		break;
+	}
+
+	co = ho = nouveau_ramht_hash_handle(chan, handle);
+	do {
+		if (nouveau_ramht_entry_valid(dev, ramht, co) &&
+		    nouveau_ramht_entry_same_channel(chan, ramht, co) &&
+		    (handle == nv_ro32(ramht, co))) {
+			NV_DEBUG(dev,
+				 "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
+				 chan->id, co, handle, nv_ro32(ramht, co + 4));
+			nv_wo32(ramht, co + 0, 0x00000000);
+			nv_wo32(ramht, co + 4, 0x00000000);
+			instmem->flush(dev);
+			return;
+		}
+
+		co += 8;
+		if (co >= ramht->size)
+			co = 0;
+	} while (co != ho);
+
+	NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
+		 chan->id, handle);
+}
+
+void
+nouveau_ramht_remove(struct nouveau_channel *chan, u32 handle)
+{
+	struct nouveau_ramht *ramht = chan->ramht;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ramht->lock, flags);
+	nouveau_ramht_remove_locked(chan, handle);
+	spin_unlock_irqrestore(&ramht->lock, flags);
+}
+
+struct nouveau_gpuobj *
+nouveau_ramht_find(struct nouveau_channel *chan, u32 handle)
+{
+	struct nouveau_ramht *ramht = chan->ramht;
+	struct nouveau_ramht_entry *entry;
+	struct nouveau_gpuobj *gpuobj = NULL;
+	unsigned long flags;
+
+	if (unlikely(!chan->ramht))
+		return NULL;
+
+	spin_lock_irqsave(&ramht->lock, flags);
+	list_for_each_entry(entry, &chan->ramht->entries, head) {
+		if (entry->channel == chan && entry->handle == handle) {
+			gpuobj = entry->gpuobj;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&ramht->lock, flags);
+
+	return gpuobj;
+}
+
+int
+nouveau_ramht_new(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
+		  struct nouveau_ramht **pramht)
+{
+	struct nouveau_ramht *ramht;
+
+	ramht = kzalloc(sizeof(*ramht), GFP_KERNEL);
+	if (!ramht)
+		return -ENOMEM;
+
+	ramht->dev = dev;
+	kref_init(&ramht->refcount);
+	ramht->bits = drm_order(gpuobj->size / 8);
+	INIT_LIST_HEAD(&ramht->entries);
+	spin_lock_init(&ramht->lock);
+	nouveau_gpuobj_ref(gpuobj, &ramht->gpuobj);
+
+	*pramht = ramht;
+	return 0;
+}
+
+static void
+nouveau_ramht_del(struct kref *ref)
+{
+	struct nouveau_ramht *ramht =
+		container_of(ref, struct nouveau_ramht, refcount);
+
+	nouveau_gpuobj_ref(NULL, &ramht->gpuobj);
+	kfree(ramht);
+}
+
+void
+nouveau_ramht_ref(struct nouveau_ramht *ref, struct nouveau_ramht **ptr,
+		  struct nouveau_channel *chan)
+{
+	struct nouveau_ramht_entry *entry, *tmp;
+	struct nouveau_ramht *ramht;
+	unsigned long flags;
+
+	if (ref)
+		kref_get(&ref->refcount);
+
+	ramht = *ptr;
+	if (ramht) {
+		spin_lock_irqsave(&ramht->lock, flags);
+		list_for_each_entry_safe(entry, tmp, &ramht->entries, head) {
+			if (entry->channel != chan)
+				continue;
+
+			nouveau_ramht_remove_locked(chan, entry->handle);
+		}
+		spin_unlock_irqrestore(&ramht->lock, flags);
+
+		kref_put(&ramht->refcount, nouveau_ramht_del);
+	}
+	*ptr = ref;
+}
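
The RAMHT code above hashes an object handle by folding it in ramht->bits-wide chunks, XORing in the channel id on pre-NV50 chips, and shifting left by 3 to get a byte offset into the hash table; nouveau_ramht_insert() then resolves collisions by probing forward in 8-byte steps. A standalone sketch of just the hash, with an assumed 512-entry (9-bit) table and example handles, is shown below for illustration.

/* Standalone sketch of nouveau_ramht_hash_handle(); the 9-bit table size,
 * channel id and handle are example values, not taken from hardware. */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

static uint32_t ramht_hash(uint32_t handle, int bits, int chan_id, bool pre_nv50)
{
	uint32_t hash = 0;
	int i;

	/* fold the handle into 'bits'-wide chunks */
	for (i = 32; i > 0; i -= bits) {
		hash ^= (handle & ((1 << bits) - 1));
		handle >>= bits;
	}

	/* pre-NV50 chips mix the channel id into the hash */
	if (pre_nv50)
		hash ^= chan_id << (bits - 4);

	return hash << 3;	/* each RAMHT entry is 8 bytes */
}

int main(void)
{
	uint32_t handle = 0xbeef0201;	/* example object handle */
	int bits = 9;			/* assumed RAMHT with 512 entries */

	printf("handle 0x%08x -> RAMHT offset 0x%04x (ch2, <NV50)\n",
	       handle, ramht_hash(handle, bits, 2, true));
	printf("handle 0x%08x -> RAMHT offset 0x%04x (NV50+)\n",
	       handle, ramht_hash(handle, bits, 0, false));
	return 0;
}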

+ 55 - 0
drivers/gpu/drm/nouveau/nouveau_ramht.h

@@ -0,0 +1,55 @@
+/*
+ * Copyright 2010 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#ifndef __NOUVEAU_RAMHT_H__
+#define __NOUVEAU_RAMHT_H__
+
+struct nouveau_ramht_entry {
+	struct list_head head;
+	struct nouveau_channel *channel;
+	struct nouveau_gpuobj *gpuobj;
+	u32 handle;
+};
+
+struct nouveau_ramht {
+	struct drm_device *dev;
+	struct kref refcount;
+	spinlock_t lock;
+	struct nouveau_gpuobj *gpuobj;
+	struct list_head entries;
+	int bits;
+};
+
+extern int  nouveau_ramht_new(struct drm_device *, struct nouveau_gpuobj *,
+			      struct nouveau_ramht **);
+extern void nouveau_ramht_ref(struct nouveau_ramht *, struct nouveau_ramht **,
+			      struct nouveau_channel *unref_channel);
+
+extern int  nouveau_ramht_insert(struct nouveau_channel *, u32 handle,
+				 struct nouveau_gpuobj *);
+extern void nouveau_ramht_remove(struct nouveau_channel *, u32 handle);
+extern struct nouveau_gpuobj *
+nouveau_ramht_find(struct nouveau_channel *chan, u32 handle);
+
+#endif

Some files were not shown because too many files changed in this diff