
Merge branch 'for-rmk' of git://git.marvell.com/orion into devel

Russell King, 16 years ago
parent commit d1a7fddf42
100 changed files with 8787 additions and 2015 deletions
  1. + 62 - 0    Documentation/ABI/stable/sysfs-driver-usb-usbtmc
  2. + 16 - 0    Documentation/ABI/testing/sysfs-bus-usb
  3. + 43 - 0    Documentation/ABI/testing/sysfs-bus-usb-devices-usbsevseg
  4. + 3 - 0     Documentation/DocBook/gadget.tmpl
  5. + 3 - 0     Documentation/devices.txt
  6. + 15 - 17   Documentation/filesystems/ext4.txt
  7. + 76 - 0    Documentation/hwmon/adt7470
  8. + 2 - 2     Documentation/hwmon/it87
  9. + 0 - 10    Documentation/hwmon/lm85
  10. + 4 - 5    Documentation/hwmon/lm87
  11. + 29 - 16  Documentation/hwmon/lm90
  12. + 1 - 6    Documentation/hwmon/pc87360
  13. + 1 - 1    Documentation/hwmon/pc87427
  14. + 35 - 2   Documentation/hwmon/w83781d
  15. + 33 - 10  Documentation/hwmon/w83791d
  16. + 3 - 0    Documentation/ioctl-number.txt
  17. + 19 - 0   Documentation/kernel-parameters.txt
  18. + 1 - 0    Documentation/sysctl/kernel.txt
  19. + 17 - 0   Documentation/usb/anchors.txt
  20. + 46 - 0   Documentation/usb/misc_usbsevseg.txt
  21. + 4 - 4    Documentation/usb/power-management.txt
  22. + 1 - 1    Documentation/video4linux/CARDLIST.au0828
  23. + 1 - 0    Documentation/video4linux/CARDLIST.tuner
  24. + 8 - 1    MAINTAINERS
  25. + 35 - 0   arch/arm/mach-kirkwood/common.c
  26. + 2 - 0    arch/arm/mach-kirkwood/common.h
  27. + 11 - 0   arch/arm/mach-kirkwood/rd88f6281-setup.c
  28. + 5 - 0    arch/arm/mach-mv78xx0/db78x00-bp-setup.c
  29. + 37 - 1   arch/arm/mach-orion5x/common.c
  30. + 2 - 0    arch/arm/mach-orion5x/common.h
  31. + 11 - 0   arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c
  32. + 11 - 0   arch/arm/mach-orion5x/rd88f5181l-ge-setup.c
  33. + 11 - 0   arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
  34. + 11 - 0   arch/arm/mach-orion5x/wrt350n-v2-setup.c
  35. + 2 - 0    arch/cris/Makefile
  36. + 1 - 1    arch/cris/arch-v10/boot/compressed/Makefile
  37. + 0 - 0    arch/cris/arch-v10/boot/compressed/decompress.lds
  38. + 1 - 1    arch/cris/arch-v10/boot/rescue/Makefile
  39. + 0 - 0    arch/cris/arch-v10/boot/rescue/rescue.lds
  40. + 1 - 1    arch/cris/arch-v32/boot/compressed/Makefile
  41. + 0 - 0    arch/cris/arch-v32/boot/compressed/decompress.lds
  42. + 1 - 1    arch/cris/arch-v32/boot/rescue/Makefile
  43. + 0 - 0    arch/cris/arch-v32/boot/rescue/rescue.lds
  44. + 0 - 0    arch/cris/configs/artpec_3_defconfig
  45. + 0 - 0    arch/cris/configs/etrax-100lx_defconfig
  46. + 0 - 0    arch/cris/configs/etrax-100lx_v2_defconfig
  47. + 0 - 0    arch/cris/configs/etraxfs_defconfig
  48. + 1 - 0    arch/x86/mm/highmem_32.c
  49. + 30 - 7   block/blk-core.c
  50. + 18 - 2   block/blk-merge.c
  51. + 0 - 2    block/blk-settings.c
  52. + 1 - 0    block/blk.h
  53. + 5 - 11   block/elevator.c
  54. + 1 - 2    block/genhd.c
  55. + 2 - 0    drivers/Kconfig
  56. + 1 - 0    drivers/Makefile
  57. + 0 - 3    drivers/block/ub.c
  58. + 2 - 1    drivers/gpu/drm/Kconfig
  59. + 3 - 2    drivers/gpu/drm/Makefile
  60. + 51 - 1   drivers/gpu/drm/drm_agpsupport.c
  61. + 69 - 0   drivers/gpu/drm/drm_cache.c
  62. + 6 - 0    drivers/gpu/drm/drm_drv.c
  63. + 7 - 1    drivers/gpu/drm/drm_fops.c
  64. + 421 - 0  drivers/gpu/drm/drm_gem.c
  65. + 387 - 77 drivers/gpu/drm/drm_irq.c
  66. + 2 - 0    drivers/gpu/drm/drm_memory.c
  67. + 4 - 1    drivers/gpu/drm/drm_mm.c
  68. + 122 - 13 drivers/gpu/drm/drm_proc.c
  69. + 10 - 1   drivers/gpu/drm/drm_stub.c
  70. + 1 - 1    drivers/gpu/drm/drm_sysfs.c
  71. + 6 - 1    drivers/gpu/drm/i915/Makefile
  72. + 226 - 106 drivers/gpu/drm/i915/i915_dma.c
  73. + 14 - 462 drivers/gpu/drm/i915/i915_drv.c
  74. + 347 - 827 drivers/gpu/drm/i915/i915_drv.h
  75. + 2558 - 0 drivers/gpu/drm/i915/i915_gem.c
  76. + 201 - 0  drivers/gpu/drm/i915/i915_gem_debug.c
  77. + 292 - 0  drivers/gpu/drm/i915/i915_gem_proc.c
  78. + 257 - 0  drivers/gpu/drm/i915/i915_gem_tiling.c
  79. + 378 - 136 drivers/gpu/drm/i915/i915_irq.c
  80. + 371 - 0  drivers/gpu/drm/i915/i915_opregion.c
  81. + 1417 - 0 drivers/gpu/drm/i915/i915_reg.h
  82. + 509 - 0  drivers/gpu/drm/i915/i915_suspend.c
  83. + 15 - 14  drivers/gpu/drm/mga/mga_drv.c
  84. + 5 - 1    drivers/gpu/drm/mga/mga_drv.h
  85. + 53 - 21  drivers/gpu/drm/mga/mga_irq.c
  86. + 1 - 1    drivers/gpu/drm/mga/mga_state.c
  87. + 15 - 14  drivers/gpu/drm/r128/r128_drv.c
  88. + 7 - 4    drivers/gpu/drm/r128/r128_drv.h
  89. + 35 - 20  drivers/gpu/drm/r128/r128_irq.c
  90. + 1 - 1    drivers/gpu/drm/r128/r128_state.c
  91. + 38 - 15  drivers/gpu/drm/radeon/radeon_cp.c
  92. + 28 - 4   drivers/gpu/drm/radeon/radeon_drv.c
  93. + 46 - 11  drivers/gpu/drm/radeon/radeon_drv.h
  94. + 168 - 100 drivers/gpu/drm/radeon/radeon_irq.c
  95. + 1 - 1    drivers/gpu/drm/radeon/radeon_state.c
  96. + 5 - 5    drivers/gpu/drm/sis/sis_mm.c
  97. + 14 - 12  drivers/gpu/drm/via/via_drv.c
  98. + 10 - 6   drivers/gpu/drm/via/via_drv.h
  99. + 59 - 46  drivers/gpu/drm/via/via_irq.c
  100. + 1 - 2   drivers/gpu/drm/via/via_mm.c

+ 62 - 0
Documentation/ABI/stable/sysfs-driver-usb-usbtmc

@@ -0,0 +1,62 @@
+What:		/sys/bus/usb/drivers/usbtmc/devices/*/interface_capabilities
+What:		/sys/bus/usb/drivers/usbtmc/devices/*/device_capabilities
+Date:		August 2008
+Contact:	Greg Kroah-Hartman <gregkh@suse.de>
+Description:
+		These files show the various USB TMC capabilities as described
+		by the device itself.  The full description of the bitfields
+		can be found in the USB TMC documents from the USB-IF entitled
+		"Universal Serial Bus Test and Measurement Class Specification
+		(USBTMC) Revision 1.0" section 4.2.1.8.
+
+		The files are read only.
+
+
+What:		/sys/bus/usb/drivers/usbtmc/devices/*/usb488_interface_capabilities
+What:		/sys/bus/usb/drivers/usbtmc/devices/*/usb488_device_capabilities
+Date:		August 2008
+Contact:	Greg Kroah-Hartman <gregkh@suse.de>
+Description:
+		These files show the various USB TMC capabilities as described
+		by the device itself.  The full description of the bitfields
+		can be found in the USB TMC documents from the USB-IF entitled
+		"Universal Serial Bus Test and Measurement Class, Subclass
+		USB488 Specification (USBTMC-USB488) Revision 1.0" section
+		4.2.2.
+
+		The files are read only.
+
+
+What:		/sys/bus/usb/drivers/usbtmc/devices/*/TermChar
+Date:		August 2008
+Contact:	Greg Kroah-Hartman <gregkh@suse.de>
+Description:
+		This file is the TermChar value to be sent to the USB TMC
+		device as described by the document, "Universal Serial Bus Test
+		and Measurement Class Specification
+		(USBTMC) Revision 1.0" as published by the USB-IF.
+
+		Note that the TermCharEnabled file determines if this value is
+		sent to the device or not.
+
+
+What:		/sys/bus/usb/drivers/usbtmc/devices/*/TermCharEnabled
+Date:		August 2008
+Contact:	Greg Kroah-Hartman <gregkh@suse.de>
+Description:
+		This file determines if the TermChar is to be sent to the
+		device on every transaction or not.  For more details about
+		this, please see the document, "Universal Serial Bus Test and
+		Measurement Class Specification (USBTMC) Revision 1.0" as
+		published by the USB-IF.
+
+
+What:		/sys/bus/usb/drivers/usbtmc/devices/*/auto_abort
+Date:		August 2008
+Contact:	Greg Kroah-Hartman <gregkh@suse.de>
+Description:
+		This file determines if the transaction of the USB TMC
+		device is to be automatically aborted if there is any error.
+		For more details about this, please see the document,
+		"Universal Serial Bus Test and Measurement Class Specification
+		(USBTMC) Revision 1.0" as published by the USB-IF.

+ 16 - 0
Documentation/ABI/testing/sysfs-bus-usb

@@ -85,3 +85,19 @@ Description:
 Users:
 		PowerTOP <power@bughost.org>
 		http://www.lesswatts.org/projects/powertop/
+
+What:		/sys/bus/usb/devices/<busnum>-<devnum>...:<config num>-<interface num>/supports_autosuspend
+Date:		January 2008
+KernelVersion:	2.6.27
+Contact:	Sarah Sharp <sarah.a.sharp@intel.com>
+Description:
+		When read, this file returns 1 if the interface driver
+		for this interface supports autosuspend.  It also
+		returns 1 if no driver has claimed this interface, as an
+		unclaimed interface will not stop the device from being
+		autosuspended if all other interface drivers are idle.
+		The file returns 0 if autosuspend support has not been
+		added to the driver.
+Users:
+		USB PM tool
+		git://git.moblin.org/users/sarah/usb-pm-tool/
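
A small C sketch of how a tool like the USB PM tool above might consume this
attribute, assuming interface directories are the entries containing ':' under
/sys/bus/usb/devices (a sysfs naming convention, not something this file
documents):

#include <stdio.h>
#include <string.h>
#include <dirent.h>

/* Hypothetical sketch: report interfaces whose driver lacks autosuspend
 * support (attribute reads 0), which will block device autosuspend. */
int main(void)
{
	DIR *d = opendir("/sys/bus/usb/devices");
	struct dirent *e;
	char path[512], buf[8];
	FILE *f;

	if (!d)
		return 1;
	while ((e = readdir(d)) != NULL) {
		if (!strchr(e->d_name, ':'))	/* interfaces only */
			continue;
		snprintf(path, sizeof(path),
			 "/sys/bus/usb/devices/%s/supports_autosuspend",
			 e->d_name);
		f = fopen(path, "r");
		if (!f)
			continue;
		if (fgets(buf, sizeof(buf), f) && buf[0] == '0')
			printf("%s: driver lacks autosuspend support\n",
			       e->d_name);
		fclose(f);
	}
	closedir(d);
	return 0;
}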

+ 43 - 0
Documentation/ABI/testing/sysfs-bus-usb-devices-usbsevseg

@@ -0,0 +1,43 @@
+Where:		/sys/bus/usb/.../powered
+Date:		August 2008
+Kernel Version:	2.6.26
+Contact:	Harrison Metzger <harrisonmetz@gmail.com>
+Description:	Controls whether the device's display will be powered.
+		A value of 0 is off and a non-zero value is on.
+
+Where:		/sys/bus/usb/.../mode_msb
+Where:		/sys/bus/usb/.../mode_lsb
+Date:		August 2008
+Kernel Version:	2.6.26
+Contact:	Harrison Metzger <harrisonmetz@gmail.com>
+Description:	Controls the device's display mode.
+		For a 6 character display the values are
+			MSB 0x06; LSB 0x3F, and
+		for an 8 character display the values are
+			MSB 0x08; LSB 0xFF.
+
+Where:		/sys/bus/usb/.../textmode
+Date:		August 2008
+Kernel Version:	2.6.26
+Contact:	Harrison Metzger <harrisonmetz@gmail.com>
+Description:	Controls the way the device interprets its text buffer.
+		raw:	each character controls its segment manually
+		hex:	each character is between 0-15
+		ascii:	each character is between '0'-'9' and 'A'-'F'.
+
+Where:		/sys/bus/usb/.../text
+Date:		August 2008
+Kernel Version:	2.6.26
+Contact:	Harrison Metzger <harrisonmetz@gmail.com>
+Description:	The text (or data) for the device to display
+
+Where:		/sys/bus/usb/.../decimals
+Date:		August 2008
+Kernel Version:	2.6.26
+Contact:	Harrison Metzger <harrisonmetz@gmail.com>
+Description:	Controls the decimal places on the device.
+		To set the nth decimal place, give this field
+		the value of 10 ** n. Assume this field has
+		the value k with one or more decimal places set;
+		to also set the mth place (where m is not set),
+		change this field's value to k + 10 ** m.
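
(Worked example of the decimals encoding above: writing 1, i.e. 10 ** 0, sets
the 0th decimal place; if the field already holds k = 1 and the 3rd place is
wanted as well, write 1 + 10 ** 3 = 1001.  This matches the usage example in
Documentation/usb/misc_usbsevseg.txt further down.)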

+ 3 - 0
Documentation/DocBook/gadget.tmpl

@@ -557,6 +557,9 @@ Near-term plans include converting all of them, except for "gadgetfs".
 </para>
 
 !Edrivers/usb/gadget/f_acm.c
+!Edrivers/usb/gadget/f_ecm.c
+!Edrivers/usb/gadget/f_subset.c
+!Edrivers/usb/gadget/f_obex.c
 !Edrivers/usb/gadget/f_serial.c
 
 </sect1>

+ 3 - 0
Documentation/devices.txt

@@ -2571,6 +2571,9 @@ Your cooperation is appreciated.
 		160 = /dev/usb/legousbtower0	1st USB Legotower device
 		    ...
 		175 = /dev/usb/legousbtower15	16th USB Legotower device
+		176 = /dev/usb/usbtmc1	First USB TMC device
+		   ...
+		192 = /dev/usb/usbtmc16	16th USB TMC device
 		240 = /dev/usb/dabusb0	First daubusb device
 		    ...
 		243 = /dev/usb/dabusb3	Fourth dabusb device

+ 15 - 17
Documentation/filesystems/ext4.txt

@@ -2,19 +2,24 @@
 Ext4 Filesystem
 ===============
 
-This is a development version of the ext4 filesystem, an advanced level
-of the ext3 filesystem which incorporates scalability and reliability
-enhancements for supporting large filesystems (64 bit) in keeping with
-increasing disk capacities and state-of-the-art feature requirements.
+Ext4 is an advanced level of the ext3 filesystem which incorporates
+scalability and reliability enhancements for supporting large filesystems
+(64 bit) in keeping with increasing disk capacities and state-of-the-art
+feature requirements.
 
-Mailing list: linux-ext4@vger.kernel.org
+Mailing list:	linux-ext4@vger.kernel.org
+Web site:	http://ext4.wiki.kernel.org
 
 
 1. Quick usage instructions:
 ===========================
 
+Note: More extensive information for getting started with ext4 can be
+      found at the ext4 wiki site at the URL:
+      http://ext4.wiki.kernel.org/index.php/Ext4_Howto
+
   - Compile and install the latest version of e2fsprogs (as of this
-    writing version 1.41) from:
+    writing version 1.41.3) from:
 
     http://sourceforge.net/project/showfiles.php?group_id=2406
 	
@@ -36,11 +41,9 @@ Mailing list: linux-ext4@vger.kernel.org
 
     	# mke2fs -t ext4 /dev/hda1
 
-    Or configure an existing ext3 filesystem to support extents and set
-    the test_fs flag to indicate that it's ok for an in-development
-    filesystem to touch this filesystem:
+    Or to configure an existing ext3 filesystem to support extents: 
 
-	# tune2fs -O extents -E test_fs /dev/hda1
+	# tune2fs -O extents /dev/hda1
 
     If the filesystem was created with 128 byte inodes, it can be
     converted to use 256 byte for greater efficiency via:
@@ -104,8 +107,8 @@ exist yet so I'm not sure they're in the near-term roadmap.
 The big performance win will come with mballoc, delalloc and flex_bg
 grouping of bitmaps and inode tables.  Some test results available here:
 
- - http://www.bullopensource.org/ext4/20080530/ffsb-write-2.6.26-rc2.html
- - http://www.bullopensource.org/ext4/20080530/ffsb-readwrite-2.6.26-rc2.html
+ - http://www.bullopensource.org/ext4/20080818-ffsb/ffsb-write-2.6.27-rc1.html
+ - http://www.bullopensource.org/ext4/20080818-ffsb/ffsb-readwrite-2.6.27-rc1.html
 
 3. Options
 ==========
@@ -214,9 +217,6 @@ noreservation
 bsddf		(*)	Make 'df' act like BSD.
 minixdf			Make 'df' act like Minix.
 
-check=none		Don't do extra checking of bitmaps on mount.
-nocheck
-
 debug			Extra debugging information is sent to syslog.
 
 errors=remount-ro(*)	Remount the filesystem read-only on an error.
@@ -253,8 +253,6 @@ nobh			(a) cache disk block mapping information
 			"nobh" option tries to avoid associating buffer
 			heads (supported only for "writeback" mode).
 
-mballoc		(*)	Use the multiple block allocator for block allocation
-nomballoc		disabled multiple block allocator for block allocation.
 stripe=n		Number of filesystem blocks that mballoc will try
 			to use for allocation size and alignment. For RAID5/6
 			systems this should be the number of data

+ 76 - 0
Documentation/hwmon/adt7470

@@ -0,0 +1,76 @@
+Kernel driver adt7470
+=====================
+
+Supported chips:
+  * Analog Devices ADT7470
+    Prefix: 'adt7470'
+    Addresses scanned: I2C 0x2C, 0x2E, 0x2F
+    Datasheet: Publicly available at the Analog Devices website
+
+Author: Darrick J. Wong
+
+Description
+-----------
+
+This driver implements support for the Analog Devices ADT7470 chip.  There may
+be other chips that implement this interface.
+
+The ADT7470 uses the 2-wire interface compatible with the SMBus 2.0
+specification. Using an analog to digital converter it measures up to ten (10)
+external temperatures. It has four (4) 16-bit counters for measuring fan speed.
+There are four (4) PWM outputs that can be used to control fan speed.
+
+A sophisticated control system for the PWM outputs is designed into the ADT7470
+that allows fan speed to be adjusted automatically based on any of the ten
+temperature sensors. Each PWM output is individually adjustable and
+programmable. Once configured, the ADT7470 will adjust the PWM outputs in
+response to the measured temperatures without further host intervention.  This
+feature can also be disabled for manual control of the PWMs.
+
+Each of the measured inputs (temperature, fan speed) has corresponding high/low
+limit values. The ADT7470 will signal an ALARM if any measured value exceeds
+either limit.
+
+The ADT7470 DOES NOT sample all inputs continuously.  A single pin on the
+ADT7470 is connected to a multitude of thermal diodes, but the chip must be
+instructed explicitly to read the multitude of diodes.  If you want to use
+automatic fan control mode, you must manually read any of the temperature
+sensors or the fan control algorithm will not run.  The chip WILL NOT DO THIS
+AUTOMATICALLY; this must be done from userspace.  This may be a bug in the chip
+design, given that many other AD chips take care of this.  The driver will not
+read the registers more often than once every 5 seconds.  Further,
+configuration data is only read once per minute.
+
+Special Features
+----------------
+
+The ADT7470 has an 8-bit ADC and is capable of measuring temperatures with 1
+degC resolution.
+
+The Analog Devices datasheet is very detailed and describes a procedure for
+determining an optimal configuration for the automatic PWM control.
+
+Configuration Notes
+-------------------
+
+Besides the standard interfaces, this driver adds the following:
+
+* PWM Control
+
+* pwm#_auto_point1_pwm and pwm#_auto_point1_temp and
+* pwm#_auto_point2_pwm and pwm#_auto_point2_temp -
+
+point1: Set the pwm speed at a lower temperature bound.
+point2: Set the pwm speed at a higher temperature bound.
+
+The ADT7470 will scale the pwm between the lower and higher pwm speed when
+the temperature is between the two temperature boundaries.  PWM values range
+from 0 (off) to 255 (full speed).  Fan speed will be set to maximum when the
+temperature sensor associated with the PWM control exceeds
+pwm#_auto_point2_temp.
+
+Notes
+-----
+
+As stated above, the temperature inputs must be read periodically from
+userspace in order for the automatic pwm algorithm to run.
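
(Worked example of the auto points above, with hypothetical values and
assuming the standard hwmon convention of millidegrees Celsius for the temp
attributes: writing 128 to pwm1_auto_point1_pwm and 40000 to
pwm1_auto_point1_temp, then 255 to pwm1_auto_point2_pwm and 60000 to
pwm1_auto_point2_temp, scales pwm1 from half speed at 40 degrees C up to full
speed at 60 degrees C, and pins it at 255 above that.)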

+ 2 - 2
Documentation/hwmon/it87

@@ -136,10 +136,10 @@ once-only alarms.
 The IT87xx only updates its values each 1.5 seconds; reading it more often
 will do no harm, but will return 'old' values.
 
-To change sensor N to a thermistor, 'echo 2 > tempN_type' where N is 1, 2,
+To change sensor N to a thermistor, 'echo 4 > tempN_type' where N is 1, 2,
 or 3. To change sensor N to a thermal diode, 'echo 3 > tempN_type'.
 Give 0 for unused sensor. Any other value is invalid. To configure this at
-startup, consult lm_sensors's /etc/sensors.conf. (2 = thermistor;
+startup, consult lm_sensors's /etc/sensors.conf. (4 = thermistor;
 3 = thermal diode)
 
 

+ 0 - 10
Documentation/hwmon/lm85

@@ -163,16 +163,6 @@ configured individually according to the following options.
 * pwm#_auto_pwm_min - this specifies the PWM value for temp#_auto_temp_off
                       temperature. (PWM value from 0 to 255)
 
-* pwm#_auto_pwm_freq - select base frequency of PWM output. You can select
-                       in range of 10.0 to 94.0 Hz in .1 Hz units.
-		       (Values 100 to 940).
-
-The pwm#_auto_pwm_freq can be set to one of the following 8 values. Setting the
-frequency to a value not on this list, will result in the next higher frequency
-being selected. The actual device frequency may vary slightly from this
-specification as designed by the manufacturer. Consult the datasheet for more
-details. (PWM Frequency values:  100, 150, 230, 300, 380, 470, 620, 940)
-
 * pwm#_auto_pwm_minctl - this flag selects the fans' behaviour at the
                          temp#_auto_temp_off temperature. Write 1 to keep fans
			 spinning at pwm#_auto_pwm_min or write 0 to turn them off.

+ 4 - 5
Documentation/hwmon/lm87

@@ -65,11 +65,10 @@ The LM87 has four pins which can serve one of two possible functions,
 depending on the hardware configuration.
 
 Some functions share pins, so not all functions are available at the same
-time. Which are depends on the hardware setup. This driver assumes that
-the BIOS configured the chip correctly. In that respect, it differs from
-the original driver (from lm_sensors for Linux 2.4), which would force the
-LM87 to an arbitrary, compile-time chosen mode, regardless of the actual
-chipset wiring.
+time. Which are depends on the hardware setup. This driver normally
+assumes that firmware configured the chip correctly. Where this is not
+the case, platform code must set the I2C client's platform_data to point
+to a u8 value to be written to the channel register.
 
 For reference, here is the list of exclusive functions:
  - in0+in5 (default) or temp3

+ 29 - 16
Documentation/hwmon/lm90

@@ -11,7 +11,7 @@ Supported chips:
     Prefix: 'lm99'
     Addresses scanned: I2C 0x4c and 0x4d
     Datasheet: Publicly available at the National Semiconductor website
-               http://www.national.com/pf/LM/LM89.html
+               http://www.national.com/mpf/LM/LM89.html
   * National Semiconductor LM99
     Prefix: 'lm99'
     Addresses scanned: I2C 0x4c and 0x4d
@@ -21,18 +21,32 @@ Supported chips:
     Prefix: 'lm86'
     Addresses scanned: I2C 0x4c
     Datasheet: Publicly available at the National Semiconductor website
-               http://www.national.com/pf/LM/LM86.html
+               http://www.national.com/mpf/LM/LM86.html
   * Analog Devices ADM1032
     Prefix: 'adm1032'
     Addresses scanned: I2C 0x4c and 0x4d
-    Datasheet: Publicly available at the Analog Devices website
-               http://www.analog.com/en/prod/0,2877,ADM1032,00.html
+    Datasheet: Publicly available at the ON Semiconductor website
+               http://www.onsemi.com/PowerSolutions/product.do?id=ADM1032
   * Analog Devices ADT7461
     Prefix: 'adt7461'
     Addresses scanned: I2C 0x4c and 0x4d
-    Datasheet: Publicly available at the Analog Devices website
-               http://www.analog.com/en/prod/0,2877,ADT7461,00.html
-    Note: Only if in ADM1032 compatibility mode
+    Datasheet: Publicly available at the ON Semiconductor website
+               http://www.onsemi.com/PowerSolutions/product.do?id=ADT7461
+  * Maxim MAX6646
+    Prefix: 'max6646'
+    Addresses scanned: I2C 0x4d
+    Datasheet: Publicly available at the Maxim website
+               http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3497
+  * Maxim MAX6647
+    Prefix: 'max6646'
+    Addresses scanned: I2C 0x4e
+    Datasheet: Publicly available at the Maxim website
+               http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3497
+  * Maxim MAX6649
+    Prefix: 'max6646'
+    Addresses scanned: I2C 0x4c
+    Datasheet: Publicly available at the Maxim website
+               http://www.maxim-ic.com/quick_view2.cfm/qv_pk/3497
   * Maxim MAX6657
     Prefix: 'max6657'
     Addresses scanned: I2C 0x4c
@@ -70,25 +84,21 @@ Description
 
 The LM90 is a digital temperature sensor. It senses its own temperature as
 well as the temperature of up to one external diode. It is compatible
-with many other devices such as the LM86, the LM89, the LM99, the ADM1032,
-the MAX6657, MAX6658, MAX6659, MAX6680 and the MAX6681 all of which are
-supported by this driver.
+with many other devices, many of which are supported by this driver.
 
 Note that there is no easy way to differentiate between the MAX6657,
 MAX6658 and MAX6659 variants. The extra address and features of the
 MAX6659 are not supported by this driver. The MAX6680 and MAX6681 only
 differ in their pinout, therefore they obviously can't (and don't need to)
-be distinguished. Additionally, the ADT7461 is supported if found in
-ADM1032 compatibility mode.
+be distinguished.
 
 The specificity of this family of chipsets over the ADM1021/LM84
 family is that it features critical limits with hysteresis, and an
 increased resolution of the remote temperature measurement.
 
 The different chipsets of the family are not strictly identical, although
-very similar. This driver doesn't handle any specific feature for now,
-with the exception of SMBus PEC. For reference, here comes a non-exhaustive
-list of specific features:
+very similar. For reference, here is a non-exhaustive list of specific
+features:
 
 LM90:
   * Filter and alert configuration register at 0xBF.
@@ -114,9 +124,11 @@ ADT7461:
   * Lower resolution for remote temperature
 
 MAX6657 and MAX6658:
+  * Better local resolution
   * Remote sensor type selection
 
 MAX6659:
+  * Better local resolution
   * Selectable address
   * Second critical temperature limit
   * Remote sensor type selection
@@ -127,7 +139,8 @@ MAX6680 and MAX6681:
 
 All temperature values are given in degrees Celsius. Resolution
 is 1.0 degree for the local temperature, 0.125 degree for the remote
-temperature.
+temperature, except for the MAX6657, MAX6658 and MAX6659 which have a
+resolution of 0.125 degree for both temperatures.
 
 Each sensor has its own high and low limits, plus a critical limit.
 Additionally, there is a relative hysteresis value common to both critical

+ 1 - 6
Documentation/hwmon/pc87360

@@ -5,12 +5,7 @@ Supported chips:
   * National Semiconductor PC87360, PC87363, PC87364, PC87365 and PC87366
     Prefixes: 'pc87360', 'pc87363', 'pc87364', 'pc87365', 'pc87366'
     Addresses scanned: none, address read from Super I/O config space
-    Datasheets:
-        http://www.national.com/pf/PC/PC87360.html
-        http://www.national.com/pf/PC/PC87363.html
-        http://www.national.com/pf/PC/PC87364.html
-        http://www.national.com/pf/PC/PC87365.html
-        http://www.national.com/pf/PC/PC87366.html
+    Datasheets: No longer available
 
 Authors: Jean Delvare <khali@linux-fr.org>
 

+ 1 - 1
Documentation/hwmon/pc87427

@@ -5,7 +5,7 @@ Supported chips:
   * National Semiconductor PC87427
     Prefix: 'pc87427'
     Addresses scanned: none, address read from Super I/O config space
-    Datasheet: http://www.winbond.com.tw/E-WINBONDHTM/partner/apc_007.html
+    Datasheet: No longer available
 
 Author: Jean Delvare <khali@linux-fr.org>
 

+ 35 - 2
Documentation/hwmon/w83781d

@@ -353,7 +353,7 @@ in6=255
 
 # PWM
 
-Additional info about PWM on the AS99127F (may apply to other Asus
+* Additional info about PWM on the AS99127F (may apply to other Asus
 chips as well) by Jean Delvare as of 2004-04-09:
 
 AS99127F revision 2 seems to have two PWM registers at 0x59 and 0x5A,
@@ -396,7 +396,7 @@ Please contact us if you can figure out how it is supposed to work. As
 long as we don't know more, the w83781d driver doesn't handle PWM on
 AS99127F chips at all.
 
-Additional info about PWM on the AS99127F rev.1 by Hector Martin:
+* Additional info about PWM on the AS99127F rev.1 by Hector Martin:
 
 I've been fiddling around with the (in)famous 0x59 register and
 found out the following values do work as a form of coarse pwm:
@@ -418,3 +418,36 @@ change.
 My mobo is an ASUS A7V266-E. This behavior is similar to what I got
 with speedfan under Windows, where 0-15% would be off, 15-2x% (can't
 remember the exact value) would be 70% and higher would be full on.
+
+* Additional info about PWM on the AS99127F rev.1 from lm-sensors
+  ticket #2350:
+
+I conducted some experiments on an Asus P3B-F motherboard with an
+AS99127F (Ver. 1).
+
+I confirm that register 0x59 controls the CPU_Fan header on this
+motherboard, and register 0x5a controls PWR_Fan.
+
+To avoid depending on a specific fan, the measurements were taken with
+a digital scope and no fan connected. I found that the P3B-F actually
+outputs a variable DC voltage on the fan header's center pin, so it
+looks like the PWM signal is filtered on this motherboard.
+
+Here are some of the measurements:
+
+0x80     20 mV
+0x81     20 mV
+0x82    232 mV
+0x83   1.2  V
+0x84   2.31 V
+0x85   3.44 V
+0x86   4.62 V
+0x87   5.81 V
+0x88   7.01 V
+0x89   8.22 V
+0x8a   9.42 V
+0x8b  10.6  V
+0x8c  11.9  V
+0x8d  12.4  V
+0x8e  12.4  V
+0x8f  12.4  V

+ 33 - 10
Documentation/hwmon/w83791d

@@ -58,29 +58,35 @@ internal state that allows no clean access (Bank with ID register is not
 currently selected). If you know the address of the chip, use a 'force'
 parameter; this will put it into a more well-behaved state first.
 
-The driver implements three temperature sensors, five fan rotation speed
-sensors, and ten voltage sensors.
+The driver implements three temperature sensors, ten voltage sensors,
+five fan rotation speed sensors and manual PWM control of each fan.
 
 Temperatures are measured in degrees Celsius and measurement resolution is 1
 degC for temp1 and 0.5 degC for temp2 and temp3. An alarm is triggered when
 the temperature gets higher than the Overtemperature Shutdown value; it stays
 on until the temperature falls below the Hysteresis value.
 
+Voltage sensors (also known as IN sensors) report their values in millivolts.
+An alarm is triggered if the voltage has crossed a programmable minimum
+or maximum limit.
+
 Fan rotation speeds are reported in RPM (rotations per minute). An alarm is
 triggered if the rotation speed has dropped below a programmable limit. Fan
 readings can be divided by a programmable divider (1, 2, 4, 8, 16,
 32, 64 or 128 for all fans) to give the readings more range or accuracy.
 
-Voltage sensors (also known as IN sensors) report their values in millivolts.
-An alarm is triggered if the voltage has crossed a programmable minimum
-or maximum limit.
+Each fan is controlled by PWM. The PWM duty cycle can be read and
+set for each fan separately. Valid values range from 0 (stop) to 255 (full).
+PWMs 1-3 support Thermal Cruise mode, in which each PWM is automatically
+regulated to keep the corresponding temperature (temp1-3) at a certain target.
+See below for the description of the sysfs-interface.
 
 The w83791d has a global bit used to enable beeping from the speaker when an
 alarm is triggered as well as a bitmask to enable or disable the beep for
 specific alarms. You need both the global beep enable bit and the
 corresponding beep bit to be on for a triggered alarm to sound a beep.
 
-The sysfs interface to the gloabal enable is via the sysfs beep_enable file.
+The sysfs interface to the global enable is via the sysfs beep_enable file.
 This file is used for both legacy and new code.
 
 The sysfs interface to the beep bitmask has migrated from the original legacy
@@ -105,6 +111,27 @@ going forward.
 The driver reads the hardware chip values at most once every three seconds.
 User mode code requesting values more often will receive cached values.
 
+/sys files
+----------
+The sysfs-interface is documented in the 'sysfs-interface' file. Only
+chip-specific options are documented here.
+
+pwm[1-3]_enable -	this file controls mode of fan/temperature control for
+			fan 1-3. Fan/PWM 4-5 only support manual mode.
+		            * 1 Manual mode
+		            * 2 Thermal Cruise mode
+		            * 3 Fan Speed Cruise mode (no further support)
+
+temp[1-3]_target -	defines the target temperature for Thermal Cruise mode.
+			Unit: millidegree Celsius
+			RW
+
+temp[1-3]_tolerance -	temperature tolerance for Thermal Cruise mode.
+			Specifies an interval around the target temperature
+			in which the fan speed is not changed.
+			Unit: millidegree Celsius
+			RW
+
 Alarms bitmap vs. beep_mask bitmask
 ------------------------------------
 For legacy code using the alarms and beep_mask files:
@@ -132,7 +159,3 @@ tart2        :  alarms: 0x020000 beep_mask: 0x080000 <== mismatch
 tart3        :  alarms: 0x040000 beep_mask: 0x100000 <== mismatch
 case_open    :  alarms: 0x001000 beep_mask: 0x001000
 global_enable:  alarms: -------- beep_mask: 0x800000 (modified via beep_enable)
-
-W83791D TODO:
----------------
-Provide a patch for smart-fan control (still need appropriate motherboard/fans)
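
(Worked example of the Thermal Cruise attributes above, with hypothetical
values: writing 2 to pwm1_enable, 50000 to temp1_target and 2000 to
temp1_tolerance puts fan 1 into Thermal Cruise mode, regulating its PWM so
that temp1 stays within 2 degrees C of a 50 degree C target.)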

+ 3 - 0
Documentation/ioctl-number.txt

@@ -92,6 +92,7 @@ Code	Seq#	Include File		Comments
 'J'	00-1F	drivers/scsi/gdth_ioctl.h
 'K'	all	linux/kd.h
 'L'	00-1F	linux/loop.h
+'L'	20-2F	drivers/usb/misc/vstusb.h
 'L'	E0-FF	linux/ppdd.h		encrypted disk device driver
 					<http://linux01.gwdg.de/~alatham/ppdd.html>
 'M'	all	linux/soundcard.h
@@ -110,6 +111,8 @@ Code	Seq#	Include File		Comments
 'W'	00-1F	linux/wanrouter.h	conflict!
 'X'	all	linux/xfs_fs.h
 'Y'	all	linux/cyclades.h
+'['	00-07	linux/usb/usbtmc.h	USB Test and Measurement Devices
+					<mailto:gregkh@suse.de>
 'a'	all				ATM on linux
 					<http://lrcwww.epfl.ch/linux-atm/magic.html>
 'b'	00-FF				bit3 vme host bridge

+ 19 - 0
Documentation/kernel-parameters.txt

@@ -2253,6 +2253,25 @@ and is between 256 and 4096 characters. It is defined in the file
 			autosuspended.  Devices for which the delay is set
 			to a negative value won't be autosuspended at all.
 
+	usbcore.usbfs_snoop=
+			[USB] Set to log all usbfs traffic (default 0 = off).
+
+	usbcore.blinkenlights=
+			[USB] Set to cycle leds on hubs (default 0 = off).
+
+	usbcore.old_scheme_first=
+			[USB] Start with the old device initialization
+			scheme (default 0 = off).
+
+	usbcore.use_both_schemes=
+			[USB] Try the other device initialization scheme
+			if the first one fails (default 1 = enabled).
+
+	usbcore.initial_descriptor_timeout=
+			[USB] Specifies timeout for the initial 64-byte
+                        USB_REQ_GET_DESCRIPTOR request in milliseconds
+			(default 5000 = 5.0 seconds).
+
 	usbhid.mousepoll=
 			[USBHID] The interval which mice are to be polled at.
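
(Example of the usbcore options above: booting with
"usbcore.blinkenlights=1 usbcore.initial_descriptor_timeout=10000" cycles the
hub LEDs and doubles the initial descriptor timeout to 10 seconds.)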
 

+ 1 - 0
Documentation/sysctl/kernel.txt

@@ -369,4 +369,5 @@ can be ORed together:
   2 - A module was force loaded by insmod -f.
       Set by modutils >= 2.4.9 and module-init-tools.
   4 - Unsafe SMP processors: SMP with CPUs not designed for SMP.
+ 64 - A module from drivers/staging was loaded.
 

+ 17 - 0
Documentation/usb/anchors.txt

@@ -52,6 +52,11 @@ Therefore no guarantee is made that the URBs have been unlinked when
 the call returns. They may be unlinked later but will be unlinked in
 finite time.
 
+usb_scuttle_anchored_urbs()
+---------------------------
+
+All URBs of an anchor are unanchored en masse.
+
 usb_wait_anchor_empty_timeout()
 -------------------------------
 
@@ -59,4 +64,16 @@ This function waits for all URBs associated with an anchor to finish
 or a timeout, whichever comes first. Its return value will tell you
 whether the timeout was reached.
 
+usb_anchor_empty()
+------------------
+
+Returns true if no URBs are associated with an anchor. Locking
+is the caller's responsibility.
+
+usb_get_from_anchor()
+---------------------
 
+Returns the oldest anchored URB of an anchor. The URB is unanchored
+and returned with a reference. Since you may mix URBs to several
+destinations in one anchor, there is no guarantee that the
+chronologically first submitted URB is returned.
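
A minimal kernel-side sketch of the usb_get_from_anchor() loop described
above (my_drain_anchor is an illustrative name, not kernel API):

#include <linux/usb.h>

/* Illustrative helper: pull every URB out of @anchor one by one, unlink
 * it, and drop the reference that usb_get_from_anchor() handed us. */
static void my_drain_anchor(struct usb_anchor *anchor)
{
	struct urb *urb;

	while ((urb = usb_get_from_anchor(anchor)) != NULL) {
		usb_unlink_urb(urb);
		usb_put_urb(urb);
	}
}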

+ 46 - 0
Documentation/usb/misc_usbsevseg.txt

@@ -0,0 +1,46 @@
+USB 7-Segment Numeric Display
+Manufactured by Delcom Engineering
+
+Device Information
+------------------
+USB VENDOR_ID	0x0fc5
+USB PRODUCT_ID	0x1227
+Both the 6 character and 8 character displays have the same PRODUCT_ID,
+and according to Delcom Engineering no queryable information
+can be obtained from the device to tell them apart.
+
+Device Modes
+------------
+By default, the driver assumes the display is only 6 characters.
+The mode for 6 characters is:
+	MSB 0x06; LSB 0x3f
+For the 8 character display:
+	MSB 0x08; LSB 0xff
+The device can accept "text" either in raw, hex, or ascii textmode.
+raw controls each segment manually,
+hex expects a value between 0-15 per character,
+ascii expects a value between '0'-'9' and 'A'-'F'.
+The default is ascii.
+
+Device Operation
+----------------
+1.	Turn on the device:
+	echo 1 > /sys/bus/usb/.../powered
+2.	Set the device's mode:
+	echo $mode_msb > /sys/bus/usb/.../mode_msb
+	echo $mode_lsb > /sys/bus/usb/.../mode_lsb
+3.	Set the textmode:
+	echo $textmode > /sys/bus/usb/.../textmode
+4.	Set the text (for example):
+	echo "123ABC" > /sys/bus/usb/.../text (ascii)
+	echo "A1B2" > /sys/bus/usb/.../text (ascii)
+	echo -ne "\x01\x02\x03" > /sys/bus/usb/.../text (hex)
+5.	Set the decimal places.
+	The device has either 6 or 8 decimal points.
+	To set the nth decimal place, calculate 10 ** n
+	and echo it into /sys/bus/usb/.../decimals.
+	To set multiple decimal places, sum up each power.
+	For example, to set the 0th and 3rd decimal place
+	echo 1001 > /sys/bus/usb/.../decimals
+
+

+ 4 - 4
Documentation/usb/power-management.txt

@@ -350,12 +350,12 @@ without holding the mutex.
 
 There also are a couple of utility routines drivers can use:
 
-	usb_autopm_enable() sets pm_usage_cnt to 1 and then calls
-	usb_autopm_set_interface(), which will attempt an autoresume.
-
-	usb_autopm_disable() sets pm_usage_cnt to 0 and then calls
+	usb_autopm_enable() sets pm_usage_cnt to 0 and then calls
 	usb_autopm_set_interface(), which will attempt an autosuspend.
 
+	usb_autopm_disable() sets pm_usage_cnt to 1 and then calls
+	usb_autopm_set_interface(), which will attempt an autoresume.
+
 The conventional usage pattern is that a driver calls
 usb_autopm_get_interface() in its open routine and
 usb_autopm_put_interface() in its close or release routine.  But
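
The conventional open/release pattern just described, as a hedged sketch
(struct my_data and its intf field are illustrative driver-private names,
not kernel API):

#include <linux/usb.h>

struct my_data {
	struct usb_interface *intf;
};

/* Block autosuspend while the device is open. */
static int my_open(struct my_data *d)
{
	int err = usb_autopm_get_interface(d->intf);

	if (err)
		return err;
	/* device is now resumed and pm_usage_cnt is raised */
	return 0;
}

static void my_release(struct my_data *d)
{
	usb_autopm_put_interface(d->intf);	/* may allow autosuspend */
}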

+ 1 - 1
Documentation/video4linux/CARDLIST.au0828

@@ -1,5 +1,5 @@
   0 -> Unknown board                            (au0828)
-  1 -> Hauppauge HVR950Q                        (au0828)        [2040:7200,2040:7210,2040:7217,2040:721b,2040:721f,2040:7280,0fd9:0008]
+  1 -> Hauppauge HVR950Q                        (au0828)        [2040:7200,2040:7210,2040:7217,2040:721b,2040:721e,2040:721f,2040:7280,0fd9:0008]
   2 -> Hauppauge HVR850                         (au0828)        [2040:7240]
   3 -> DViCO FusionHDTV USB                     (au0828)        [0fe9:d620]
   4 -> Hauppauge HVR950Q rev xxF8               (au0828)        [2040:7201,2040:7211,2040:7281]

+ 1 - 0
Documentation/video4linux/CARDLIST.tuner

@@ -75,3 +75,4 @@ tuner=73 - Samsung TCPG 6121P30A
 tuner=75 - Philips TEA5761 FM Radio
 tuner=76 - Xceive 5000 tuner
 tuner=77 - TCL tuner MF02GIP-5N-E
+tuner=78 - Philips FMD1216MEX MK3 Hybrid Tuner

+ 8 - 1
MAINTAINERS

@@ -3937,7 +3937,7 @@ M:	jbglaw@lug-owl.de
 L:	linux-kernel@vger.kernel.org
 S:	Maintained
 
-STABLE BRANCH:
+STABLE BRANCH
 P:	Greg Kroah-Hartman
 M:	greg@kroah.com
 P:	Chris Wright
@@ -3945,6 +3945,13 @@ M:	chrisw@sous-sol.org
 L:	stable@kernel.org
 S:	Maintained
 
+STAGING SUBSYSTEM
+P:	Greg Kroah-Hartman
+M:	gregkh@suse.de
+L:	linux-kernel@vger.kernel.org
+T:	quilt kernel.org/pub/linux/kernel/people/gregkh/gregkh-2.6/
+S:	Maintained
+
 STARFIRE/DURALAN NETWORK DRIVER
 P:	Ion Badulescu
 M:	ionut@cs.columbia.edu

+ 35 - 0
arch/arm/mach-kirkwood/common.c

@@ -16,6 +16,7 @@
 #include <linux/mv643xx_eth.h>
 #include <linux/ata_platform.h>
 #include <linux/spi/orion_spi.h>
+#include <net/dsa.h>
 #include <asm/page.h>
 #include <asm/timex.h>
 #include <asm/mach/map.h>
@@ -151,6 +152,40 @@ void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data)
 }
 
 
+/*****************************************************************************
+ * Ethernet switch
+ ****************************************************************************/
+static struct resource kirkwood_switch_resources[] = {
+	{
+		.start	= 0,
+		.end	= 0,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device kirkwood_switch_device = {
+	.name		= "dsa",
+	.id		= 0,
+	.num_resources	= 0,
+	.resource	= kirkwood_switch_resources,
+};
+
+void __init kirkwood_ge00_switch_init(struct dsa_platform_data *d, int irq)
+{
+	if (irq != NO_IRQ) {
+		kirkwood_switch_resources[0].start = irq;
+		kirkwood_switch_resources[0].end = irq;
+		kirkwood_switch_device.num_resources = 1;
+	}
+
+	d->mii_bus = &kirkwood_ge00_shared.dev;
+	d->netdev = &kirkwood_ge00.dev;
+	kirkwood_switch_device.dev.platform_data = d;
+
+	platform_device_register(&kirkwood_switch_device);
+}
+
+
 /*****************************************************************************
  * SoC RTC
  ****************************************************************************/

+ 2 - 0
arch/arm/mach-kirkwood/common.h

@@ -11,6 +11,7 @@
 #ifndef __ARCH_KIRKWOOD_COMMON_H
 #define __ARCH_KIRKWOOD_COMMON_H
 
+struct dsa_platform_data;
 struct mv643xx_eth_platform_data;
 struct mv_sata_platform_data;
 
@@ -29,6 +30,7 @@ void kirkwood_pcie_id(u32 *dev, u32 *rev);
 
 void kirkwood_ehci_init(void);
 void kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data);
+void kirkwood_ge00_switch_init(struct dsa_platform_data *d, int irq);
 void kirkwood_pcie_init(void);
 void kirkwood_rtc_init(void);
 void kirkwood_sata_init(struct mv_sata_platform_data *sata_data);

+ 11 - 0
arch/arm/mach-kirkwood/rd88f6281-setup.c

@@ -19,6 +19,7 @@
 #include <linux/ata_platform.h>
 #include <linux/mv643xx_eth.h>
 #include <linux/ethtool.h>
+#include <net/dsa.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/pci.h>
@@ -74,6 +75,15 @@ static struct mv643xx_eth_platform_data rd88f6281_ge00_data = {
 	.duplex		= DUPLEX_FULL,
 };
 
+static struct dsa_platform_data rd88f6281_switch_data = {
+	.port_names[0]	= "lan1",
+	.port_names[1]	= "lan2",
+	.port_names[2]	= "lan3",
+	.port_names[3]	= "lan4",
+	.port_names[4]	= "wan",
+	.port_names[5]	= "cpu",
+};
+
 static struct mv_sata_platform_data rd88f6281_sata_data = {
 	.n_ports	= 2,
 };
@@ -87,6 +97,7 @@ static void __init rd88f6281_init(void)
 
 	kirkwood_ehci_init();
 	kirkwood_ge00_init(&rd88f6281_ge00_data);
+	kirkwood_ge00_switch_init(&rd88f6281_switch_data, NO_IRQ);
 	kirkwood_rtc_init();
 	kirkwood_sata_init(&rd88f6281_sata_data);
 	kirkwood_uart0_init();

+ 5 - 0
arch/arm/mach-mv78xx0/db78x00-bp-setup.c

@@ -13,6 +13,7 @@
 #include <linux/platform_device.h>
 #include <linux/ata_platform.h>
 #include <linux/mv643xx_eth.h>
+#include <linux/ethtool.h>
 #include <mach/mv78xx0.h>
 #include <asm/mach-types.h>
 #include <asm/mach/arch.h>
@@ -28,10 +29,14 @@ static struct mv643xx_eth_platform_data db78x00_ge01_data = {
 
 static struct mv643xx_eth_platform_data db78x00_ge10_data = {
 	.phy_addr	= MV643XX_ETH_PHY_NONE,
+	.speed		= SPEED_1000,
+	.duplex		= DUPLEX_FULL,
 };
 
 static struct mv643xx_eth_platform_data db78x00_ge11_data = {
 	.phy_addr	= MV643XX_ETH_PHY_NONE,
+	.speed		= SPEED_1000,
+	.duplex		= DUPLEX_FULL,
 };
 
 static struct mv_sata_platform_data db78x00_sata_data = {

+ 37 - 1
arch/arm/mach-orion5x/common.c

@@ -19,6 +19,7 @@
 #include <linux/mv643xx_i2c.h>
 #include <linux/ata_platform.h>
 #include <linux/spi/orion_spi.h>
+#include <net/dsa.h>
 #include <asm/page.h>
 #include <asm/setup.h>
 #include <asm/timex.h>
@@ -197,6 +198,40 @@ void __init orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data)
 }
 
 
+/*****************************************************************************
+ * Ethernet switch
+ ****************************************************************************/
+static struct resource orion5x_switch_resources[] = {
+	{
+		.start	= 0,
+		.end	= 0,
+		.flags	= IORESOURCE_IRQ,
+	},
+};
+
+static struct platform_device orion5x_switch_device = {
+	.name		= "dsa",
+	.id		= 0,
+	.num_resources	= 0,
+	.resource	= orion5x_switch_resources,
+};
+
+void __init orion5x_eth_switch_init(struct dsa_platform_data *d, int irq)
+{
+	if (irq != NO_IRQ) {
+		orion5x_switch_resources[0].start = irq;
+		orion5x_switch_resources[0].end = irq;
+		orion5x_switch_device.num_resources = 1;
+	}
+
+	d->mii_bus = &orion5x_eth_shared.dev;
+	d->netdev = &orion5x_eth.dev;
+	orion5x_switch_device.dev.platform_data = d;
+
+	platform_device_register(&orion5x_switch_device);
+}
+
+
 /*****************************************************************************
  * I2C
  ****************************************************************************/
@@ -275,7 +310,8 @@ void __init orion5x_sata_init(struct mv_sata_platform_data *sata_data)
  * SPI
  ****************************************************************************/
 static struct orion_spi_info orion5x_spi_plat_data = {
-	.tclk		= 0,
+	.tclk			= 0,
+	.enable_clock_fix	= 1,
 };
 
 static struct resource orion5x_spi_resources[] = {

+ 2 - 0
arch/arm/mach-orion5x/common.h

@@ -1,6 +1,7 @@
 #ifndef __ARCH_ORION5X_COMMON_H
 #define __ARCH_ORION5X_COMMON_H
 
+struct dsa_platform_data;
 struct mv643xx_eth_platform_data;
 struct mv_sata_platform_data;
 
@@ -29,6 +30,7 @@ void orion5x_setup_pcie_wa_win(u32 base, u32 size);
 void orion5x_ehci0_init(void);
 void orion5x_ehci1_init(void);
 void orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data);
+void orion5x_eth_switch_init(struct dsa_platform_data *d, int irq);
 void orion5x_i2c_init(void);
 void orion5x_sata_init(struct mv_sata_platform_data *sata_data);
 void orion5x_spi_init(void);

+ 11 - 0
arch/arm/mach-orion5x/rd88f5181l-fxo-setup.c

@@ -16,6 +16,7 @@
 #include <linux/mtd/physmap.h>
 #include <linux/mv643xx_eth.h>
 #include <linux/ethtool.h>
+#include <net/dsa.h>
 #include <asm/mach-types.h>
 #include <asm/gpio.h>
 #include <asm/leds.h>
@@ -93,6 +94,15 @@ static struct mv643xx_eth_platform_data rd88f5181l_fxo_eth_data = {
 	.duplex		= DUPLEX_FULL,
 };
 
+static struct dsa_platform_data rd88f5181l_fxo_switch_data = {
+	.port_names[0]	= "lan2",
+	.port_names[1]	= "lan1",
+	.port_names[2]	= "wan",
+	.port_names[3]	= "cpu",
+	.port_names[5]	= "lan4",
+	.port_names[7]	= "lan3",
+};
+
 static void __init rd88f5181l_fxo_init(void)
 {
 	/*
@@ -107,6 +117,7 @@ static void __init rd88f5181l_fxo_init(void)
 	 */
 	orion5x_ehci0_init();
 	orion5x_eth_init(&rd88f5181l_fxo_eth_data);
+	orion5x_eth_switch_init(&rd88f5181l_fxo_switch_data, NO_IRQ);
 	orion5x_uart0_init();
 
 	orion5x_setup_dev_boot_win(RD88F5181L_FXO_NOR_BOOT_BASE,

+ 11 - 0
arch/arm/mach-orion5x/rd88f5181l-ge-setup.c

@@ -17,6 +17,7 @@
 #include <linux/mv643xx_eth.h>
 #include <linux/ethtool.h>
 #include <linux/i2c.h>
+#include <net/dsa.h>
 #include <asm/mach-types.h>
 #include <asm/gpio.h>
 #include <asm/leds.h>
@@ -94,6 +95,15 @@ static struct mv643xx_eth_platform_data rd88f5181l_ge_eth_data = {
 	.duplex		= DUPLEX_FULL,
 };
 
+static struct dsa_platform_data rd88f5181l_ge_switch_data = {
+	.port_names[0]	= "lan2",
+	.port_names[1]	= "lan1",
+	.port_names[2]	= "wan",
+	.port_names[3]	= "cpu",
+	.port_names[5]	= "lan4",
+	.port_names[7]	= "lan3",
+};
+
 static struct i2c_board_info __initdata rd88f5181l_ge_i2c_rtc = {
 	I2C_BOARD_INFO("ds1338", 0x68),
 };
@@ -112,6 +122,7 @@ static void __init rd88f5181l_ge_init(void)
 	 */
 	orion5x_ehci0_init();
 	orion5x_eth_init(&rd88f5181l_ge_eth_data);
+	orion5x_eth_switch_init(&rd88f5181l_ge_switch_data, gpio_to_irq(8));
 	orion5x_i2c_init();
 	orion5x_uart0_init();
 

+ 11 - 0
arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c

@@ -19,6 +19,7 @@
 #include <linux/spi/orion_spi.h>
 #include <linux/spi/flash.h>
 #include <linux/ethtool.h>
+#include <net/dsa.h>
 #include <asm/mach-types.h>
 #include <asm/gpio.h>
 #include <asm/leds.h>
@@ -34,6 +35,15 @@ static struct mv643xx_eth_platform_data rd88f6183ap_ge_eth_data = {
 	.duplex		= DUPLEX_FULL,
 };
 
+static struct dsa_platform_data rd88f6183ap_ge_switch_data = {
+	.port_names[0]	= "lan1",
+	.port_names[1]	= "lan2",
+	.port_names[2]	= "lan3",
+	.port_names[3]	= "lan4",
+	.port_names[4]	= "wan",
+	.port_names[5]	= "cpu",
+};
+
 static struct mtd_partition rd88f6183ap_ge_partitions[] = {
 	{
 		.name	= "kernel",
@@ -79,6 +89,7 @@ static void __init rd88f6183ap_ge_init(void)
 	 */
 	orion5x_ehci0_init();
 	orion5x_eth_init(&rd88f6183ap_ge_eth_data);
+	orion5x_eth_switch_init(&rd88f6183ap_ge_switch_data, gpio_to_irq(3));
 	spi_register_board_info(rd88f6183ap_ge_spi_slave_info,
 				ARRAY_SIZE(rd88f6183ap_ge_spi_slave_info));
 	orion5x_spi_init();

+ 11 - 0
arch/arm/mach-orion5x/wrt350n-v2-setup.c

@@ -15,6 +15,7 @@
 #include <linux/mtd/physmap.h>
 #include <linux/mv643xx_eth.h>
 #include <linux/ethtool.h>
+#include <net/dsa.h>
 #include <asm/mach-types.h>
 #include <asm/gpio.h>
 #include <asm/mach/arch.h>
@@ -105,6 +106,15 @@ static struct mv643xx_eth_platform_data wrt350n_v2_eth_data = {
 	.duplex		= DUPLEX_FULL,
 };
 
+static struct dsa_platform_data wrt350n_v2_switch_data = {
+	.port_names[0]	= "lan2",
+	.port_names[1]	= "lan1",
+	.port_names[2]	= "wan",
+	.port_names[3]	= "cpu",
+	.port_names[5]	= "lan3",
+	.port_names[7]	= "lan4",
+};
+
 static void __init wrt350n_v2_init(void)
 {
 	/*
@@ -119,6 +129,7 @@ static void __init wrt350n_v2_init(void)
 	 */
 	orion5x_ehci0_init();
 	orion5x_eth_init(&wrt350n_v2_eth_data);
+	orion5x_eth_switch_init(&wrt350n_v2_switch_data, NO_IRQ);
 	orion5x_uart0_init();
 
 	orion5x_setup_dev_boot_win(WRT350N_V2_NOR_BOOT_BASE,

+ 2 - 0
arch/cris/Makefile

@@ -10,6 +10,8 @@
 # License.  See the file "COPYING" in the main directory of this archive
 # for more details.
 
+KBUILD_DEFCONFIG := etrax-100lx_v2_defconfig
+
 arch-y := v10
 arch-$(CONFIG_ETRAX_ARCH_V10) := v10
 arch-$(CONFIG_ETRAX_ARCH_V32) := v32

+ 1 - 1
arch/cris/arch-v10/boot/compressed/Makefile

@@ -4,7 +4,7 @@
 
 asflags-y += $(LINUXINCLUDE)
 ccflags-y += -O2 $(LINUXINCLUDE)
-ldflags-y += -T $(srctree)/$(obj)/decompress.ld
+ldflags-y += -T $(srctree)/$(src)/decompress.lds
 OBJECTS = $(obj)/head.o $(obj)/misc.o
 OBJCOPYFLAGS = -O binary --remove-section=.bss
 

+ 0 - 0
arch/cris/arch-v10/boot/compressed/decompress.ld → arch/cris/arch-v10/boot/compressed/decompress.lds


+ 1 - 1
arch/cris/arch-v10/boot/rescue/Makefile

@@ -4,7 +4,7 @@
 
 ccflags-y += -O2 $(LINUXINCLUDE)
 asflags-y += $(LINUXINCLUDE)
-ldflags-y += -T $(srctree)/$(obj)/rescue.ld
+ldflags-y += -T $(srctree)/$(src)/rescue.lds
 OBJCOPYFLAGS = -O binary --remove-section=.bss
 obj-$(CONFIG_ETRAX_AXISFLASHMAP) = head.o
 OBJECT := $(obj)/head.o

+ 0 - 0
arch/cris/arch-v10/boot/rescue/rescue.ld → arch/cris/arch-v10/boot/rescue/rescue.lds


+ 1 - 1
arch/cris/arch-v32/boot/compressed/Makefile

@@ -4,7 +4,7 @@
 
 asflags-y += -I $(srctree)/include/asm/mach/ -I $(srctree)/include/asm/arch
 ccflags-y += -O2 -I $(srctree)/include/asm/mach/ -I $(srctree)/include/asm/arch
-ldflags-y += -T $(srctree)/$(obj)/decompress.ld
+ldflags-y += -T $(srctree)/$(src)/decompress.lds
 OBJECTS = $(obj)/head.o $(obj)/misc.o
 OBJCOPYFLAGS = -O binary --remove-section=.bss
 

+ 0 - 0
arch/cris/arch-v32/boot/compressed/decompress.ld → arch/cris/arch-v32/boot/compressed/decompress.lds


+ 1 - 1
arch/cris/arch-v32/boot/rescue/Makefile

@@ -7,7 +7,7 @@ ccflags-y += -O2 -I $(srctree)/include/asm/arch/mach/ \
 		-I $(srctree)/include/asm/arch
 asflags-y += -I $(srctree)/include/asm/arch/mach/ -I $(srctree)/include/asm/arch
 LD = gcc-cris -mlinux -march=v32 -nostdlib
-ldflags-y += -T $(srctree)/$(obj)/rescue.ld
+ldflags-y += -T $(srctree)/$(src)/rescue.lds
 LDPOSTFLAGS = -lgcc
 OBJCOPYFLAGS = -O binary --remove-section=.bss
 obj-$(CONFIG_ETRAX_AXISFLASHMAP) = head.o

+ 0 - 0
arch/cris/arch-v32/boot/rescue/rescue.ld → arch/cris/arch-v32/boot/rescue/rescue.lds


+ 0 - 0
arch/cris/artpec_3_defconfig → arch/cris/configs/artpec_3_defconfig


+ 0 - 0
arch/cris/arch-v10/defconfig → arch/cris/configs/etrax-100lx_defconfig


+ 0 - 0
arch/cris/defconfig → arch/cris/configs/etrax-100lx_v2_defconfig


+ 0 - 0
arch/cris/etraxfs_defconfig → arch/cris/configs/etraxfs_defconfig


+ 1 - 0
arch/x86/mm/highmem_32.c

@@ -137,6 +137,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
 
 	return (void*) vaddr;
 }
+EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */
 
 struct page *kmap_atomic_to_page(void *ptr)
 {

+ 30 - 7
block/blk-core.c

@@ -257,7 +257,6 @@ void __generic_unplug_device(struct request_queue *q)
 
 	q->request_fn(q);
 }
-EXPORT_SYMBOL(__generic_unplug_device);
 
 /**
  * generic_unplug_device - fire a request queue
@@ -325,6 +324,9 @@ EXPORT_SYMBOL(blk_unplug);
 
 static void blk_invoke_request_fn(struct request_queue *q)
 {
+	if (unlikely(blk_queue_stopped(q)))
+		return;
+
 	/*
 	 * one level of recursion is ok and is much faster than kicking
 	 * the unplug handling
@@ -399,8 +401,13 @@ void blk_sync_queue(struct request_queue *q)
 EXPORT_SYMBOL(blk_sync_queue);
 
 /**
- * blk_run_queue - run a single device queue
+ * __blk_run_queue - run a single device queue
  * @q:	The queue to run
+ *
+ * Description:
+ *    See @blk_run_queue. This variant must be called with the queue lock
+ *    held and interrupts disabled.
+ *
  */
 void __blk_run_queue(struct request_queue *q)
 {
@@ -418,6 +425,12 @@ EXPORT_SYMBOL(__blk_run_queue);
 /**
  * blk_run_queue - run a single device queue
  * @q: The queue to run
+ *
+ * Description:
+ *    Invoke request handling on this queue, if it has pending work to do.
+ *    May be used to restart queueing when a request has completed. Also
+ *    see @blk_start_queueing.
+ *
  */
 void blk_run_queue(struct request_queue *q)
 {
@@ -501,6 +514,7 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 	init_timer(&q->unplug_timer);
 	setup_timer(&q->timeout, blk_rq_timed_out_timer, (unsigned long) q);
 	INIT_LIST_HEAD(&q->timeout_list);
+	INIT_WORK(&q->unplug_work, blk_unplug_work);
 
 	kobject_init(&q->kobj, &blk_queue_ktype);
 
@@ -884,7 +898,8 @@ EXPORT_SYMBOL(blk_get_request);
  *
  * This is basically a helper to remove the need to know whether a queue
  * is plugged or not if someone just wants to initiate dispatch of requests
- * for this queue.
+ * for this queue. Should be used to start queueing on a device outside
+ * of ->request_fn() context. Also see @blk_run_queue.
  *
  * The queue lock must be held with interrupts disabled.
  */
@@ -1003,8 +1018,9 @@ static void part_round_stats_single(int cpu, struct hd_struct *part,
 }
 
 /**
- * part_round_stats()	- Round off the performance stats on a struct
- * disk_stats.
+ * part_round_stats() - Round off the performance stats on a struct disk_stats.
+ * @cpu: cpu number for stats access
+ * @part: target partition
  *
  * The average IO queue length and utilisation statistics are maintained
  * by observing the current state of the queue length and the amount of
@@ -1075,8 +1091,15 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 	/*
 	 * inherit FAILFAST from bio (for read-ahead, and explicit FAILFAST)
 	 */
-	if (bio_rw_ahead(bio) || bio_failfast(bio))
-		req->cmd_flags |= REQ_FAILFAST;
+	if (bio_rw_ahead(bio))
+		req->cmd_flags |= (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
+				   REQ_FAILFAST_DRIVER);
+	if (bio_failfast_dev(bio))
+		req->cmd_flags |= REQ_FAILFAST_DEV;
+	if (bio_failfast_transport(bio))
+		req->cmd_flags |= REQ_FAILFAST_TRANSPORT;
+	if (bio_failfast_driver(bio))
+		req->cmd_flags |= REQ_FAILFAST_DRIVER;
 
 	/*
 	 * REQ_BARRIER implies no merging, but lets make it explicit
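
The locking split documented above, as a hedged driver-side sketch
(my_kick_queue is an illustrative name): blk_run_queue() takes the queue
lock itself, while the __ variant assumes the caller already holds it with
interrupts disabled.

#include <linux/blkdev.h>

/* Illustrative fragment: restart dispatch once the driver frees an
 * internal resource. */
static void my_kick_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__blk_run_queue(q);	/* queue lock held, irqs off */
	spin_unlock_irqrestore(q->queue_lock, flags);
}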

+ 18 - 2
block/blk-merge.c

@@ -77,12 +77,20 @@ void blk_recalc_rq_segments(struct request *rq)
 			continue;
 		}
 new_segment:
+		if (nr_phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
+			rq->bio->bi_seg_front_size = seg_size;
+
 		nr_phys_segs++;
 		bvprv = bv;
 		seg_size = bv->bv_len;
 		highprv = high;
 	}
 
+	if (nr_phys_segs == 1 && seg_size > rq->bio->bi_seg_front_size)
+		rq->bio->bi_seg_front_size = seg_size;
+	if (seg_size > rq->biotail->bi_seg_back_size)
+		rq->biotail->bi_seg_back_size = seg_size;
+
 	rq->nr_phys_segments = nr_phys_segs;
 }
 
@@ -106,7 +114,8 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
 	if (!test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
 		return 0;
 
-	if (bio->bi_size + nxt->bi_size > q->max_segment_size)
+	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
+	    q->max_segment_size)
 		return 0;
 
 	if (!bio_has_data(bio))
@@ -309,6 +318,8 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 				struct request *next)
 {
 	int total_phys_segments;
+	unsigned int seg_size =
+		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;
 
 	/*
 	 * First check if the either of the requests are re-queued
@@ -324,8 +335,13 @@ static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
 		return 0;
 
 	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
-	if (blk_phys_contig_segment(q, req->biotail, next->bio))
+	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
+		if (req->nr_phys_segments == 1)
+			req->bio->bi_seg_front_size = seg_size;
+		if (next->nr_phys_segments == 1)
+			next->biotail->bi_seg_back_size = seg_size;
 		total_phys_segments--;
+	}
 
 	if (total_phys_segments > q->max_phys_segments)
 		return 0;

+ 0 - 2
block/blk-settings.c

@@ -141,8 +141,6 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 	if (q->unplug_delay == 0)
 		q->unplug_delay = 1;
 
-	INIT_WORK(&q->unplug_work, blk_unplug_work);
-
 	q->unplug_timer.function = blk_unplug_timeout;
 	q->unplug_timer.data = (unsigned long)q;
 

+ 1 - 0
block/blk.h

@@ -20,6 +20,7 @@ void blk_unplug_timeout(unsigned long data);
 void blk_rq_timed_out_timer(unsigned long data);
 void blk_delete_timer(struct request *);
 void blk_add_timer(struct request *);
+void __generic_unplug_device(struct request_queue *);
 
 /*
  * Internal atomic flags for request handling

+ 5 - 11
block/elevator.c

@@ -612,7 +612,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 		 *   processing.
 		 */
 		blk_remove_plug(q);
-		q->request_fn(q);
+		blk_start_queueing(q);
 		break;
 
 	case ELEVATOR_INSERT_SORT:
@@ -950,7 +950,7 @@ void elv_completed_request(struct request_queue *q, struct request *rq)
 		    blk_ordered_cur_seq(q) == QUEUE_ORDSEQ_DRAIN &&
 		    blk_ordered_req_seq(first_rq) > QUEUE_ORDSEQ_DRAIN) {
 			blk_ordered_complete_seq(q, QUEUE_ORDSEQ_DRAIN, 0);
-			q->request_fn(q);
+			blk_start_queueing(q);
 		}
 	}
 }
@@ -1109,8 +1109,7 @@ static int elevator_switch(struct request_queue *q, struct elevator_type *new_e)
 	elv_drain_elevator(q);
 
 	while (q->rq.elvpriv) {
-		blk_remove_plug(q);
-		q->request_fn(q);
+		blk_start_queueing(q);
 		spin_unlock_irq(q->queue_lock);
 		msleep(10);
 		spin_lock_irq(q->queue_lock);
@@ -1166,15 +1165,10 @@ ssize_t elv_iosched_store(struct request_queue *q, const char *name,
 			  size_t count)
 {
 	char elevator_name[ELV_NAME_MAX];
-	size_t len;
 	struct elevator_type *e;
 
-	elevator_name[sizeof(elevator_name) - 1] = '\0';
-	strncpy(elevator_name, name, sizeof(elevator_name) - 1);
-	len = strlen(elevator_name);
-
-	if (len && elevator_name[len - 1] == '\n')
-		elevator_name[len - 1] = '\0';
+	strlcpy(elevator_name, name, sizeof(elevator_name));
+	strstrip(elevator_name);
 
 	e = elevator_get(elevator_name);
 	if (!e) {
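
A side note on the elv_iosched_store() cleanup above: the kernel's strstrip() trims trailing whitespace in place but only returns a pointer past any leading whitespace, and since the return value is ignored here, the net effect is exactly the trailing-newline chomp the old four lines did by hand. A small user-space demo of the idiom (snprintf() and a local helper stand in for the kernel's strlcpy() and strstrip()):

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* User-space stand-in for the trailing-whitespace half of the
 * kernel's strstrip(), which is the half the elevator code relies on. */
static void rstrip(char *s)
{
	size_t len = strlen(s);

	while (len && isspace((unsigned char)s[len - 1]))
		s[--len] = '\0';
}

int main(void)
{
	char elevator_name[16];

	/* like strlcpy(): always NUL-terminates, truncates safely */
	snprintf(elevator_name, sizeof(elevator_name), "%s", "cfq\n");
	rstrip(elevator_name);
	printf("'%s'\n", elevator_name);	/* prints 'cfq' */
	return 0;
}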

+ 1 - 2
block/genhd.c

@@ -358,7 +358,6 @@ static int blk_mangle_minor(int minor)
 /**
  * blk_alloc_devt - allocate a dev_t for a partition
  * @part: partition to allocate dev_t for
- * @gfp_mask: memory allocation flag
  * @devt: out parameter for resulting dev_t
  *
  * Allocate a dev_t for block device.
@@ -535,7 +534,7 @@ void unlink_gendisk(struct gendisk *disk)
 /**
  * get_gendisk - get partitioning information for a given device
  * @devt: device to get partitioning information for
- * @part: returned partition index
+ * @partno: returned partition index
  *
  * This function gets the structure containing partitioning
  * information for the given device @devt.

+ 2 - 0
drivers/Kconfig

@@ -101,4 +101,6 @@ source "drivers/auxdisplay/Kconfig"
 source "drivers/uio/Kconfig"
 
 source "drivers/xen/Kconfig"
+
+source "drivers/staging/Kconfig"
 endmenu

+ 1 - 0
drivers/Makefile

@@ -99,3 +99,4 @@ obj-$(CONFIG_OF)		+= of/
 obj-$(CONFIG_SSB)		+= ssb/
 obj-$(CONFIG_VIRTIO)		+= virtio/
 obj-$(CONFIG_REGULATOR)		+= regulator/
+obj-$(CONFIG_STAGING)		+= staging/

+ 0 - 3
drivers/block/ub.c

@@ -349,8 +349,6 @@ struct ub_dev {
 
 	struct work_struct reset_work;
 	wait_queue_head_t reset_wait;
-
-	int sg_stat[6];
 };
 
 /*
@@ -685,7 +683,6 @@ static int ub_request_fn_1(struct ub_lun *lun, struct request *rq)
 		goto drop;
 	}
 	urq->nsg = n_elem;
-	sc->sg_stat[n_elem < 5 ? n_elem : 5]++;
 
 	if (blk_pc_request(rq)) {
 		ub_cmd_build_packet(sc, lun, cmd, urq);

+ 2 - 1
drivers/gpu/drm/Kconfig

@@ -6,7 +6,7 @@
 #
 menuconfig DRM
 	tristate "Direct Rendering Manager (XFree86 4.1.0 and higher DRI support)"
-	depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG
+	depends on (AGP || AGP=n) && PCI && !EMULATED_CMPXCHG && SHMEM
 	help
 	  Kernel-level support for the Direct Rendering Infrastructure (DRI)
 	  introduced in XFree86 4.0. If you say Y here, you need to select
@@ -87,6 +87,7 @@ config DRM_MGA
 config DRM_SIS
 	tristate "SiS video cards"
 	depends on DRM && AGP
+	depends on FB_SIS || FB_SIS=n
 	help
 	  Choose this option if you have a SiS 630 or compatible video
           chipset. If M is selected the module will be called sis. AGP

+ 3 - 2
drivers/gpu/drm/Makefile

@@ -4,8 +4,9 @@
 
 ccflags-y := -Iinclude/drm
 
-drm-y       :=	drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \
-		drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \
+drm-y       :=	drm_auth.o drm_bufs.o drm_cache.o \
+		drm_context.o drm_dma.o drm_drawable.o \
+		drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \
 		drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \
 		drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \
 		drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o

+ 51 - 1
drivers/gpu/drm/drm_agpsupport.c

@@ -33,6 +33,7 @@
 
 #include "drmP.h"
 #include <linux/module.h>
+#include <asm/agp.h>
 
 #if __OS_HAS_AGP
 
@@ -452,4 +453,53 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle)
 	return agp_unbind_memory(handle);
 }
 
-#endif				/* __OS_HAS_AGP */
+/**
+ * Binds a collection of pages into AGP memory at the given offset, returning
+ * the AGP memory structure containing them.
+ *
+ * No reference is held on the pages during this time -- it is up to the
+ * caller to handle that.
+ */
+DRM_AGP_MEM *
+drm_agp_bind_pages(struct drm_device *dev,
+		   struct page **pages,
+		   unsigned long num_pages,
+		   uint32_t gtt_offset,
+		   u32 type)
+{
+	DRM_AGP_MEM *mem;
+	int ret, i;
+
+	DRM_DEBUG("\n");
+
+	mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages,
+				      type);
+	if (mem == NULL) {
+		DRM_ERROR("Failed to allocate memory for %ld pages\n",
+			  num_pages);
+		return NULL;
+	}
+
+	for (i = 0; i < num_pages; i++)
+		mem->memory[i] = phys_to_gart(page_to_phys(pages[i]));
+	mem->page_count = num_pages;
+
+	mem->is_flushed = true;
+	ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE);
+	if (ret != 0) {
+		DRM_ERROR("Failed to bind AGP memory: %d\n", ret);
+		agp_free_memory(mem);
+		return NULL;
+	}
+
+	return mem;
+}
+EXPORT_SYMBOL(drm_agp_bind_pages);
+
+void drm_agp_chipset_flush(struct drm_device *dev)
+{
+	agp_flush_chipset(dev->agp->bridge);
+}
+EXPORT_SYMBOL(drm_agp_chipset_flush);
+
+#endif /* __OS_HAS_AGP */
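
drm_agp_bind_pages() is what lets a GEM-style memory manager wire an arbitrary page array into the GTT without the legacy buffer-map machinery; teardown goes through the drm_unbind_agp()/drm_free_agp() wrappers exported further down in this series. A hedged caller sketch, where obj_pages, obj_npages and gtt_offset are assumed to come from the caller's own object bookkeeping and AGP_USER_MEMORY is the generic user-page type from agp_backend.h:

/* Sketch: bind an object's pages into the GTT, and the inverse.
 * Locking and the caller's object state are elided. */
static DRM_AGP_MEM *demo_bind(struct drm_device *dev, struct page **obj_pages,
			      unsigned long obj_npages, uint32_t gtt_offset)
{
	return drm_agp_bind_pages(dev, obj_pages, obj_npages, gtt_offset,
				  AGP_USER_MEMORY);	/* NULL on failure */
}

static void demo_unbind(DRM_AGP_MEM *mem, int obj_npages)
{
	drm_unbind_agp(mem);		/* agp_unbind_memory() wrapper */
	drm_free_agp(mem, obj_npages);	/* agp_free_memory() wrapper */
}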

+ 69 - 0
drivers/gpu/drm/drm_cache.c

@@ -0,0 +1,69 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com>
+ */
+
+#include "drmP.h"
+
+#if defined(CONFIG_X86)
+static void
+drm_clflush_page(struct page *page)
+{
+	uint8_t *page_virtual;
+	unsigned int i;
+
+	if (unlikely(page == NULL))
+		return;
+
+	page_virtual = kmap_atomic(page, KM_USER0);
+	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
+		clflush(page_virtual + i);
+	kunmap_atomic(page_virtual, KM_USER0);
+}
+#endif
+
+void
+drm_clflush_pages(struct page *pages[], unsigned long num_pages)
+{
+
+#if defined(CONFIG_X86)
+	if (cpu_has_clflush) {
+		unsigned long i;
+
+		mb();
+		for (i = 0; i < num_pages; ++i)
+			drm_clflush_page(*pages++);
+		mb();
+
+		return;
+	}
+
+	wbinvd();
+#endif
+}
+EXPORT_SYMBOL(drm_clflush_pages);
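
drm_clflush_pages() picks between two x86 strategies: per-cache-line CLFLUSH when the CPU supports it, bracketed by mb() so the flushes are ordered against surrounding accesses, or a whole-cache wbinvd() as the fallback. The inner loop strides by boot_cpu_data.x86_clflush_size, typically 64 bytes, so one 4 KiB page costs 64 flush instructions. The stride pattern, with the x86 specifics reduced to parameters:

/* Sketch of the per-cache-line stride in drm_clflush_page() above;
 * flush_line stands in for the clflush() wrapper and clflush_size
 * for boot_cpu_data.x86_clflush_size. */
static void flush_buffer(char *buf, unsigned long len,
			 unsigned int clflush_size,
			 void (*flush_line)(void *))
{
	unsigned long i;

	for (i = 0; i < len; i += clflush_size)
		flush_line(buf + i);	/* one cache line per call */
}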

+ 6 - 0
drivers/gpu/drm/drm_drv.c

@@ -116,7 +116,13 @@ static struct drm_ioctl_desc drm_ioctls[] = {
 
 	DRM_IOCTL_DEF(DRM_IOCTL_WAIT_VBLANK, drm_wait_vblank, 0),
 
+	DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0),
+
 	DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+
+	DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0),
+	DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH),
 };
 
 #define DRM_CORE_IOCTL_COUNT	ARRAY_SIZE( drm_ioctls )

+ 7 - 1
drivers/gpu/drm/drm_fops.c

@@ -246,7 +246,7 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 	memset(priv, 0, sizeof(*priv));
 	filp->private_data = priv;
 	priv->filp = filp;
-	priv->uid = current->euid;
+	priv->uid = current_euid();
 	priv->pid = task_pid_nr(current);
 	priv->minor = idr_find(&drm_minors_idr, minor_id);
 	priv->ioctl_count = 0;
@@ -256,6 +256,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp,
 
 	INIT_LIST_HEAD(&priv->lhead);
 
+	if (dev->driver->driver_features & DRIVER_GEM)
+		drm_gem_open(dev, priv);
+
 	if (dev->driver->open) {
 		ret = dev->driver->open(dev, priv);
 		if (ret < 0)
@@ -400,6 +403,9 @@ int drm_release(struct inode *inode, struct file *filp)
 		dev->driver->reclaim_buffers(dev, file_priv);
 	}
 
+	if (dev->driver->driver_features & DRIVER_GEM)
+		drm_gem_release(dev, file_priv);
+
 	drm_fasync(-1, filp, 0);
 
 	mutex_lock(&dev->ctxlist_mutex);

+ 421 - 0
drivers/gpu/drm/drm_gem.c

@@ -0,0 +1,421 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/module.h>
+#include <linux/mman.h>
+#include <linux/pagemap.h>
+#include "drmP.h"
+
+/** @file drm_gem.c
+ *
+ * This file provides some of the base ioctls and library routines for
+ * the graphics memory manager implemented by each device driver.
+ *
+ * Because various devices have different requirements in terms of
+ * synchronization and migration strategies, implementing that is left up to
+ * the driver, and all that the general API provides should be generic --
+ * allocating objects, reading/writing data with the CPU, freeing objects.
+ * Even there, platform-dependent optimizations for reading/writing data with
+ * the CPU mean we'll likely hook those out to driver-specific calls.  However,
+ * the DRI2 implementation wants to have at least allocate/mmap be generic.
+ *
+ * The goal was to have swap-backed object allocation managed through
+ * struct file.  However, file descriptors as handles to a struct file have
+ * two major failings:
+ * - Process limits prevent more than 1024 or so being used at a time by
+ *   default.
+ * - Inability to allocate high fds will aggravate the X Server's select()
+ *   handling, and likely that of many GL client applications as well.
+ *
+ * This led to a plan of using our own integer IDs (called handles, following
+ * DRM terminology) to mimic fds, and implement the fd syscalls we need as
+ * ioctls.  The objects themselves will still include the struct file so
+ * that we can transition to fds if the required kernel infrastructure shows
+ * up at a later date, and as our interface with shmfs for memory allocation.
+ */
+
+/**
+ * Initialize the GEM device fields
+ */
+
+int
+drm_gem_init(struct drm_device *dev)
+{
+	spin_lock_init(&dev->object_name_lock);
+	idr_init(&dev->object_name_idr);
+	atomic_set(&dev->object_count, 0);
+	atomic_set(&dev->object_memory, 0);
+	atomic_set(&dev->pin_count, 0);
+	atomic_set(&dev->pin_memory, 0);
+	atomic_set(&dev->gtt_count, 0);
+	atomic_set(&dev->gtt_memory, 0);
+	return 0;
+}
+
+/**
+ * Allocate a GEM object of the specified size with shmfs backing store
+ */
+struct drm_gem_object *
+drm_gem_object_alloc(struct drm_device *dev, size_t size)
+{
+	struct drm_gem_object *obj;
+
+	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+
+	obj = kcalloc(1, sizeof(*obj), GFP_KERNEL);
+
+	obj->dev = dev;
+	obj->filp = shmem_file_setup("drm mm object", size, 0);
+	if (IS_ERR(obj->filp)) {
+		kfree(obj);
+		return NULL;
+	}
+
+	kref_init(&obj->refcount);
+	kref_init(&obj->handlecount);
+	obj->size = size;
+	if (dev->driver->gem_init_object != NULL &&
+	    dev->driver->gem_init_object(obj) != 0) {
+		fput(obj->filp);
+		kfree(obj);
+		return NULL;
+	}
+	atomic_inc(&dev->object_count);
+	atomic_add(obj->size, &dev->object_memory);
+	return obj;
+}
+EXPORT_SYMBOL(drm_gem_object_alloc);
+
+/**
+ * Removes the mapping from handle to filp for this object.
+ */
+static int
+drm_gem_handle_delete(struct drm_file *filp, int handle)
+{
+	struct drm_device *dev;
+	struct drm_gem_object *obj;
+
+	/* This is gross. The idr system doesn't let us try a delete and
+	 * return an error code.  It just spews if you fail at deleting.
+	 * So, we have to grab a lock around finding the object and then
+	 * doing the delete on it and dropping the refcount, or the user
+	 * could race us to double-decrement the refcount and cause a
+	 * use-after-free later.  Given the frequency of our handle lookups,
+	 * we may want to use ida for number allocation and a hash table
+	 * for the pointers, anyway.
+	 */
+	spin_lock(&filp->table_lock);
+
+	/* Check if we currently have a reference on the object */
+	obj = idr_find(&filp->object_idr, handle);
+	if (obj == NULL) {
+		spin_unlock(&filp->table_lock);
+		return -EINVAL;
+	}
+	dev = obj->dev;
+
+	/* Release reference and decrement refcount. */
+	idr_remove(&filp->object_idr, handle);
+	spin_unlock(&filp->table_lock);
+
+	mutex_lock(&dev->struct_mutex);
+	drm_gem_object_handle_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+/**
+ * Create a handle for this object. This adds a handle reference
+ * to the object, which includes a regular reference count. Callers
+ * will likely want to dereference the object afterwards.
+ */
+int
+drm_gem_handle_create(struct drm_file *file_priv,
+		       struct drm_gem_object *obj,
+		       int *handlep)
+{
+	int	ret;
+
+	/*
+	 * Get the user-visible handle using idr.
+	 */
+again:
+	/* ensure there is space available to allocate a handle */
+	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
+		return -ENOMEM;
+
+	/* do the allocation under our spinlock */
+	spin_lock(&file_priv->table_lock);
+	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, handlep);
+	spin_unlock(&file_priv->table_lock);
+	if (ret == -EAGAIN)
+		goto again;
+
+	if (ret != 0)
+		return ret;
+
+	drm_gem_object_handle_reference(obj);
+	return 0;
+}
+EXPORT_SYMBOL(drm_gem_handle_create);
+
+/** Returns a reference to the object named by the handle. */
+struct drm_gem_object *
+drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
+		      int handle)
+{
+	struct drm_gem_object *obj;
+
+	spin_lock(&filp->table_lock);
+
+	/* Check if we currently have a reference on the object */
+	obj = idr_find(&filp->object_idr, handle);
+	if (obj == NULL) {
+		spin_unlock(&filp->table_lock);
+		return NULL;
+	}
+
+	drm_gem_object_reference(obj);
+
+	spin_unlock(&filp->table_lock);
+
+	return obj;
+}
+EXPORT_SYMBOL(drm_gem_object_lookup);
+
+/**
+ * Releases the handle to an mm object.
+ */
+int
+drm_gem_close_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_gem_close *args = data;
+	int ret;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	ret = drm_gem_handle_delete(file_priv, args->handle);
+
+	return ret;
+}
+
+/**
+ * Create a global name for an object, returning the name.
+ *
+ * Note that the name does not hold a reference; when the object
+ * is freed, the name goes away.
+ */
+int
+drm_gem_flink_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_gem_flink *args = data;
+	struct drm_gem_object *obj;
+	int ret;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL)
+		return -EBADF;
+
+again:
+	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0)
+		return -ENOMEM;
+
+	spin_lock(&dev->object_name_lock);
+	if (obj->name) {
+		args->name = obj->name;
+		spin_unlock(&dev->object_name_lock);
+		return 0;
+	}
+	ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
+				 &obj->name);
+	spin_unlock(&dev->object_name_lock);
+	if (ret == -EAGAIN)
+		goto again;
+
+	if (ret != 0) {
+		mutex_lock(&dev->struct_mutex);
+		drm_gem_object_unreference(obj);
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
+	/*
+	 * Leave the reference from the lookup around as the
+	 * name table now holds one
+	 */
+	args->name = (uint64_t) obj->name;
+
+	return 0;
+}
+
+/**
+ * Open an object using the global name, returning a handle and the size.
+ *
+ * This handle (of course) holds a reference to the object, so the object
+ * will not go away until the handle is deleted.
+ */
+int
+drm_gem_open_ioctl(struct drm_device *dev, void *data,
+		   struct drm_file *file_priv)
+{
+	struct drm_gem_open *args = data;
+	struct drm_gem_object *obj;
+	int ret;
+	int handle;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	spin_lock(&dev->object_name_lock);
+	obj = idr_find(&dev->object_name_idr, (int) args->name);
+	if (obj)
+		drm_gem_object_reference(obj);
+	spin_unlock(&dev->object_name_lock);
+	if (!obj)
+		return -ENOENT;
+
+	ret = drm_gem_handle_create(file_priv, obj, &handle);
+	mutex_lock(&dev->struct_mutex);
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	args->handle = handle;
+	args->size = obj->size;
+
+	return 0;
+}
+
+/**
+ * Called at device open time, sets up the structure for handling refcounting
+ * of mm objects.
+ */
+void
+drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
+{
+	idr_init(&file_private->object_idr);
+	spin_lock_init(&file_private->table_lock);
+}
+
+/**
+ * Called at device close to release the file's
+ * handle references on objects.
+ */
+static int
+drm_gem_object_release_handle(int id, void *ptr, void *data)
+{
+	struct drm_gem_object *obj = ptr;
+
+	drm_gem_object_handle_unreference(obj);
+
+	return 0;
+}
+
+/**
+ * Called at close time when the filp is going away.
+ *
+ * Releases any remaining references on objects by this filp.
+ */
+void
+drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
+{
+	mutex_lock(&dev->struct_mutex);
+	idr_for_each(&file_private->object_idr,
+		     &drm_gem_object_release_handle, NULL);
+
+	idr_destroy(&file_private->object_idr);
+	mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * Called after the last reference to the object has been lost.
+ *
+ * Frees the object.
+ */
+void
+drm_gem_object_free(struct kref *kref)
+{
+	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
+	struct drm_device *dev = obj->dev;
+
+	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	if (dev->driver->gem_free_object != NULL)
+		dev->driver->gem_free_object(obj);
+
+	fput(obj->filp);
+	atomic_dec(&dev->object_count);
+	atomic_sub(obj->size, &dev->object_memory);
+	kfree(obj);
+}
+EXPORT_SYMBOL(drm_gem_object_free);
+
+/**
+ * Called after the last handle to the object has been closed.
+ *
+ * Removes any name for the object. Note that this must be
+ * called before drm_gem_object_free or we'll be touching
+ * freed memory.
+ */
+void
+drm_gem_object_handle_free(struct kref *kref)
+{
+	struct drm_gem_object *obj = container_of(kref,
+						  struct drm_gem_object,
+						  handlecount);
+	struct drm_device *dev = obj->dev;
+
+	/* Remove any name for this object */
+	spin_lock(&dev->object_name_lock);
+	if (obj->name) {
+		idr_remove(&dev->object_name_idr, obj->name);
+		spin_unlock(&dev->object_name_lock);
+		/*
+		 * The object name held a reference to this object, drop
+		 * that now.
+		 */
+		drm_gem_object_unreference(obj);
+	} else
+		spin_unlock(&dev->object_name_lock);
+
+}
+EXPORT_SYMBOL(drm_gem_object_handle_free);
+
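
Taken together, the routines above define the object lifecycle: drm_gem_object_alloc() returns an object holding one reference, drm_gem_handle_create() adds a handle reference (which pins a regular reference underneath), and flink publishes an integer name that another process can hand to the open ioctl. A hedged driver-side sketch of the usual create-and-return-a-handle flow (the function and its arguments are illustrative; i915's create ioctl works along these lines):

/* Sketch: allocate a GEM object and hand a handle back to userspace.
 * "size" is assumed page-aligned, as drm_gem_object_alloc() requires. */
static int demo_gem_create(struct drm_device *dev, struct drm_file *file_priv,
			   size_t size, int *handle_out)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_alloc(dev, size);
	if (obj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file_priv, obj, handle_out);

	/* The handle (if created) holds its own reference, so drop the
	 * allocation reference; on failure this frees the object. */
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}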

+ 387 - 77
drivers/gpu/drm/drm_irq.c

@@ -63,7 +63,7 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
 	    p->devnum != PCI_SLOT(dev->pdev->devfn) || p->funcnum != PCI_FUNC(dev->pdev->devfn))
 		return -EINVAL;
 
-	p->irq = dev->irq;
+	p->irq = dev->pdev->irq;
 
 	DRM_DEBUG("%d:%d:%d => IRQ %d\n", p->busnum, p->devnum, p->funcnum,
 		  p->irq);
@@ -71,25 +71,137 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
 	return 0;
 }
 
+static void vblank_disable_fn(unsigned long arg)
+{
+	struct drm_device *dev = (struct drm_device *)arg;
+	unsigned long irqflags;
+	int i;
+
+	if (!dev->vblank_disable_allowed)
+		return;
+
+	for (i = 0; i < dev->num_crtcs; i++) {
+		spin_lock_irqsave(&dev->vbl_lock, irqflags);
+		if (atomic_read(&dev->vblank_refcount[i]) == 0 &&
+		    dev->vblank_enabled[i]) {
+			DRM_DEBUG("disabling vblank on crtc %d\n", i);
+			dev->last_vblank[i] =
+				dev->driver->get_vblank_counter(dev, i);
+			dev->driver->disable_vblank(dev, i);
+			dev->vblank_enabled[i] = 0;
+		}
+		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+	}
+}
+
+static void drm_vblank_cleanup(struct drm_device *dev)
+{
+	/* Bail if the driver didn't call drm_vblank_init() */
+	if (dev->num_crtcs == 0)
+		return;
+
+	del_timer(&dev->vblank_disable_timer);
+
+	vblank_disable_fn((unsigned long)dev);
+
+	drm_free(dev->vbl_queue, sizeof(*dev->vbl_queue) * dev->num_crtcs,
+		 DRM_MEM_DRIVER);
+	drm_free(dev->vbl_sigs, sizeof(*dev->vbl_sigs) * dev->num_crtcs,
+		 DRM_MEM_DRIVER);
+	drm_free(dev->_vblank_count, sizeof(*dev->_vblank_count) *
+		 dev->num_crtcs, DRM_MEM_DRIVER);
+	drm_free(dev->vblank_refcount, sizeof(*dev->vblank_refcount) *
+		 dev->num_crtcs, DRM_MEM_DRIVER);
+	drm_free(dev->vblank_enabled, sizeof(*dev->vblank_enabled) *
+		 dev->num_crtcs, DRM_MEM_DRIVER);
+	drm_free(dev->last_vblank, sizeof(*dev->last_vblank) * dev->num_crtcs,
+		 DRM_MEM_DRIVER);
+	drm_free(dev->vblank_inmodeset, sizeof(*dev->vblank_inmodeset) *
+		 dev->num_crtcs, DRM_MEM_DRIVER);
+
+	dev->num_crtcs = 0;
+}
+
+int drm_vblank_init(struct drm_device *dev, int num_crtcs)
+{
+	int i, ret = -ENOMEM;
+
+	setup_timer(&dev->vblank_disable_timer, vblank_disable_fn,
+		    (unsigned long)dev);
+	spin_lock_init(&dev->vbl_lock);
+	atomic_set(&dev->vbl_signal_pending, 0);
+	dev->num_crtcs = num_crtcs;
+
+	dev->vbl_queue = drm_alloc(sizeof(wait_queue_head_t) * num_crtcs,
+				   DRM_MEM_DRIVER);
+	if (!dev->vbl_queue)
+		goto err;
+
+	dev->vbl_sigs = drm_alloc(sizeof(struct list_head) * num_crtcs,
+				  DRM_MEM_DRIVER);
+	if (!dev->vbl_sigs)
+		goto err;
+
+	dev->_vblank_count = drm_alloc(sizeof(atomic_t) * num_crtcs,
+				      DRM_MEM_DRIVER);
+	if (!dev->_vblank_count)
+		goto err;
+
+	dev->vblank_refcount = drm_alloc(sizeof(atomic_t) * num_crtcs,
+					 DRM_MEM_DRIVER);
+	if (!dev->vblank_refcount)
+		goto err;
+
+	dev->vblank_enabled = drm_calloc(num_crtcs, sizeof(int),
+					 DRM_MEM_DRIVER);
+	if (!dev->vblank_enabled)
+		goto err;
+
+	dev->last_vblank = drm_calloc(num_crtcs, sizeof(u32), DRM_MEM_DRIVER);
+	if (!dev->last_vblank)
+		goto err;
+
+	dev->vblank_inmodeset = drm_calloc(num_crtcs, sizeof(int),
+					 DRM_MEM_DRIVER);
+	if (!dev->vblank_inmodeset)
+		goto err;
+
+	/* Zero per-crtc vblank stuff */
+	for (i = 0; i < num_crtcs; i++) {
+		init_waitqueue_head(&dev->vbl_queue[i]);
+		INIT_LIST_HEAD(&dev->vbl_sigs[i]);
+		atomic_set(&dev->_vblank_count[i], 0);
+		atomic_set(&dev->vblank_refcount[i], 0);
+	}
+
+	dev->vblank_disable_allowed = 0;
+
+	return 0;
+
+err:
+	drm_vblank_cleanup(dev);
+	return ret;
+}
+EXPORT_SYMBOL(drm_vblank_init);
+
 /**
  * Install IRQ handler.
  *
  * \param dev DRM device.
- * \param irq IRQ number.
  *
- * Initializes the IRQ related data, and setups drm_device::vbl_queue. Installs the handler, calling the driver
+ * Initializes the IRQ related data. Installs the handler, calling the driver
  * \c drm_driver_irq_preinstall() and \c drm_driver_irq_postinstall() functions
  * before and after the installation.
  */
-static int drm_irq_install(struct drm_device * dev)
+int drm_irq_install(struct drm_device *dev)
 {
-	int ret;
+	int ret = 0;
 	unsigned long sh_flags = 0;
 
 	if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
 		return -EINVAL;
 
-	if (dev->irq == 0)
+	if (dev->pdev->irq == 0)
 		return -EINVAL;
 
 	mutex_lock(&dev->struct_mutex);
@@ -107,18 +219,7 @@ static int drm_irq_install(struct drm_device * dev)
 	dev->irq_enabled = 1;
 	mutex_unlock(&dev->struct_mutex);
 
-	DRM_DEBUG("irq=%d\n", dev->irq);
-
-	if (drm_core_check_feature(dev, DRIVER_IRQ_VBL)) {
-		init_waitqueue_head(&dev->vbl_queue);
-
-		spin_lock_init(&dev->vbl_lock);
-
-		INIT_LIST_HEAD(&dev->vbl_sigs);
-		INIT_LIST_HEAD(&dev->vbl_sigs2);
-
-		dev->vbl_pending = 0;
-	}
+	DRM_DEBUG("irq=%d\n", dev->pdev->irq);
 
 	/* Before installing handler */
 	dev->driver->irq_preinstall(dev);
@@ -127,8 +228,9 @@ static int drm_irq_install(struct drm_device * dev)
 	if (drm_core_check_feature(dev, DRIVER_IRQ_SHARED))
 		sh_flags = IRQF_SHARED;
 
-	ret = request_irq(dev->irq, dev->driver->irq_handler,
+	ret = request_irq(drm_dev_to_irq(dev), dev->driver->irq_handler,
 			  sh_flags, dev->devname, dev);
+
 	if (ret < 0) {
 		mutex_lock(&dev->struct_mutex);
 		dev->irq_enabled = 0;
@@ -137,10 +239,16 @@ static int drm_irq_install(struct drm_device * dev)
 	}
 
 	/* After installing handler */
-	dev->driver->irq_postinstall(dev);
+	ret = dev->driver->irq_postinstall(dev);
+	if (ret < 0) {
+		mutex_lock(&dev->struct_mutex);
+		dev->irq_enabled = 0;
+		mutex_unlock(&dev->struct_mutex);
+	}
 
-	return 0;
+	return ret;
 }
+EXPORT_SYMBOL(drm_irq_install);
 
 /**
  * Uninstall the IRQ handler.
@@ -164,17 +272,18 @@ int drm_irq_uninstall(struct drm_device * dev)
 	if (!irq_enabled)
 		return -EINVAL;
 
-	DRM_DEBUG("irq=%d\n", dev->irq);
+	DRM_DEBUG("irq=%d\n", dev->pdev->irq);
 
 	dev->driver->irq_uninstall(dev);
 
-	free_irq(dev->irq, dev);
+	free_irq(dev->pdev->irq, dev);
+
+	drm_vblank_cleanup(dev);
 
 	dev->locked_tasklet_func = NULL;
 
 	return 0;
 }
-
 EXPORT_SYMBOL(drm_irq_uninstall);
 
 /**
@@ -201,7 +310,7 @@ int drm_control(struct drm_device *dev, void *data,
 		if (!drm_core_check_feature(dev, DRIVER_HAVE_IRQ))
 			return 0;
 		if (dev->if_version < DRM_IF_VERSION(1, 2) &&
-		    ctl->irq != dev->irq)
+		    ctl->irq != dev->pdev->irq)
 			return -EINVAL;
 		return drm_irq_install(dev);
 	case DRM_UNINST_HANDLER:
@@ -213,6 +322,174 @@ int drm_control(struct drm_device *dev, void *data,
 	}
 }
 
+/**
+ * drm_vblank_count - retrieve "cooked" vblank counter value
+ * @dev: DRM device
+ * @crtc: which counter to retrieve
+ *
+ * Fetches the "cooked" vblank count value that represents the number of
+ * vblank events since the system was booted, including lost events due to
+ * modesetting activity.
+ */
+u32 drm_vblank_count(struct drm_device *dev, int crtc)
+{
+	return atomic_read(&dev->_vblank_count[crtc]);
+}
+EXPORT_SYMBOL(drm_vblank_count);
+
+/**
+ * drm_update_vblank_count - update the master vblank counter
+ * @dev: DRM device
+ * @crtc: counter to update
+ *
+ * Call back into the driver to update the appropriate vblank counter
+ * (specified by @crtc).  Deal with wraparound, if it occurred, and
+ * update the last read value so we can deal with wraparound on the next
+ * call if necessary.
+ *
+ * Only necessary when going from off->on, to account for frames we
+ * didn't get an interrupt for.
+ *
+ * Note: caller must hold dev->vbl_lock since this reads & writes
+ * device vblank fields.
+ */
+static void drm_update_vblank_count(struct drm_device *dev, int crtc)
+{
+	u32 cur_vblank, diff;
+
+	/*
+	 * Interrupts were disabled prior to this call, so deal with counter
+	 * wrap if needed.
+	 * NOTE!  It's possible we lost a full dev->max_vblank_count events
+	 * here if the register is small or we had vblank interrupts off for
+	 * a long time.
+	 */
+	cur_vblank = dev->driver->get_vblank_counter(dev, crtc);
+	diff = cur_vblank - dev->last_vblank[crtc];
+	if (cur_vblank < dev->last_vblank[crtc]) {
+		diff += dev->max_vblank_count;
+
+		DRM_DEBUG("last_vblank[%d]=0x%x, cur_vblank=0x%x => diff=0x%x\n",
+			  crtc, dev->last_vblank[crtc], cur_vblank, diff);
+	}
+
+	DRM_DEBUG("enabling vblank interrupts on crtc %d, missed %d\n",
+		  crtc, diff);
+
+	atomic_add(diff, &dev->_vblank_count[crtc]);
+}
+
+/**
+ * drm_vblank_get - get a reference count on vblank events
+ * @dev: DRM device
+ * @crtc: which CRTC to own
+ *
+ * Acquire a reference count on vblank events to avoid having them disabled
+ * while in use.
+ *
+ * RETURNS
+ * Zero on success, nonzero on failure.
+ */
+int drm_vblank_get(struct drm_device *dev, int crtc)
+{
+	unsigned long irqflags;
+	int ret = 0;
+
+	spin_lock_irqsave(&dev->vbl_lock, irqflags);
+	/* Going from 0->1 means we have to enable interrupts again */
+	if (atomic_add_return(1, &dev->vblank_refcount[crtc]) == 1 &&
+	    !dev->vblank_enabled[crtc]) {
+		ret = dev->driver->enable_vblank(dev, crtc);
+		DRM_DEBUG("enabling vblank on crtc %d, ret: %d\n", crtc, ret);
+		if (ret)
+			atomic_dec(&dev->vblank_refcount[crtc]);
+		else {
+			dev->vblank_enabled[crtc] = 1;
+			drm_update_vblank_count(dev, crtc);
+		}
+	}
+	spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+
+	return ret;
+}
+EXPORT_SYMBOL(drm_vblank_get);
+
+/**
+ * drm_vblank_put - give up ownership of vblank events
+ * @dev: DRM device
+ * @crtc: which counter to give up
+ *
+ * Release ownership of a given vblank counter, turning off interrupts
+ * if possible.
+ */
+void drm_vblank_put(struct drm_device *dev, int crtc)
+{
+	/* Last user schedules interrupt disable */
+	if (atomic_dec_and_test(&dev->vblank_refcount[crtc]))
+		mod_timer(&dev->vblank_disable_timer, jiffies + 5*DRM_HZ);
+}
+EXPORT_SYMBOL(drm_vblank_put);
+
+/**
+ * drm_modeset_ctl - handle vblank event counter changes across mode switch
+ * @DRM_IOCTL_ARGS: standard ioctl arguments
+ *
+ * Applications should call the %_DRM_PRE_MODESET and %_DRM_POST_MODESET
+ * ioctls around modesetting so that any lost vblank events are accounted for.
+ *
+ * Generally the counter will reset across mode sets.  If interrupts are
+ * enabled around this call, we don't have to do anything since the counter
+ * will have already been incremented.
+ */
+int drm_modeset_ctl(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_modeset_ctl *modeset = data;
+	unsigned long irqflags;
+	int crtc, ret = 0;
+
+	/* If drm_vblank_init() hasn't been called yet, just no-op */
+	if (!dev->num_crtcs)
+		goto out;
+
+	crtc = modeset->crtc;
+	if (crtc >= dev->num_crtcs) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * To avoid all the problems that might happen if interrupts
+	 * were enabled/disabled around or between these calls, we just
+	 * have the kernel take a reference on the CRTC (just once though
+	 * to avoid corrupting the count if multiple, mismatched calls occur),
+	 * so that interrupts remain enabled in the interim.
+	 */
+	switch (modeset->cmd) {
+	case _DRM_PRE_MODESET:
+		if (!dev->vblank_inmodeset[crtc]) {
+			dev->vblank_inmodeset[crtc] = 1;
+			drm_vblank_get(dev, crtc);
+		}
+		break;
+	case _DRM_POST_MODESET:
+		if (dev->vblank_inmodeset[crtc]) {
+			spin_lock_irqsave(&dev->vbl_lock, irqflags);
+			dev->vblank_disable_allowed = 1;
+			dev->vblank_inmodeset[crtc] = 0;
+			spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
+			drm_vblank_put(dev, crtc);
+		}
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+out:
+	return ret;
+}
+
 /**
  * Wait for VBLANK.
  *
@@ -232,14 +509,14 @@ int drm_control(struct drm_device *dev, void *data,
  *
  * If a signal is not requested, then calls vblank_wait().
  */
-int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_priv)
+int drm_wait_vblank(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
 {
 	union drm_wait_vblank *vblwait = data;
-	struct timeval now;
 	int ret = 0;
-	unsigned int flags, seq;
+	unsigned int flags, seq, crtc;
 
-	if ((!dev->irq) || (!dev->irq_enabled))
+	if ((!dev->pdev->irq) || (!dev->irq_enabled))
 		return -EINVAL;
 
 	if (vblwait->request.type &
@@ -251,13 +528,17 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
 	}
 
 	flags = vblwait->request.type & _DRM_VBLANK_FLAGS_MASK;
+	crtc = flags & _DRM_VBLANK_SECONDARY ? 1 : 0;
 
-	if (!drm_core_check_feature(dev, (flags & _DRM_VBLANK_SECONDARY) ?
-				    DRIVER_IRQ_VBL2 : DRIVER_IRQ_VBL))
+	if (crtc >= dev->num_crtcs)
 		return -EINVAL;
 
-	seq = atomic_read((flags & _DRM_VBLANK_SECONDARY) ? &dev->vbl_received2
-			  : &dev->vbl_received);
+	ret = drm_vblank_get(dev, crtc);
+	if (ret) {
+		DRM_ERROR("failed to acquire vblank counter, %d\n", ret);
+		return ret;
+	}
+	seq = drm_vblank_count(dev, crtc);
 
 	switch (vblwait->request.type & _DRM_VBLANK_TYPES_MASK) {
 	case _DRM_VBLANK_RELATIVE:
@@ -266,7 +547,8 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
 	case _DRM_VBLANK_ABSOLUTE:
 		break;
 	default:
-		return -EINVAL;
+		ret = -EINVAL;
+		goto done;
 	}
 
 	if ((flags & _DRM_VBLANK_NEXTONMISS) &&
@@ -276,8 +558,7 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
 
 	if (flags & _DRM_VBLANK_SIGNAL) {
 		unsigned long irqflags;
-		struct list_head *vbl_sigs = (flags & _DRM_VBLANK_SECONDARY)
-				      ? &dev->vbl_sigs2 : &dev->vbl_sigs;
+		struct list_head *vbl_sigs = &dev->vbl_sigs[crtc];
 		struct drm_vbl_sig *vbl_sig;
 
 		spin_lock_irqsave(&dev->vbl_lock, irqflags);
@@ -298,22 +579,29 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
 			}
 		}
 
-		if (dev->vbl_pending >= 100) {
+		if (atomic_read(&dev->vbl_signal_pending) >= 100) {
 			spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
-			return -EBUSY;
+			ret = -EBUSY;
+			goto done;
 		}
 
-		dev->vbl_pending++;
-
 		spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
 
-		if (!
-		    (vbl_sig =
-		     drm_alloc(sizeof(struct drm_vbl_sig), DRM_MEM_DRIVER))) {
-			return -ENOMEM;
+		vbl_sig = drm_calloc(1, sizeof(struct drm_vbl_sig),
+				     DRM_MEM_DRIVER);
+		if (!vbl_sig) {
+			ret = -ENOMEM;
+			goto done;
+		}
+
+		ret = drm_vblank_get(dev, crtc);
+		if (ret) {
+			drm_free(vbl_sig, sizeof(struct drm_vbl_sig),
+				 DRM_MEM_DRIVER);
+			return ret;
 		}
 
-		memset((void *)vbl_sig, 0, sizeof(*vbl_sig));
+		atomic_inc(&dev->vbl_signal_pending);
 
 		vbl_sig->sequence = vblwait->request.sequence;
 		vbl_sig->info.si_signo = vblwait->request.signal;
@@ -327,20 +615,29 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
 
 		vblwait->reply.sequence = seq;
 	} else {
-		if (flags & _DRM_VBLANK_SECONDARY) {
-			if (dev->driver->vblank_wait2)
-				ret = dev->driver->vblank_wait2(dev, &vblwait->request.sequence);
-		} else if (dev->driver->vblank_wait)
-			ret =
-			    dev->driver->vblank_wait(dev,
-						     &vblwait->request.sequence);
-
-		do_gettimeofday(&now);
-		vblwait->reply.tval_sec = now.tv_sec;
-		vblwait->reply.tval_usec = now.tv_usec;
+		DRM_DEBUG("waiting on vblank count %d, crtc %d\n",
+			  vblwait->request.sequence, crtc);
+		DRM_WAIT_ON(ret, dev->vbl_queue[crtc], 3 * DRM_HZ,
+			    ((drm_vblank_count(dev, crtc)
+			      - vblwait->request.sequence) <= (1 << 23)));
+
+		if (ret != -EINTR) {
+			struct timeval now;
+
+			do_gettimeofday(&now);
+
+			vblwait->reply.tval_sec = now.tv_sec;
+			vblwait->reply.tval_usec = now.tv_usec;
+			vblwait->reply.sequence = drm_vblank_count(dev, crtc);
+			DRM_DEBUG("returning %d to client\n",
+				  vblwait->reply.sequence);
+		} else {
+			DRM_DEBUG("vblank wait interrupted by signal\n");
+		}
 	}
 
-      done:
+done:
+	drm_vblank_put(dev, crtc);
 	return ret;
 }
 
@@ -348,44 +645,57 @@ int drm_wait_vblank(struct drm_device *dev, void *data, struct drm_file *file_pr
  * Send the VBLANK signals.
  *
  * \param dev DRM device.
+ * \param crtc CRTC where the vblank event occurred
  *
  * Sends a signal for each task in drm_device::vbl_sigs and empties the list.
  */
-void drm_vbl_send_signals(struct drm_device * dev)
+static void drm_vbl_send_signals(struct drm_device *dev, int crtc)
 {
+	struct drm_vbl_sig *vbl_sig, *tmp;
+	struct list_head *vbl_sigs;
+	unsigned int vbl_seq;
 	unsigned long flags;
-	int i;
 
 	spin_lock_irqsave(&dev->vbl_lock, flags);
 
-	for (i = 0; i < 2; i++) {
-		struct drm_vbl_sig *vbl_sig, *tmp;
-		struct list_head *vbl_sigs = i ? &dev->vbl_sigs2 : &dev->vbl_sigs;
-		unsigned int vbl_seq = atomic_read(i ? &dev->vbl_received2 :
-						   &dev->vbl_received);
+	vbl_sigs = &dev->vbl_sigs[crtc];
+	vbl_seq = drm_vblank_count(dev, crtc);
 
-		list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
-			if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
-				vbl_sig->info.si_code = vbl_seq;
-				send_sig_info(vbl_sig->info.si_signo,
-					      &vbl_sig->info, vbl_sig->task);
+	list_for_each_entry_safe(vbl_sig, tmp, vbl_sigs, head) {
+	    if ((vbl_seq - vbl_sig->sequence) <= (1 << 23)) {
+		vbl_sig->info.si_code = vbl_seq;
+		send_sig_info(vbl_sig->info.si_signo,
+			      &vbl_sig->info, vbl_sig->task);
 
-				list_del(&vbl_sig->head);
-
-				drm_free(vbl_sig, sizeof(*vbl_sig),
-					 DRM_MEM_DRIVER);
+		list_del(&vbl_sig->head);
 
-				dev->vbl_pending--;
-			}
-		}
+		drm_free(vbl_sig, sizeof(*vbl_sig),
+			 DRM_MEM_DRIVER);
+		atomic_dec(&dev->vbl_signal_pending);
+		drm_vblank_put(dev, crtc);
+	    }
 	}
 
 	spin_unlock_irqrestore(&dev->vbl_lock, flags);
 }
 
-EXPORT_SYMBOL(drm_vbl_send_signals);
+/**
+ * drm_handle_vblank - handle a vblank event
+ * @dev: DRM device
+ * @crtc: where this event occurred
+ *
+ * Drivers should call this routine in their vblank interrupt handlers to
+ * update the vblank counter and send any signals that may be pending.
+ */
+void drm_handle_vblank(struct drm_device *dev, int crtc)
+{
+	atomic_inc(&dev->_vblank_count[crtc]);
+	DRM_WAKEUP(&dev->vbl_queue[crtc]);
+	drm_vbl_send_signals(dev, crtc);
+}
+EXPORT_SYMBOL(drm_handle_vblank);
 
 /**
  * Tasklet wrapper function.
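
The subtlest arithmetic above is in drm_update_vblank_count(): hardware frame counters are often narrower than 32 bits, so if the raw value read at re-enable time is smaller than the one saved when the interrupt was disabled, the counter wrapped and max_vblank_count has to be added back. Unsigned 32-bit arithmetic makes the correction fall out cleanly, as this stand-alone demo with a hypothetical 24-bit counter shows:

#include <stdio.h>

int main(void)
{
	unsigned int max_vblank_count = 0x1000000;	/* 24-bit counter */
	unsigned int last = 0xfffffe;	/* saved at vblank irq disable */
	unsigned int cur  = 0x000003;	/* read at re-enable: has wrapped */
	unsigned int diff = cur - last;	/* underflows in 32-bit space */

	if (cur < last)
		diff += max_vblank_count;	/* undo the wrap */

	printf("missed %u vblanks\n", diff);	/* prints: missed 5 */
	return 0;
}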

+ 2 - 0
drivers/gpu/drm/drm_memory.c

@@ -133,6 +133,7 @@ int drm_free_agp(DRM_AGP_MEM * handle, int pages)
 {
 	return drm_agp_free_memory(handle) ? 0 : -EINVAL;
 }
+EXPORT_SYMBOL(drm_free_agp);
 
 /** Wrapper around agp_bind_memory() */
 int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start)
@@ -145,6 +146,7 @@ int drm_unbind_agp(DRM_AGP_MEM * handle)
 {
 	return drm_agp_unbind_memory(handle);
 }
+EXPORT_SYMBOL(drm_unbind_agp);
 
 #else  /*  __OS_HAS_AGP  */
 static inline void *agp_remap(unsigned long offset, unsigned long size,

+ 4 - 1
drivers/gpu/drm/drm_mm.c

@@ -169,6 +169,7 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
 
 	return child;
 }
+EXPORT_SYMBOL(drm_mm_get_block);
 
 /*
  * Put a block. Merge with the previous and / or next block if they are free.
@@ -217,6 +218,7 @@ void drm_mm_put_block(struct drm_mm_node * cur)
 		drm_free(cur, sizeof(*cur), DRM_MEM_MM);
 	}
 }
+EXPORT_SYMBOL(drm_mm_put_block);
 
 struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm,
 				  unsigned long size,
@@ -265,6 +267,7 @@ int drm_mm_clean(struct drm_mm * mm)
 
 	return (head->next->next == head);
 }
+EXPORT_SYMBOL(drm_mm_search_free);
 
 int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
 {
@@ -273,7 +276,7 @@ int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size)
 
 	return drm_mm_create_tail_node(mm, start, size);
 }
-
+EXPORT_SYMBOL(drm_mm_init);
 
 void drm_mm_takedown(struct drm_mm * mm)
 {

+ 122 - 13
drivers/gpu/drm/drm_proc.c

@@ -49,6 +49,10 @@ static int drm_queues_info(char *buf, char **start, off_t offset,
 			   int request, int *eof, void *data);
 static int drm_bufs_info(char *buf, char **start, off_t offset,
 			 int request, int *eof, void *data);
+static int drm_gem_name_info(char *buf, char **start, off_t offset,
+			     int request, int *eof, void *data);
+static int drm_gem_object_info(char *buf, char **start, off_t offset,
+			       int request, int *eof, void *data);
 #if DRM_DEBUG_CODE
 static int drm_vma_info(char *buf, char **start, off_t offset,
 			int request, int *eof, void *data);
@@ -60,13 +64,16 @@ static int drm_vma_info(char *buf, char **start, off_t offset,
 static struct drm_proc_list {
 	const char *name;	/**< file name */
 	int (*f) (char *, char **, off_t, int, int *, void *);		/**< proc callback*/
+	u32 driver_features; /**< Required driver features for this entry */
 } drm_proc_list[] = {
-	{"name", drm_name_info},
-	{"mem", drm_mem_info},
-	{"vm", drm_vm_info},
-	{"clients", drm_clients_info},
-	{"queues", drm_queues_info},
-	{"bufs", drm_bufs_info},
+	{"name", drm_name_info, 0},
+	{"mem", drm_mem_info, 0},
+	{"vm", drm_vm_info, 0},
+	{"clients", drm_clients_info, 0},
+	{"queues", drm_queues_info, 0},
+	{"bufs", drm_bufs_info, 0},
+	{"gem_names", drm_gem_name_info, DRIVER_GEM},
+	{"gem_objects", drm_gem_object_info, DRIVER_GEM},
 #if DRM_DEBUG_CODE
 	{"vma", drm_vma_info},
 #endif
@@ -90,8 +97,9 @@ static struct drm_proc_list {
 int drm_proc_init(struct drm_minor *minor, int minor_id,
 		  struct proc_dir_entry *root)
 {
+	struct drm_device *dev = minor->dev;
 	struct proc_dir_entry *ent;
-	int i, j;
+	int i, j, ret;
 	char name[64];
 
 	sprintf(name, "%d", minor_id);
@@ -102,23 +110,42 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
 	}
 
 	for (i = 0; i < DRM_PROC_ENTRIES; i++) {
+		u32 features = drm_proc_list[i].driver_features;
+
+		if (features != 0 &&
+		    (dev->driver->driver_features & features) != features)
+			continue;
+
 		ent = create_proc_entry(drm_proc_list[i].name,
 					S_IFREG | S_IRUGO, minor->dev_root);
 		if (!ent) {
 			DRM_ERROR("Cannot create /proc/dri/%s/%s\n",
 				  name, drm_proc_list[i].name);
-			for (j = 0; j < i; j++)
-				remove_proc_entry(drm_proc_list[i].name,
-						  minor->dev_root);
-			remove_proc_entry(name, root);
-			minor->dev_root = NULL;
-			return -1;
+			ret = -1;
+			goto fail;
 		}
 		ent->read_proc = drm_proc_list[i].f;
 		ent->data = minor;
 	}
 
+	if (dev->driver->proc_init) {
+		ret = dev->driver->proc_init(minor);
+		if (ret) {
+			DRM_ERROR("DRM: Driver failed to initialize "
+				  "/proc/dri.\n");
+			goto fail;
+		}
+	}
+
 	return 0;
+ fail:
+
+	for (j = 0; j < i; j++)
+		remove_proc_entry(drm_proc_list[j].name,
+				  minor->dev_root);
+	remove_proc_entry(name, root);
+	minor->dev_root = NULL;
+	return ret;
 }
 
 /**
@@ -133,12 +160,16 @@ int drm_proc_init(struct drm_minor *minor, int minor_id,
  */
 int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root)
 {
+	struct drm_device *dev = minor->dev;
 	int i;
 	char name[64];
 
 	if (!root || !minor->dev_root)
 		return 0;
 
+	if (dev->driver->proc_cleanup)
+		dev->driver->proc_cleanup(minor);
+
 	for (i = 0; i < DRM_PROC_ENTRIES; i++)
 		remove_proc_entry(drm_proc_list[i].name, minor->dev_root);
 	sprintf(name, "%d", minor->index);
@@ -480,6 +511,84 @@ static int drm_clients_info(char *buf, char **start, off_t offset,
 	return ret;
 }
 
+struct drm_gem_name_info_data {
+	int	len;
+	char	*buf;
+	int	eof;
+};
+
+static int drm_gem_one_name_info(int id, void *ptr, void *data)
+{
+	struct drm_gem_object *obj = ptr;
+	struct drm_gem_name_info_data   *nid = data;
+
+	DRM_INFO("name %d size %d\n", obj->name, obj->size);
+	if (nid->eof)
+		return 0;
+
+	nid->len += sprintf(&nid->buf[nid->len],
+			    "%6d%9d%8d%9d\n",
+			    obj->name, obj->size,
+			    atomic_read(&obj->handlecount.refcount),
+			    atomic_read(&obj->refcount.refcount));
+	if (nid->len > DRM_PROC_LIMIT) {
+		nid->eof = 1;
+		return 0;
+	}
+	return 0;
+}
+
+static int drm_gem_name_info(char *buf, char **start, off_t offset,
+			     int request, int *eof, void *data)
+{
+	struct drm_minor *minor = (struct drm_minor *) data;
+	struct drm_device *dev = minor->dev;
+	struct drm_gem_name_info_data nid;
+
+	if (offset > DRM_PROC_LIMIT) {
+		*eof = 1;
+		return 0;
+	}
+
+	nid.len = sprintf(buf, "  name     size handles refcount\n");
+	nid.buf = buf;
+	nid.eof = 0;
+	idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, &nid);
+
+	*start = &buf[offset];
+	*eof = 0;
+	if (nid.len > request + offset)
+		return request;
+	*eof = 1;
+	return nid.len - offset;
+}
+
+static int drm_gem_object_info(char *buf, char **start, off_t offset,
+			       int request, int *eof, void *data)
+{
+	struct drm_minor *minor = (struct drm_minor *) data;
+	struct drm_device *dev = minor->dev;
+	int len = 0;
+
+	if (offset > DRM_PROC_LIMIT) {
+		*eof = 1;
+		return 0;
+	}
+
+	*start = &buf[offset];
+	*eof = 0;
+	DRM_PROC_PRINT("%d objects\n", atomic_read(&dev->object_count));
+	DRM_PROC_PRINT("%d object bytes\n", atomic_read(&dev->object_memory));
+	DRM_PROC_PRINT("%d pinned\n", atomic_read(&dev->pin_count));
+	DRM_PROC_PRINT("%d pin bytes\n", atomic_read(&dev->pin_memory));
+	DRM_PROC_PRINT("%d gtt bytes\n", atomic_read(&dev->gtt_memory));
+	DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total);
+	if (len > request + offset)
+		return request;
+	*eof = 1;
+	return len - offset;
+}
+
 #if DRM_DEBUG_CODE
 
 static int drm__vma_info(char *buf, char **start, off_t offset, int request,
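
The two new entries follow the legacy create_proc_entry() read_proc contract used throughout this file: write into buf, honour offset and request, set *start for partial reads, and raise *eof once everything has been emitted. A minimal callback in the same shape as drm_gem_object_info() above (the name and output are illustrative):

/* Minimal read_proc callback following the conventions above. */
static int demo_info(char *buf, char **start, off_t offset,
		     int request, int *eof, void *data)
{
	int len = 0;

	if (offset > DRM_PROC_LIMIT) {
		*eof = 1;
		return 0;
	}

	*start = &buf[offset];
	*eof = 0;
	len += sprintf(&buf[len], "hello from /proc/dri\n");

	if (len > request + offset)
		return request;
	*eof = 1;
	return len - offset;
}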

+ 10 - 1
drivers/gpu/drm/drm_stub.c

@@ -107,7 +107,6 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
 #ifdef __alpha__
 	dev->hose = pdev->sysdata;
 #endif
-	dev->irq = pdev->irq;
 
 	if (drm_ht_create(&dev->map_hash, 12)) {
 		return -ENOMEM;
@@ -152,6 +151,15 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev,
 		goto error_out_unreg;
 	}
 
+	if (driver->driver_features & DRIVER_GEM) {
+		retcode = drm_gem_init(dev);
+		if (retcode) {
+			DRM_ERROR("Cannot initialize graphics execution "
+				  "manager (GEM)\n");
+			goto error_out_unreg;
+		}
+	}
+
 	return 0;
 
       error_out_unreg:
@@ -317,6 +325,7 @@ int drm_put_dev(struct drm_device * dev)
 int drm_put_minor(struct drm_minor **minor_p)
 {
 	struct drm_minor *minor = *minor_p;
+
 	DRM_DEBUG("release secondary minor %d\n", minor->index);
 
 	if (minor->type == DRM_MINOR_LEGACY)
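
GEM stays strictly opt-in: the core only runs drm_gem_init() here, and the drm_gem_open()/drm_gem_release() hooks in the file ops, when the driver sets DRIVER_GEM in its feature mask. A sketch of the driver-side declaration (the callback names and the other feature bits are illustrative; gem_init_object and gem_free_object are the hooks drm_gem.c consults, and both may be left NULL):

/* Sketch: a driver opting in to GEM.  Only DRIVER_GEM and the two
 * gem_* hooks matter to the code above; everything else is elided. */
static struct drm_driver demo_driver = {
	.driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
	.gem_init_object = demo_gem_init_object,
	.gem_free_object = demo_gem_free_object,
};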

+ 1 - 1
drivers/gpu/drm/drm_sysfs.c

@@ -184,7 +184,7 @@ int drm_sysfs_device_add(struct drm_minor *minor)
 err_out_files:
 	if (i > 0)
 		for (j = 0; j < i; j++)
-			device_remove_file(&minor->kdev, &device_attrs[i]);
+			device_remove_file(&minor->kdev, &device_attrs[j]);
 	device_unregister(&minor->kdev);
 err_out:
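
The one-character fix above deserves a second look: the unwind loop indexed the array with i instead of j, so on failure it removed the same not-yet-created attribute repeatedly and leaked the files that had been created. One way to make this class of bug unwritable is to unwind with the loop counter itself, as in this illustrative sketch (not how the file is actually structured):

/* Illustrative: walking i back down removes exactly the attribute
 * files 0..i-1 that were created before the failure. */
static int demo_add_files(struct drm_minor *minor, int attr_count)
{
	int i, ret;

	for (i = 0; i < attr_count; i++) {
		ret = device_create_file(&minor->kdev, &device_attrs[i]);
		if (ret)
			goto err_out_files;
	}
	return 0;

err_out_files:
	while (i--)
		device_remove_file(&minor->kdev, &device_attrs[i]);
	return ret;
}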
 

+ 6 - 1
drivers/gpu/drm/i915/Makefile

@@ -3,7 +3,12 @@
 # Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
 
 ccflags-y := -Iinclude/drm
-i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o
+i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_opregion.o \
+          i915_suspend.o \
+	  i915_gem.o \
+	  i915_gem_debug.o \
+	  i915_gem_proc.o \
+	  i915_gem_tiling.o
 
 i915-$(CONFIG_COMPAT)   += i915_ioc32.o
 

+ 226 - 106
drivers/gpu/drm/i915/i915_dma.c

@@ -40,40 +40,96 @@ int i915_wait_ring(struct drm_device * dev, int n, const char *caller)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
-	u32 last_head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
+	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;
+	u32 last_acthd = I915_READ(acthd_reg);
+	u32 acthd;
+	u32 last_head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
 	int i;
 
-	for (i = 0; i < 10000; i++) {
-		ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
+	for (i = 0; i < 100000; i++) {
+		ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+		acthd = I915_READ(acthd_reg);
 		ring->space = ring->head - (ring->tail + 8);
 		if (ring->space < 0)
 			ring->space += ring->Size;
 		if (ring->space >= n)
 			return 0;
 
-		dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+		if (dev_priv->sarea_priv)
+			dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
 		if (ring->head != last_head)
 			i = 0;
+		if (acthd != last_acthd)
+			i = 0;
 
 		last_head = ring->head;
+		last_acthd = acthd;
+		msleep_interruptible(10);
+
 	}
 
 	return -EBUSY;
 }
 
+/**
+ * Sets up the hardware status page for devices that need a physical address
+ * in the register.
+ */
+static int i915_init_phys_hws(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	/* Program Hardware Status Page */
+	dev_priv->status_page_dmah =
+		drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
+
+	if (!dev_priv->status_page_dmah) {
+		DRM_ERROR("Can not allocate hardware status page\n");
+		return -ENOMEM;
+	}
+	dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
+	dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
+
+	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+
+	I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
+	DRM_DEBUG("Enabled hardware status page\n");
+	return 0;
+}
+
+/**
+ * Frees the hardware status page, whether it's a physical address or a virtual
+ * address set up by the X Server.
+ */
+static void i915_free_hws(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	if (dev_priv->status_page_dmah) {
+		drm_pci_free(dev, dev_priv->status_page_dmah);
+		dev_priv->status_page_dmah = NULL;
+	}
+
+	if (dev_priv->status_gfx_addr) {
+		dev_priv->status_gfx_addr = 0;
+		drm_core_ioremapfree(&dev_priv->hws_map, dev);
+	}
+
+	/* Need to rewrite hardware status page */
+	I915_WRITE(HWS_PGA, 0x1ffff000);
+}
+
 void i915_kernel_lost_context(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_ring_buffer_t *ring = &(dev_priv->ring);
 
-	ring->head = I915_READ(LP_RING + RING_HEAD) & HEAD_ADDR;
-	ring->tail = I915_READ(LP_RING + RING_TAIL) & TAIL_ADDR;
+	ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+	ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
 	ring->space = ring->head - (ring->tail + 8);
 	if (ring->space < 0)
 		ring->space += ring->Size;
 
-	if (ring->head == ring->tail)
+	if (ring->head == ring->tail && dev_priv->sarea_priv)
 		dev_priv->sarea_priv->perf_boxes |= I915_BOX_RING_EMPTY;
 }
 
@@ -84,28 +140,19 @@ static int i915_dma_cleanup(struct drm_device * dev)
 	 * may not have been called from userspace and after dev_private
 	 * is freed, it's too late.
 	 */
-	if (dev->irq)
+	if (dev->irq_enabled)
 		drm_irq_uninstall(dev);
 
 	if (dev_priv->ring.virtual_start) {
 		drm_core_ioremapfree(&dev_priv->ring.map, dev);
-		dev_priv->ring.virtual_start = 0;
-		dev_priv->ring.map.handle = 0;
+		dev_priv->ring.virtual_start = NULL;
+		dev_priv->ring.map.handle = NULL;
 		dev_priv->ring.map.size = 0;
 	}
 
-	if (dev_priv->status_page_dmah) {
-		drm_pci_free(dev, dev_priv->status_page_dmah);
-		dev_priv->status_page_dmah = NULL;
-		/* Need to rewrite hardware status page */
-		I915_WRITE(0x02080, 0x1ffff000);
-	}
-
-	if (dev_priv->status_gfx_addr) {
-		dev_priv->status_gfx_addr = 0;
-		drm_core_ioremapfree(&dev_priv->hws_map, dev);
-		I915_WRITE(0x2080, 0x1ffff000);
-	}
+	/* Clear the HWS virtual address at teardown */
+	if (I915_NEED_GFX_HWS(dev))
+		i915_free_hws(dev);
 
 	return 0;
 }
@@ -121,34 +168,34 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 		return -EINVAL;
 	}
 
-	dev_priv->mmio_map = drm_core_findmap(dev, init->mmio_offset);
-	if (!dev_priv->mmio_map) {
-		i915_dma_cleanup(dev);
-		DRM_ERROR("can not find mmio map!\n");
-		return -EINVAL;
-	}
-
 	dev_priv->sarea_priv = (drm_i915_sarea_t *)
 	    ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset);
 
-	dev_priv->ring.Start = init->ring_start;
-	dev_priv->ring.End = init->ring_end;
-	dev_priv->ring.Size = init->ring_size;
-	dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
+	if (init->ring_size != 0) {
+		if (dev_priv->ring.ring_obj != NULL) {
+			i915_dma_cleanup(dev);
+			DRM_ERROR("Client tried to initialize ringbuffer in "
+				  "GEM mode\n");
+			return -EINVAL;
+		}
 
-	dev_priv->ring.map.offset = init->ring_start;
-	dev_priv->ring.map.size = init->ring_size;
-	dev_priv->ring.map.type = 0;
-	dev_priv->ring.map.flags = 0;
-	dev_priv->ring.map.mtrr = 0;
+		dev_priv->ring.Size = init->ring_size;
+		dev_priv->ring.tail_mask = dev_priv->ring.Size - 1;
 
-	drm_core_ioremap(&dev_priv->ring.map, dev);
+		dev_priv->ring.map.offset = init->ring_start;
+		dev_priv->ring.map.size = init->ring_size;
+		dev_priv->ring.map.type = 0;
+		dev_priv->ring.map.flags = 0;
+		dev_priv->ring.map.mtrr = 0;
 
-	if (dev_priv->ring.map.handle == NULL) {
-		i915_dma_cleanup(dev);
-		DRM_ERROR("can not ioremap virtual address for"
-			  " ring buffer\n");
-		return -ENOMEM;
+		drm_core_ioremap(&dev_priv->ring.map, dev);
+
+		if (dev_priv->ring.map.handle == NULL) {
+			i915_dma_cleanup(dev);
+			DRM_ERROR("can not ioremap virtual address for"
+				  " ring buffer\n");
+			return -ENOMEM;
+		}
 	}
 
 	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
@@ -159,34 +206,10 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
 	dev_priv->current_page = 0;
 	dev_priv->sarea_priv->pf_current_page = dev_priv->current_page;
 
-	/* We are using separate values as placeholders for mechanisms for
-	 * private backbuffer/depthbuffer usage.
-	 */
-	dev_priv->use_mi_batchbuffer_start = 0;
-	if (IS_I965G(dev)) /* 965 doesn't support older method */
-		dev_priv->use_mi_batchbuffer_start = 1;
-
 	/* Allow hardware batchbuffers unless told otherwise.
 	 */
 	dev_priv->allow_batchbuffer = 1;
 
-	/* Program Hardware Status Page */
-	if (!I915_NEED_GFX_HWS(dev)) {
-		dev_priv->status_page_dmah =
-			drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE, 0xffffffff);
-
-		if (!dev_priv->status_page_dmah) {
-			i915_dma_cleanup(dev);
-			DRM_ERROR("Can not allocate hardware status page\n");
-			return -ENOMEM;
-		}
-		dev_priv->hw_status_page = dev_priv->status_page_dmah->vaddr;
-		dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
-
-		memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-		I915_WRITE(0x02080, dev_priv->dma_status_page);
-	}
-	DRM_DEBUG("Enabled hardware status page\n");
 	return 0;
 }
 
@@ -201,11 +224,6 @@ static int i915_dma_resume(struct drm_device * dev)
 		return -EINVAL;
 	}
 
-	if (!dev_priv->mmio_map) {
-		DRM_ERROR("can not find mmio map!\n");
-		return -EINVAL;
-	}
-
 	if (dev_priv->ring.map.handle == NULL) {
 		DRM_ERROR("can not ioremap virtual address for"
 			  " ring buffer\n");
@@ -220,9 +238,9 @@ static int i915_dma_resume(struct drm_device * dev)
 	DRM_DEBUG("hw status page @ %p\n", dev_priv->hw_status_page);
 
 	if (dev_priv->status_gfx_addr != 0)
-		I915_WRITE(0x02080, dev_priv->status_gfx_addr);
+		I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
 	else
-		I915_WRITE(0x02080, dev_priv->dma_status_page);
+		I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
 	DRM_DEBUG("Enabled hardware status page\n");
 
 	return 0;
@@ -367,9 +385,10 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor
 	return 0;
 }
 
-static int i915_emit_box(struct drm_device * dev,
-			 struct drm_clip_rect __user * boxes,
-			 int i, int DR1, int DR4)
+int
+i915_emit_box(struct drm_device *dev,
+	      struct drm_clip_rect __user *boxes,
+	      int i, int DR1, int DR4)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	struct drm_clip_rect box;
@@ -415,14 +434,15 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	RING_LOCALS;
 
-	dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
-
+	dev_priv->counter++;
 	if (dev_priv->counter > 0x7FFFFFFFUL)
-		dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
+		dev_priv->counter = 0;
+	if (dev_priv->sarea_priv)
+		dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
 
 	BEGIN_LP_RING(4);
-	OUT_RING(CMD_STORE_DWORD_IDX);
-	OUT_RING(20);
+	OUT_RING(MI_STORE_DWORD_INDEX);
+	OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
 	OUT_RING(dev_priv->counter);
 	OUT_RING(0);
 	ADVANCE_LP_RING();
@@ -486,7 +506,7 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
 				return ret;
 		}
 
-		if (dev_priv->use_mi_batchbuffer_start) {
+		if (!IS_I830(dev) && !IS_845G(dev)) {
 			BEGIN_LP_RING(2);
 			if (IS_I965G(dev)) {
 				OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
@@ -516,6 +536,9 @@ static int i915_dispatch_flip(struct drm_device * dev)
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	RING_LOCALS;
 
+	if (!dev_priv->sarea_priv)
+		return -EINVAL;
+
 	DRM_DEBUG("%s: page=%d pfCurrentPage=%d\n",
 		  __func__,
 		  dev_priv->current_page,
@@ -524,7 +547,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
 	i915_kernel_lost_context(dev);
 
 	BEGIN_LP_RING(2);
-	OUT_RING(INST_PARSER_CLIENT | INST_OP_FLUSH | INST_FLUSH_MAP_CACHE);
+	OUT_RING(MI_FLUSH | MI_READ_FLUSH);
 	OUT_RING(0);
 	ADVANCE_LP_RING();
 
@@ -549,8 +572,8 @@ static int i915_dispatch_flip(struct drm_device * dev)
 	dev_priv->sarea_priv->last_enqueue = dev_priv->counter++;
 
 	BEGIN_LP_RING(4);
-	OUT_RING(CMD_STORE_DWORD_IDX);
-	OUT_RING(20);
+	OUT_RING(MI_STORE_DWORD_INDEX);
+	OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
 	OUT_RING(dev_priv->counter);
 	OUT_RING(0);
 	ADVANCE_LP_RING();
@@ -570,9 +593,15 @@ static int i915_quiescent(struct drm_device * dev)
 static int i915_flush_ioctl(struct drm_device *dev, void *data,
 			    struct drm_file *file_priv)
 {
-	LOCK_TEST_WITH_RETURN(dev, file_priv);
+	int ret;
+
+	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	mutex_lock(&dev->struct_mutex);
+	ret = i915_quiescent(dev);
+	mutex_unlock(&dev->struct_mutex);
 
-	return i915_quiescent(dev);
+	return ret;
 }
 
 static int i915_batchbuffer(struct drm_device *dev, void *data,
@@ -593,16 +622,19 @@ static int i915_batchbuffer(struct drm_device *dev, void *data,
 	DRM_DEBUG("i915 batchbuffer, start %x used %d cliprects %d\n",
 		  batch->start, batch->used, batch->num_cliprects);
 
-	LOCK_TEST_WITH_RETURN(dev, file_priv);
+	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
 	if (batch->num_cliprects && DRM_VERIFYAREA_READ(batch->cliprects,
 						       batch->num_cliprects *
 						       sizeof(struct drm_clip_rect)))
 		return -EFAULT;
 
+	mutex_lock(&dev->struct_mutex);
 	ret = i915_dispatch_batchbuffer(dev, batch);
+	mutex_unlock(&dev->struct_mutex);
 
-	sarea_priv->last_dispatch = (int)hw_status[5];
+	if (sarea_priv)
+		sarea_priv->last_dispatch = (int)hw_status[5];
 	return ret;
 }
 
@@ -619,7 +651,7 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 	DRM_DEBUG("i915 cmdbuffer, buf %p sz %d cliprects %d\n",
 		  cmdbuf->buf, cmdbuf->sz, cmdbuf->num_cliprects);
 
-	LOCK_TEST_WITH_RETURN(dev, file_priv);
+	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
 	if (cmdbuf->num_cliprects &&
 	    DRM_VERIFYAREA_READ(cmdbuf->cliprects,
@@ -629,24 +661,33 @@ static int i915_cmdbuffer(struct drm_device *dev, void *data,
 		return -EFAULT;
 	}
 
+	mutex_lock(&dev->struct_mutex);
 	ret = i915_dispatch_cmdbuffer(dev, cmdbuf);
+	mutex_unlock(&dev->struct_mutex);
 	if (ret) {
 		DRM_ERROR("i915_dispatch_cmdbuffer failed\n");
 		return ret;
 	}
 
-	sarea_priv->last_dispatch = (int)hw_status[5];
+	if (sarea_priv)
+		sarea_priv->last_dispatch = (int)hw_status[5];
 	return 0;
 }
 
 static int i915_flip_bufs(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv)
 {
+	int ret;
+
 	DRM_DEBUG("%s\n", __func__);
 
-	LOCK_TEST_WITH_RETURN(dev, file_priv);
+	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
+
+	mutex_lock(&dev->struct_mutex);
+	ret = i915_dispatch_flip(dev);
+	mutex_unlock(&dev->struct_mutex);
 
-	return i915_dispatch_flip(dev);
+	return ret;
 }
 
 static int i915_getparam(struct drm_device *dev, void *data,
@@ -663,7 +704,7 @@ static int i915_getparam(struct drm_device *dev, void *data,
 
 	switch (param->param) {
 	case I915_PARAM_IRQ_ACTIVE:
-		value = dev->irq ? 1 : 0;
+		value = dev->pdev->irq ? 1 : 0;
 		break;
 	case I915_PARAM_ALLOW_BATCHBUFFER:
 		value = dev_priv->allow_batchbuffer ? 1 : 0;
@@ -671,6 +712,12 @@ static int i915_getparam(struct drm_device *dev, void *data,
 	case I915_PARAM_LAST_DISPATCH:
 		value = READ_BREADCRUMB(dev_priv);
 		break;
+	case I915_PARAM_CHIPSET_ID:
+		value = dev->pci_device;
+		break;
+	case I915_PARAM_HAS_GEM:
+		value = 1;
+		break;
 	default:
 		DRM_ERROR("Unknown parameter %d\n", param->param);
 		return -EINVAL;
@@ -697,8 +744,6 @@ static int i915_setparam(struct drm_device *dev, void *data,
 
 	switch (param->param) {
 	case I915_SETPARAM_USE_MI_BATCHBUFFER_START:
-		if (!IS_I965G(dev))
-			dev_priv->use_mi_batchbuffer_start = param->value;
 		break;
 	case I915_SETPARAM_TEX_LRU_LOG_GRANULARITY:
 		dev_priv->tex_lru_log_granularity = param->value;
@@ -749,8 +794,8 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
 	dev_priv->hw_status_page = dev_priv->hws_map.handle;
 
 	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
-	I915_WRITE(0x02080, dev_priv->status_gfx_addr);
-	DRM_DEBUG("load hws 0x2080 with gfx mem 0x%x\n",
+	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+	DRM_DEBUG("load hws at HWS_PGA with gfx mem 0x%x\n",
 			dev_priv->status_gfx_addr);
 	DRM_DEBUG("load hws at %p\n", dev_priv->hw_status_page);
 	return 0;
@@ -776,14 +821,38 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
 	memset(dev_priv, 0, sizeof(drm_i915_private_t));
 
 	dev->dev_private = (void *)dev_priv;
+	dev_priv->dev = dev;
 
 	/* Add register map (needed for suspend/resume) */
 	base = drm_get_resource_start(dev, mmio_bar);
 	size = drm_get_resource_len(dev, mmio_bar);
 
-	ret = drm_addmap(dev, base, size, _DRM_REGISTERS,
-			 _DRM_KERNEL | _DRM_DRIVER,
-			 &dev_priv->mmio_map);
+	dev_priv->regs = ioremap(base, size);
+
+	i915_gem_load(dev);
+
+	/* Init HWS */
+	if (!I915_NEED_GFX_HWS(dev)) {
+		ret = i915_init_phys_hws(dev);
+		if (ret != 0)
+			return ret;
+	}
+
+	/* On the 945G/GM, the chipset reports the MSI capability on the
+	 * integrated graphics even though the support isn't actually there
+	 * according to the published specs.  It doesn't appear to function
+	 * correctly in testing on 945G.
+	 * This may be a side effect of MSI having been made available for PEG
+	 * and the registers being closely associated.
+	 */
+	if (!IS_I945G(dev) && !IS_I945GM(dev))
+		if (pci_enable_msi(dev->pdev))
+			DRM_ERROR("failed to enable MSI\n");
+
+	intel_opregion_init(dev);
+
+	spin_lock_init(&dev_priv->user_irq_lock);
+
 	return ret;
 }
 
@@ -791,8 +860,15 @@ int i915_driver_unload(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 
-	if (dev_priv->mmio_map)
-		drm_rmmap(dev, dev_priv->mmio_map);
+	if (dev->pdev->msi_enabled)
+		pci_disable_msi(dev->pdev);
+
+	i915_free_hws(dev);
+
+	if (dev_priv->regs != NULL)
+		iounmap(dev_priv->regs);
+
+	intel_opregion_free(dev);
 
 	drm_free(dev->dev_private, sizeof(drm_i915_private_t),
 		 DRM_MEM_DRIVER);
@@ -800,6 +876,25 @@ int i915_driver_unload(struct drm_device *dev)
 	return 0;
 }
 
+int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv)
+{
+	struct drm_i915_file_private *i915_file_priv;
+
+	DRM_DEBUG("\n");
+	i915_file_priv = (struct drm_i915_file_private *)
+	    drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES);
+
+	if (!i915_file_priv)
+		return -ENOMEM;
+
+	file_priv->driver_priv = i915_file_priv;
+
+	i915_file_priv->mm.last_gem_seqno = 0;
+	i915_file_priv->mm.last_gem_throttle_seqno = 0;
+
+	return 0;
+}
+
 void i915_driver_lastclose(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
@@ -807,6 +902,8 @@ void i915_driver_lastclose(struct drm_device * dev)
 	if (!dev_priv)
 		return;
 
+	i915_gem_lastclose(dev);
+
 	if (dev_priv->agp_heap)
 		i915_mem_takedown(&(dev_priv->agp_heap));
 
@@ -819,6 +916,13 @@ void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv)
 	i915_mem_release(dev, file_priv, dev_priv->agp_heap);
 }
 
+void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv)
+{
+	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+
+	drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES);
+}
+
 struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
 	DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH),
@@ -836,7 +940,23 @@ struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF(DRM_I915_SET_VBLANK_PIPE,  i915_vblank_pipe_set, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY ),
 	DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE,  i915_vblank_pipe_get, DRM_AUTH ),
 	DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH),
-	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH),
+	DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
+	DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0),
+	DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0),
+	DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0),
+	DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0),
+	DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0),
+	DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0),
+	DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0),
+	DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0),
 };
 
 int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls);
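
For context, the new GEM entry points in the table above are reached through
plain ioctl(2) calls on the DRM device node. Below is a hedged userspace
sketch (not part of the patch) that creates a buffer object and fills it via
GEM_CREATE and GEM_PWRITE; the /dev/dri/card0 path and the <drm/i915_drm.h>
include location are assumptions about how the new uapi header is installed:

	#include <fcntl.h>
	#include <stdint.h>
	#include <string.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <drm/i915_drm.h>	/* assumed install path */

	int main(void)
	{
		int fd = open("/dev/dri/card0", O_RDWR);	/* assumed node */
		char data[4096];

		if (fd < 0)
			return 1;

		/* Ask the kernel for a 4 KiB buffer object. */
		struct drm_i915_gem_create create = { .size = sizeof(data) };
		if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create))
			return 1;

		/* Push CPU data into it through the pwrite path. */
		memset(data, 0xa5, sizeof(data));
		struct drm_i915_gem_pwrite pwrite = {
			.handle   = create.handle,
			.offset   = 0,
			.size     = sizeof(data),
			.data_ptr = (uint64_t)(uintptr_t)data,
		};
		if (ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite))
			return 1;

		close(fd);
		return 0;
	}
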

drivers/gpu/drm/i915/i915_drv.c (+14, -462)

@@ -38,211 +38,9 @@ static struct pci_device_id pciidlist[] = {
 	i915_PCI_IDS
 };
 
-enum pipe {
-    PIPE_A = 0,
-    PIPE_B,
-};
-
-static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	if (pipe == PIPE_A)
-		return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
-	else
-		return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE);
-}
-
-static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
-	u32 *array;
-	int i;
-
-	if (!i915_pipe_enabled(dev, pipe))
-		return;
-
-	if (pipe == PIPE_A)
-		array = dev_priv->save_palette_a;
-	else
-		array = dev_priv->save_palette_b;
-
-	for(i = 0; i < 256; i++)
-		array[i] = I915_READ(reg + (i << 2));
-}
-
-static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
-	u32 *array;
-	int i;
-
-	if (!i915_pipe_enabled(dev, pipe))
-		return;
-
-	if (pipe == PIPE_A)
-		array = dev_priv->save_palette_a;
-	else
-		array = dev_priv->save_palette_b;
-
-	for(i = 0; i < 256; i++)
-		I915_WRITE(reg + (i << 2), array[i]);
-}
-
-static u8 i915_read_indexed(u16 index_port, u16 data_port, u8 reg)
-{
-	outb(reg, index_port);
-	return inb(data_port);
-}
-
-static u8 i915_read_ar(u16 st01, u8 reg, u16 palette_enable)
-{
-	inb(st01);
-	outb(palette_enable | reg, VGA_AR_INDEX);
-	return inb(VGA_AR_DATA_READ);
-}
-
-static void i915_write_ar(u8 st01, u8 reg, u8 val, u16 palette_enable)
-{
-	inb(st01);
-	outb(palette_enable | reg, VGA_AR_INDEX);
-	outb(val, VGA_AR_DATA_WRITE);
-}
-
-static void i915_write_indexed(u16 index_port, u16 data_port, u8 reg, u8 val)
-{
-	outb(reg, index_port);
-	outb(val, data_port);
-}
-
-static void i915_save_vga(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int i;
-	u16 cr_index, cr_data, st01;
-
-	/* VGA color palette registers */
-	dev_priv->saveDACMASK = inb(VGA_DACMASK);
-	/* DACCRX automatically increments during read */
-	outb(0, VGA_DACRX);
-	/* Read 3 bytes of color data from each index */
-	for (i = 0; i < 256 * 3; i++)
-		dev_priv->saveDACDATA[i] = inb(VGA_DACDATA);
-
-	/* MSR bits */
-	dev_priv->saveMSR = inb(VGA_MSR_READ);
-	if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
-		cr_index = VGA_CR_INDEX_CGA;
-		cr_data = VGA_CR_DATA_CGA;
-		st01 = VGA_ST01_CGA;
-	} else {
-		cr_index = VGA_CR_INDEX_MDA;
-		cr_data = VGA_CR_DATA_MDA;
-		st01 = VGA_ST01_MDA;
-	}
-
-	/* CRT controller regs */
-	i915_write_indexed(cr_index, cr_data, 0x11,
-			   i915_read_indexed(cr_index, cr_data, 0x11) &
-			   (~0x80));
-	for (i = 0; i <= 0x24; i++)
-		dev_priv->saveCR[i] =
-			i915_read_indexed(cr_index, cr_data, i);
-	/* Make sure we don't turn off CR group 0 writes */
-	dev_priv->saveCR[0x11] &= ~0x80;
-
-	/* Attribute controller registers */
-	inb(st01);
-	dev_priv->saveAR_INDEX = inb(VGA_AR_INDEX);
-	for (i = 0; i <= 0x14; i++)
-		dev_priv->saveAR[i] = i915_read_ar(st01, i, 0);
-	inb(st01);
-	outb(dev_priv->saveAR_INDEX, VGA_AR_INDEX);
-	inb(st01);
-
-	/* Graphics controller registers */
-	for (i = 0; i < 9; i++)
-		dev_priv->saveGR[i] =
-			i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, i);
-
-	dev_priv->saveGR[0x10] =
-		i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10);
-	dev_priv->saveGR[0x11] =
-		i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11);
-	dev_priv->saveGR[0x18] =
-		i915_read_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18);
-
-	/* Sequencer registers */
-	for (i = 0; i < 8; i++)
-		dev_priv->saveSR[i] =
-			i915_read_indexed(VGA_SR_INDEX, VGA_SR_DATA, i);
-}
-
-static void i915_restore_vga(struct drm_device *dev)
-{
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int i;
-	u16 cr_index, cr_data, st01;
-
-	/* MSR bits */
-	outb(dev_priv->saveMSR, VGA_MSR_WRITE);
-	if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
-		cr_index = VGA_CR_INDEX_CGA;
-		cr_data = VGA_CR_DATA_CGA;
-		st01 = VGA_ST01_CGA;
-	} else {
-		cr_index = VGA_CR_INDEX_MDA;
-		cr_data = VGA_CR_DATA_MDA;
-		st01 = VGA_ST01_MDA;
-	}
-
-	/* Sequencer registers, don't write SR07 */
-	for (i = 0; i < 7; i++)
-		i915_write_indexed(VGA_SR_INDEX, VGA_SR_DATA, i,
-				   dev_priv->saveSR[i]);
-
-	/* CRT controller regs */
-	/* Enable CR group 0 writes */
-	i915_write_indexed(cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
-	for (i = 0; i <= 0x24; i++)
-		i915_write_indexed(cr_index, cr_data, i, dev_priv->saveCR[i]);
-
-	/* Graphics controller regs */
-	for (i = 0; i < 9; i++)
-		i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, i,
-				   dev_priv->saveGR[i]);
-
-	i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x10,
-			   dev_priv->saveGR[0x10]);
-	i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x11,
-			   dev_priv->saveGR[0x11]);
-	i915_write_indexed(VGA_GR_INDEX, VGA_GR_DATA, 0x18,
-			   dev_priv->saveGR[0x18]);
-
-	/* Attribute controller registers */
-	inb(st01);
-	for (i = 0; i <= 0x14; i++)
-		i915_write_ar(st01, i, dev_priv->saveAR[i], 0);
-	inb(st01); /* switch back to index mode */
-	outb(dev_priv->saveAR_INDEX | 0x20, VGA_AR_INDEX);
-	inb(st01);
-
-	/* VGA color palette registers */
-	outb(dev_priv->saveDACMASK, VGA_DACMASK);
-	/* DACCRX automatically increments during read */
-	outb(0, VGA_DACWX);
-	/* Read 3 bytes of color data from each index */
-	for (i = 0; i < 256 * 3; i++)
-		outb(dev_priv->saveDACDATA[i], VGA_DACDATA);
-
-}
-
 static int i915_suspend(struct drm_device *dev, pm_message_t state)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
-	int i;
 
 	if (!dev || !dev_priv) {
 		printk(KERN_ERR "dev: %p, dev_priv: %p\n", dev, dev_priv);
@@ -254,122 +52,10 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
 		return 0;
 
 	pci_save_state(dev->pdev);
-	pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
-
-	/* Display arbitration control */
-	dev_priv->saveDSPARB = I915_READ(DSPARB);
-
-	/* Pipe & plane A info */
-	dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
-	dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
-	dev_priv->saveFPA0 = I915_READ(FPA0);
-	dev_priv->saveFPA1 = I915_READ(FPA1);
-	dev_priv->saveDPLL_A = I915_READ(DPLL_A);
-	if (IS_I965G(dev))
-		dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
-	dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
-	dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
-	dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
-	dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
-	dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
-	dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
-	dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
-
-	dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
-	dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
-	dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
-	dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
-	dev_priv->saveDSPABASE = I915_READ(DSPABASE);
-	if (IS_I965G(dev)) {
-		dev_priv->saveDSPASURF = I915_READ(DSPASURF);
-		dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
-	}
-	i915_save_palette(dev, PIPE_A);
-	dev_priv->savePIPEASTAT = I915_READ(I915REG_PIPEASTAT);
-
-	/* Pipe & plane B info */
-	dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
-	dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
-	dev_priv->saveFPB0 = I915_READ(FPB0);
-	dev_priv->saveFPB1 = I915_READ(FPB1);
-	dev_priv->saveDPLL_B = I915_READ(DPLL_B);
-	if (IS_I965G(dev))
-		dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
-	dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
-	dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
-	dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
-	dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
-	dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
-	dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
-	dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
-
-	dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
-	dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
-	dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
-	dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
-	dev_priv->saveDSPBBASE = I915_READ(DSPBBASE);
-	if (IS_I965GM(dev) || IS_IGD_GM(dev)) {
-		dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
-		dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
-	}
-	i915_save_palette(dev, PIPE_B);
-	dev_priv->savePIPEBSTAT = I915_READ(I915REG_PIPEBSTAT);
 
-	/* CRT state */
-	dev_priv->saveADPA = I915_READ(ADPA);
+	i915_save_state(dev);
 
-	/* LVDS state */
-	dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
-	dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
-	dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
-	if (IS_I965G(dev))
-		dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
-	if (IS_MOBILE(dev) && !IS_I830(dev))
-		dev_priv->saveLVDS = I915_READ(LVDS);
-	if (!IS_I830(dev) && !IS_845G(dev))
-		dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
-	dev_priv->saveLVDSPP_ON = I915_READ(LVDSPP_ON);
-	dev_priv->saveLVDSPP_OFF = I915_READ(LVDSPP_OFF);
-	dev_priv->savePP_CYCLE = I915_READ(PP_CYCLE);
-
-	/* FIXME: save TV & SDVO state */
-
-	/* FBC state */
-	dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
-	dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
-	dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
-	dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
-
-	/* Interrupt state */
-	dev_priv->saveIIR = I915_READ(I915REG_INT_IDENTITY_R);
-	dev_priv->saveIER = I915_READ(I915REG_INT_ENABLE_R);
-	dev_priv->saveIMR = I915_READ(I915REG_INT_MASK_R);
-
-	/* VGA state */
-	dev_priv->saveVCLK_DIVISOR_VGA0 = I915_READ(VCLK_DIVISOR_VGA0);
-	dev_priv->saveVCLK_DIVISOR_VGA1 = I915_READ(VCLK_DIVISOR_VGA1);
-	dev_priv->saveVCLK_POST_DIV = I915_READ(VCLK_POST_DIV);
-	dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
-
-	/* Clock gating state */
-	dev_priv->saveD_STATE = I915_READ(D_STATE);
-	dev_priv->saveDSPCLK_GATE_D = I915_READ(DSPCLK_GATE_D);
-
-	/* Cache mode state */
-	dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
-
-	/* Memory Arbitration state */
-	dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
-
-	/* Scratch space */
-	for (i = 0; i < 16; i++) {
-		dev_priv->saveSWF0[i] = I915_READ(SWF0 + (i << 2));
-		dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
-	}
-	for (i = 0; i < 3; i++)
-		dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
-
-	i915_save_vga(dev);
+	intel_opregion_free(dev);
 
 	if (state.event == PM_EVENT_SUSPEND) {
 		/* Shut down the device */
@@ -382,155 +68,15 @@ static int i915_suspend(struct drm_device *dev, pm_message_t state)
 
 static int i915_resume(struct drm_device *dev)
 {
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	int i;
-
 	pci_set_power_state(dev->pdev, PCI_D0);
 	pci_restore_state(dev->pdev);
 	if (pci_enable_device(dev->pdev))
 		return -1;
 	pci_set_master(dev->pdev);
 
-	pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
-
-	I915_WRITE(DSPARB, dev_priv->saveDSPARB);
-
-	/* Pipe & plane A info */
-	/* Prime the clock */
-	if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
-		I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
-			   ~DPLL_VCO_ENABLE);
-		udelay(150);
-	}
-	I915_WRITE(FPA0, dev_priv->saveFPA0);
-	I915_WRITE(FPA1, dev_priv->saveFPA1);
-	/* Actually enable it */
-	I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
-	udelay(150);
-	if (IS_I965G(dev))
-		I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
-	udelay(150);
-
-	/* Restore mode */
-	I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
-	I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
-	I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
-	I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
-	I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
-	I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
-	I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
-
-	/* Restore plane info */
-	I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
-	I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
-	I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
-	I915_WRITE(DSPABASE, dev_priv->saveDSPABASE);
-	I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
-	if (IS_I965G(dev)) {
-		I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
-		I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
-	}
-
-	I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
-
-	i915_restore_palette(dev, PIPE_A);
-	/* Enable the plane */
-	I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
-	I915_WRITE(DSPABASE, I915_READ(DSPABASE));
-
-	/* Pipe & plane B info */
-	if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
-		I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
-			   ~DPLL_VCO_ENABLE);
-		udelay(150);
-	}
-	I915_WRITE(FPB0, dev_priv->saveFPB0);
-	I915_WRITE(FPB1, dev_priv->saveFPB1);
-	/* Actually enable it */
-	I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
-	udelay(150);
-	if (IS_I965G(dev))
-		I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
-	udelay(150);
-
-	/* Restore mode */
-	I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
-	I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
-	I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
-	I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
-	I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
-	I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
-	I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
-
-	/* Restore plane info */
-	I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
-	I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
-	I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
-	I915_WRITE(DSPBBASE, dev_priv->saveDSPBBASE);
-	I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
-	if (IS_I965G(dev)) {
-		I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
-		I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
-	}
-
-	I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
-
-	i915_restore_palette(dev, PIPE_B);
-	/* Enable the plane */
-	I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
-	I915_WRITE(DSPBBASE, I915_READ(DSPBBASE));
-
-	/* CRT state */
-	I915_WRITE(ADPA, dev_priv->saveADPA);
-
-	/* LVDS state */
-	if (IS_I965G(dev))
-		I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
-	if (IS_MOBILE(dev) && !IS_I830(dev))
-		I915_WRITE(LVDS, dev_priv->saveLVDS);
-	if (!IS_I830(dev) && !IS_845G(dev))
-		I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
-
-	I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
-	I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
-	I915_WRITE(LVDSPP_ON, dev_priv->saveLVDSPP_ON);
-	I915_WRITE(LVDSPP_OFF, dev_priv->saveLVDSPP_OFF);
-	I915_WRITE(PP_CYCLE, dev_priv->savePP_CYCLE);
-	I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
-
-	/* FIXME: restore TV & SDVO state */
-
-	/* FBC info */
-	I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
-	I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
-	I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
-	I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
-
-	/* VGA state */
-	I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
-	I915_WRITE(VCLK_DIVISOR_VGA0, dev_priv->saveVCLK_DIVISOR_VGA0);
-	I915_WRITE(VCLK_DIVISOR_VGA1, dev_priv->saveVCLK_DIVISOR_VGA1);
-	I915_WRITE(VCLK_POST_DIV, dev_priv->saveVCLK_POST_DIV);
-	udelay(150);
-
-	/* Clock gating state */
-	I915_WRITE (D_STATE, dev_priv->saveD_STATE);
-	I915_WRITE (DSPCLK_GATE_D, dev_priv->saveDSPCLK_GATE_D);
-
-	/* Cache mode state */
-	I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
-
-	/* Memory arbitration state */
-	I915_WRITE (MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
-
-	for (i = 0; i < 16; i++) {
-		I915_WRITE(SWF0 + (i << 2), dev_priv->saveSWF0[i]);
-		I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i+7]);
-	}
-	for (i = 0; i < 3; i++)
-		I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
+	i915_restore_state(dev);
 
-	i915_restore_vga(dev);
+	intel_opregion_init(dev);
 
 	return 0;
 }
@@ -541,17 +87,19 @@ static struct drm_driver driver = {
 	 */
 	.driver_features =
 	    DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/
-	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL |
-	    DRIVER_IRQ_VBL2,
+	    DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM,
 	.load = i915_driver_load,
 	.unload = i915_driver_unload,
+	.open = i915_driver_open,
 	.lastclose = i915_driver_lastclose,
 	.preclose = i915_driver_preclose,
+	.postclose = i915_driver_postclose,
 	.suspend = i915_suspend,
 	.resume = i915_resume,
 	.device_is_agp = i915_driver_device_is_agp,
-	.vblank_wait = i915_driver_vblank_wait,
-	.vblank_wait2 = i915_driver_vblank_wait2,
+	.get_vblank_counter = i915_get_vblank_counter,
+	.enable_vblank = i915_enable_vblank,
+	.disable_vblank = i915_disable_vblank,
 	.irq_preinstall = i915_driver_irq_preinstall,
 	.irq_postinstall = i915_driver_irq_postinstall,
 	.irq_uninstall = i915_driver_irq_uninstall,
@@ -559,6 +107,10 @@ static struct drm_driver driver = {
 	.reclaim_buffers = drm_core_reclaim_buffers,
 	.get_map_ofs = drm_core_get_map_ofs,
 	.get_reg_ofs = drm_core_get_reg_ofs,
+	.proc_init = i915_gem_proc_init,
+	.proc_cleanup = i915_gem_proc_cleanup,
+	.gem_init_object = i915_gem_init_object,
+	.gem_free_object = i915_gem_free_object,
 	.ioctls = i915_ioctls,
 	.fops = {
 		 .owner = THIS_MODULE,

drivers/gpu/drm/i915/i915_drv.h (+347, -827): file diff suppressed because it is too large


drivers/gpu/drm/i915/i915_gem.c (+2558, -0, new file)

@@ -0,0 +1,2558 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include <linux/swap.h>
+
+static int
+i915_gem_object_set_domain(struct drm_gem_object *obj,
+			    uint32_t read_domains,
+			    uint32_t write_domain);
+static int
+i915_gem_object_set_domain_range(struct drm_gem_object *obj,
+				 uint64_t offset,
+				 uint64_t size,
+				 uint32_t read_domains,
+				 uint32_t write_domain);
+static int
+i915_gem_set_domain(struct drm_gem_object *obj,
+		    struct drm_file *file_priv,
+		    uint32_t read_domains,
+		    uint32_t write_domain);
+static int i915_gem_object_get_page_list(struct drm_gem_object *obj);
+static void i915_gem_object_free_page_list(struct drm_gem_object *obj);
+static int i915_gem_object_wait_rendering(struct drm_gem_object *obj);
+
+static void
+i915_gem_cleanup_ringbuffer(struct drm_device *dev);
+
+int
+i915_gem_init_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_init *args = data;
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (args->gtt_start >= args->gtt_end ||
+	    (args->gtt_start & (PAGE_SIZE - 1)) != 0 ||
+	    (args->gtt_end & (PAGE_SIZE - 1)) != 0) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EINVAL;
+	}
+
+	drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start,
+	    args->gtt_end - args->gtt_start);
+
+	dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start);
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
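
The gtt_start/gtt_end sanity check above leans on the usual power-of-two
masking idiom. A standalone illustration (plain userspace C, nothing
driver-specific; 4096 stands in for PAGE_SIZE):

	#include <assert.h>

	#define PAGE_SIZE 4096	/* assumed page size for the demo */

	int main(void)
	{
		/* (x & (PAGE_SIZE - 1)) is x % PAGE_SIZE when PAGE_SIZE is
		 * a power of two: zero iff x is page aligned. */
		assert((8192 & (PAGE_SIZE - 1)) == 0);	/* aligned */
		assert((8200 & (PAGE_SIZE - 1)) != 0);	/* misaligned */
		return 0;
	}
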
+
+
+/**
+ * Creates a new mm object and returns a handle to it.
+ */
+int
+i915_gem_create_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv)
+{
+	struct drm_i915_gem_create *args = data;
+	struct drm_gem_object *obj;
+	int handle, ret;
+
+	args->size = roundup(args->size, PAGE_SIZE);
+
+	/* Allocate the new object */
+	obj = drm_gem_object_alloc(dev, args->size);
+	if (obj == NULL)
+		return -ENOMEM;
+
+	ret = drm_gem_handle_create(file_priv, obj, &handle);
+	mutex_lock(&dev->struct_mutex);
+	drm_gem_object_handle_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	if (ret)
+		return ret;
+
+	args->handle = handle;
+
+	return 0;
+}
+
+/**
+ * Reads data from the object referenced by handle.
+ *
+ * On error, the contents of *data are undefined.
+ */
+int
+i915_gem_pread_ioctl(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv)
+{
+	struct drm_i915_gem_pread *args = data;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+	ssize_t read;
+	loff_t offset;
+	int ret;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL)
+		return -EBADF;
+	obj_priv = obj->driver_private;
+
+	/* Bounds check source.
+	 *
+	 * XXX: This could use review for overflow issues...
+	 */
+	if (args->offset > obj->size || args->size > obj->size ||
+	    args->offset + args->size > obj->size) {
+		drm_gem_object_unreference(obj);
+		return -EINVAL;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+
+	ret = i915_gem_object_set_domain_range(obj, args->offset, args->size,
+					       I915_GEM_DOMAIN_CPU, 0);
+	if (ret != 0) {
+		drm_gem_object_unreference(obj);
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
+	offset = args->offset;
+
+	read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr,
+			args->size, &offset);
+	if (read != args->size) {
+		drm_gem_object_unreference(obj);
+		mutex_unlock(&dev->struct_mutex);
+		if (read < 0)
+			return read;
+		else
+			return -EINVAL;
+	}
+
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static int
+i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+		    struct drm_i915_gem_pwrite *args,
+		    struct drm_file *file_priv)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	ssize_t remain;
+	loff_t offset;
+	char __user *user_data;
+	char __iomem *vaddr;
+	char *vaddr_atomic;
+	int i, o, l;
+	int ret = 0;
+	unsigned long pfn;
+	unsigned long unwritten;
+
+	user_data = (char __user *) (uintptr_t) args->data_ptr;
+	remain = args->size;
+	if (!access_ok(VERIFY_READ, user_data, remain))
+		return -EFAULT;
+
+	mutex_lock(&dev->struct_mutex);
+	ret = i915_gem_object_pin(obj, 0);
+	if (ret) {
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+	ret = i915_gem_set_domain(obj, file_priv,
+				  I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT);
+	if (ret)
+		goto fail;
+
+	obj_priv = obj->driver_private;
+	offset = obj_priv->gtt_offset + args->offset;
+	obj_priv->dirty = 1;
+
+	while (remain > 0) {
+		/* Operation in this page
+		 *
+		 * i = page number
+		 * o = offset within page
+		 * l = bytes to copy
+		 */
+		i = offset >> PAGE_SHIFT;
+		o = offset & (PAGE_SIZE-1);
+		l = remain;
+		if ((o + l) > PAGE_SIZE)
+			l = PAGE_SIZE - o;
+
+		pfn = (dev->agp->base >> PAGE_SHIFT) + i;
+
+#ifdef CONFIG_HIGHMEM
+		/* This is a workaround for the low performance of iounmap
+		 * (approximately 10% CPU cost on normal 3D workloads).
+		 * kmap_atomic on HIGHMEM kernels happens to let us map card
+		 * memory without taking IPIs.  When the vmap rework lands
+		 * we should be able to dump this hack.
+		 */
+		vaddr_atomic = kmap_atomic_pfn(pfn, KM_USER0);
+#if WATCH_PWRITE
+		DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n",
+			 i, o, l, pfn, vaddr_atomic);
+#endif
+		unwritten = __copy_from_user_inatomic_nocache(vaddr_atomic + o,
+							      user_data, l);
+		kunmap_atomic(vaddr_atomic, KM_USER0);
+
+		if (unwritten)
+#endif /* CONFIG_HIGHMEM */
+		{
+			vaddr = ioremap_wc(pfn << PAGE_SHIFT, PAGE_SIZE);
+#if WATCH_PWRITE
+			DRM_INFO("pwrite slow i %d o %d l %d "
+				 "pfn %ld vaddr %p\n",
+				 i, o, l, pfn, vaddr);
+#endif
+			if (vaddr == NULL) {
+				ret = -EFAULT;
+				goto fail;
+			}
+			unwritten = __copy_from_user(vaddr + o, user_data, l);
+#if WATCH_PWRITE
+			DRM_INFO("unwritten %ld\n", unwritten);
+#endif
+			iounmap(vaddr);
+			if (unwritten) {
+				ret = -EFAULT;
+				goto fail;
+			}
+		}
+
+		remain -= l;
+		user_data += l;
+		offset += l;
+	}
+#if WATCH_PWRITE && 1
+	i915_gem_clflush_object(obj);
+	i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0);
+	i915_gem_clflush_object(obj);
+#endif
+
+fail:
+	i915_gem_object_unpin(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return ret;
+}
+
+static int
+i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+		      struct drm_i915_gem_pwrite *args,
+		      struct drm_file *file_priv)
+{
+	int ret;
+	loff_t offset;
+	ssize_t written;
+
+	mutex_lock(&dev->struct_mutex);
+
+	ret = i915_gem_set_domain(obj, file_priv,
+				  I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU);
+	if (ret) {
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
+	offset = args->offset;
+
+	written = vfs_write(obj->filp,
+			    (char __user *)(uintptr_t) args->data_ptr,
+			    args->size, &offset);
+	if (written != args->size) {
+		mutex_unlock(&dev->struct_mutex);
+		if (written < 0)
+			return written;
+		else
+			return -EINVAL;
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+/**
+ * Writes data to the object referenced by handle.
+ *
+ * On error, the contents of the buffer that were to be modified are undefined.
+ */
+int
+i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv)
+{
+	struct drm_i915_gem_pwrite *args = data;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+	int ret = 0;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL)
+		return -EBADF;
+	obj_priv = obj->driver_private;
+
+	/* Bounds check destination.
+	 *
+	 * XXX: This could use review for overflow issues...
+	 */
+	if (args->offset > obj->size || args->size > obj->size ||
+	    args->offset + args->size > obj->size) {
+		drm_gem_object_unreference(obj);
+		return -EINVAL;
+	}
+
+	/* We can only do the GTT pwrite on untiled buffers, as otherwise
+	 * it would end up going through the fenced access, and we'll get
+	 * different detiling behavior between reading and writing.
+	 * pread/pwrite currently are reading and writing from the CPU
+	 * perspective, requiring manual detiling by the client.
+	 */
+	if (obj_priv->tiling_mode == I915_TILING_NONE &&
+	    dev->gtt_total != 0)
+		ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv);
+	else
+		ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv);
+
+#if WATCH_PWRITE
+	if (ret)
+		DRM_INFO("pwrite failed %d\n", ret);
+#endif
+
+	drm_gem_object_unreference(obj);
+
+	return ret;
+}
+
+/**
+ * Called when user space prepares to use an object
+ */
+int
+i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
+			  struct drm_file *file_priv)
+{
+	struct drm_i915_gem_set_domain *args = data;
+	struct drm_gem_object *obj;
+	int ret;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL)
+		return -EBADF;
+
+	mutex_lock(&dev->struct_mutex);
+#if WATCH_BUF
+	DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n",
+		 obj, obj->size, args->read_domains, args->write_domain);
+#endif
+	ret = i915_gem_set_domain(obj, file_priv,
+				  args->read_domains, args->write_domain);
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+/**
+ * Called when user space has done writes to this buffer
+ */
+int
+i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv)
+{
+	struct drm_i915_gem_sw_finish *args = data;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+	int ret = 0;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	mutex_lock(&dev->struct_mutex);
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EBADF;
+	}
+
+#if WATCH_BUF
+	DRM_INFO("%s: sw_finish %d (%p %d)\n",
+		 __func__, args->handle, obj, obj->size);
+#endif
+	obj_priv = obj->driver_private;
+
+	/* Pinned buffers may be scanout, so flush the cache */
+	if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) {
+		i915_gem_clflush_object(obj);
+		drm_agp_chipset_flush(dev);
+	}
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+/**
+ * Maps the contents of an object, returning the address it is mapped
+ * into.
+ *
+ * While the mapping holds a reference on the contents of the object, it doesn't
+ * imply a ref on the object itself.
+ */
+int
+i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
+		   struct drm_file *file_priv)
+{
+	struct drm_i915_gem_mmap *args = data;
+	struct drm_gem_object *obj;
+	loff_t offset;
+	unsigned long addr;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL)
+		return -EBADF;
+
+	offset = args->offset;
+
+	down_write(&current->mm->mmap_sem);
+	addr = do_mmap(obj->filp, 0, args->size,
+		       PROT_READ | PROT_WRITE, MAP_SHARED,
+		       args->offset);
+	up_write(&current->mm->mmap_sem);
+	mutex_lock(&dev->struct_mutex);
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+	if (IS_ERR((void *)addr))
+		return addr;
+
+	args->addr_ptr = (uint64_t) addr;
+
+	return 0;
+}
+
+static void
+i915_gem_object_free_page_list(struct drm_gem_object *obj)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int page_count = obj->size / PAGE_SIZE;
+	int i;
+
+	if (obj_priv->page_list == NULL)
+		return;
+
+	for (i = 0; i < page_count; i++)
+		if (obj_priv->page_list[i] != NULL) {
+			if (obj_priv->dirty)
+				set_page_dirty(obj_priv->page_list[i]);
+			mark_page_accessed(obj_priv->page_list[i]);
+			page_cache_release(obj_priv->page_list[i]);
+		}
+	obj_priv->dirty = 0;
+
+	drm_free(obj_priv->page_list,
+		 page_count * sizeof(struct page *),
+		 DRM_MEM_DRIVER);
+	obj_priv->page_list = NULL;
+}
+
+static void
+i915_gem_object_move_to_active(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+	/* Add a reference if we're newly entering the active list. */
+	if (!obj_priv->active) {
+		drm_gem_object_reference(obj);
+		obj_priv->active = 1;
+	}
+	/* Move from whatever list we were on to the tail of execution. */
+	list_move_tail(&obj_priv->list,
+		       &dev_priv->mm.active_list);
+}
+
+
+static void
+i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+	i915_verify_inactive(dev, __FILE__, __LINE__);
+	if (obj_priv->pin_count != 0)
+		list_del_init(&obj_priv->list);
+	else
+		list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list);
+
+	if (obj_priv->active) {
+		obj_priv->active = 0;
+		drm_gem_object_unreference(obj);
+	}
+	i915_verify_inactive(dev, __FILE__, __LINE__);
+}
+
+/**
+ * Creates a new sequence number, emitting a write of it to the status page
+ * plus an interrupt, which will trigger i915_user_interrupt_handler.
+ *
+ * Must be called with struct_lock held.
+ *
+ * Returned sequence numbers are nonzero on success.
+ */
+static uint32_t
+i915_add_request(struct drm_device *dev, uint32_t flush_domains)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_request *request;
+	uint32_t seqno;
+	int was_empty;
+	RING_LOCALS;
+
+	request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER);
+	if (request == NULL)
+		return 0;
+
+	/* Grab the seqno we're going to make this request be, and bump the
+	 * next (skipping 0 so it can be the reserved no-seqno value).
+	 */
+	seqno = dev_priv->mm.next_gem_seqno;
+	dev_priv->mm.next_gem_seqno++;
+	if (dev_priv->mm.next_gem_seqno == 0)
+		dev_priv->mm.next_gem_seqno++;
+
+	BEGIN_LP_RING(4);
+	OUT_RING(MI_STORE_DWORD_INDEX);
+	OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+	OUT_RING(seqno);
+
+	OUT_RING(MI_USER_INTERRUPT);
+	ADVANCE_LP_RING();
+
+	DRM_DEBUG("%d\n", seqno);
+
+	request->seqno = seqno;
+	request->emitted_jiffies = jiffies;
+	request->flush_domains = flush_domains;
+	was_empty = list_empty(&dev_priv->mm.request_list);
+	list_add_tail(&request->list, &dev_priv->mm.request_list);
+
+	if (was_empty && !dev_priv->mm.suspended)
+		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
+	return seqno;
+}
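
Note how the allocator above hands out the current value and then bumps the
counter past zero, keeping 0 free as the reserved "no seqno" sentinel that
i915_wait_request() BUG_ONs against. A compact standalone equivalent of the
increment (sketch, not driver code):

	#include <stdint.h>

	static uint32_t take_seqno(uint32_t *next)
	{
		uint32_t seqno = (*next)++;

		if (*next == 0)		/* skip the reserved no-seqno value */
			(*next)++;
		return seqno;
	}
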
+
+/**
+ * Command execution barrier
+ *
+ * Ensures that all commands in the ring are finished
+ * before signalling the CPU
+ */
+static uint32_t
+i915_retire_commands(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+	uint32_t flush_domains = 0;
+	RING_LOCALS;
+
+	/* The sampler always gets flushed on i965 (sigh) */
+	if (IS_I965G(dev))
+		flush_domains |= I915_GEM_DOMAIN_SAMPLER;
+	BEGIN_LP_RING(2);
+	OUT_RING(cmd);
+	OUT_RING(0); /* noop */
+	ADVANCE_LP_RING();
+	return flush_domains;
+}
+
+/**
+ * Moves buffers associated only with the given active seqno from the active
+ * to inactive list, potentially freeing them.
+ */
+static void
+i915_gem_retire_request(struct drm_device *dev,
+			struct drm_i915_gem_request *request)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	/* Move any buffers on the active list that are no longer referenced
+	 * by the ringbuffer to the flushing/inactive lists as appropriate.
+	 */
+	while (!list_empty(&dev_priv->mm.active_list)) {
+		struct drm_gem_object *obj;
+		struct drm_i915_gem_object *obj_priv;
+
+		obj_priv = list_first_entry(&dev_priv->mm.active_list,
+					    struct drm_i915_gem_object,
+					    list);
+		obj = obj_priv->obj;
+
+		/* If the seqno being retired doesn't match the oldest in the
+		 * list, then the oldest in the list must still be newer than
+		 * this seqno.
+		 */
+		if (obj_priv->last_rendering_seqno != request->seqno)
+			return;
+#if WATCH_LRU
+		DRM_INFO("%s: retire %d moves to inactive list %p\n",
+			 __func__, request->seqno, obj);
+#endif
+
+		if (obj->write_domain != 0) {
+			list_move_tail(&obj_priv->list,
+				       &dev_priv->mm.flushing_list);
+		} else {
+			i915_gem_object_move_to_inactive(obj);
+		}
+	}
+
+	if (request->flush_domains != 0) {
+		struct drm_i915_gem_object *obj_priv, *next;
+
+		/* Clear the write domain and activity from any buffers
+		 * that are just waiting for a flush matching the one retired.
+		 */
+		list_for_each_entry_safe(obj_priv, next,
+					 &dev_priv->mm.flushing_list, list) {
+			struct drm_gem_object *obj = obj_priv->obj;
+
+			if (obj->write_domain & request->flush_domains) {
+				obj->write_domain = 0;
+				i915_gem_object_move_to_inactive(obj);
+			}
+		}
+	}
+}
+
+/**
+ * Returns true if seq1 is later than seq2.
+ */
+static int
+i915_seqno_passed(uint32_t seq1, uint32_t seq2)
+{
+	return (int32_t)(seq1 - seq2) >= 0;
+}
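
The signed cast above is the standard wraparound-safe sequence comparison
(the same idea as RFC 1982 serial arithmetic): it stays correct as long as
the two seqnos are within 2^31 of each other. A standalone demonstration:

	#include <assert.h>
	#include <stdint.h>

	static int seqno_passed(uint32_t seq1, uint32_t seq2)
	{
		/* Non-negative signed difference means seq1 is at or
		 * after seq2, modulo 2^32. */
		return (int32_t)(seq1 - seq2) >= 0;
	}

	int main(void)
	{
		assert(seqno_passed(5, 5));
		assert(seqno_passed(10, 5));
		assert(seqno_passed(3, 0xfffffffeu));	/* across the wrap */
		assert(!seqno_passed(0xfffffffeu, 3));
		return 0;
	}
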
+
+uint32_t
+i915_get_gem_seqno(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX);
+}
+
+/**
+ * This function clears the request list as sequence numbers are passed.
+ */
+void
+i915_gem_retire_requests(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	uint32_t seqno;
+
+	seqno = i915_get_gem_seqno(dev);
+
+	while (!list_empty(&dev_priv->mm.request_list)) {
+		struct drm_i915_gem_request *request;
+		uint32_t retiring_seqno;
+
+		request = list_first_entry(&dev_priv->mm.request_list,
+					   struct drm_i915_gem_request,
+					   list);
+		retiring_seqno = request->seqno;
+
+		if (i915_seqno_passed(seqno, retiring_seqno) ||
+		    dev_priv->mm.wedged) {
+			i915_gem_retire_request(dev, request);
+
+			list_del(&request->list);
+			drm_free(request, sizeof(*request), DRM_MEM_DRIVER);
+		} else
+			break;
+	}
+}
+
+void
+i915_gem_retire_work_handler(struct work_struct *work)
+{
+	drm_i915_private_t *dev_priv;
+	struct drm_device *dev;
+
+	dev_priv = container_of(work, drm_i915_private_t,
+				mm.retire_work.work);
+	dev = dev_priv->dev;
+
+	mutex_lock(&dev->struct_mutex);
+	i915_gem_retire_requests(dev);
+	if (!dev_priv->mm.suspended &&
+	    !list_empty(&dev_priv->mm.request_list))
+		schedule_delayed_work(&dev_priv->mm.retire_work, HZ);
+	mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * Waits for a sequence number to be signaled, and cleans up the
+ * request and object lists appropriately for that event.
+ */
+static int
+i915_wait_request(struct drm_device *dev, uint32_t seqno)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret = 0;
+
+	BUG_ON(seqno == 0);
+
+	if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) {
+		dev_priv->mm.waiting_gem_seqno = seqno;
+		i915_user_irq_get(dev);
+		ret = wait_event_interruptible(dev_priv->irq_queue,
+					       i915_seqno_passed(i915_get_gem_seqno(dev),
+								 seqno) ||
+					       dev_priv->mm.wedged);
+		i915_user_irq_put(dev);
+		dev_priv->mm.waiting_gem_seqno = 0;
+	}
+	if (dev_priv->mm.wedged)
+		ret = -EIO;
+
+	if (ret && ret != -ERESTARTSYS)
+		DRM_ERROR("%s returns %d (awaiting %d at %d)\n",
+			  __func__, ret, seqno, i915_get_gem_seqno(dev));
+
+	/* Directly dispatch request retiring.  While we have the work queue
+	 * to handle this, the waiter on a request often wants an associated
+	 * buffer to have made it to the inactive list, and we would need
+	 * a separate wait queue to handle that.
+	 */
+	if (ret == 0)
+		i915_gem_retire_requests(dev);
+
+	return ret;
+}
+
+static void
+i915_gem_flush(struct drm_device *dev,
+	       uint32_t invalidate_domains,
+	       uint32_t flush_domains)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	uint32_t cmd;
+	RING_LOCALS;
+
+#if WATCH_EXEC
+	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
+		  invalidate_domains, flush_domains);
+#endif
+
+	if (flush_domains & I915_GEM_DOMAIN_CPU)
+		drm_agp_chipset_flush(dev);
+
+	if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU |
+						     I915_GEM_DOMAIN_GTT)) {
+		/*
+		 * read/write caches:
+		 *
+		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
+		 * only flushed if MI_NO_WRITE_FLUSH is unset.  On 965, it is
+		 * also flushed at 2d versus 3d pipeline switches.
+		 *
+		 * read-only caches:
+		 *
+		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
+		 * MI_READ_FLUSH is set, and is always flushed on 965.
+		 *
+		 * I915_GEM_DOMAIN_COMMAND may not exist?
+		 *
+		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
+		 * invalidated when MI_EXE_FLUSH is set.
+		 *
+		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
+		 * invalidated with every MI_FLUSH.
+		 *
+		 * TLBs:
+		 *
+		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
+		 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write, and
+		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
+		 * are flushed at any MI_FLUSH.
+		 */
+
+		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
+		if ((invalidate_domains|flush_domains) &
+		    I915_GEM_DOMAIN_RENDER)
+			cmd &= ~MI_NO_WRITE_FLUSH;
+		if (!IS_I965G(dev)) {
+			/*
+			 * On the 965, the sampler cache always gets flushed
+			 * and this bit is reserved.
+			 */
+			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
+				cmd |= MI_READ_FLUSH;
+		}
+		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
+			cmd |= MI_EXE_FLUSH;
+
+#if WATCH_EXEC
+		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
+#endif
+		BEGIN_LP_RING(2);
+		OUT_RING(cmd);
+		OUT_RING(0); /* noop */
+		ADVANCE_LP_RING();
+	}
+}
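
To make the cache table above concrete, here is the pre/post-965 command-word
selection restated as a standalone function. The bit values below are
illustrative placeholders, not the real MI_* / I915_GEM_DOMAIN_* constants
from this series; only the selection logic mirrors the driver:

	#include <stdint.h>
	#include <stdio.h>

	#define MI_FLUSH		(1u << 8)	/* placeholder values */
	#define MI_READ_FLUSH		(1u << 0)
	#define MI_EXE_FLUSH		(1u << 1)
	#define MI_NO_WRITE_FLUSH	(1u << 2)
	#define DOM_RENDER		(1u << 16)
	#define DOM_SAMPLER		(1u << 17)
	#define DOM_INSTRUCTION		(1u << 18)

	static uint32_t flush_cmd(uint32_t invalidate, uint32_t flush,
				  int is_965)
	{
		uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;

		if ((invalidate | flush) & DOM_RENDER)
			cmd &= ~MI_NO_WRITE_FLUSH; /* render writes must land */
		if (!is_965 && (invalidate & DOM_SAMPLER))
			cmd |= MI_READ_FLUSH;	/* 965 flushes sampler anyway */
		if (invalidate & DOM_INSTRUCTION)
			cmd |= MI_EXE_FLUSH;
		return cmd;
	}

	int main(void)
	{
		/* Invalidate the sampler while flushing render, pre-965. */
		printf("cmd = %#x\n", flush_cmd(DOM_SAMPLER, DOM_RENDER, 0));
		return 0;
	}
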
+
+/**
+ * Ensures that all rendering to the object has completed and the object is
+ * safe to unbind from the GTT or access from the CPU.
+ */
+static int
+i915_gem_object_wait_rendering(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int ret;
+
+	/* If there are writes queued to the buffer, flush and
+	 * create a new seqno to wait for.
+	 */
+	if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) {
+		uint32_t write_domain = obj->write_domain;
+#if WATCH_BUF
+		DRM_INFO("%s: flushing object %p from write domain %08x\n",
+			  __func__, obj, write_domain);
+#endif
+		i915_gem_flush(dev, 0, write_domain);
+
+		i915_gem_object_move_to_active(obj);
+		obj_priv->last_rendering_seqno = i915_add_request(dev,
+								  write_domain);
+		BUG_ON(obj_priv->last_rendering_seqno == 0);
+#if WATCH_LRU
+		DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj);
+#endif
+	}
+
+	/* If there is rendering queued on the buffer being evicted, wait for
+	 * it.
+	 */
+	if (obj_priv->active) {
+#if WATCH_BUF
+		DRM_INFO("%s: object %p wait for seqno %08x\n",
+			  __func__, obj, obj_priv->last_rendering_seqno);
+#endif
+		ret = i915_wait_request(dev, obj_priv->last_rendering_seqno);
+		if (ret != 0)
+			return ret;
+	}
+
+	return 0;
+}
+
+/**
+ * Unbinds an object from the GTT aperture.
+ */
+static int
+i915_gem_object_unbind(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int ret = 0;
+
+#if WATCH_BUF
+	DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj);
+	DRM_INFO("gtt_space %p\n", obj_priv->gtt_space);
+#endif
+	if (obj_priv->gtt_space == NULL)
+		return 0;
+
+	if (obj_priv->pin_count != 0) {
+		DRM_ERROR("Attempting to unbind pinned buffer\n");
+		return -EINVAL;
+	}
+
+	/* Wait for any rendering to complete
+	 */
+	ret = i915_gem_object_wait_rendering(obj);
+	if (ret) {
+		DRM_ERROR("wait_rendering failed: %d\n", ret);
+		return ret;
+	}
+
+	/* Move the object to the CPU domain to ensure that
+	 * any possible CPU writes while it's not in the GTT
+	 * are flushed when we go to remap it. This will
+	 * also ensure that all pending GPU writes are finished
+	 * before we unbind.
+	 */
+	ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU,
+					 I915_GEM_DOMAIN_CPU);
+	if (ret) {
+		DRM_ERROR("set_domain failed: %d\n", ret);
+		return ret;
+	}
+
+	if (obj_priv->agp_mem != NULL) {
+		drm_unbind_agp(obj_priv->agp_mem);
+		drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
+		obj_priv->agp_mem = NULL;
+	}
+
+	BUG_ON(obj_priv->active);
+
+	i915_gem_object_free_page_list(obj);
+
+	if (obj_priv->gtt_space) {
+		atomic_dec(&dev->gtt_count);
+		atomic_sub(obj->size, &dev->gtt_memory);
+
+		drm_mm_put_block(obj_priv->gtt_space);
+		obj_priv->gtt_space = NULL;
+	}
+
+	/* Remove ourselves from the LRU list if present. */
+	if (!list_empty(&obj_priv->list))
+		list_del_init(&obj_priv->list);
+
+	return 0;
+}
+
+static int
+i915_gem_evict_something(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+	int ret = 0;
+
+	for (;;) {
+		/* If there's an inactive buffer available now, grab it
+		 * and be done.
+		 */
+		if (!list_empty(&dev_priv->mm.inactive_list)) {
+			obj_priv = list_first_entry(&dev_priv->mm.inactive_list,
+						    struct drm_i915_gem_object,
+						    list);
+			obj = obj_priv->obj;
+			BUG_ON(obj_priv->pin_count != 0);
+#if WATCH_LRU
+			DRM_INFO("%s: evicting %p\n", __func__, obj);
+#endif
+			BUG_ON(obj_priv->active);
+
+			/* Wait on the rendering and unbind the buffer. */
+			ret = i915_gem_object_unbind(obj);
+			break;
+		}
+
+		/* If we didn't get anything, but the ring is still processing
+		 * things, wait for one of those things to finish and hopefully
+		 * leave us a buffer to evict.
+		 */
+		if (!list_empty(&dev_priv->mm.request_list)) {
+			struct drm_i915_gem_request *request;
+
+			request = list_first_entry(&dev_priv->mm.request_list,
+						   struct drm_i915_gem_request,
+						   list);
+
+			ret = i915_wait_request(dev, request->seqno);
+			if (ret)
+				break;
+
+			/* if waiting caused an object to become inactive,
+			 * then loop around and evict it. Otherwise, we
+			 * assume that waiting freed and unbound something,
+			 * so there should now be some space in the GTT
+			 */
+			if (!list_empty(&dev_priv->mm.inactive_list))
+				continue;
+			break;
+		}
+
+		/* If we didn't have anything on the request list but there
+		 * are buffers awaiting a flush, emit one and try again.
+		 * When we wait on it, those buffers waiting for that flush
+		 * will get moved to inactive.
+		 */
+		if (!list_empty(&dev_priv->mm.flushing_list)) {
+			obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
+						    struct drm_i915_gem_object,
+						    list);
+			obj = obj_priv->obj;
+
+			i915_gem_flush(dev,
+				       obj->write_domain,
+				       obj->write_domain);
+			i915_add_request(dev, obj->write_domain);
+
+			obj = NULL;
+			continue;
+		}
+
+		DRM_ERROR("inactive empty %d request empty %d "
+			  "flushing empty %d\n",
+			  list_empty(&dev_priv->mm.inactive_list),
+			  list_empty(&dev_priv->mm.request_list),
+			  list_empty(&dev_priv->mm.flushing_list));
+		/* If we didn't do any of the above, there's nothing to be done
+		 * and we just can't fit it in.
+		 */
+		return -ENOMEM;
+	}
+	return ret;
+}
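
The retry loop above is easier to see as a strict priority list. A hedged
restatement as a standalone decision function (names hypothetical, not
driver code):

	enum evict_action { EVICT_INACTIVE, WAIT_REQUEST, EMIT_FLUSH, GIVE_UP };

	static enum evict_action pick_eviction_step(int have_inactive,
						    int have_requests,
						    int have_flushing)
	{
		if (have_inactive)
			return EVICT_INACTIVE;	/* unbind an idle buffer now */
		if (have_requests)
			return WAIT_REQUEST;	/* let the GPU retire work, retry */
		if (have_flushing)
			return EMIT_FLUSH;	/* flush writes so buffers retire */
		return GIVE_UP;			/* nothing reclaimable: -ENOMEM */
	}
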
+
+static int
+i915_gem_object_get_page_list(struct drm_gem_object *obj)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int page_count, i;
+	struct address_space *mapping;
+	struct inode *inode;
+	struct page *page;
+	int ret;
+
+	if (obj_priv->page_list)
+		return 0;
+
+	/* Get the list of pages out of our struct file.  They'll be pinned
+	 * at this point until we release them.
+	 */
+	page_count = obj->size / PAGE_SIZE;
+	BUG_ON(obj_priv->page_list != NULL);
+	obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *),
+					 DRM_MEM_DRIVER);
+	if (obj_priv->page_list == NULL) {
+		DRM_ERROR("Failed to allocate page list\n");
+		return -ENOMEM;
+	}
+
+	inode = obj->filp->f_path.dentry->d_inode;
+	mapping = inode->i_mapping;
+	for (i = 0; i < page_count; i++) {
+		page = read_mapping_page(mapping, i, NULL);
+		if (IS_ERR(page)) {
+			ret = PTR_ERR(page);
+			DRM_ERROR("read_mapping_page failed: %d\n", ret);
+			i915_gem_object_free_page_list(obj);
+			return ret;
+		}
+		obj_priv->page_list[i] = page;
+	}
+	return 0;
+}
+
+/**
+ * Finds free space in the GTT aperture and binds the object there.
+ */
+static int
+i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+{
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	struct drm_mm_node *free_space;
+	int page_count, ret;
+
+	if (alignment == 0)
+		alignment = PAGE_SIZE;
+	if (alignment & (PAGE_SIZE - 1)) {
+		DRM_ERROR("Invalid object alignment requested %u\n", alignment);
+		return -EINVAL;
+	}
+
+ search_free:
+	free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
+					obj->size, alignment, 0);
+	if (free_space != NULL) {
+		obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
+						       alignment);
+		if (obj_priv->gtt_space != NULL) {
+			obj_priv->gtt_space->private = obj;
+			obj_priv->gtt_offset = obj_priv->gtt_space->start;
+		}
+	}
+	if (obj_priv->gtt_space == NULL) {
+		/* If the gtt is empty and we're still having trouble
+		 * fitting our object in, we're out of memory.
+		 */
+#if WATCH_LRU
+		DRM_INFO("%s: GTT full, evicting something\n", __func__);
+#endif
+		if (list_empty(&dev_priv->mm.inactive_list) &&
+		    list_empty(&dev_priv->mm.flushing_list) &&
+		    list_empty(&dev_priv->mm.active_list)) {
+			DRM_ERROR("GTT full, but LRU list empty\n");
+			return -ENOMEM;
+		}
+
+		ret = i915_gem_evict_something(dev);
+		if (ret != 0) {
+			DRM_ERROR("Failed to evict a buffer %d\n", ret);
+			return ret;
+		}
+		goto search_free;
+	}
+
+#if WATCH_BUF
+	DRM_INFO("Binding object of size %d at 0x%08x\n",
+		 obj->size, obj_priv->gtt_offset);
+#endif
+	ret = i915_gem_object_get_page_list(obj);
+	if (ret) {
+		drm_mm_put_block(obj_priv->gtt_space);
+		obj_priv->gtt_space = NULL;
+		return ret;
+	}
+
+	page_count = obj->size / PAGE_SIZE;
+	/* Create an AGP memory structure pointing at our pages, and bind it
+	 * into the GTT.
+	 */
+	obj_priv->agp_mem = drm_agp_bind_pages(dev,
+					       obj_priv->page_list,
+					       page_count,
+					       obj_priv->gtt_offset,
+					       obj_priv->agp_type);
+	if (obj_priv->agp_mem == NULL) {
+		i915_gem_object_free_page_list(obj);
+		drm_mm_put_block(obj_priv->gtt_space);
+		obj_priv->gtt_space = NULL;
+		return -ENOMEM;
+	}
+	atomic_inc(&dev->gtt_count);
+	atomic_add(obj->size, &dev->gtt_memory);
+
+	/* Assert that the object is not currently in any GPU domain. As it
+	 * wasn't in the GTT, there shouldn't be any way it could have been in
+	 * a GPU cache
+	 */
+	BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
+	BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
+
+	return 0;
+}
+
+void
+i915_gem_clflush_object(struct drm_gem_object *obj)
+{
+	struct drm_i915_gem_object	*obj_priv = obj->driver_private;
+
+	/* If we don't have a page list set up, then we're not pinned
+	 * to GPU, and we can ignore the cache flush because it'll happen
+	 * again at bind time.
+	 */
+	if (obj_priv->page_list == NULL)
+		return;
+
+	drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE);
+}
+
+/*
+ * Set the next domain for the specified object. This
+ * may not actually perform the necessary flushing/invalidating though,
+ * as that may want to be batched with other set_domain operations
+ *
+ * This is (we hope) the only really tricky part of gem. The goal
+ * is fairly simple -- track which caches hold bits of the object
+ * and make sure they remain coherent. A few concrete examples may
+ * help to explain how it works. For shorthand, we use the notation
+ * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
+ * a pair of read and write domain masks.
+ *
+ * Case 1: the batch buffer
+ *
+ *	1. Allocated
+ *	2. Written by CPU
+ *	3. Mapped to GTT
+ *	4. Read by GPU
+ *	5. Unmapped from GTT
+ *	6. Freed
+ *
+ *	Let's take these a step at a time
+ *
+ *	1. Allocated
+ *		Pages allocated from the kernel may still have
+ *		cache contents, so we set them to (CPU, CPU) always.
+ *	2. Written by CPU (using pwrite)
+ *		The pwrite function calls set_domain (CPU, CPU) and
+ *		this function does nothing (as nothing changes)
+ *	3. Mapped to GTT
+ *		This function asserts that the object is not
+ *		currently in any GPU-based read or write domains
+ *	4. Read by GPU
+ *		i915_gem_execbuffer calls set_domain (COMMAND, 0).
+ *		As write_domain is zero, this function adds in the
+ *		current read domains (CPU+COMMAND, 0).
+ *		flush_domains is set to CPU.
+ *		invalidate_domains is set to COMMAND
+ *		clflush is run to get data out of the CPU caches
+ *		then i915_gem_dev_set_domain calls i915_gem_flush to
+ *		emit an MI_FLUSH and drm_agp_chipset_flush
+ *	5. Unmapped from GTT
+ *		i915_gem_object_unbind calls set_domain (CPU, CPU)
+ *		flush_domains and invalidate_domains end up both zero
+ *		so no flushing/invalidating happens
+ *	6. Freed
+ *		yay, done
+ *
+ * Case 2: The shared render buffer
+ *
+ *	1. Allocated
+ *	2. Mapped to GTT
+ *	3. Read/written by GPU
+ *	4. set_domain to (CPU,CPU)
+ *	5. Read/written by CPU
+ *	6. Read/written by GPU
+ *
+ *	1. Allocated
+ *		Same as last example, (CPU, CPU)
+ *	2. Mapped to GTT
+ *		Nothing changes (assertions find that it is not in the GPU)
+ *	3. Read/written by GPU
+ *		execbuffer calls set_domain (RENDER, RENDER)
+ *		flush_domains gets CPU
+ *		invalidate_domains gets GPU
+ *		clflush (obj)
+ *		MI_FLUSH and drm_agp_chipset_flush
+ *	4. set_domain (CPU, CPU)
+ *		flush_domains gets GPU
+ *		invalidate_domains gets CPU
+ *		wait_rendering (obj) to make sure all drawing is complete.
+ *		This will include an MI_FLUSH to get the data from GPU
+ *		to memory
+ *		clflush (obj) to invalidate the CPU cache
+ *		Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
+ *	5. Read/written by CPU
+ *		cache lines are loaded and dirtied
+ *	6. Read/written by GPU
+ *		Same as last GPU access
+ *
+ * Case 3: The constant buffer
+ *
+ *	1. Allocated
+ *	2. Written by CPU
+ *	3. Read by GPU
+ *	4. Updated (written) by CPU again
+ *	5. Read by GPU
+ *
+ *	1. Allocated
+ *		(CPU, CPU)
+ *	2. Written by CPU
+ *		(CPU, CPU)
+ *	3. Read by GPU
+ *		(CPU+RENDER, 0)
+ *		flush_domains = CPU
+ *		invalidate_domains = RENDER
+ *		clflush (obj)
+ *		MI_FLUSH
+ *		drm_agp_chipset_flush
+ *	4. Updated (written) by CPU again
+ *		(CPU, CPU)
+ *		flush_domains = 0 (no previous write domain)
+ *		invalidate_domains = 0 (no new read domains)
+ *	5. Read by GPU
+ *		(CPU+RENDER, 0)
+ *		flush_domains = CPU
+ *		invalidate_domains = RENDER
+ *		clflush (obj)
+ *		MI_FLUSH
+ *		drm_agp_chipset_flush
+ */
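+/*
+ * A worked example of the mask arithmetic below (values illustrative):
+ * suppose an object sits at (CPU, CPU) and execbuffer requests
+ * (RENDER, 0).  Because the new write_domain is 0, the old CPU read
+ * domain is carried over, giving (CPU+RENDER, 0); flush_domains picks
+ * up the old CPU write domain, and invalidate_domains picks up RENDER,
+ * the one read domain that is new -- matching Case 2, step 3 above.
+ */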
+static int
+i915_gem_object_set_domain(struct drm_gem_object *obj,
+			    uint32_t read_domains,
+			    uint32_t write_domain)
+{
+	struct drm_device		*dev = obj->dev;
+	struct drm_i915_gem_object	*obj_priv = obj->driver_private;
+	uint32_t			invalidate_domains = 0;
+	uint32_t			flush_domains = 0;
+	int				ret;
+
+#if WATCH_BUF
+	DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n",
+		 __func__, obj,
+		 obj->read_domains, read_domains,
+		 obj->write_domain, write_domain);
+#endif
+	/*
+	 * If the object isn't moving to a new write domain,
+	 * let the object stay in multiple read domains
+	 */
+	if (write_domain == 0)
+		read_domains |= obj->read_domains;
+	else
+		obj_priv->dirty = 1;
+
+	/*
+	 * Flush the current write domain if
+	 * the new read domains don't match. Invalidate
+	 * any read domains which differ from the old
+	 * write domain
+	 */
+	if (obj->write_domain && obj->write_domain != read_domains) {
+		flush_domains |= obj->write_domain;
+		invalidate_domains |= read_domains & ~obj->write_domain;
+	}
+	/*
+	 * Invalidate any read caches which may have
+	 * stale data. That is, any new read domains.
+	 */
+	invalidate_domains |= read_domains & ~obj->read_domains;
+	if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) {
+#if WATCH_BUF
+		DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n",
+			 __func__, flush_domains, invalidate_domains);
+#endif
+		/*
+		 * If we're invalidating the CPU cache and flushing a GPU cache,
+		 * then pause for rendering so that the GPU caches will be
+		 * flushed before the CPU cache is invalidated
+		 */
+		if ((invalidate_domains & I915_GEM_DOMAIN_CPU) &&
+		    (flush_domains & ~(I915_GEM_DOMAIN_CPU |
+				       I915_GEM_DOMAIN_GTT))) {
+			ret = i915_gem_object_wait_rendering(obj);
+			if (ret)
+				return ret;
+		}
+		i915_gem_clflush_object(obj);
+	}
+
+	if ((write_domain | flush_domains) != 0)
+		obj->write_domain = write_domain;
+
+	/* If we're invalidating the CPU domain, clear the per-page CPU
+	 * domain list as well.
+	 */
+	if (obj_priv->page_cpu_valid != NULL &&
+	    (write_domain != 0 ||
+	     read_domains & I915_GEM_DOMAIN_CPU)) {
+		drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE,
+			 DRM_MEM_DRIVER);
+		obj_priv->page_cpu_valid = NULL;
+	}
+	obj->read_domains = read_domains;
+
+	dev->invalidate_domains |= invalidate_domains;
+	dev->flush_domains |= flush_domains;
+#if WATCH_BUF
+	DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n",
+		 __func__,
+		 obj->read_domains, obj->write_domain,
+		 dev->invalidate_domains, dev->flush_domains);
+#endif
+	return 0;
+}
+
+/**
+ * Set the read/write domain on a range of the object.
+ *
+ * Currently only implemented for CPU reads, otherwise drops to normal
+ * i915_gem_object_set_domain().
+ */
+static int
+i915_gem_object_set_domain_range(struct drm_gem_object *obj,
+				 uint64_t offset,
+				 uint64_t size,
+				 uint32_t read_domains,
+				 uint32_t write_domain)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int ret, i;
+
+	if (obj->read_domains & I915_GEM_DOMAIN_CPU)
+		return 0;
+
+	if (read_domains != I915_GEM_DOMAIN_CPU ||
+	    write_domain != 0)
+		return i915_gem_object_set_domain(obj,
+						  read_domains, write_domain);
+
+	/* Wait on any GPU rendering to the object to be flushed. */
+	if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) {
+		ret = i915_gem_object_wait_rendering(obj);
+		if (ret)
+			return ret;
+	}
+
+	if (obj_priv->page_cpu_valid == NULL) {
+		obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE,
+						      DRM_MEM_DRIVER);
+		if (obj_priv->page_cpu_valid == NULL)
+			return -ENOMEM;
+	}
+
+	/* Flush the cache on any pages that are still invalid from the CPU's
+	 * perspective.
+	 */
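+	/* For example (illustrative numbers), offset 0x1800 with size
+	 * 0x1000 spans pages 1 through 2, since the last byte touched
+	 * lies at offset 0x27ff.
+	 */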
+	for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) {
+		if (obj_priv->page_cpu_valid[i])
+			continue;
+
+		drm_clflush_pages(obj_priv->page_list + i, 1);
+
+		obj_priv->page_cpu_valid[i] = 1;
+	}
+
+	return 0;
+}
+
+/**
+ * Once all of the objects have been set in the proper domain,
+ * perform the necessary flush and invalidate operations.
+ *
+ * Returns the write domains flushed, for use in flush tracking.
+ */
+static uint32_t
+i915_gem_dev_set_domain(struct drm_device *dev)
+{
+	uint32_t flush_domains = dev->flush_domains;
+
+	/*
+	 * Now that all the buffers are synced to the proper domains,
+	 * flush and invalidate the collected domains
+	 */
+	if (dev->invalidate_domains | dev->flush_domains) {
+#if WATCH_EXEC
+		DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+			  __func__,
+			 dev->invalidate_domains,
+			 dev->flush_domains);
+#endif
+		i915_gem_flush(dev,
+			       dev->invalidate_domains,
+			       dev->flush_domains);
+		dev->invalidate_domains = 0;
+		dev->flush_domains = 0;
+	}
+
+	return flush_domains;
+}
+
+/**
+ * Pin an object to the GTT and evaluate the relocations landing in it.
+ */
+static int
+i915_gem_object_pin_and_relocate(struct drm_gem_object *obj,
+				 struct drm_file *file_priv,
+				 struct drm_i915_gem_exec_object *entry)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_gem_relocation_entry reloc;
+	struct drm_i915_gem_relocation_entry __user *relocs;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int i, ret;
+	uint32_t last_reloc_offset = -1;
+	void __iomem *reloc_page = NULL;
+
+	/* Choose the GTT offset for our buffer and put it there. */
+	ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment);
+	if (ret)
+		return ret;
+
+	entry->offset = obj_priv->gtt_offset;
+
+	relocs = (struct drm_i915_gem_relocation_entry __user *)
+		 (uintptr_t) entry->relocs_ptr;
+	/* Apply the relocations, using the GTT aperture to avoid cache
+	 * flushing requirements.
+	 */
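+	/* Worked example (values purely illustrative): if the target ends
+	 * up at gtt_offset 0x00100000 and reloc.delta is 0x10, the dword
+	 * at reloc.offset inside this object is rewritten to 0x00100010,
+	 * and presumed_offset is updated so that an unchanged layout lets
+	 * the write be skipped next time around.
+	 */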
+	for (i = 0; i < entry->relocation_count; i++) {
+		struct drm_gem_object *target_obj;
+		struct drm_i915_gem_object *target_obj_priv;
+		uint32_t reloc_val, reloc_offset;
+		uint32_t __iomem *reloc_entry;
+
+		ret = copy_from_user(&reloc, relocs + i, sizeof(reloc));
+		if (ret != 0) {
+			i915_gem_object_unpin(obj);
+			return -EFAULT;
+		}
+
+		target_obj = drm_gem_object_lookup(obj->dev, file_priv,
+						   reloc.target_handle);
+		if (target_obj == NULL) {
+			i915_gem_object_unpin(obj);
+			return -EBADF;
+		}
+		target_obj_priv = target_obj->driver_private;
+
+		/* The target buffer should have appeared before us in the
+		 * exec_object list, so it should have a GTT space bound by now.
+		 */
+		if (target_obj_priv->gtt_space == NULL) {
+			DRM_ERROR("No GTT space found for object %d\n",
+				  reloc.target_handle);
+			drm_gem_object_unreference(target_obj);
+			i915_gem_object_unpin(obj);
+			return -EINVAL;
+		}
+
+		if (reloc.offset > obj->size - 4) {
+			DRM_ERROR("Relocation beyond object bounds: "
+				  "obj %p target %d offset %d size %d.\n",
+				  obj, reloc.target_handle,
+				  (int) reloc.offset, (int) obj->size);
+			drm_gem_object_unreference(target_obj);
+			i915_gem_object_unpin(obj);
+			return -EINVAL;
+		}
+		if (reloc.offset & 3) {
+			DRM_ERROR("Relocation not 4-byte aligned: "
+				  "obj %p target %d offset %d.\n",
+				  obj, reloc.target_handle,
+				  (int) reloc.offset);
+			drm_gem_object_unreference(target_obj);
+			i915_gem_object_unpin(obj);
+			return -EINVAL;
+		}
+
+		if (reloc.write_domain && target_obj->pending_write_domain &&
+		    reloc.write_domain != target_obj->pending_write_domain) {
+			DRM_ERROR("Write domain conflict: "
+				  "obj %p target %d offset %d "
+				  "new %08x old %08x\n",
+				  obj, reloc.target_handle,
+				  (int) reloc.offset,
+				  reloc.write_domain,
+				  target_obj->pending_write_domain);
+			drm_gem_object_unreference(target_obj);
+			i915_gem_object_unpin(obj);
+			return -EINVAL;
+		}
+
+#if WATCH_RELOC
+		DRM_INFO("%s: obj %p offset %08x target %d "
+			 "read %08x write %08x gtt %08x "
+			 "presumed %08x delta %08x\n",
+			 __func__,
+			 obj,
+			 (int) reloc.offset,
+			 (int) reloc.target_handle,
+			 (int) reloc.read_domains,
+			 (int) reloc.write_domain,
+			 (int) target_obj_priv->gtt_offset,
+			 (int) reloc.presumed_offset,
+			 reloc.delta);
+#endif
+
+		target_obj->pending_read_domains |= reloc.read_domains;
+		target_obj->pending_write_domain |= reloc.write_domain;
+
+		/* If the relocation already has the right value in it, no
+		 * more work needs to be done.
+		 */
+		if (target_obj_priv->gtt_offset == reloc.presumed_offset) {
+			drm_gem_object_unreference(target_obj);
+			continue;
+		}
+
+		/* Now that we're going to actually write some data in,
+		 * make sure that any rendering using this buffer's contents
+		 * is completed.
+		 */
+		i915_gem_object_wait_rendering(obj);
+
+		/* As we're writing through the gtt, flush
+		 * any CPU writes before we write the relocations
+		 */
+		if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
+			i915_gem_clflush_object(obj);
+			drm_agp_chipset_flush(dev);
+			obj->write_domain = 0;
+		}
+
+		/* Map the page containing the relocation we're going to
+		 * perform.
+		 */
+		reloc_offset = obj_priv->gtt_offset + reloc.offset;
+		if (reloc_page == NULL ||
+		    (last_reloc_offset & ~(PAGE_SIZE - 1)) !=
+		    (reloc_offset & ~(PAGE_SIZE - 1))) {
+			if (reloc_page != NULL)
+				iounmap(reloc_page);
+
+			reloc_page = ioremap_wc(dev->agp->base +
+						(reloc_offset &
+						 ~(PAGE_SIZE - 1)),
+						PAGE_SIZE);
+			last_reloc_offset = reloc_offset;
+			if (reloc_page == NULL) {
+				drm_gem_object_unreference(target_obj);
+				i915_gem_object_unpin(obj);
+				return -ENOMEM;
+			}
+		}
+
+		reloc_entry = (uint32_t __iomem *)(reloc_page +
+					   (reloc_offset & (PAGE_SIZE - 1)));
+		reloc_val = target_obj_priv->gtt_offset + reloc.delta;
+
+#if WATCH_BUF
+		DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n",
+			  obj, (unsigned int) reloc.offset,
+			  readl(reloc_entry), reloc_val);
+#endif
+		writel(reloc_val, reloc_entry);
+
+		/* Write the updated presumed offset for this entry back out
+		 * to the user.
+		 */
+		reloc.presumed_offset = target_obj_priv->gtt_offset;
+		ret = copy_to_user(relocs + i, &reloc, sizeof(reloc));
+		if (ret != 0) {
+			drm_gem_object_unreference(target_obj);
+			i915_gem_object_unpin(obj);
+			return -EFAULT;
+		}
+
+		drm_gem_object_unreference(target_obj);
+	}
+
+	if (reloc_page != NULL)
+		iounmap(reloc_page);
+
+#if WATCH_BUF
+	if (0)
+		i915_gem_dump_object(obj, 128, __func__, ~0);
+#endif
+	return 0;
+}
+
+/** Dispatch a batchbuffer to the ring
+ */
+static int
+i915_dispatch_gem_execbuffer(struct drm_device *dev,
+			      struct drm_i915_gem_execbuffer *exec,
+			      uint64_t exec_offset)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *)
+					     (uintptr_t) exec->cliprects_ptr;
+	int nbox = exec->num_cliprects;
+	int i = 0, count;
+	uint32_t	exec_start, exec_len;
+	RING_LOCALS;
+
+	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+	exec_len = (uint32_t) exec->batch_len;
+
+	if ((exec_start | exec_len) & 0x7) {
+		DRM_ERROR("alignment\n");
+		return -EINVAL;
+	}
+
+	if (!exec_start)
+		return -EINVAL;
+
+	count = nbox ? nbox : 1;
+
+	for (i = 0; i < count; i++) {
+		if (i < nbox) {
+			int ret = i915_emit_box(dev, boxes, i,
+						exec->DR1, exec->DR4);
+			if (ret)
+				return ret;
+		}
+
+		if (IS_I830(dev) || IS_845G(dev)) {
+			BEGIN_LP_RING(4);
+			OUT_RING(MI_BATCH_BUFFER);
+			OUT_RING(exec_start | MI_BATCH_NON_SECURE);
+			OUT_RING(exec_start + exec_len - 4);
+			OUT_RING(0);
+			ADVANCE_LP_RING();
+		} else {
+			BEGIN_LP_RING(2);
+			if (IS_I965G(dev)) {
+				OUT_RING(MI_BATCH_BUFFER_START |
+					 (2 << 6) |
+					 MI_BATCH_NON_SECURE_I965);
+				OUT_RING(exec_start);
+			} else {
+				OUT_RING(MI_BATCH_BUFFER_START |
+					 (2 << 6));
+				OUT_RING(exec_start | MI_BATCH_NON_SECURE);
+			}
+			ADVANCE_LP_RING();
+		}
+	}
+
+	/* XXX breadcrumb */
+	return 0;
+}
+
+/* Throttle our rendering by waiting until the ring has completed our requests
+ * emitted over 20 msec ago.
+ *
+ * This should get us reasonable parallelism between CPU and GPU but also
+ * relatively low latency when blocking on a particular request to finish.
+ */
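+/*
+ * Illustration: a client that calls throttle once per frame will, at
+ * frame N, block on the seqno recorded at frame N-1, so at most about
+ * one frame of rendering is ever queued ahead of the CPU.
+ */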
+static int
+i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv)
+{
+	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+	int ret = 0;
+	uint32_t seqno;
+
+	mutex_lock(&dev->struct_mutex);
+	seqno = i915_file_priv->mm.last_gem_throttle_seqno;
+	i915_file_priv->mm.last_gem_throttle_seqno =
+		i915_file_priv->mm.last_gem_seqno;
+	if (seqno)
+		ret = i915_wait_request(dev, seqno);
+	mutex_unlock(&dev->struct_mutex);
+	return ret;
+}
+
+int
+i915_gem_execbuffer(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv;
+	struct drm_i915_gem_execbuffer *args = data;
+	struct drm_i915_gem_exec_object *exec_list = NULL;
+	struct drm_gem_object **object_list = NULL;
+	struct drm_gem_object *batch_obj;
+	int ret, i, pinned = 0;
+	uint64_t exec_offset;
+	uint32_t seqno, flush_domains;
+
+#if WATCH_EXEC
+	DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+		  (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+	if (args->buffer_count < 1) {
+		DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+		return -EINVAL;
+	}
+	/* Copy in the exec list from userland */
+	exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count,
+			       DRM_MEM_DRIVER);
+	object_list = drm_calloc(sizeof(*object_list), args->buffer_count,
+				 DRM_MEM_DRIVER);
+	if (exec_list == NULL || object_list == NULL) {
+		DRM_ERROR("Failed to allocate exec or object list "
+			  "for %d buffers\n",
+			  args->buffer_count);
+		ret = -ENOMEM;
+		goto pre_mutex_err;
+	}
+	ret = copy_from_user(exec_list,
+			     (struct drm_i915_relocation_entry __user *)
+			     (uintptr_t) args->buffers_ptr,
+			     sizeof(*exec_list) * args->buffer_count);
+	if (ret != 0) {
+		DRM_ERROR("copy %d exec entries failed %d\n",
+			  args->buffer_count, ret);
+		ret = -EFAULT;
+		goto pre_mutex_err;
+	}
+
+	mutex_lock(&dev->struct_mutex);
+
+	i915_verify_inactive(dev, __FILE__, __LINE__);
+
+	if (dev_priv->mm.wedged) {
+		DRM_ERROR("Execbuf while wedged\n");
+		mutex_unlock(&dev->struct_mutex);
+		return -EIO;
+	}
+
+	if (dev_priv->mm.suspended) {
+		DRM_ERROR("Execbuf while VT-switched.\n");
+		mutex_unlock(&dev->struct_mutex);
+		return -EBUSY;
+	}
+
+	/* Zero the global flush/invalidate flags. These
+	 * will be modified as each object is bound to the
+	 * GTT.
+	 */
+	dev->invalidate_domains = 0;
+	dev->flush_domains = 0;
+
+	/* Look up object handles and perform the relocations */
+	for (i = 0; i < args->buffer_count; i++) {
+		object_list[i] = drm_gem_object_lookup(dev, file_priv,
+						       exec_list[i].handle);
+		if (object_list[i] == NULL) {
+			DRM_ERROR("Invalid object handle %d at index %d\n",
+				   exec_list[i].handle, i);
+			ret = -EBADF;
+			goto err;
+		}
+
+		object_list[i]->pending_read_domains = 0;
+		object_list[i]->pending_write_domain = 0;
+		ret = i915_gem_object_pin_and_relocate(object_list[i],
+						       file_priv,
+						       &exec_list[i]);
+		if (ret) {
+			DRM_ERROR("object bind and relocate failed %d\n", ret);
+			goto err;
+		}
+		pinned = i + 1;
+	}
+
+	/* Set the pending read domains for the batch buffer to COMMAND */
+	batch_obj = object_list[args->buffer_count-1];
+	batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND;
+	batch_obj->pending_write_domain = 0;
+
+	i915_verify_inactive(dev, __FILE__, __LINE__);
+
+	for (i = 0; i < args->buffer_count; i++) {
+		struct drm_gem_object *obj = object_list[i];
+		struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+		if (obj_priv->gtt_space == NULL) {
+			/* We evicted this buffer in the process of validating
+			 * our set of buffers.  We could try to recover by
+			 * kicking everything out and trying again from
+			 * the start.
+			 */
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		/* make sure all previous memory operations have passed */
+		ret = i915_gem_object_set_domain(obj,
+						 obj->pending_read_domains,
+						 obj->pending_write_domain);
+		if (ret)
+			goto err;
+	}
+
+	i915_verify_inactive(dev, __FILE__, __LINE__);
+
+	/* Flush/invalidate caches and chipset buffer */
+	flush_domains = i915_gem_dev_set_domain(dev);
+
+	i915_verify_inactive(dev, __FILE__, __LINE__);
+
+#if WATCH_COHERENCY
+	for (i = 0; i < args->buffer_count; i++) {
+		i915_gem_object_check_coherency(object_list[i],
+						exec_list[i].handle);
+	}
+#endif
+
+	exec_offset = exec_list[args->buffer_count - 1].offset;
+
+#if WATCH_EXEC
+	i915_gem_dump_object(object_list[args->buffer_count - 1],
+			      args->batch_len,
+			      __func__,
+			      ~0);
+#endif
+
+	(void)i915_add_request(dev, flush_domains);
+
+	/* Exec the batchbuffer */
+	ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset);
+	if (ret) {
+		DRM_ERROR("dispatch failed %d\n", ret);
+		goto err;
+	}
+
+	/*
+	 * Ensure that the commands in the batch buffer are
+	 * finished before the interrupt fires
+	 */
+	flush_domains = i915_retire_commands(dev);
+
+	i915_verify_inactive(dev, __FILE__, __LINE__);
+
+	/*
+	 * Get a seqno representing the execution of the current buffer,
+	 * which we can wait on.  We would like to mitigate these interrupts,
+	 * likely by only creating seqnos occasionally (so that we have
+	 * *some* interrupts representing completion of buffers that we can
+	 * wait on when trying to clear up gtt space).
+	 */
+	seqno = i915_add_request(dev, flush_domains);
+	BUG_ON(seqno == 0);
+	i915_file_priv->mm.last_gem_seqno = seqno;
+	for (i = 0; i < args->buffer_count; i++) {
+		struct drm_gem_object *obj = object_list[i];
+		struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+		i915_gem_object_move_to_active(obj);
+		obj_priv->last_rendering_seqno = seqno;
+#if WATCH_LRU
+		DRM_INFO("%s: move to exec list %p\n", __func__, obj);
+#endif
+	}
+#if WATCH_LRU
+	i915_dump_lru(dev, __func__);
+#endif
+
+	i915_verify_inactive(dev, __FILE__, __LINE__);
+
+	/* Copy the new buffer offsets back to the user's exec list. */
+	ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+			   (uintptr_t) args->buffers_ptr,
+			   exec_list,
+			   sizeof(*exec_list) * args->buffer_count);
+	if (ret) {
+		DRM_ERROR("failed to copy %d exec entries "
+			  "back to user (%d)\n",
+			  args->buffer_count, ret);
+		ret = -EFAULT;
+	}
+err:
+	if (object_list != NULL) {
+		for (i = 0; i < pinned; i++)
+			i915_gem_object_unpin(object_list[i]);
+
+		for (i = 0; i < args->buffer_count; i++)
+			drm_gem_object_unreference(object_list[i]);
+	}
+	mutex_unlock(&dev->struct_mutex);
+
+pre_mutex_err:
+	drm_free(object_list, sizeof(*object_list) * args->buffer_count,
+		 DRM_MEM_DRIVER);
+	drm_free(exec_list, sizeof(*exec_list) * args->buffer_count,
+		 DRM_MEM_DRIVER);
+
+	return ret;
+}
+
+int
+i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int ret;
+
+	i915_verify_inactive(dev, __FILE__, __LINE__);
+	if (obj_priv->gtt_space == NULL) {
+		ret = i915_gem_object_bind_to_gtt(obj, alignment);
+		if (ret != 0) {
+			DRM_ERROR("Failure to bind: %d\n", ret);
+			return ret;
+		}
+	}
+	obj_priv->pin_count++;
+
+	/* If the object is not active and not pending a flush,
+	 * remove it from the inactive list
+	 */
+	if (obj_priv->pin_count == 1) {
+		atomic_inc(&dev->pin_count);
+		atomic_add(obj->size, &dev->pin_memory);
+		if (!obj_priv->active &&
+		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
+					   I915_GEM_DOMAIN_GTT)) == 0 &&
+		    !list_empty(&obj_priv->list))
+			list_del_init(&obj_priv->list);
+	}
+	i915_verify_inactive(dev, __FILE__, __LINE__);
+
+	return 0;
+}
+
+void
+i915_gem_object_unpin(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+	i915_verify_inactive(dev, __FILE__, __LINE__);
+	obj_priv->pin_count--;
+	BUG_ON(obj_priv->pin_count < 0);
+	BUG_ON(obj_priv->gtt_space == NULL);
+
+	/* If the object is no longer pinned, and is
+	 * neither active nor being flushed, then stick it on
+	 * the inactive list
+	 */
+	if (obj_priv->pin_count == 0) {
+		if (!obj_priv->active &&
+		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
+					   I915_GEM_DOMAIN_GTT)) == 0)
+			list_move_tail(&obj_priv->list,
+				       &dev_priv->mm.inactive_list);
+		atomic_dec(&dev->pin_count);
+		atomic_sub(obj->size, &dev->pin_memory);
+	}
+	i915_verify_inactive(dev, __FILE__, __LINE__);
+}
+
+int
+i915_gem_pin_ioctl(struct drm_device *dev, void *data,
+		   struct drm_file *file_priv)
+{
+	struct drm_i915_gem_pin *args = data;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+	int ret;
+
+	mutex_lock(&dev->struct_mutex);
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL) {
+		DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n",
+			  args->handle);
+		mutex_unlock(&dev->struct_mutex);
+		return -EBADF;
+	}
+	obj_priv = obj->driver_private;
+
+	ret = i915_gem_object_pin(obj, args->alignment);
+	if (ret != 0) {
+		drm_gem_object_unreference(obj);
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
+	/* XXX - flush the CPU caches for pinned objects
+	 * as the X server doesn't manage domains yet
+	 */
+	if (obj->write_domain & I915_GEM_DOMAIN_CPU) {
+		i915_gem_clflush_object(obj);
+		drm_agp_chipset_flush(dev);
+		obj->write_domain = 0;
+	}
+	args->offset = obj_priv->gtt_offset;
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+int
+i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
+		     struct drm_file *file_priv)
+{
+	struct drm_i915_gem_pin *args = data;
+	struct drm_gem_object *obj;
+
+	mutex_lock(&dev->struct_mutex);
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL) {
+		DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n",
+			  args->handle);
+		mutex_unlock(&dev->struct_mutex);
+		return -EBADF;
+	}
+
+	i915_gem_object_unpin(obj);
+
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+int
+i915_gem_busy_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_i915_gem_busy *args = data;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+
+	mutex_lock(&dev->struct_mutex);
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL) {
+		DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n",
+			  args->handle);
+		mutex_unlock(&dev->struct_mutex);
+		return -EBADF;
+	}
+
+	obj_priv = obj->driver_private;
+	args->busy = obj_priv->active;
+
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+	return 0;
+}
+
+int
+i915_gem_throttle_ioctl(struct drm_device *dev, void *data,
+			struct drm_file *file_priv)
+{
+	return i915_gem_ring_throttle(dev, file_priv);
+}
+
+int i915_gem_init_object(struct drm_gem_object *obj)
+{
+	struct drm_i915_gem_object *obj_priv;
+
+	obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER);
+	if (obj_priv == NULL)
+		return -ENOMEM;
+
+	/*
+	 * We've just allocated pages from the kernel,
+	 * so they've just been written by the CPU with
+	 * zeros. They'll need to be clflushed before we
+	 * use them with the GPU.
+	 */
+	obj->write_domain = I915_GEM_DOMAIN_CPU;
+	obj->read_domains = I915_GEM_DOMAIN_CPU;
+
+	obj_priv->agp_type = AGP_USER_MEMORY;
+
+	obj->driver_private = obj_priv;
+	obj_priv->obj = obj;
+	INIT_LIST_HEAD(&obj_priv->list);
+	return 0;
+}
+
+void i915_gem_free_object(struct drm_gem_object *obj)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+	while (obj_priv->pin_count > 0)
+		i915_gem_object_unpin(obj);
+
+	i915_gem_object_unbind(obj);
+
+	drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER);
+	drm_free(obj->driver_private, 1, DRM_MEM_DRIVER);
+}
+
+static int
+i915_gem_set_domain(struct drm_gem_object *obj,
+		    struct drm_file *file_priv,
+		    uint32_t read_domains,
+		    uint32_t write_domain)
+{
+	struct drm_device *dev = obj->dev;
+	int ret;
+	uint32_t flush_domains;
+
+	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	ret = i915_gem_object_set_domain(obj, read_domains, write_domain);
+	if (ret)
+		return ret;
+	flush_domains = i915_gem_dev_set_domain(obj->dev);
+
+	if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT))
+		(void) i915_add_request(dev, flush_domains);
+
+	return 0;
+}
+
+/** Unbinds all objects that are on the given buffer list. */
+static int
+i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head)
+{
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+	int ret;
+
+	while (!list_empty(head)) {
+		obj_priv = list_first_entry(head,
+					    struct drm_i915_gem_object,
+					    list);
+		obj = obj_priv->obj;
+
+		if (obj_priv->pin_count != 0) {
+			DRM_ERROR("Pinned object in unbind list\n");
+			return -EINVAL;
+		}
+
+		ret = i915_gem_object_unbind(obj);
+		if (ret != 0) {
+			DRM_ERROR("Error unbinding object: %d\n", ret);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+static int
+i915_gem_idle(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	uint32_t seqno, cur_seqno, last_seqno;
+	int stuck, ret;
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (dev_priv->mm.suspended || dev_priv->ring.ring_obj == NULL) {
+		mutex_unlock(&dev->struct_mutex);
+		return 0;
+	}
+
+	/* Hack!  Don't let anybody do execbuf while we don't control the chip.
+	 * We need to replace this with a semaphore, or something.
+	 */
+	dev_priv->mm.suspended = 1;
+
+	/* Cancel the retire work handler, wait for it to finish if running
+	 */
+	mutex_unlock(&dev->struct_mutex);
+	cancel_delayed_work_sync(&dev_priv->mm.retire_work);
+	mutex_lock(&dev->struct_mutex);
+
+	i915_kernel_lost_context(dev);
+
+	/* Flush the GPU along with all non-CPU write domains
+	 */
+	i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT),
+		       ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT));
+	seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU |
+					I915_GEM_DOMAIN_GTT));
+
+	if (seqno == 0) {
+		mutex_unlock(&dev->struct_mutex);
+		return -ENOMEM;
+	}
+
+	dev_priv->mm.waiting_gem_seqno = seqno;
+	last_seqno = 0;
+	stuck = 0;
+	for (;;) {
+		cur_seqno = i915_get_gem_seqno(dev);
+		if (i915_seqno_passed(cur_seqno, seqno))
+			break;
+		if (last_seqno == cur_seqno) {
+			if (stuck++ > 100) {
+				DRM_ERROR("hardware wedged\n");
+				dev_priv->mm.wedged = 1;
+				DRM_WAKEUP(&dev_priv->irq_queue);
+				break;
+			}
+		}
+		msleep(10);
+		last_seqno = cur_seqno;
+	}
+	dev_priv->mm.waiting_gem_seqno = 0;
+
+	i915_gem_retire_requests(dev);
+
+	/* Active and flushing should now be empty as we've
+	 * waited for a sequence higher than any pending execbuffer
+	 */
+	BUG_ON(!list_empty(&dev_priv->mm.active_list));
+	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+
+	/* Request should now be empty as we've also waited
+	 * for the last request in the list
+	 */
+	BUG_ON(!list_empty(&dev_priv->mm.request_list));
+
+	/* Move all buffers out of the GTT. */
+	ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list);
+	if (ret) {
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
+	BUG_ON(!list_empty(&dev_priv->mm.active_list));
+	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
+	BUG_ON(!list_empty(&dev_priv->mm.request_list));
+
+	i915_gem_cleanup_ringbuffer(dev);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+static int
+i915_gem_init_hws(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+	int ret;
+
+	/* If we need a physical address for the status page, it's already
+	 * initialized at driver load time.
+	 */
+	if (!I915_NEED_GFX_HWS(dev))
+		return 0;
+
+	obj = drm_gem_object_alloc(dev, 4096);
+	if (obj == NULL) {
+		DRM_ERROR("Failed to allocate status page\n");
+		return -ENOMEM;
+	}
+	obj_priv = obj->driver_private;
+	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
+
+	ret = i915_gem_object_pin(obj, 4096);
+	if (ret != 0) {
+		drm_gem_object_unreference(obj);
+		return ret;
+	}
+
+	dev_priv->status_gfx_addr = obj_priv->gtt_offset;
+
+	dev_priv->hw_status_page = kmap(obj_priv->page_list[0]);
+	if (dev_priv->hw_status_page == NULL) {
+		DRM_ERROR("Failed to map status page.\n");
+		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+		drm_gem_object_unreference(obj);
+		return -EINVAL;
+	}
+	dev_priv->hws_obj = obj;
+	memset(dev_priv->hw_status_page, 0, PAGE_SIZE);
+	I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr);
+	I915_READ(HWS_PGA); /* posting read */
+	DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr);
+
+	return 0;
+}
+
+static int
+i915_gem_init_ringbuffer(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+	int ret;
+	u32 head;
+
+	ret = i915_gem_init_hws(dev);
+	if (ret != 0)
+		return ret;
+
+	obj = drm_gem_object_alloc(dev, 128 * 1024);
+	if (obj == NULL) {
+		DRM_ERROR("Failed to allocate ringbuffer\n");
+		return -ENOMEM;
+	}
+	obj_priv = obj->driver_private;
+
+	ret = i915_gem_object_pin(obj, 4096);
+	if (ret != 0) {
+		drm_gem_object_unreference(obj);
+		return ret;
+	}
+
+	/* Set up the kernel mapping for the ring. */
+	dev_priv->ring.Size = obj->size;
+	dev_priv->ring.tail_mask = obj->size - 1;
+
+	dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset;
+	dev_priv->ring.map.size = obj->size;
+	dev_priv->ring.map.type = 0;
+	dev_priv->ring.map.flags = 0;
+	dev_priv->ring.map.mtrr = 0;
+
+	drm_core_ioremap_wc(&dev_priv->ring.map, dev);
+	if (dev_priv->ring.map.handle == NULL) {
+		DRM_ERROR("Failed to map ringbuffer.\n");
+		memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
+		drm_gem_object_unreference(obj);
+		return -EINVAL;
+	}
+	dev_priv->ring.ring_obj = obj;
+	dev_priv->ring.virtual_start = dev_priv->ring.map.handle;
+
+	/* Stop the ring if it's running. */
+	I915_WRITE(PRB0_CTL, 0);
+	I915_WRITE(PRB0_TAIL, 0);
+	I915_WRITE(PRB0_HEAD, 0);
+
+	/* Initialize the ring. */
+	I915_WRITE(PRB0_START, obj_priv->gtt_offset);
+	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+
+	/* G45 ring initialization fails to reset head to zero */
+	if (head != 0) {
+		DRM_ERROR("Ring head not reset to zero "
+			  "ctl %08x head %08x tail %08x start %08x\n",
+			  I915_READ(PRB0_CTL),
+			  I915_READ(PRB0_HEAD),
+			  I915_READ(PRB0_TAIL),
+			  I915_READ(PRB0_START));
+		I915_WRITE(PRB0_HEAD, 0);
+
+		DRM_ERROR("Ring head forced to zero "
+			  "ctl %08x head %08x tail %08x start %08x\n",
+			  I915_READ(PRB0_CTL),
+			  I915_READ(PRB0_HEAD),
+			  I915_READ(PRB0_TAIL),
+			  I915_READ(PRB0_START));
+	}
+
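+	/* The length field holds "pages minus one" (assuming RING_NR_PAGES
+	 * masks that field): for the 128KB ring allocated above,
+	 * (obj->size - 4096) is 0x1f000, i.e. 31, selecting a 32-page ring.
+	 */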
+	I915_WRITE(PRB0_CTL,
+		   ((obj->size - 4096) & RING_NR_PAGES) |
+		   RING_NO_REPORT |
+		   RING_VALID);
+
+	head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
+
+	/* If the head is still not zero, the ring is dead */
+	if (head != 0) {
+		DRM_ERROR("Ring initialization failed "
+			  "ctl %08x head %08x tail %08x start %08x\n",
+			  I915_READ(PRB0_CTL),
+			  I915_READ(PRB0_HEAD),
+			  I915_READ(PRB0_TAIL),
+			  I915_READ(PRB0_START));
+		return -EIO;
+	}
+
+	/* Update our cache of the ring state */
+	i915_kernel_lost_context(dev);
+
+	return 0;
+}
+
+static void
+i915_gem_cleanup_ringbuffer(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	if (dev_priv->ring.ring_obj == NULL)
+		return;
+
+	drm_core_ioremapfree(&dev_priv->ring.map, dev);
+
+	i915_gem_object_unpin(dev_priv->ring.ring_obj);
+	drm_gem_object_unreference(dev_priv->ring.ring_obj);
+	dev_priv->ring.ring_obj = NULL;
+	memset(&dev_priv->ring, 0, sizeof(dev_priv->ring));
+
+	if (dev_priv->hws_obj != NULL) {
+		struct drm_gem_object *obj = dev_priv->hws_obj;
+		struct drm_i915_gem_object *obj_priv = obj->driver_private;
+
+		kunmap(obj_priv->page_list[0]);
+		i915_gem_object_unpin(obj);
+		drm_gem_object_unreference(obj);
+		dev_priv->hws_obj = NULL;
+		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
+		dev_priv->hw_status_page = NULL;
+
+		/* Write high address into HWS_PGA when disabling. */
+		I915_WRITE(HWS_PGA, 0x1ffff000);
+	}
+}
+
+int
+i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int ret;
+
+	if (dev_priv->mm.wedged) {
+		DRM_ERROR("Reenabling wedged hardware, good luck\n");
+		dev_priv->mm.wedged = 0;
+	}
+
+	ret = i915_gem_init_ringbuffer(dev);
+	if (ret != 0)
+		return ret;
+
+	mutex_lock(&dev->struct_mutex);
+	BUG_ON(!list_empty(&dev_priv->mm.active_list));
+	BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
+	BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
+	BUG_ON(!list_empty(&dev_priv->mm.request_list));
+	dev_priv->mm.suspended = 0;
+	mutex_unlock(&dev->struct_mutex);
+
+	drm_irq_install(dev);
+
+	return 0;
+}
+
+int
+i915_gem_leavevt_ioctl(struct drm_device *dev, void *data,
+		       struct drm_file *file_priv)
+{
+	int ret;
+
+	ret = i915_gem_idle(dev);
+	drm_irq_uninstall(dev);
+
+	return ret;
+}
+
+void
+i915_gem_lastclose(struct drm_device *dev)
+{
+	int ret;
+
+	ret = i915_gem_idle(dev);
+	if (ret)
+		DRM_ERROR("failed to idle hardware: %d\n", ret);
+}
+
+void
+i915_gem_load(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+
+	INIT_LIST_HEAD(&dev_priv->mm.active_list);
+	INIT_LIST_HEAD(&dev_priv->mm.flushing_list);
+	INIT_LIST_HEAD(&dev_priv->mm.inactive_list);
+	INIT_LIST_HEAD(&dev_priv->mm.request_list);
+	INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
+			  i915_gem_retire_work_handler);
+	INIT_WORK(&dev_priv->mm.vblank_work,
+		  i915_gem_vblank_work_handler);
+	dev_priv->mm.next_gem_seqno = 1;
+
+	i915_gem_detect_bit_6_swizzle(dev);
+}

+ 201 - 0
drivers/gpu/drm/i915/i915_gem_debug.c

@@ -0,0 +1,201 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+#if WATCH_INACTIVE
+void
+i915_verify_inactive(struct drm_device *dev, char *file, int line)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+
+	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+		obj = obj_priv->obj;
+		if (obj_priv->pin_count || obj_priv->active ||
+		    (obj->write_domain & ~(I915_GEM_DOMAIN_CPU |
+					   I915_GEM_DOMAIN_GTT)))
+			DRM_ERROR("inactive %p (p %d a %d w %x)  %s:%d\n",
+				  obj,
+				  obj_priv->pin_count, obj_priv->active,
+				  obj->write_domain, file, line);
+	}
+}
+#endif /* WATCH_INACTIVE */
+
+
+#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE
+static void
+i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
+		   uint32_t bias, uint32_t mark)
+{
+	uint32_t *mem = kmap_atomic(page, KM_USER0);
+	int i;
+	for (i = start; i < end; i += 4)
+		DRM_INFO("%08x: %08x%s\n",
+			  (int) (bias + i), mem[i / 4],
+			  (bias + i == mark) ? " ********" : "");
+	kunmap_atomic(mem, KM_USER0);
+	/* give syslog time to catch up */
+	msleep(1);
+}
+
+void
+i915_gem_dump_object(struct drm_gem_object *obj, int len,
+		     const char *where, uint32_t mark)
+{
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int page;
+
+	DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
+	for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
+		int page_len, chunk, chunk_len;
+
+		page_len = len - page * PAGE_SIZE;
+		if (page_len > PAGE_SIZE)
+			page_len = PAGE_SIZE;
+
+		for (chunk = 0; chunk < page_len; chunk += 128) {
+			chunk_len = page_len - chunk;
+			if (chunk_len > 128)
+				chunk_len = 128;
+			i915_gem_dump_page(obj_priv->page_list[page],
+					   chunk, chunk + chunk_len,
+					   obj_priv->gtt_offset +
+					   page * PAGE_SIZE,
+					   mark);
+		}
+	}
+}
+#endif
+
+#if WATCH_LRU
+void
+i915_dump_lru(struct drm_device *dev, const char *where)
+{
+	drm_i915_private_t		*dev_priv = dev->dev_private;
+	struct drm_i915_gem_object	*obj_priv;
+
+	DRM_INFO("active list %s {\n", where);
+	list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
+			    list)
+	{
+		DRM_INFO("    %p: %08x\n", obj_priv,
+			 obj_priv->last_rendering_seqno);
+	}
+	DRM_INFO("}\n");
+	DRM_INFO("flushing list %s {\n", where);
+	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
+			    list)
+	{
+		DRM_INFO("    %p: %08x\n", obj_priv,
+			 obj_priv->last_rendering_seqno);
+	}
+	DRM_INFO("}\n");
+	DRM_INFO("inactive %s {\n", where);
+	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) {
+		DRM_INFO("    %p: %08x\n", obj_priv,
+			 obj_priv->last_rendering_seqno);
+	}
+	DRM_INFO("}\n");
+}
+#endif
+
+
+#if WATCH_COHERENCY
+void
+i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
+{
+	struct drm_device *dev = obj->dev;
+	struct drm_i915_gem_object *obj_priv = obj->driver_private;
+	int page;
+	uint32_t *gtt_mapping;
+	uint32_t *backing_map = NULL;
+	int bad_count = 0;
+
+	DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n",
+		 __func__, obj, obj_priv->gtt_offset, handle,
+		 obj->size / 1024);
+
+	gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
+			      obj->size);
+	if (gtt_mapping == NULL) {
+		DRM_ERROR("failed to map GTT space\n");
+		return;
+	}
+
+	for (page = 0; page < obj->size / PAGE_SIZE; page++) {
+		int i;
+
+		backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0);
+
+		if (backing_map == NULL) {
+			DRM_ERROR("failed to map backing page\n");
+			goto out;
+		}
+
+		for (i = 0; i < PAGE_SIZE / 4; i++) {
+			uint32_t cpuval = backing_map[i];
+			uint32_t gttval = readl(gtt_mapping +
+						page * 1024 + i);
+
+			if (cpuval != gttval) {
+				DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
+					 "0x%08x vs 0x%08x\n",
+					 (int)(obj_priv->gtt_offset +
+					       page * PAGE_SIZE + i * 4),
+					 cpuval, gttval);
+				if (bad_count++ >= 8) {
+					DRM_INFO("...\n");
+					goto out;
+				}
+			}
+		}
+		kunmap_atomic(backing_map, KM_USER0);
+		backing_map = NULL;
+	}
+
+ out:
+	if (backing_map != NULL)
+		kunmap_atomic(backing_map, KM_USER0);
+	iounmap(gtt_mapping);
+
+	/* give syslog time to catch up */
+	msleep(1);
+
+	/* Directly flush the object, since we just loaded values with the CPU
+	 * from the backing pages and we don't want to disturb the cache
+	 * management that we're trying to observe.
+	 */
+
+	i915_gem_clflush_object(obj);
+}
+#endif

+ 292 - 0
drivers/gpu/drm/i915/i915_gem_proc.c

@@ -0,0 +1,292 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *    Keith Packard <keithp@keithp.com>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+static int i915_gem_active_info(char *buf, char **start, off_t offset,
+				int request, int *eof, void *data)
+{
+	struct drm_minor *minor = (struct drm_minor *) data;
+	struct drm_device *dev = minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv;
+	int len = 0;
+
+	if (offset > DRM_PROC_LIMIT) {
+		*eof = 1;
+		return 0;
+	}
+
+	*start = &buf[offset];
+	*eof = 0;
+	DRM_PROC_PRINT("Active:\n");
+	list_for_each_entry(obj_priv, &dev_priv->mm.active_list,
+			    list)
+	{
+		struct drm_gem_object *obj = obj_priv->obj;
+		if (obj->name) {
+			DRM_PROC_PRINT("    %p(%d): %08x %08x %d\n",
+				       obj, obj->name,
+				       obj->read_domains, obj->write_domain,
+				       obj_priv->last_rendering_seqno);
+		} else {
+			DRM_PROC_PRINT("       %p: %08x %08x %d\n",
+				       obj,
+				       obj->read_domains, obj->write_domain,
+				       obj_priv->last_rendering_seqno);
+		}
+	}
+	if (len > request + offset)
+		return request;
+	*eof = 1;
+	return len - offset;
+}
+
+static int i915_gem_flushing_info(char *buf, char **start, off_t offset,
+				  int request, int *eof, void *data)
+{
+	struct drm_minor *minor = (struct drm_minor *) data;
+	struct drm_device *dev = minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv;
+	int len = 0;
+
+	if (offset > DRM_PROC_LIMIT) {
+		*eof = 1;
+		return 0;
+	}
+
+	*start = &buf[offset];
+	*eof = 0;
+	DRM_PROC_PRINT("Flushing:\n");
+	list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list,
+			    list)
+	{
+		struct drm_gem_object *obj = obj_priv->obj;
+		if (obj->name) {
+			DRM_PROC_PRINT("    %p(%d): %08x %08x %d\n",
+				       obj, obj->name,
+				       obj->read_domains, obj->write_domain,
+				       obj_priv->last_rendering_seqno);
+		} else {
+			DRM_PROC_PRINT("       %p: %08x %08x %d\n", obj,
+				       obj->read_domains, obj->write_domain,
+				       obj_priv->last_rendering_seqno);
+		}
+	}
+	if (len > request + offset)
+		return request;
+	*eof = 1;
+	return len - offset;
+}
+
+static int i915_gem_inactive_info(char *buf, char **start, off_t offset,
+				  int request, int *eof, void *data)
+{
+	struct drm_minor *minor = (struct drm_minor *) data;
+	struct drm_device *dev = minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_object *obj_priv;
+	int len = 0;
+
+	if (offset > DRM_PROC_LIMIT) {
+		*eof = 1;
+		return 0;
+	}
+
+	*start = &buf[offset];
+	*eof = 0;
+	DRM_PROC_PRINT("Inactive:\n");
+	list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list,
+			    list)
+	{
+		struct drm_gem_object *obj = obj_priv->obj;
+		if (obj->name) {
+			DRM_PROC_PRINT("    %p(%d): %08x %08x %d\n",
+				       obj, obj->name,
+				       obj->read_domains, obj->write_domain,
+				       obj_priv->last_rendering_seqno);
+		} else {
+			DRM_PROC_PRINT("       %p: %08x %08x %d\n", obj,
+				       obj->read_domains, obj->write_domain,
+				       obj_priv->last_rendering_seqno);
+		}
+	}
+	if (len > request + offset)
+		return request;
+	*eof = 1;
+	return len - offset;
+}
+
+static int i915_gem_request_info(char *buf, char **start, off_t offset,
+				 int request, int *eof, void *data)
+{
+	struct drm_minor *minor = (struct drm_minor *) data;
+	struct drm_device *dev = minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_i915_gem_request *gem_request;
+	int len = 0;
+
+	if (offset > DRM_PROC_LIMIT) {
+		*eof = 1;
+		return 0;
+	}
+
+	*start = &buf[offset];
+	*eof = 0;
+	DRM_PROC_PRINT("Request:\n");
+	list_for_each_entry(gem_request, &dev_priv->mm.request_list,
+			    list)
+	{
+		DRM_PROC_PRINT("    %d @ %d %08x\n",
+			       gem_request->seqno,
+			       (int) (jiffies - gem_request->emitted_jiffies),
+			       gem_request->flush_domains);
+	}
+	if (len > request + offset)
+		return request;
+	*eof = 1;
+	return len - offset;
+}
+
+static int i915_gem_seqno_info(char *buf, char **start, off_t offset,
+			       int request, int *eof, void *data)
+{
+	struct drm_minor *minor = (struct drm_minor *) data;
+	struct drm_device *dev = minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int len = 0;
+
+	if (offset > DRM_PROC_LIMIT) {
+		*eof = 1;
+		return 0;
+	}
+
+	*start = &buf[offset];
+	*eof = 0;
+	DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev));
+	DRM_PROC_PRINT("Waiter sequence:  %d\n",
+		       dev_priv->mm.waiting_gem_seqno);
+	DRM_PROC_PRINT("IRQ sequence:     %d\n", dev_priv->mm.irq_gem_seqno);
+	if (len > request + offset)
+		return request;
+	*eof = 1;
+	return len - offset;
+}
+
+
+static int i915_interrupt_info(char *buf, char **start, off_t offset,
+			       int request, int *eof, void *data)
+{
+	struct drm_minor *minor = (struct drm_minor *) data;
+	struct drm_device *dev = minor->dev;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	int len = 0;
+
+	if (offset > DRM_PROC_LIMIT) {
+		*eof = 1;
+		return 0;
+	}
+
+	*start = &buf[offset];
+	*eof = 0;
+	DRM_PROC_PRINT("Interrupt enable:    %08x\n",
+		       I915_READ(IER));
+	DRM_PROC_PRINT("Interrupt identity:  %08x\n",
+		       I915_READ(IIR));
+	DRM_PROC_PRINT("Interrupt mask:      %08x\n",
+		       I915_READ(IMR));
+	DRM_PROC_PRINT("Pipe A stat:         %08x\n",
+		       I915_READ(PIPEASTAT));
+	DRM_PROC_PRINT("Pipe B stat:         %08x\n",
+		       I915_READ(PIPEBSTAT));
+	DRM_PROC_PRINT("Interrupts received: %d\n",
+		       atomic_read(&dev_priv->irq_received));
+	DRM_PROC_PRINT("Current sequence:    %d\n",
+		       i915_get_gem_seqno(dev));
+	DRM_PROC_PRINT("Waiter sequence:     %d\n",
+		       dev_priv->mm.waiting_gem_seqno);
+	DRM_PROC_PRINT("IRQ sequence:        %d\n",
+		       dev_priv->mm.irq_gem_seqno);
+	if (len > request + offset)
+		return request;
+	*eof = 1;
+	return len - offset;
+}
+
+static struct drm_proc_list {
+	/** file name */
+	const char *name;
+	/** proc callback */
+	int (*f) (char *, char **, off_t, int, int *, void *);
+} i915_gem_proc_list[] = {
+	{"i915_gem_active", i915_gem_active_info},
+	{"i915_gem_flushing", i915_gem_flushing_info},
+	{"i915_gem_inactive", i915_gem_inactive_info},
+	{"i915_gem_request", i915_gem_request_info},
+	{"i915_gem_seqno", i915_gem_seqno_info},
+	{"i915_gem_interrupt", i915_interrupt_info},
+};
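+/*
+ * These entries appear under /proc/dri/<minor>/ once registered below;
+ * reading i915_gem_seqno, for instance ("cat /proc/dri/0/i915_gem_seqno",
+ * minor number illustrative), dumps the current, waiter and IRQ sequence
+ * numbers via i915_gem_seqno_info().
+ */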
+
+#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list)
+
+int i915_gem_proc_init(struct drm_minor *minor)
+{
+	struct proc_dir_entry *ent;
+	int i, j;
+
+	for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) {
+		ent = create_proc_entry(i915_gem_proc_list[i].name,
+					S_IFREG | S_IRUGO, minor->dev_root);
+		if (!ent) {
+			DRM_ERROR("Cannot create /proc/dri/.../%s\n",
+				  i915_gem_proc_list[i].name);
+			for (j = 0; j < i; j++)
+				remove_proc_entry(i915_gem_proc_list[j].name,
+						  minor->dev_root);
+			return -1;
+		}
+		ent->read_proc = i915_gem_proc_list[i].f;
+		ent->data = minor;
+	}
+	return 0;
+}
+
+void i915_gem_proc_cleanup(struct drm_minor *minor)
+{
+	int i;
+
+	if (!minor->dev_root)
+		return;
+
+	for (i = 0; i < I915_GEM_PROC_ENTRIES; i++)
+		remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root);
+}

+ 257 - 0
drivers/gpu/drm/i915/i915_gem_tiling.c

@@ -0,0 +1,257 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+/** @file i915_gem_tiling.c
+ *
+ * Support for managing tiling state of buffer objects.
+ *
+ * The idea behind tiling is to increase cache hit rates by rearranging
+ * pixel data so that a group of pixel accesses are in the same cacheline.
+ * Performance improvements from doing this on the back/depth buffer are on
+ * the order of 30%.
+ *
+ * Intel architectures make this somewhat more complicated, though, by
+ * adjustments made to addressing of data when the memory is in interleaved
+ * mode (matched pairs of DIMMS) to improve memory bandwidth.
+ * For interleaved memory, the CPU sends every sequential 64 bytes
+ * to an alternate memory channel so it can get the bandwidth from both.
+ *
+ * The GPU also rearranges its accesses for increased bandwidth to interleaved
+ * memory, and it matches what the CPU does for non-tiled.  However, when tiled
+ * it does it a little differently, since one walks addresses not just in the
+ * X direction but also Y.  So, along with alternating channels when bit
+ * 6 of the address flips, it also alternates when other bits flip --  Bits 9
+ * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines)
+ * are common to both the 915 and 965-class hardware.
+ *
+ * The CPU also sometimes XORs in higher bits as well, to improve
+ * bandwidth doing strided access like we do so frequently in graphics.  This
+ * is called "Channel XOR Randomization" in the MCH documentation.  The result
+ * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address
+ * decode.
+ *
+ * All of this bit 6 XORing has an effect on our memory management,
+ * as we need to make sure that the 3d driver can correctly address object
+ * contents.
+ *
+ * If we don't have interleaved memory, all tiling is safe and no swizzling is
+ * required.
+ *
+ * When bit 17 is XORed in, we simply refuse to tile at all.  Bit
+ * 17 is not just a page offset, so as we page an object out and back in,
+ * individual pages in it will have different bit 17 addresses, resulting in
+ * each 64 bytes being swapped with its neighbor!
+ *
+ * Otherwise, if interleaved, we have to tell the 3d driver what the address
+ * swizzling it needs to do is, since it's writing with the CPU to the pages
+ * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the
+ * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling
+ * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order
+ * to match what the GPU expects.
+ */
+
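+/*
+ * Illustrative sketch only (not part of this patch): given a swizzle mode as
+ * described above, this is how a CPU-side access would fold the higher
+ * address bits back into bit 6 before touching object memory.  The helper
+ * name is hypothetical.
+ */
+#if 0
+static unsigned long
+swizzled_offset(unsigned long offset, uint32_t swizzle_mode)
+{
+	switch (swizzle_mode) {
+	case I915_BIT_6_SWIZZLE_9:
+		return offset ^ (((offset >> 9) & 1) << 6);
+	case I915_BIT_6_SWIZZLE_9_10:
+		return offset ^ ((((offset >> 9) ^ (offset >> 10)) & 1) << 6);
+	case I915_BIT_6_SWIZZLE_9_11:
+		return offset ^ ((((offset >> 9) ^ (offset >> 11)) & 1) << 6);
+	case I915_BIT_6_SWIZZLE_9_10_11:
+		return offset ^ ((((offset >> 9) ^ (offset >> 10) ^
+				   (offset >> 11)) & 1) << 6);
+	default:	/* I915_BIT_6_SWIZZLE_NONE */
+		return offset;
+	}
+}
+#endif
+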
+/**
+ * Detects bit 6 swizzling of address lookup between IGD access and CPU
+ * access through main memory.
+ */
+void
+i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+	uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+
+	if (!IS_I9XX(dev)) {
+		/* As far as we know, the 865 doesn't have these bit 6
+		 * swizzling issues.
+		 */
+		swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+		swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+	} else if ((!IS_I965G(dev) && !IS_G33(dev)) || IS_I965GM(dev) ||
+		   IS_GM45(dev)) {
+		uint32_t dcc;
+
+		/* On 915-945 and GM965, channel interleave by the CPU is
+		 * determined by DCC.  The CPU will alternate based on bit 6
+		 * in interleaved mode, and the GPU will then also alternate
+		 * on bit 6, 9, and 10 for X, but the CPU may also optionally
+		 * alternate based on bit 17 (XOR not disabled and XOR
+		 * bit == 17).
+		 */
+		dcc = I915_READ(DCC);
+		switch (dcc & DCC_ADDRESSING_MODE_MASK) {
+		case DCC_ADDRESSING_MODE_SINGLE_CHANNEL:
+		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC:
+			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+			break;
+		case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED:
+			if (IS_I915G(dev) || IS_I915GM(dev) ||
+			    dcc & DCC_CHANNEL_XOR_DISABLE) {
+				swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+				swizzle_y = I915_BIT_6_SWIZZLE_9;
+			} else if (IS_I965GM(dev) || IS_GM45(dev)) {
+				/* GM965 only does bit 11-based channel
+				 * randomization
+				 */
+				swizzle_x = I915_BIT_6_SWIZZLE_9_10_11;
+				swizzle_y = I915_BIT_6_SWIZZLE_9_11;
+			} else {
+				/* Bit 17 or perhaps other swizzling */
+				swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+				swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+			}
+			break;
+		}
+		if (dcc == 0xffffffff) {
+			DRM_ERROR("Couldn't read from MCHBAR.  "
+				  "Disabling tiling.\n");
+			swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
+			swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
+		}
+	} else {
+		/* The 965, G33, and newer, have a very flexible memory
+		 * configuration.  It will enable dual-channel mode
+		 * (interleaving) on as much memory as it can, and the GPU
+		 * will additionally sometimes enable different bit 6
+		 * swizzling for tiled objects from the CPU.
+		 *
+		 * Here's what I found on the G965:
+		 *    slot fill         memory size  swizzling
+		 * 0A   0B   1A   1B    1-ch   2-ch
+		 * 512  0    0    0     512    0     O
+		 * 512  0    512  0     16     1008  X
+		 * 512  0    0    512   16     1008  X
+		 * 0    512  0    512   16     1008  X
+		 * 1024 1024 1024 0     2048   1024  O
+		 *
+		 * We could probably detect this based on either the DRB
+		 * matching, which was the case for the swizzling required in
+		 * the table above, or from the 1-ch value being less than
+		 * the minimum size of a rank.
+		 */
+		if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) {
+			swizzle_x = I915_BIT_6_SWIZZLE_NONE;
+			swizzle_y = I915_BIT_6_SWIZZLE_NONE;
+		} else {
+			swizzle_x = I915_BIT_6_SWIZZLE_9_10;
+			swizzle_y = I915_BIT_6_SWIZZLE_9;
+		}
+	}
+
+	dev_priv->mm.bit_6_swizzle_x = swizzle_x;
+	dev_priv->mm.bit_6_swizzle_y = swizzle_y;
+}
+
+/**
+ * Sets the tiling mode of an object, returning the required swizzling of
+ * bit 6 of addresses in the object.
+ */
+int
+i915_gem_set_tiling(struct drm_device *dev, void *data,
+		   struct drm_file *file_priv)
+{
+	struct drm_i915_gem_set_tiling *args = data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL)
+		return -EINVAL;
+	obj_priv = obj->driver_private;
+
+	mutex_lock(&dev->struct_mutex);
+
+	if (args->tiling_mode == I915_TILING_NONE) {
+		obj_priv->tiling_mode = I915_TILING_NONE;
+		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+	} else {
+		if (args->tiling_mode == I915_TILING_X)
+			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
+		else
+			args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+		/* If we can't handle the swizzling, make it untiled. */
+		if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) {
+			args->tiling_mode = I915_TILING_NONE;
+			args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+		}
+	}
+	obj_priv->tiling_mode = args->tiling_mode;
+
+	mutex_unlock(&dev->struct_mutex);
+
+	drm_gem_object_unreference(obj);
+
+	return 0;
+}
+
+/**
+ * Returns the current tiling mode and required bit 6 swizzling for the object.
+ */
+int
+i915_gem_get_tiling(struct drm_device *dev, void *data,
+		   struct drm_file *file_priv)
+{
+	struct drm_i915_gem_get_tiling *args = data;
+	drm_i915_private_t *dev_priv = dev->dev_private;
+	struct drm_gem_object *obj;
+	struct drm_i915_gem_object *obj_priv;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL)
+		return -EINVAL;
+	obj_priv = obj->driver_private;
+
+	mutex_lock(&dev->struct_mutex);
+
+	args->tiling_mode = obj_priv->tiling_mode;
+	switch (obj_priv->tiling_mode) {
+	case I915_TILING_X:
+		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
+		break;
+	case I915_TILING_Y:
+		args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y;
+		break;
+	case I915_TILING_NONE:
+		args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE;
+		break;
+	default:
+		DRM_ERROR("unknown tiling mode\n");
+	}
+
+	mutex_unlock(&dev->struct_mutex);
+
+	drm_gem_object_unreference(obj);
+
+	return 0;
+}

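For reference, user space reaches the two handlers above through the corresponding GEM ioctls. A minimal sketch (assuming an open DRM file descriptor, a valid GEM handle, and the DRM_IOCTL_I915_GEM_SET_TILING request from i915_drm.h):

	#include <errno.h>
	#include <stdint.h>
	#include <sys/ioctl.h>
	#include "i915_drm.h"

	/* Ask for X tiling on a GEM object and report the swizzle mode the
	 * CPU must apply when writing the object's pages directly. */
	static int set_x_tiling(int fd, uint32_t handle, uint32_t *swizzle)
	{
		struct drm_i915_gem_set_tiling arg = {
			.handle = handle,
			.tiling_mode = I915_TILING_X,
		};

		if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &arg) != 0)
			return -errno;

		/* The kernel may have downgraded tiling_mode to
		 * I915_TILING_NONE if the swizzling was unknown, so callers
		 * should re-check arg.tiling_mode as well. */
		*swizzle = arg.swizzle_mode;
		return 0;
	}
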
+ 378 - 136
drivers/gpu/drm/i915/i915_irq.c

@@ -31,12 +31,92 @@
 #include "i915_drm.h"
 #include "i915_drv.h"
 
-#define USER_INT_FLAG (1<<1)
-#define VSYNC_PIPEB_FLAG (1<<5)
-#define VSYNC_PIPEA_FLAG (1<<7)
-
 #define MAX_NOPID ((u32)~0)
 
+/** These are the interrupts used by the driver */
+#define I915_INTERRUPT_ENABLE_MASK (I915_USER_INTERRUPT |		\
+				    I915_ASLE_INTERRUPT |		\
+				    I915_DISPLAY_PIPE_A_EVENT_INTERRUPT | \
+				    I915_DISPLAY_PIPE_B_EVENT_INTERRUPT)
+
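+/* IMR writes are posted; the (void) read-back in each helper below forces
+ * the write to reach the device before interrupt state is relied upon. */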
+void
+i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	if ((dev_priv->irq_mask_reg & mask) != 0) {
+		dev_priv->irq_mask_reg &= ~mask;
+		I915_WRITE(IMR, dev_priv->irq_mask_reg);
+		(void) I915_READ(IMR);
+	}
+}
+
+static inline void
+i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
+{
+	if ((dev_priv->irq_mask_reg & mask) != mask) {
+		dev_priv->irq_mask_reg |= mask;
+		I915_WRITE(IMR, dev_priv->irq_mask_reg);
+		(void) I915_READ(IMR);
+	}
+}
+
+/**
+ * i915_get_pipe - return the pipe associated with a given plane
+ * @dev: DRM device
+ * @plane: plane to look for
+ *
+ * The Intel Mesa & 2D drivers call the vblank routines with a plane number
+ * rather than a pipe number, since they may not always be equal.  This routine
+ * maps the given @plane back to a pipe number.
+ */
+static int
+i915_get_pipe(struct drm_device *dev, int plane)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	u32 dspcntr;
+
+	dspcntr = plane ? I915_READ(DSPBCNTR) : I915_READ(DSPACNTR);
+
+	return dspcntr & DISPPLANE_SEL_PIPE_MASK ? 1 : 0;
+}
+
+/**
+ * i915_get_plane - return the plane associated with a given pipe
+ * @dev: DRM device
+ * @pipe: pipe to look for
+ *
+ * The Intel Mesa & 2D drivers call the vblank routines with a plane number
+ * rather than a pipe number, since they may not always be equal.  This routine
+ * maps the given @pipe back to a plane number.
+ */
+static int
+i915_get_plane(struct drm_device *dev, int pipe)
+{
+	if (i915_get_pipe(dev, 0) == pipe)
+		return 0;
+	return 1;
+}
+
+/**
+ * i915_pipe_enabled - check if a pipe is enabled
+ * @dev: DRM device
+ * @pipe: pipe to check
+ *
+ * Reading certain registers when the pipe is disabled can hang the chip.
+ * Use this routine to make sure the PLL is running and the pipe is active
+ * before reading such registers if unsure.
+ */
+static int
+i915_pipe_enabled(struct drm_device *dev, int pipe)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	unsigned long pipeconf = pipe ? PIPEBCONF : PIPEACONF;
+
+	if (I915_READ(pipeconf) & PIPEACONF_ENABLE)
+		return 1;
+
+	return 0;
+}
+
 /**
  * Emit blits for scheduled buffer swaps.
  *
@@ -48,8 +128,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
 	unsigned long irqflags;
 	struct list_head *list, *tmp, hits, *hit;
 	int nhits, nrects, slice[2], upper[2], lower[2], i;
-	unsigned counter[2] = { atomic_read(&dev->vbl_received),
-				atomic_read(&dev->vbl_received2) };
+	unsigned counter[2];
 	struct drm_drawable_info *drw;
 	drm_i915_sarea_t *sarea_priv = dev_priv->sarea_priv;
 	u32 cpp = dev_priv->cpp;
@@ -71,6 +150,9 @@ static void i915_vblank_tasklet(struct drm_device *dev)
 		src_pitch >>= 2;
 	}
 
+	counter[0] = drm_vblank_count(dev, 0);
+	counter[1] = drm_vblank_count(dev, 1);
+
 	DRM_DEBUG("\n");
 
 	INIT_LIST_HEAD(&hits);
@@ -83,12 +165,14 @@ static void i915_vblank_tasklet(struct drm_device *dev)
 	list_for_each_safe(list, tmp, &dev_priv->vbl_swaps.head) {
 		drm_i915_vbl_swap_t *vbl_swap =
 			list_entry(list, drm_i915_vbl_swap_t, head);
+		int pipe = i915_get_pipe(dev, vbl_swap->plane);
 
-		if ((counter[vbl_swap->pipe] - vbl_swap->sequence) > (1<<23))
+		if ((counter[pipe] - vbl_swap->sequence) > (1<<23))
 			continue;
 
 		list_del(list);
 		dev_priv->swaps_pending--;
+		drm_vblank_put(dev, pipe);
 
 		spin_unlock(&dev_priv->swaps_lock);
 		spin_lock(&dev->drw_lock);
@@ -181,7 +265,7 @@ static void i915_vblank_tasklet(struct drm_device *dev)
 			drm_i915_vbl_swap_t *swap_hit =
 				list_entry(hit, drm_i915_vbl_swap_t, head);
 			struct drm_clip_rect *rect;
-			int num_rects, pipe;
+			int num_rects, plane;
 			unsigned short top, bottom;
 
 			drw = drm_get_drawable_info(dev, swap_hit->drw_id);
@@ -190,9 +274,9 @@ static void i915_vblank_tasklet(struct drm_device *dev)
 				continue;
 
 			rect = drw->rects;
-			pipe = swap_hit->pipe;
-			top = upper[pipe];
-			bottom = lower[pipe];
+			plane = swap_hit->plane;
+			top = upper[plane];
+			bottom = lower[plane];
 
 			for (num_rects = drw->num_rects; num_rects--; rect++) {
 				int y1 = max(rect->y1, top);
@@ -229,61 +313,139 @@ static void i915_vblank_tasklet(struct drm_device *dev)
 	}
 }
 
+u32 i915_get_vblank_counter(struct drm_device *dev, int plane)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	unsigned long high_frame;
+	unsigned long low_frame;
+	u32 high1, high2, low, count;
+	int pipe;
+
+	pipe = i915_get_pipe(dev, plane);
+	high_frame = pipe ? PIPEBFRAMEHIGH : PIPEAFRAMEHIGH;
+	low_frame = pipe ? PIPEBFRAMEPIXEL : PIPEAFRAMEPIXEL;
+
+	if (!i915_pipe_enabled(dev, pipe)) {
+		DRM_ERROR("trying to get vblank count for disabled pipe %d\n", pipe);
+		return 0;
+	}
+
+	/*
+	 * High & low register fields aren't synchronized, so make sure
+	 * we get a low value that's stable across two reads of the high
+	 * register.
+	 */
+	do {
+		high1 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+			 PIPE_FRAME_HIGH_SHIFT);
+		low =  ((I915_READ(low_frame) & PIPE_FRAME_LOW_MASK) >>
+			PIPE_FRAME_LOW_SHIFT);
+		high2 = ((I915_READ(high_frame) & PIPE_FRAME_HIGH_MASK) >>
+			 PIPE_FRAME_HIGH_SHIFT);
+	} while (high1 != high2);
+
+	count = (high1 << 8) | low;
+
+	return count;
+}
+
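+/* With a GEM ring, emitting the swap blits must happen under
+ * dev->struct_mutex, which cannot be taken from tasklet context; the IRQ
+ * handler therefore punts to this process-context work handler instead
+ * (see the dispatch at the end of i915_driver_irq_handler). */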
+void
+i915_gem_vblank_work_handler(struct work_struct *work)
+{
+	drm_i915_private_t *dev_priv;
+	struct drm_device *dev;
+
+	dev_priv = container_of(work, drm_i915_private_t,
+				mm.vblank_work);
+	dev = dev_priv->dev;
+
+	mutex_lock(&dev->struct_mutex);
+	i915_vblank_tasklet(dev);
+	mutex_unlock(&dev->struct_mutex);
+}
+
 irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	u16 temp;
+	u32 iir;
 	u32 pipea_stats, pipeb_stats;
+	int vblank = 0;
 
-	pipea_stats = I915_READ(I915REG_PIPEASTAT);
-	pipeb_stats = I915_READ(I915REG_PIPEBSTAT);
+	atomic_inc(&dev_priv->irq_received);
 
-	temp = I915_READ16(I915REG_INT_IDENTITY_R);
+	if (dev->pdev->msi_enabled)
+		I915_WRITE(IMR, ~0);
+	iir = I915_READ(IIR);
 
-	temp &= (USER_INT_FLAG | VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG);
+	if (iir == 0) {
+		if (dev->pdev->msi_enabled) {
+			I915_WRITE(IMR, dev_priv->irq_mask_reg);
+			(void) I915_READ(IMR);
+		}
+		return IRQ_NONE;
+	}
 
-	DRM_DEBUG("%s flag=%08x\n", __FUNCTION__, temp);
+	/*
+	 * Clear the PIPE(A|B)STAT regs before the IIR otherwise
+	 * we may get extra interrupts.
+	 */
+	if (iir & I915_DISPLAY_PIPE_A_EVENT_INTERRUPT) {
+		pipea_stats = I915_READ(PIPEASTAT);
+		if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A))
+			pipea_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
+					 PIPE_VBLANK_INTERRUPT_ENABLE);
+		else if (pipea_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
+					PIPE_VBLANK_INTERRUPT_STATUS)) {
+			vblank++;
+			drm_handle_vblank(dev, i915_get_plane(dev, 0));
+		}
 
-	if (temp == 0)
-		return IRQ_NONE;
+		I915_WRITE(PIPEASTAT, pipea_stats);
+	}
+	if (iir & I915_DISPLAY_PIPE_B_EVENT_INTERRUPT) {
+		pipeb_stats = I915_READ(PIPEBSTAT);
+		/* Ack the event */
+		I915_WRITE(PIPEBSTAT, pipeb_stats);
+
+		/* The vblank interrupt gets enabled even if we didn't ask for
+		   it, so make sure it's shut down again */
+		if (!(dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B))
+			pipeb_stats &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
+					 PIPE_VBLANK_INTERRUPT_ENABLE);
+		else if (pipeb_stats & (PIPE_START_VBLANK_INTERRUPT_STATUS|
+					PIPE_VBLANK_INTERRUPT_STATUS)) {
+			vblank++;
+			drm_handle_vblank(dev, i915_get_plane(dev, 1));
+		}
 
-	I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
-	(void) I915_READ16(I915REG_INT_IDENTITY_R);
-	DRM_READMEMORYBARRIER();
+		if (pipeb_stats & I915_LEGACY_BLC_EVENT_STATUS)
+			opregion_asle_intr(dev);
+		I915_WRITE(PIPEBSTAT, pipeb_stats);
+	}
+
+	I915_WRITE(IIR, iir);
+	if (dev->pdev->msi_enabled)
+		I915_WRITE(IMR, dev_priv->irq_mask_reg);
+	(void) I915_READ(IIR); /* Flush posted writes */
 
-	dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
+	if (dev_priv->sarea_priv)
+		dev_priv->sarea_priv->last_dispatch =
+			READ_BREADCRUMB(dev_priv);
 
-	if (temp & USER_INT_FLAG)
+	if (iir & I915_USER_INTERRUPT) {
+		dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev);
 		DRM_WAKEUP(&dev_priv->irq_queue);
+	}
 
-	if (temp & (VSYNC_PIPEA_FLAG | VSYNC_PIPEB_FLAG)) {
-		int vblank_pipe = dev_priv->vblank_pipe;
-
-		if ((vblank_pipe &
-		     (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B))
-		    == (DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B)) {
-			if (temp & VSYNC_PIPEA_FLAG)
-				atomic_inc(&dev->vbl_received);
-			if (temp & VSYNC_PIPEB_FLAG)
-				atomic_inc(&dev->vbl_received2);
-		} else if (((temp & VSYNC_PIPEA_FLAG) &&
-			    (vblank_pipe & DRM_I915_VBLANK_PIPE_A)) ||
-			   ((temp & VSYNC_PIPEB_FLAG) &&
-			    (vblank_pipe & DRM_I915_VBLANK_PIPE_B)))
-			atomic_inc(&dev->vbl_received);
-
-		DRM_WAKEUP(&dev->vbl_queue);
-		drm_vbl_send_signals(dev);
-
-		if (dev_priv->swaps_pending > 0)
+	if (iir & I915_ASLE_INTERRUPT)
+		opregion_asle_intr(dev);
+
+	if (vblank && dev_priv->swaps_pending > 0) {
+		if (dev_priv->ring.ring_obj == NULL)
 			drm_locked_tasklet(dev, i915_vblank_tasklet);
-		I915_WRITE(I915REG_PIPEASTAT,
-			pipea_stats|I915_VBLANK_INTERRUPT_ENABLE|
-			I915_VBLANK_CLEAR);
-		I915_WRITE(I915REG_PIPEBSTAT,
-			pipeb_stats|I915_VBLANK_INTERRUPT_ENABLE|
-			I915_VBLANK_CLEAR);
+		else
+			schedule_work(&dev_priv->mm.vblank_work);
 	}
 
 	return IRQ_HANDLED;
@@ -298,23 +460,45 @@ static int i915_emit_irq(struct drm_device * dev)
 
 	DRM_DEBUG("\n");
 
-	dev_priv->sarea_priv->last_enqueue = ++dev_priv->counter;
-
+	dev_priv->counter++;
 	if (dev_priv->counter > 0x7FFFFFFFUL)
-		dev_priv->sarea_priv->last_enqueue = dev_priv->counter = 1;
+		dev_priv->counter = 1;
+	if (dev_priv->sarea_priv)
+		dev_priv->sarea_priv->last_enqueue = dev_priv->counter;
 
 	BEGIN_LP_RING(6);
-	OUT_RING(CMD_STORE_DWORD_IDX);
-	OUT_RING(20);
+	OUT_RING(MI_STORE_DWORD_INDEX);
+	OUT_RING(5 << MI_STORE_DWORD_INDEX_SHIFT);
 	OUT_RING(dev_priv->counter);
 	OUT_RING(0);
 	OUT_RING(0);
-	OUT_RING(GFX_OP_USER_INTERRUPT);
+	OUT_RING(MI_USER_INTERRUPT);
 	ADVANCE_LP_RING();
 
 	return dev_priv->counter;
 }
 
+void i915_user_irq_get(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+	spin_lock(&dev_priv->user_irq_lock);
+	if (dev->irq_enabled && (++dev_priv->user_irq_refcount == 1))
+		i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+	spin_unlock(&dev_priv->user_irq_lock);
+}
+
+void i915_user_irq_put(struct drm_device *dev)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+
+	spin_lock(&dev_priv->user_irq_lock);
+	BUG_ON(dev->irq_enabled && dev_priv->user_irq_refcount <= 0);
+	if (dev->irq_enabled && (--dev_priv->user_irq_refcount == 0))
+		i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+	spin_unlock(&dev_priv->user_irq_lock);
+}
+
 static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
@@ -323,55 +507,34 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
 	DRM_DEBUG("irq_nr=%d breadcrumb=%d\n", irq_nr,
 		  READ_BREADCRUMB(dev_priv));
 
-	if (READ_BREADCRUMB(dev_priv) >= irq_nr)
+	if (READ_BREADCRUMB(dev_priv) >= irq_nr) {
+		if (dev_priv->sarea_priv) {
+			dev_priv->sarea_priv->last_dispatch =
+				READ_BREADCRUMB(dev_priv);
+		}
 		return 0;
+	}
 
-	dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
+	if (dev_priv->sarea_priv)
+		dev_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
 
+	i915_user_irq_get(dev);
 	DRM_WAIT_ON(ret, dev_priv->irq_queue, 3 * DRM_HZ,
 		    READ_BREADCRUMB(dev_priv) >= irq_nr);
+	i915_user_irq_put(dev);
 
 	if (ret == -EBUSY) {
 		DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
 			  READ_BREADCRUMB(dev_priv), (int)dev_priv->counter);
 	}
 
-	dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv);
-	return ret;
-}
-
-static int i915_driver_vblank_do_wait(struct drm_device *dev, unsigned int *sequence,
-				      atomic_t *counter)
-{
-	drm_i915_private_t *dev_priv = dev->dev_private;
-	unsigned int cur_vblank;
-	int ret = 0;
-
-	if (!dev_priv) {
-		DRM_ERROR("called with no initialization\n");
-		return -EINVAL;
-	}
-
-	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
-		    (((cur_vblank = atomic_read(counter))
-			- *sequence) <= (1<<23)));
-
-	*sequence = cur_vblank;
+	if (dev_priv->sarea_priv)
+		dev_priv->sarea_priv->last_dispatch =
+			READ_BREADCRUMB(dev_priv);
 
 	return ret;
 }
 
-
-int i915_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
-{
-	return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received);
-}
-
-int i915_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
-{
-	return i915_driver_vblank_do_wait(dev, sequence, &dev->vbl_received2);
-}
-
 /* Needs the lock as it touches the ring.
  */
 int i915_irq_emit(struct drm_device *dev, void *data,
@@ -381,14 +544,15 @@ int i915_irq_emit(struct drm_device *dev, void *data,
 	drm_i915_irq_emit_t *emit = data;
 	int result;
 
-	LOCK_TEST_WITH_RETURN(dev, file_priv);
+	RING_LOCK_TEST_WITH_RETURN(dev, file_priv);
 
 	if (!dev_priv) {
 		DRM_ERROR("called with no initialization\n");
 		return -EINVAL;
 	}
-
+	mutex_lock(&dev->struct_mutex);
 	result = i915_emit_irq(dev);
+	mutex_unlock(&dev->struct_mutex);
 
 	if (DRM_COPY_TO_USER(emit->irq_seq, &result, sizeof(int))) {
 		DRM_ERROR("copy_to_user\n");
@@ -414,18 +578,74 @@ int i915_irq_wait(struct drm_device *dev, void *data,
 	return i915_wait_irq(dev, irqwait->irq_seq);
 }
 
-static void i915_enable_interrupt (struct drm_device *dev)
+int i915_enable_vblank(struct drm_device *dev, int plane)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	u16 flag;
+	int pipe = i915_get_pipe(dev, plane);
+	u32	pipestat_reg = 0;
+	u32	pipestat;
+
+	switch (pipe) {
+	case 0:
+		pipestat_reg = PIPEASTAT;
+		i915_enable_irq(dev_priv, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT);
+		break;
+	case 1:
+		pipestat_reg = PIPEBSTAT;
+		i915_enable_irq(dev_priv, I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
+		break;
+	default:
+		DRM_ERROR("tried to enable vblank on non-existent pipe %d\n",
+			  pipe);
+		break;
+	}
+
+	if (pipestat_reg) {
+		pipestat = I915_READ(pipestat_reg);
+		if (IS_I965G(dev))
+			pipestat |= PIPE_START_VBLANK_INTERRUPT_ENABLE;
+		else
+			pipestat |= PIPE_VBLANK_INTERRUPT_ENABLE;
+		/* Clear any stale interrupt status */
+		pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
+			     PIPE_VBLANK_INTERRUPT_STATUS);
+		I915_WRITE(pipestat_reg, pipestat);
+	}
+
+	return 0;
+}
 
-	flag = 0;
-	if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_A)
-		flag |= VSYNC_PIPEA_FLAG;
-	if (dev_priv->vblank_pipe & DRM_I915_VBLANK_PIPE_B)
-		flag |= VSYNC_PIPEB_FLAG;
+void i915_disable_vblank(struct drm_device *dev, int plane)
+{
+	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int pipe = i915_get_pipe(dev, plane);
+	u32	pipestat_reg = 0;
+	u32	pipestat;
+
+	switch (pipe) {
+	case 0:
+		pipestat_reg = PIPEASTAT;
+		i915_disable_irq(dev_priv, I915_DISPLAY_PIPE_A_EVENT_INTERRUPT);
+		break;
+	case 1:
+		pipestat_reg = PIPEBSTAT;
+		i915_disable_irq(dev_priv, I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
+		break;
+	default:
+		DRM_ERROR("tried to disable vblank on non-existent pipe %d\n",
+			  pipe);
+		break;
+	}
 
-	I915_WRITE16(I915REG_INT_ENABLE_R, USER_INT_FLAG | flag);
+	if (pipestat_reg) {
+		pipestat = I915_READ(pipestat_reg);
+		pipestat &= ~(PIPE_START_VBLANK_INTERRUPT_ENABLE |
+			      PIPE_VBLANK_INTERRUPT_ENABLE);
+		/* Clear any stale interrupt status */
+		pipestat |= (PIPE_START_VBLANK_INTERRUPT_STATUS |
+			     PIPE_VBLANK_INTERRUPT_STATUS);
+		I915_WRITE(pipestat_reg, pipestat);
+	}
 }
 
 /* Set the vblank monitor pipe
@@ -434,22 +654,12 @@ int i915_vblank_pipe_set(struct drm_device *dev, void *data,
 			 struct drm_file *file_priv)
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
-	drm_i915_vblank_pipe_t *pipe = data;
 
 	if (!dev_priv) {
 		DRM_ERROR("called with no initialization\n");
 		return -EINVAL;
 	}
 
-	if (pipe->pipe & ~(DRM_I915_VBLANK_PIPE_A|DRM_I915_VBLANK_PIPE_B)) {
-		DRM_ERROR("called with invalid pipe 0x%x\n", pipe->pipe);
-		return -EINVAL;
-	}
-
-	dev_priv->vblank_pipe = pipe->pipe;
-
-	i915_enable_interrupt (dev);
-
 	return 0;
 }
 
@@ -458,19 +668,13 @@ int i915_vblank_pipe_get(struct drm_device *dev, void *data,
 {
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_vblank_pipe_t *pipe = data;
-	u16 flag;
 
 	if (!dev_priv) {
 		DRM_ERROR("called with no initialization\n");
 		return -EINVAL;
 	}
 
-	flag = I915_READ(I915REG_INT_ENABLE_R);
-	pipe->pipe = 0;
-	if (flag & VSYNC_PIPEA_FLAG)
-		pipe->pipe |= DRM_I915_VBLANK_PIPE_A;
-	if (flag & VSYNC_PIPEB_FLAG)
-		pipe->pipe |= DRM_I915_VBLANK_PIPE_B;
+	pipe->pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
 
 	return 0;
 }
@@ -484,11 +688,12 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
 	drm_i915_private_t *dev_priv = dev->dev_private;
 	drm_i915_vblank_swap_t *swap = data;
 	drm_i915_vbl_swap_t *vbl_swap;
-	unsigned int pipe, seqtype, curseq;
+	unsigned int pipe, seqtype, curseq, plane;
 	unsigned long irqflags;
 	struct list_head *list;
+	int ret;
 
-	if (!dev_priv) {
+	if (!dev_priv || !dev_priv->sarea_priv) {
 		DRM_ERROR("%s called with no initialization\n", __func__);
 		return -EINVAL;
 	}
@@ -504,7 +709,8 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
-	pipe = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
+	plane = (swap->seqtype & _DRM_VBLANK_SECONDARY) ? 1 : 0;
+	pipe = i915_get_pipe(dev, plane);
 
 	seqtype = swap->seqtype & (_DRM_VBLANK_RELATIVE | _DRM_VBLANK_ABSOLUTE);
 
@@ -523,7 +729,14 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
 
 	spin_unlock_irqrestore(&dev->drw_lock, irqflags);
 
-	curseq = atomic_read(pipe ? &dev->vbl_received2 : &dev->vbl_received);
+	/*
+	 * We take the ref here and put it when the swap actually completes
+	 * in the tasklet.
+	 */
+	ret = drm_vblank_get(dev, pipe);
+	if (ret)
+		return ret;
+	curseq = drm_vblank_count(dev, pipe);
 
 	if (seqtype == _DRM_VBLANK_RELATIVE)
 		swap->sequence += curseq;
@@ -533,6 +746,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
 			swap->sequence = curseq + 1;
 		} else {
 			DRM_DEBUG("Missed target sequence\n");
+			drm_vblank_put(dev, pipe);
 			return -EINVAL;
 		}
 	}
@@ -543,7 +757,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
 		vbl_swap = list_entry(list, drm_i915_vbl_swap_t, head);
 
 		if (vbl_swap->drw_id == swap->drawable &&
-		    vbl_swap->pipe == pipe &&
+		    vbl_swap->plane == plane &&
 		    vbl_swap->sequence == swap->sequence) {
 			spin_unlock_irqrestore(&dev_priv->swaps_lock, irqflags);
 			DRM_DEBUG("Already scheduled\n");
@@ -555,6 +769,7 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
 
 	if (dev_priv->swaps_pending >= 100) {
 		DRM_DEBUG("Too many swaps queued\n");
+		drm_vblank_put(dev, pipe);
 		return -EBUSY;
 	}
 
@@ -562,13 +777,14 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
 
 	if (!vbl_swap) {
 		DRM_ERROR("Failed to allocate memory to queue swap\n");
+		drm_vblank_put(dev, pipe);
 		return -ENOMEM;
 	}
 
 	DRM_DEBUG("\n");
 
 	vbl_swap->drw_id = swap->drawable;
-	vbl_swap->pipe = pipe;
+	vbl_swap->plane = plane;
 	vbl_swap->sequence = swap->sequence;
 
 	spin_lock_irqsave(&dev_priv->swaps_lock, irqflags);
@@ -587,37 +803,63 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
 
-	I915_WRITE16(I915REG_HWSTAM, 0xfffe);
-	I915_WRITE16(I915REG_INT_MASK_R, 0x0);
-	I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
+	I915_WRITE(HWSTAM, 0xeffe);
+	I915_WRITE(IMR, 0xffffffff);
+	I915_WRITE(IER, 0x0);
 }
 
-void i915_driver_irq_postinstall(struct drm_device * dev)
+int i915_driver_irq_postinstall(struct drm_device *dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+	int ret, num_pipes = 2;
 
 	spin_lock_init(&dev_priv->swaps_lock);
 	INIT_LIST_HEAD(&dev_priv->vbl_swaps.head);
 	dev_priv->swaps_pending = 0;
 
-	if (!dev_priv->vblank_pipe)
-		dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A;
-	i915_enable_interrupt(dev);
+	/* Set initial unmasked IRQs to just the selected vblank pipes. */
+	dev_priv->irq_mask_reg = ~0;
+
+	ret = drm_vblank_init(dev, num_pipes);
+	if (ret)
+		return ret;
+
+	dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
+	dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT;
+	dev_priv->irq_mask_reg &= ~I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT;
+
+	dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
+
+	dev_priv->irq_mask_reg &= I915_INTERRUPT_ENABLE_MASK;
+
+	I915_WRITE(IMR, dev_priv->irq_mask_reg);
+	I915_WRITE(IER, I915_INTERRUPT_ENABLE_MASK);
+	(void) I915_READ(IER);
+
+	opregion_enable_asle(dev);
 	DRM_INIT_WAITQUEUE(&dev_priv->irq_queue);
+
+	return 0;
 }
 
 void i915_driver_irq_uninstall(struct drm_device * dev)
 {
 	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
-	u16 temp;
+	u32 temp;
 
 	if (!dev_priv)
 		return;
 
-	I915_WRITE16(I915REG_HWSTAM, 0xffff);
-	I915_WRITE16(I915REG_INT_MASK_R, 0xffff);
-	I915_WRITE16(I915REG_INT_ENABLE_R, 0x0);
+	dev_priv->vblank_pipe = 0;
+
+	I915_WRITE(HWSTAM, 0xffffffff);
+	I915_WRITE(IMR, 0xffffffff);
+	I915_WRITE(IER, 0x0);
 
-	temp = I915_READ16(I915REG_INT_IDENTITY_R);
-	I915_WRITE16(I915REG_INT_IDENTITY_R, temp);
+	temp = I915_READ(PIPEASTAT);
+	I915_WRITE(PIPEASTAT, temp);
+	temp = I915_READ(PIPEBSTAT);
+	I915_WRITE(PIPEBSTAT, temp);
+	temp = I915_READ(IIR);
+	I915_WRITE(IIR, temp);
 }

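The swap-scheduling paths above compare vblank sequence numbers with unsigned wrap-around arithmetic: a difference greater than 1<<23 (half the 24-bit hardware frame-counter range, hence max_vblank_count = 0xffffff) means the target sequence is still in the future. A standalone sketch of that convention (hypothetical helper, mirroring the (1<<23) test in i915_vblank_tasklet):

	/* Kernel-style u32; returns nonzero once target_seq is due. */
	static int vblank_seq_due(u32 current_seq, u32 target_seq)
	{
		return (current_seq - target_seq) <= (1 << 23);
	}
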
+ 371 - 0
drivers/gpu/drm/i915/i915_opregion.c

@@ -0,0 +1,371 @@
+/*
+ * Copyright 2008 Intel Corporation <hong.liu@intel.com>
+ * Copyright 2008 Red Hat <mjg@redhat.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial
+ * portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NON-INFRINGEMENT.  IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE
+ * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include <linux/acpi.h>
+
+#include "drmP.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+#define PCI_ASLE 0xe4
+#define PCI_LBPC 0xf4
+#define PCI_ASLS 0xfc
+
+#define OPREGION_SZ            (8*1024)
+#define OPREGION_HEADER_OFFSET 0
+#define OPREGION_ACPI_OFFSET   0x100
+#define OPREGION_SWSCI_OFFSET  0x200
+#define OPREGION_ASLE_OFFSET   0x300
+#define OPREGION_VBT_OFFSET    0x1000
+
+#define OPREGION_SIGNATURE "IntelGraphicsMem"
+#define MBOX_ACPI      (1<<0)
+#define MBOX_SWSCI     (1<<1)
+#define MBOX_ASLE      (1<<2)
+
+struct opregion_header {
+       u8 signature[16];
+       u32 size;
+       u32 opregion_ver;
+       u8 bios_ver[32];
+       u8 vbios_ver[16];
+       u8 driver_ver[16];
+       u32 mboxes;
+       u8 reserved[164];
+} __attribute__((packed));
+
+/* OpRegion mailbox #1: public ACPI methods */
+struct opregion_acpi {
+       u32 drdy;       /* driver readiness */
+       u32 csts;       /* notification status */
+       u32 cevt;       /* current event */
+       u8 rsvd1[20];
+       u32 didl[8];    /* supported display devices ID list */
+       u32 cpdl[8];    /* currently presented display list */
+       u32 cadl[8];    /* currently active display list */
+       u32 nadl[8];    /* next active devices list */
+       u32 aslp;       /* ASL sleep time-out */
+       u32 tidx;       /* toggle table index */
+       u32 chpd;       /* current hotplug enable indicator */
+       u32 clid;       /* current lid state*/
+       u32 cdck;       /* current docking state */
+       u32 sxsw;       /* Sx state resume */
+       u32 evts;       /* ASL supported events */
+       u32 cnot;       /* current OS notification */
+       u32 nrdy;       /* driver status */
+       u8 rsvd2[60];
+} __attribute__((packed));
+
+/* OpRegion mailbox #2: SWSCI */
+struct opregion_swsci {
+       u32 scic;       /* SWSCI command|status|data */
+       u32 parm;       /* command parameters */
+       u32 dslp;       /* driver sleep time-out */
+       u8 rsvd[244];
+} __attribute__((packed));
+
+/* OpRegion mailbox #3: ASLE */
+struct opregion_asle {
+       u32 ardy;       /* driver readiness */
+       u32 aslc;       /* ASLE interrupt command */
+       u32 tche;       /* technology enabled indicator */
+       u32 alsi;       /* current ALS illuminance reading */
+       u32 bclp;       /* backlight brightness to set */
+       u32 pfit;       /* panel fitting state */
+       u32 cblv;       /* current brightness level */
+       u16 bclm[20];   /* backlight level duty cycle mapping table */
+       u32 cpfm;       /* current panel fitting mode */
+       u32 epfm;       /* enabled panel fitting modes */
+       u8 plut[74];    /* panel LUT and identifier */
+       u32 pfmb;       /* PWM freq and min brightness */
+       u8 rsvd[102];
+} __attribute__((packed));
+
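+/*
+ * Illustrative check (not part of this patch): the header and each mailbox
+ * occupy exactly 0x100 bytes, which is what makes the fixed
+ * OPREGION_*_OFFSET arithmetic in intel_opregion_init() safe.
+ */
+#if 0
+static void opregion_layout_check(void)
+{
+	BUILD_BUG_ON(sizeof(struct opregion_header) != 0x100);
+	BUILD_BUG_ON(sizeof(struct opregion_acpi) != 0x100);
+	BUILD_BUG_ON(sizeof(struct opregion_swsci) != 0x100);
+	BUILD_BUG_ON(sizeof(struct opregion_asle) != 0x100);
+}
+#endif
+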
+/* ASLE irq request bits */
+#define ASLE_SET_ALS_ILLUM     (1 << 0)
+#define ASLE_SET_BACKLIGHT     (1 << 1)
+#define ASLE_SET_PFIT          (1 << 2)
+#define ASLE_SET_PWM_FREQ      (1 << 3)
+#define ASLE_REQ_MSK           0xf
+
+/* response bits of ASLE irq request */
+#define ASLE_ALS_ILLUM_FAIL    (2<<10)
+#define ASLE_BACKLIGHT_FAIL    (2<<12)
+#define ASLE_PFIT_FAIL         (2<<14)
+#define ASLE_PWM_FREQ_FAIL     (2<<16)
+
+/* ASLE backlight brightness to set */
+#define ASLE_BCLP_VALID                (1<<31)
+#define ASLE_BCLP_MSK          (~(1<<31))
+
+/* ASLE panel fitting request */
+#define ASLE_PFIT_VALID         (1<<31)
+#define ASLE_PFIT_CENTER (1<<0)
+#define ASLE_PFIT_STRETCH_TEXT (1<<1)
+#define ASLE_PFIT_STRETCH_GFX (1<<2)
+
+/* PWM frequency and minimum brightness */
+#define ASLE_PFMB_BRIGHTNESS_MASK (0xff)
+#define ASLE_PFMB_BRIGHTNESS_VALID (1<<8)
+#define ASLE_PFMB_PWM_MASK (0x7ffffe00)
+#define ASLE_PFMB_PWM_VALID (1<<31)
+
+#define ASLE_CBLV_VALID         (1<<31)
+
+static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct opregion_asle *asle = dev_priv->opregion.asle;
+	u32 blc_pwm_ctl, blc_pwm_ctl2;
+
+	if (!(bclp & ASLE_BCLP_VALID))
+		return ASLE_BACKLIGHT_FAIL;
+
+	bclp &= ASLE_BCLP_MSK;
+	if (bclp > 255)		/* bclp is unsigned, so only the upper bound can trip */
+		return ASLE_BACKLIGHT_FAIL;
+
+	blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
+	blc_pwm_ctl &= ~BACKLIGHT_DUTY_CYCLE_MASK;
+	blc_pwm_ctl2 = I915_READ(BLC_PWM_CTL2);
+
+	if (blc_pwm_ctl2 & BLM_COMBINATION_MODE)
+		pci_write_config_dword(dev->pdev, PCI_LBPC, bclp);
+	else
+		I915_WRITE(BLC_PWM_CTL, blc_pwm_ctl | ((bclp * 0x101)-1));
+
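+	/* Report the new level back to the firmware as a percentage
+	 * (0x64 == 100, mapping the 0-255 request onto 0-100). */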
+	asle->cblv = (bclp*0x64)/0xff | ASLE_CBLV_VALID;
+
+	return 0;
+}
+
+static u32 asle_set_als_illum(struct drm_device *dev, u32 alsi)
+{
+	/* alsi is the current ALS reading in lux. 0 indicates below sensor
+	   range, 0xffff indicates above sensor range. 1-0xfffe are valid */
+	return 0;
+}
+
+static u32 asle_set_pwm_freq(struct drm_device *dev, u32 pfmb)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	if (pfmb & ASLE_PFMB_PWM_VALID) {
+		u32 blc_pwm_ctl = I915_READ(BLC_PWM_CTL);
+		u32 pwm = pfmb & ASLE_PFMB_PWM_MASK;
+		blc_pwm_ctl &= BACKLIGHT_DUTY_CYCLE_MASK;
+		pwm = pwm >> 9;
+		/* FIXME - what do we do with the PWM? */
+	}
+	return 0;
+}
+
+static u32 asle_set_pfit(struct drm_device *dev, u32 pfit)
+{
+	/* Panel fitting is currently controlled by the X code, so this is a
+	   no-op until modesetting support works fully */
+	if (!(pfit & ASLE_PFIT_VALID))
+		return ASLE_PFIT_FAIL;
+	return 0;
+}
+
+void opregion_asle_intr(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct opregion_asle *asle = dev_priv->opregion.asle;
+	u32 asle_stat = 0;
+	u32 asle_req;
+
+	if (!asle)
+		return;
+
+	asle_req = asle->aslc & ASLE_REQ_MSK;
+
+	if (!asle_req) {
+		DRM_DEBUG("non asle set request??\n");
+		return;
+	}
+
+	if (asle_req & ASLE_SET_ALS_ILLUM)
+		asle_stat |= asle_set_als_illum(dev, asle->alsi);
+
+	if (asle_req & ASLE_SET_BACKLIGHT)
+		asle_stat |= asle_set_backlight(dev, asle->bclp);
+
+	if (asle_req & ASLE_SET_PFIT)
+		asle_stat |= asle_set_pfit(dev, asle->pfit);
+
+	if (asle_req & ASLE_SET_PWM_FREQ)
+		asle_stat |= asle_set_pwm_freq(dev, asle->pfmb);
+
+	asle->aslc = asle_stat;
+}
+
+#define ASLE_ALS_EN    (1<<0)
+#define ASLE_BLC_EN    (1<<1)
+#define ASLE_PFIT_EN   (1<<2)
+#define ASLE_PFMB_EN   (1<<3)
+
+void opregion_enable_asle(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct opregion_asle *asle = dev_priv->opregion.asle;
+
+	if (asle) {
+		u32 pipeb_stats = I915_READ(PIPEBSTAT);
+		if (IS_MOBILE(dev)) {
+			/* Many devices trigger events with a write to the
+			   legacy backlight controller, so we need to ensure
+			   that it's able to generate interrupts */
+			I915_WRITE(PIPEBSTAT, pipeb_stats |=
+				   I915_LEGACY_BLC_EVENT_ENABLE);
+			i915_enable_irq(dev_priv, I915_ASLE_INTERRUPT |
+					I915_DISPLAY_PIPE_B_EVENT_INTERRUPT);
+		} else
+			i915_enable_irq(dev_priv, I915_ASLE_INTERRUPT);
+
+		asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
+			ASLE_PFMB_EN;
+		asle->ardy = 1;
+	}
+}
+
+#define ACPI_EV_DISPLAY_SWITCH (1<<0)
+#define ACPI_EV_LID            (1<<1)
+#define ACPI_EV_DOCK           (1<<2)
+
+static struct intel_opregion *system_opregion;
+
+int intel_opregion_video_event(struct notifier_block *nb, unsigned long val,
+			       void *data)
+{
+	/* The only video events relevant to opregion are 0x80. These indicate
+	   either a docking event, lid switch or display switch request. In
+	   Linux, these are handled by the dock, button and video drivers.
+	   We might want to fix the video driver to be opregion-aware in
+	   future, but right now we just indicate to the firmware that the
+	   request has been handled */
+
+	struct opregion_acpi *acpi;
+
+	if (!system_opregion)
+		return NOTIFY_DONE;
+
+	acpi = system_opregion->acpi;
+	acpi->csts = 0;
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block intel_opregion_notifier = {
+	.notifier_call = intel_opregion_video_event,
+};
+
+int intel_opregion_init(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_opregion *opregion = &dev_priv->opregion;
+	void *base;
+	u32 asls, mboxes;
+	int err = 0;
+
+	pci_read_config_dword(dev->pdev, PCI_ASLS, &asls);
+	DRM_DEBUG("graphic opregion physical addr: 0x%x\n", asls);
+	if (asls == 0) {
+		DRM_DEBUG("ACPI OpRegion not supported!\n");
+		return -ENOTSUPP;
+	}
+
+	base = ioremap(asls, OPREGION_SZ);
+	if (!base)
+		return -ENOMEM;
+
+	opregion->header = base;
+	if (memcmp(opregion->header->signature, OPREGION_SIGNATURE, 16)) {
+		DRM_DEBUG("opregion signature mismatch\n");
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	mboxes = opregion->header->mboxes;
+	if (mboxes & MBOX_ACPI) {
+		DRM_DEBUG("Public ACPI methods supported\n");
+		opregion->acpi = base + OPREGION_ACPI_OFFSET;
+	} else {
+		DRM_DEBUG("Public ACPI methods not supported\n");
+		err = -ENOTSUPP;
+		goto err_out;
+	}
+	opregion->enabled = 1;
+
+	if (mboxes & MBOX_SWSCI) {
+		DRM_DEBUG("SWSCI supported\n");
+		opregion->swsci = base + OPREGION_SWSCI_OFFSET;
+	}
+	if (mboxes & MBOX_ASLE) {
+		DRM_DEBUG("ASLE supported\n");
+		opregion->asle = base + OPREGION_ASLE_OFFSET;
+	}
+
+	/* Notify BIOS we are ready to handle ACPI video ext notifs.
+	 * Right now, all the events are handled by the ACPI video module.
+	 * We don't actually need to do anything with them. */
+	opregion->acpi->csts = 0;
+	opregion->acpi->drdy = 1;
+
+	system_opregion = opregion;
+	register_acpi_notifier(&intel_opregion_notifier);
+
+	return 0;
+
+err_out:
+	iounmap(opregion->header);
+	opregion->header = NULL;
+	return err;
+}
+
+void intel_opregion_free(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	struct intel_opregion *opregion = &dev_priv->opregion;
+
+	if (!opregion->enabled)
+		return;
+
+	opregion->acpi->drdy = 0;
+
+	system_opregion = NULL;
+	unregister_acpi_notifier(&intel_opregion_notifier);
+
+	/* just clear all opregion memory pointers now */
+	iounmap(opregion->header);
+	opregion->header = NULL;
+	opregion->acpi = NULL;
+	opregion->swsci = NULL;
+	opregion->asle = NULL;
+
+	opregion->enabled = 0;
+}

+ 1417 - 0
drivers/gpu/drm/i915/i915_reg.h

@@ -0,0 +1,1417 @@
+/* Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _I915_REG_H_
+#define _I915_REG_H_
+
+/*
+ * The Bridge device's PCI config space has information about the
+ * fb aperture size and the amount of pre-reserved memory.
+ */
+#define INTEL_GMCH_CTRL		0x52
+#define INTEL_GMCH_ENABLED	0x4
+#define INTEL_GMCH_MEM_MASK	0x1
+#define INTEL_GMCH_MEM_64M	0x1
+#define INTEL_GMCH_MEM_128M	0
+
+#define INTEL_855_GMCH_GMS_MASK		(0x7 << 4)
+#define INTEL_855_GMCH_GMS_DISABLED	(0x0 << 4)
+#define INTEL_855_GMCH_GMS_STOLEN_1M	(0x1 << 4)
+#define INTEL_855_GMCH_GMS_STOLEN_4M	(0x2 << 4)
+#define INTEL_855_GMCH_GMS_STOLEN_8M	(0x3 << 4)
+#define INTEL_855_GMCH_GMS_STOLEN_16M	(0x4 << 4)
+#define INTEL_855_GMCH_GMS_STOLEN_32M	(0x5 << 4)
+
+#define INTEL_915G_GMCH_GMS_STOLEN_48M	(0x6 << 4)
+#define INTEL_915G_GMCH_GMS_STOLEN_64M	(0x7 << 4)
+
+/* PCI config space */
+
+#define HPLLCC	0xc0 /* 855 only */
+#define   GC_CLOCK_CONTROL_MASK		(3 << 0)
+#define   GC_CLOCK_133_200		(0 << 0)
+#define   GC_CLOCK_100_200		(1 << 0)
+#define   GC_CLOCK_100_133		(2 << 0)
+#define   GC_CLOCK_166_250		(3 << 0)
+#define GCFGC	0xf0 /* 915+ only */
+#define   GC_LOW_FREQUENCY_ENABLE	(1 << 7)
+#define   GC_DISPLAY_CLOCK_190_200_MHZ	(0 << 4)
+#define   GC_DISPLAY_CLOCK_333_MHZ	(4 << 4)
+#define   GC_DISPLAY_CLOCK_MASK		(7 << 4)
+#define LBB	0xf4
+
+/* VGA stuff */
+
+#define VGA_ST01_MDA 0x3ba
+#define VGA_ST01_CGA 0x3da
+
+#define VGA_MSR_WRITE 0x3c2
+#define VGA_MSR_READ 0x3cc
+#define   VGA_MSR_MEM_EN (1<<1)
+#define   VGA_MSR_CGA_MODE (1<<0)
+
+#define VGA_SR_INDEX 0x3c4
+#define VGA_SR_DATA 0x3c5
+
+#define VGA_AR_INDEX 0x3c0
+#define   VGA_AR_VID_EN (1<<5)
+#define VGA_AR_DATA_WRITE 0x3c0
+#define VGA_AR_DATA_READ 0x3c1
+
+#define VGA_GR_INDEX 0x3ce
+#define VGA_GR_DATA 0x3cf
+/* GR05 */
+#define   VGA_GR_MEM_READ_MODE_SHIFT 3
+#define     VGA_GR_MEM_READ_MODE_PLANE 1
+/* GR06 */
+#define   VGA_GR_MEM_MODE_MASK 0xc
+#define   VGA_GR_MEM_MODE_SHIFT 2
+#define   VGA_GR_MEM_A0000_AFFFF 0
+#define   VGA_GR_MEM_A0000_BFFFF 1
+#define   VGA_GR_MEM_B0000_B7FFF 2
+#define   VGA_GR_MEM_B0000_BFFFF 3
+
+#define VGA_DACMASK 0x3c6
+#define VGA_DACRX 0x3c7
+#define VGA_DACWX 0x3c8
+#define VGA_DACDATA 0x3c9
+
+#define VGA_CR_INDEX_MDA 0x3b4
+#define VGA_CR_DATA_MDA 0x3b5
+#define VGA_CR_INDEX_CGA 0x3d4
+#define VGA_CR_DATA_CGA 0x3d5
+
+/*
+ * Memory interface instructions used by the kernel
+ */
+#define MI_INSTR(opcode, flags) (((opcode) << 23) | (flags))
+
+#define MI_NOOP			MI_INSTR(0, 0)
+#define MI_USER_INTERRUPT	MI_INSTR(0x02, 0)
+#define MI_WAIT_FOR_EVENT       MI_INSTR(0x03, 0)
+#define   MI_WAIT_FOR_PLANE_B_FLIP      (1<<6)
+#define   MI_WAIT_FOR_PLANE_A_FLIP      (1<<2)
+#define   MI_WAIT_FOR_PLANE_A_SCANLINES (1<<1)
+#define MI_FLUSH		MI_INSTR(0x04, 0)
+#define   MI_READ_FLUSH		(1 << 0)
+#define   MI_EXE_FLUSH		(1 << 1)
+#define   MI_NO_WRITE_FLUSH	(1 << 2)
+#define   MI_SCENE_COUNT	(1 << 3) /* just increment scene count */
+#define   MI_END_SCENE		(1 << 4) /* flush binner and incr scene count */
+#define MI_BATCH_BUFFER_END	MI_INSTR(0x0a, 0)
+#define MI_REPORT_HEAD		MI_INSTR(0x07, 0)
+#define MI_LOAD_SCAN_LINES_INCL MI_INSTR(0x12, 0)
+#define MI_STORE_DWORD_IMM	MI_INSTR(0x20, 1)
+#define   MI_MEM_VIRTUAL	(1 << 22) /* 965+ only */
+#define MI_STORE_DWORD_INDEX	MI_INSTR(0x21, 1)
+#define   MI_STORE_DWORD_INDEX_SHIFT 2
+#define MI_LOAD_REGISTER_IMM	MI_INSTR(0x22, 1)
+#define MI_BATCH_BUFFER		MI_INSTR(0x30, 1)
+#define   MI_BATCH_NON_SECURE	(1)
+#define   MI_BATCH_NON_SECURE_I965 (1<<8)
+#define MI_BATCH_BUFFER_START	MI_INSTR(0x31, 0)
+
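+/*
+ * Worked example: MI_STORE_DWORD_INDEX expands to (0x21 << 23) | 1, i.e.
+ * 0x10800001.  For the multi-dword commands above the flags argument is the
+ * instruction's length field (total dwords minus two), which is why each
+ * emitter pairs a header with a fixed number of OUT_RING() operands; for
+ * MI_WAIT_FOR_EVENT and MI_FLUSH it carries mode bits instead.
+ */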
+/*
+ * 3D instructions used by the kernel
+ */
+#define GFX_INSTR(opcode, flags) ((0x3 << 29) | ((opcode) << 24) | (flags))
+
+#define GFX_OP_RASTER_RULES    ((0x3<<29)|(0x7<<24))
+#define GFX_OP_SCISSOR         ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define   SC_UPDATE_SCISSOR       (0x1<<1)
+#define   SC_ENABLE_MASK          (0x1<<0)
+#define   SC_ENABLE               (0x1<<0)
+#define GFX_OP_LOAD_INDIRECT   ((0x3<<29)|(0x1d<<24)|(0x7<<16))
+#define GFX_OP_SCISSOR_INFO    ((0x3<<29)|(0x1d<<24)|(0x81<<16)|(0x1))
+#define   SCI_YMIN_MASK      (0xffff<<16)
+#define   SCI_XMIN_MASK      (0xffff<<0)
+#define   SCI_YMAX_MASK      (0xffff<<16)
+#define   SCI_XMAX_MASK      (0xffff<<0)
+#define GFX_OP_SCISSOR_ENABLE	 ((0x3<<29)|(0x1c<<24)|(0x10<<19))
+#define GFX_OP_SCISSOR_RECT	 ((0x3<<29)|(0x1d<<24)|(0x81<<16)|1)
+#define GFX_OP_COLOR_FACTOR      ((0x3<<29)|(0x1d<<24)|(0x1<<16)|0x0)
+#define GFX_OP_STIPPLE           ((0x3<<29)|(0x1d<<24)|(0x83<<16))
+#define GFX_OP_MAP_INFO          ((0x3<<29)|(0x1d<<24)|0x4)
+#define GFX_OP_DESTBUFFER_VARS   ((0x3<<29)|(0x1d<<24)|(0x85<<16)|0x0)
+#define GFX_OP_DESTBUFFER_INFO	 ((0x3<<29)|(0x1d<<24)|(0x8e<<16)|1)
+#define GFX_OP_DRAWRECT_INFO     ((0x3<<29)|(0x1d<<24)|(0x80<<16)|(0x3))
+#define GFX_OP_DRAWRECT_INFO_I965  ((0x7900<<16)|0x2)
+#define SRC_COPY_BLT_CMD                ((2<<29)|(0x43<<22)|4)
+#define XY_SRC_COPY_BLT_CMD		((2<<29)|(0x53<<22)|6)
+#define XY_MONO_SRC_COPY_IMM_BLT	((2<<29)|(0x71<<22)|5)
+#define XY_SRC_COPY_BLT_WRITE_ALPHA	(1<<21)
+#define XY_SRC_COPY_BLT_WRITE_RGB	(1<<20)
+#define   BLT_DEPTH_8			(0<<24)
+#define   BLT_DEPTH_16_565		(1<<24)
+#define   BLT_DEPTH_16_1555		(2<<24)
+#define   BLT_DEPTH_32			(3<<24)
+#define   BLT_ROP_GXCOPY		(0xcc<<16)
+#define XY_SRC_COPY_BLT_SRC_TILED	(1<<15) /* 965+ only */
+#define XY_SRC_COPY_BLT_DST_TILED	(1<<11) /* 965+ only */
+#define CMD_OP_DISPLAYBUFFER_INFO ((0x0<<29)|(0x14<<23)|2)
+#define   ASYNC_FLIP                (1<<22)
+#define   DISPLAY_PLANE_A           (0<<20)
+#define   DISPLAY_PLANE_B           (1<<20)
+
+/*
+ * Instruction and interrupt control regs
+ */
+
+#define PRB0_TAIL	0x02030
+#define PRB0_HEAD	0x02034
+#define PRB0_START	0x02038
+#define PRB0_CTL	0x0203c
+#define   TAIL_ADDR		0x001FFFF8
+#define   HEAD_WRAP_COUNT	0xFFE00000
+#define   HEAD_WRAP_ONE		0x00200000
+#define   HEAD_ADDR		0x001FFFFC
+#define   RING_NR_PAGES		0x001FF000
+#define   RING_REPORT_MASK	0x00000006
+#define   RING_REPORT_64K	0x00000002
+#define   RING_REPORT_128K	0x00000004
+#define   RING_NO_REPORT	0x00000000
+#define   RING_VALID_MASK	0x00000001
+#define   RING_VALID		0x00000001
+#define   RING_INVALID		0x00000000
+#define PRB1_TAIL	0x02040 /* 915+ only */
+#define PRB1_HEAD	0x02044 /* 915+ only */
+#define PRB1_START	0x02048 /* 915+ only */
+#define PRB1_CTL	0x0204c /* 915+ only */
+#define ACTHD_I965	0x02074
+#define HWS_PGA		0x02080
+#define HWS_ADDRESS_MASK	0xfffff000
+#define HWS_START_ADDRESS_SHIFT	4
+#define IPEIR		0x02088
+#define NOPID		0x02094
+#define HWSTAM		0x02098
+#define SCPD0		0x0209c /* 915+ only */
+#define IER		0x020a0
+#define IIR		0x020a4
+#define IMR		0x020a8
+#define ISR		0x020ac
+#define   I915_PIPE_CONTROL_NOTIFY_INTERRUPT		(1<<18)
+#define   I915_DISPLAY_PORT_INTERRUPT			(1<<17)
+#define   I915_RENDER_COMMAND_PARSER_ERROR_INTERRUPT	(1<<15)
+#define   I915_GMCH_THERMAL_SENSOR_EVENT_INTERRUPT	(1<<14)
+#define   I915_HWB_OOM_INTERRUPT			(1<<13)
+#define   I915_SYNC_STATUS_INTERRUPT			(1<<12)
+#define   I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT	(1<<11)
+#define   I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT	(1<<10)
+#define   I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT	(1<<9)
+#define   I915_DISPLAY_PLANE_C_FLIP_PENDING_INTERRUPT	(1<<8)
+#define   I915_DISPLAY_PIPE_A_VBLANK_INTERRUPT		(1<<7)
+#define   I915_DISPLAY_PIPE_A_EVENT_INTERRUPT		(1<<6)
+#define   I915_DISPLAY_PIPE_B_VBLANK_INTERRUPT		(1<<5)
+#define   I915_DISPLAY_PIPE_B_EVENT_INTERRUPT		(1<<4)
+#define   I915_DEBUG_INTERRUPT				(1<<2)
+#define   I915_USER_INTERRUPT				(1<<1)
+#define   I915_ASLE_INTERRUPT				(1<<0)
+#define EIR		0x020b0
+#define EMR		0x020b4
+#define ESR		0x020b8
+#define INSTPM	        0x020c0
+#define ACTHD	        0x020c8
+#define FW_BLC		0x020d8
+#define FW_BLC_SELF	0x020e0 /* 915+ only */
+#define MI_ARB_STATE	0x020e4 /* 915+ only */
+#define CACHE_MODE_0	0x02120 /* 915+ only */
+#define   CM0_MASK_SHIFT          16
+#define   CM0_IZ_OPT_DISABLE      (1<<6)
+#define   CM0_ZR_OPT_DISABLE      (1<<5)
+#define   CM0_DEPTH_EVICT_DISABLE (1<<4)
+#define   CM0_COLOR_EVICT_DISABLE (1<<3)
+#define   CM0_DEPTH_WRITE_DISABLE (1<<1)
+#define   CM0_RC_OP_FLUSH_DISABLE (1<<0)
+#define GFX_FLSH_CNTL	0x02170 /* 915+ only */
+
+/*
+ * Framebuffer compression (915+ only)
+ */
+
+#define FBC_CFB_BASE		0x03200 /* 4k page aligned */
+#define FBC_LL_BASE		0x03204 /* 4k page aligned */
+#define FBC_CONTROL		0x03208
+#define   FBC_CTL_EN		(1<<31)
+#define   FBC_CTL_PERIODIC	(1<<30)
+#define   FBC_CTL_INTERVAL_SHIFT (16)
+#define   FBC_CTL_UNCOMPRESSIBLE (1<<14)
+#define   FBC_CTL_STRIDE_SHIFT	(5)
+#define   FBC_CTL_FENCENO	(1<<0)
+#define FBC_COMMAND		0x0320c
+#define   FBC_CMD_COMPRESS	(1<<0)
+#define FBC_STATUS		0x03210
+#define   FBC_STAT_COMPRESSING	(1<<31)
+#define   FBC_STAT_COMPRESSED	(1<<30)
+#define   FBC_STAT_MODIFIED	(1<<29)
+#define   FBC_STAT_CURRENT_LINE	(1<<0)
+#define FBC_CONTROL2		0x03214
+#define   FBC_CTL_FENCE_DBL	(0<<4)
+#define   FBC_CTL_IDLE_IMM	(0<<2)
+#define   FBC_CTL_IDLE_FULL	(1<<2)
+#define   FBC_CTL_IDLE_LINE	(2<<2)
+#define   FBC_CTL_IDLE_DEBUG	(3<<2)
+#define   FBC_CTL_CPU_FENCE	(1<<1)
+#define   FBC_CTL_PLANEA	(0<<0)
+#define   FBC_CTL_PLANEB	(1<<0)
+#define FBC_FENCE_OFF		0x0321b
+
+#define FBC_LL_SIZE		(1536)
+
+/*
+ * GPIO regs
+ */
+#define GPIOA			0x5010
+#define GPIOB			0x5014
+#define GPIOC			0x5018
+#define GPIOD			0x501c
+#define GPIOE			0x5020
+#define GPIOF			0x5024
+#define GPIOG			0x5028
+#define GPIOH			0x502c
+# define GPIO_CLOCK_DIR_MASK		(1 << 0)
+# define GPIO_CLOCK_DIR_IN		(0 << 1)
+# define GPIO_CLOCK_DIR_OUT		(1 << 1)
+# define GPIO_CLOCK_VAL_MASK		(1 << 2)
+# define GPIO_CLOCK_VAL_OUT		(1 << 3)
+# define GPIO_CLOCK_VAL_IN		(1 << 4)
+# define GPIO_CLOCK_PULLUP_DISABLE	(1 << 5)
+# define GPIO_DATA_DIR_MASK		(1 << 8)
+# define GPIO_DATA_DIR_IN		(0 << 9)
+# define GPIO_DATA_DIR_OUT		(1 << 9)
+# define GPIO_DATA_VAL_MASK		(1 << 10)
+# define GPIO_DATA_VAL_OUT		(1 << 11)
+# define GPIO_DATA_VAL_IN		(1 << 12)
+# define GPIO_DATA_PULLUP_DISABLE	(1 << 13)
+
+/*
+ * Clock control & power management
+ */
+
+#define VGA0	0x6000
+#define VGA1	0x6004
+#define VGA_PD	0x6010
+#define   VGA0_PD_P2_DIV_4	(1 << 7)
+#define   VGA0_PD_P1_DIV_2	(1 << 5)
+#define   VGA0_PD_P1_SHIFT	0
+#define   VGA0_PD_P1_MASK	(0x1f << 0)
+#define   VGA1_PD_P2_DIV_4	(1 << 15)
+#define   VGA1_PD_P1_DIV_2	(1 << 13)
+#define   VGA1_PD_P1_SHIFT	8
+#define   VGA1_PD_P1_MASK	(0x1f << 8)
+#define DPLL_A	0x06014
+#define DPLL_B	0x06018
+#define   DPLL_VCO_ENABLE		(1 << 31)
+#define   DPLL_DVO_HIGH_SPEED		(1 << 30)
+#define   DPLL_SYNCLOCK_ENABLE		(1 << 29)
+#define   DPLL_VGA_MODE_DIS		(1 << 28)
+#define   DPLLB_MODE_DAC_SERIAL		(1 << 26) /* i915 */
+#define   DPLLB_MODE_LVDS		(2 << 26) /* i915 */
+#define   DPLL_MODE_MASK		(3 << 26)
+#define   DPLL_DAC_SERIAL_P2_CLOCK_DIV_10 (0 << 24) /* i915 */
+#define   DPLL_DAC_SERIAL_P2_CLOCK_DIV_5 (1 << 24) /* i915 */
+#define   DPLLB_LVDS_P2_CLOCK_DIV_14	(0 << 24) /* i915 */
+#define   DPLLB_LVDS_P2_CLOCK_DIV_7	(1 << 24) /* i915 */
+#define   DPLL_P2_CLOCK_DIV_MASK	0x03000000 /* i915 */
+#define   DPLL_FPA01_P1_POST_DIV_MASK	0x00ff0000 /* i915 */
+
+#define I915_FIFO_UNDERRUN_STATUS		(1UL<<31)
+#define I915_CRC_ERROR_ENABLE			(1UL<<29)
+#define I915_CRC_DONE_ENABLE			(1UL<<28)
+#define I915_GMBUS_EVENT_ENABLE			(1UL<<27)
+#define I915_VSYNC_INTERRUPT_ENABLE		(1UL<<25)
+#define I915_DISPLAY_LINE_COMPARE_ENABLE	(1UL<<24)
+#define I915_DPST_EVENT_ENABLE			(1UL<<23)
+#define I915_LEGACY_BLC_EVENT_ENABLE		(1UL<<22)
+#define I915_ODD_FIELD_INTERRUPT_ENABLE		(1UL<<21)
+#define I915_EVEN_FIELD_INTERRUPT_ENABLE	(1UL<<20)
+#define I915_START_VBLANK_INTERRUPT_ENABLE	(1UL<<18)	/* 965 or later */
+#define I915_VBLANK_INTERRUPT_ENABLE		(1UL<<17)
+#define I915_OVERLAY_UPDATED_ENABLE		(1UL<<16)
+#define I915_CRC_ERROR_INTERRUPT_STATUS		(1UL<<13)
+#define I915_CRC_DONE_INTERRUPT_STATUS		(1UL<<12)
+#define I915_GMBUS_INTERRUPT_STATUS		(1UL<<11)
+#define I915_VSYNC_INTERRUPT_STATUS		(1UL<<9)
+#define I915_DISPLAY_LINE_COMPARE_STATUS	(1UL<<8)
+#define I915_DPST_EVENT_STATUS			(1UL<<7)
+#define I915_LEGACY_BLC_EVENT_STATUS		(1UL<<6)
+#define I915_ODD_FIELD_INTERRUPT_STATUS		(1UL<<5)
+#define I915_EVEN_FIELD_INTERRUPT_STATUS	(1UL<<4)
+#define I915_START_VBLANK_INTERRUPT_STATUS	(1UL<<2)	/* 965 or later */
+#define I915_VBLANK_INTERRUPT_STATUS		(1UL<<1)
+#define I915_OVERLAY_UPDATED_STATUS		(1UL<<0)
+
+#define SRX_INDEX		0x3c4
+#define SRX_DATA		0x3c5
+#define SR01			1
+#define SR01_SCREEN_OFF		(1<<5)
+
+#define PPCR			0x61204
+#define PPCR_ON			(1<<0)
+
+#define DVOB			0x61140
+#define DVOB_ON			(1<<31)
+#define DVOC			0x61160
+#define DVOC_ON			(1<<31)
+#define LVDS			0x61180
+#define LVDS_ON			(1<<31)
+
+#define ADPA			0x61100
+#define ADPA_DPMS_MASK		(~(3<<10))
+#define ADPA_DPMS_ON		(0<<10)
+#define ADPA_DPMS_SUSPEND	(1<<10)
+#define ADPA_DPMS_STANDBY	(2<<10)
+#define ADPA_DPMS_OFF		(3<<10)
+
+#define RING_TAIL		0x00
+#define TAIL_ADDR		0x001FFFF8
+#define RING_HEAD		0x04
+#define HEAD_WRAP_COUNT		0xFFE00000
+#define HEAD_WRAP_ONE		0x00200000
+#define HEAD_ADDR		0x001FFFFC
+#define RING_START		0x08
+#define START_ADDR		0xFFFFF000
+#define RING_LEN		0x0C
+#define RING_NR_PAGES		0x001FF000
+#define RING_REPORT_MASK	0x00000006
+#define RING_REPORT_64K		0x00000002
+#define RING_REPORT_128K	0x00000004
+#define RING_NO_REPORT		0x00000000
+#define RING_VALID_MASK		0x00000001
+#define RING_VALID		0x00000001
+#define RING_INVALID		0x00000000
+
+/* Scratch pad debug 0 reg:
+ */
+#define   DPLL_FPA01_P1_POST_DIV_MASK_I830	0x001f0000
+/*
+ * The i830 generation, in LVDS mode, defines P1 as the bit number set within
+ * this field (only one bit may be set).
+ */
+#define   DPLL_FPA01_P1_POST_DIV_MASK_I830_LVDS	0x003f0000
+#define   DPLL_FPA01_P1_POST_DIV_SHIFT	16
+/* i830, required in DVO non-gang */
+#define   PLL_P2_DIVIDE_BY_4		(1 << 23)
+#define   PLL_P1_DIVIDE_BY_TWO		(1 << 21) /* i830 */
+#define   PLL_REF_INPUT_DREFCLK		(0 << 13)
+#define   PLL_REF_INPUT_TVCLKINA	(1 << 13) /* i830 */
+#define   PLL_REF_INPUT_TVCLKINBC	(2 << 13) /* SDVO TVCLKIN */
+#define   PLLB_REF_INPUT_SPREADSPECTRUMIN (3 << 13)
+#define   PLL_REF_INPUT_MASK		(3 << 13)
+#define   PLL_LOAD_PULSE_PHASE_SHIFT		9
+/*
+ * Parallel to Serial Load Pulse phase selection.
+ * Selects the phase for the 10X DPLL clock for the PCIe
+ * digital display port. The range is 4 to 13; 10 or more
+ * is just a flip delay. The default is 6.
+ */
+#define   PLL_LOAD_PULSE_PHASE_MASK		(0xf << PLL_LOAD_PULSE_PHASE_SHIFT)
+#define   DISPLAY_RATE_SELECT_FPA1		(1 << 8)
+/*
+ * SDVO multiplier for 945G/GM. Not used on 965.
+ */
+#define   SDVO_MULTIPLIER_MASK			0x000000ff
+#define   SDVO_MULTIPLIER_SHIFT_HIRES		4
+#define   SDVO_MULTIPLIER_SHIFT_VGA		0
+#define DPLL_A_MD 0x0601c /* 965+ only */
+/*
+ * UDI pixel divider, controlling how many pixels are stuffed into a packet.
+ *
+ * Value is pixels minus 1.  Must be set to 1 pixel for SDVO.
+ */
+#define   DPLL_MD_UDI_DIVIDER_MASK		0x3f000000
+#define   DPLL_MD_UDI_DIVIDER_SHIFT		24
+/* UDI pixel divider for VGA, same as DPLL_MD_UDI_DIVIDER_MASK. */
+#define   DPLL_MD_VGA_UDI_DIVIDER_MASK		0x003f0000
+#define   DPLL_MD_VGA_UDI_DIVIDER_SHIFT		16
+/*
+ * SDVO/UDI pixel multiplier.
+ *
+ * SDVO requires that the bus clock rate be between 1 and 2 GHz, and the bus
+ * clock rate is 10 times the DPLL clock.  At low resolution/refresh rate
+ * modes, the bus rate would be below the limits, so SDVO allows for stuffing
+ * dummy bytes in the datastream at an increased clock rate, with both sides of
+ * the link knowing how many bytes are fill bytes.
+ *
+ * So, for a mode with a dotclock of 65 MHz, we would want to double the clock
+ * rate to 130 MHz to get a bus rate of 1.30 GHz.  The DPLL clock rate would be
+ * set to 130 MHz, and the SDVO multiplier set to 2x in this register and
+ * through an SDVO command.
+ *
+ * This register field has values of multiplication factor minus 1, with
+ * a maximum multiplier of 5 for SDVO.
+ */
+#define   DPLL_MD_UDI_MULTIPLIER_MASK		0x00003f00
+#define   DPLL_MD_UDI_MULTIPLIER_SHIFT		8
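A minimal sketch of the multiplier selection the comment above describes: the bus rate is 10x the multiplied dot clock, must reach 1 GHz, the field stores the factor minus one, and 5x is the SDVO maximum. The helper below is illustrative only, not part of this patch:

static int sdvo_udi_multiplier_field(int dotclock_khz)
{
	int mult;

	for (mult = 1; mult <= 5; mult++) {
		/* bus rate is 10x the multiplied dot clock (kHz here) */
		if (dotclock_khz * mult * 10 >= 1000000)
			return mult - 1;	/* field stores factor - 1 */
	}
	return -1;	/* dot clock too low even at the 5x maximum */
}

For the 65 MHz example above this returns 1, i.e. a 2x multiplier, a 130 MHz DPLL clock and a 1.30 GHz bus.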
+/*
+ * SDVO/UDI pixel multiplier for VGA, same as DPLL_MD_UDI_MULTIPLIER_MASK.
+ * This is best set to the default value (3) or the CRT won't work. No,
+ * I don't entirely understand what this does...
+ */
+#define   DPLL_MD_VGA_UDI_MULTIPLIER_MASK	0x0000003f
+#define   DPLL_MD_VGA_UDI_MULTIPLIER_SHIFT	0
+#define DPLL_B_MD 0x06020 /* 965+ only */
+#define FPA0	0x06040
+#define FPA1	0x06044
+#define FPB0	0x06048
+#define FPB1	0x0604c
+#define   FP_N_DIV_MASK		0x003f0000
+#define   FP_N_DIV_SHIFT		16
+#define   FP_M1_DIV_MASK	0x00003f00
+#define   FP_M1_DIV_SHIFT		 8
+#define   FP_M2_DIV_MASK	0x0000003f
+#define   FP_M2_DIV_SHIFT		 0
+#define DPLL_TEST	0x606c
+#define   DPLLB_TEST_SDVO_DIV_1		(0 << 22)
+#define   DPLLB_TEST_SDVO_DIV_2		(1 << 22)
+#define   DPLLB_TEST_SDVO_DIV_4		(2 << 22)
+#define   DPLLB_TEST_SDVO_DIV_MASK	(3 << 22)
+#define   DPLLB_TEST_N_BYPASS		(1 << 19)
+#define   DPLLB_TEST_M_BYPASS		(1 << 18)
+#define   DPLLB_INPUT_BUFFER_ENABLE	(1 << 16)
+#define   DPLLA_TEST_N_BYPASS		(1 << 3)
+#define   DPLLA_TEST_M_BYPASS		(1 << 2)
+#define   DPLLA_INPUT_BUFFER_ENABLE	(1 << 0)
+#define D_STATE		0x6104
+#define CG_2D_DIS	0x6200
+#define CG_3D_DIS	0x6204
+
+/*
+ * Palette regs
+ */
+
+#define PALETTE_A		0x0a000
+#define PALETTE_B		0x0a800
+
+/* MCH MMIO space */
+
+/*
+ * MCHBAR mirror.
+ *
+ * This mirrors the MCHBAR MMIO space whose location is determined by
+ * device 0 function 0's pci config register 0x44 or 0x48 and matches it in
+ * every way.  It is not accessible from the CP register read instructions.
+ *
+ */
+#define MCHBAR_MIRROR_BASE	0x10000
+
+/** 915-945 and GM965 MCH register controlling DRAM channel access */
+#define DCC			0x10200
+#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL		(0 << 0)
+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC	(1 << 0)
+#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED	(2 << 0)
+#define DCC_ADDRESSING_MODE_MASK			(3 << 0)
+#define DCC_CHANNEL_XOR_DISABLE				(1 << 10)
+
+/** 965 MCH register controlling DRAM channel configuration */
+#define C0DRB3			0x10206
+#define C1DRB3			0x10606
+
+/*
+ * Overlay regs
+ */
+
+#define OVADD			0x30000
+#define DOVSTA			0x30008
+#define OC_BUF			(0x3<<20)
+#define OGAMC5			0x30010
+#define OGAMC4			0x30014
+#define OGAMC3			0x30018
+#define OGAMC2			0x3001c
+#define OGAMC1			0x30020
+#define OGAMC0			0x30024
+
+/*
+ * Display engine regs
+ */
+
+/* Pipe A timing regs */
+#define HTOTAL_A	0x60000
+#define HBLANK_A	0x60004
+#define HSYNC_A		0x60008
+#define VTOTAL_A	0x6000c
+#define VBLANK_A	0x60010
+#define VSYNC_A		0x60014
+#define PIPEASRC	0x6001c
+#define BCLRPAT_A	0x60020
+
+/* Pipe B timing regs */
+#define HTOTAL_B	0x61000
+#define HBLANK_B	0x61004
+#define HSYNC_B		0x61008
+#define VTOTAL_B	0x6100c
+#define VBLANK_B	0x61010
+#define VSYNC_B		0x61014
+#define PIPEBSRC	0x6101c
+#define BCLRPAT_B	0x61020
+
+/* VGA port control */
+#define ADPA			0x61100
+#define   ADPA_DAC_ENABLE	(1<<31)
+#define   ADPA_DAC_DISABLE	0
+#define   ADPA_PIPE_SELECT_MASK	(1<<30)
+#define   ADPA_PIPE_A_SELECT	0
+#define   ADPA_PIPE_B_SELECT	(1<<30)
+#define   ADPA_USE_VGA_HVPOLARITY (1<<15)
+#define   ADPA_SETS_HVPOLARITY	0
+#define   ADPA_VSYNC_CNTL_DISABLE (1<<11)
+#define   ADPA_VSYNC_CNTL_ENABLE 0
+#define   ADPA_HSYNC_CNTL_DISABLE (1<<10)
+#define   ADPA_HSYNC_CNTL_ENABLE 0
+#define   ADPA_VSYNC_ACTIVE_HIGH (1<<4)
+#define   ADPA_VSYNC_ACTIVE_LOW	0
+#define   ADPA_HSYNC_ACTIVE_HIGH (1<<3)
+#define   ADPA_HSYNC_ACTIVE_LOW	0
+#define   ADPA_DPMS_MASK	(~(3<<10))
+#define   ADPA_DPMS_ON		(0<<10)
+#define   ADPA_DPMS_SUSPEND	(1<<10)
+#define   ADPA_DPMS_STANDBY	(2<<10)
+#define   ADPA_DPMS_OFF		(3<<10)
+
+/* Hotplug control (945+ only) */
+#define PORT_HOTPLUG_EN		0x61110
+#define   SDVOB_HOTPLUG_INT_EN			(1 << 26)
+#define   SDVOC_HOTPLUG_INT_EN			(1 << 25)
+#define   TV_HOTPLUG_INT_EN			(1 << 18)
+#define   CRT_HOTPLUG_INT_EN			(1 << 9)
+#define   CRT_HOTPLUG_FORCE_DETECT		(1 << 3)
+
+#define PORT_HOTPLUG_STAT	0x61114
+#define   CRT_HOTPLUG_INT_STATUS		(1 << 11)
+#define   TV_HOTPLUG_INT_STATUS			(1 << 10)
+#define   CRT_HOTPLUG_MONITOR_MASK		(3 << 8)
+#define   CRT_HOTPLUG_MONITOR_COLOR		(3 << 8)
+#define   CRT_HOTPLUG_MONITOR_MONO		(2 << 8)
+#define   CRT_HOTPLUG_MONITOR_NONE		(0 << 8)
+#define   SDVOC_HOTPLUG_INT_STATUS		(1 << 7)
+#define   SDVOB_HOTPLUG_INT_STATUS		(1 << 6)
+
+/* SDVO port control */
+#define SDVOB			0x61140
+#define SDVOC			0x61160
+#define   SDVO_ENABLE		(1 << 31)
+#define   SDVO_PIPE_B_SELECT	(1 << 30)
+#define   SDVO_STALL_SELECT	(1 << 29)
+#define   SDVO_INTERRUPT_ENABLE	(1 << 26)
+/**
+ * 915G/GM SDVO pixel multiplier.
+ *
+ * Programmed value is multiplier - 1, up to 5x.
+ *
+ * \sa DPLL_MD_UDI_MULTIPLIER_MASK
+ */
+#define   SDVO_PORT_MULTIPLY_MASK	(7 << 23)
+#define   SDVO_PORT_MULTIPLY_SHIFT		23
+#define   SDVO_PHASE_SELECT_MASK	(15 << 19)
+#define   SDVO_PHASE_SELECT_DEFAULT	(6 << 19)
+#define   SDVO_CLOCK_OUTPUT_INVERT	(1 << 18)
+#define   SDVOC_GANG_MODE		(1 << 16)
+#define   SDVO_BORDER_ENABLE		(1 << 7)
+#define   SDVOB_PCIE_CONCURRENCY	(1 << 3)
+#define   SDVO_DETECTED			(1 << 2)
+/* Bits to be preserved when writing */
+#define   SDVOB_PRESERVE_MASK ((1 << 17) | (1 << 16) | (1 << 14) | (1 << 26))
+#define   SDVOC_PRESERVE_MASK ((1 << 17) | (1 << 26))
+
+/* DVO port control */
+#define DVOA			0x61120
+#define DVOB			0x61140
+#define DVOC			0x61160
+#define   DVO_ENABLE			(1 << 31)
+#define   DVO_PIPE_B_SELECT		(1 << 30)
+#define   DVO_PIPE_STALL_UNUSED		(0 << 28)
+#define   DVO_PIPE_STALL		(1 << 28)
+#define   DVO_PIPE_STALL_TV		(2 << 28)
+#define   DVO_PIPE_STALL_MASK		(3 << 28)
+#define   DVO_USE_VGA_SYNC		(1 << 15)
+#define   DVO_DATA_ORDER_I740		(0 << 14)
+#define   DVO_DATA_ORDER_FP		(1 << 14)
+#define   DVO_VSYNC_DISABLE		(1 << 11)
+#define   DVO_HSYNC_DISABLE		(1 << 10)
+#define   DVO_VSYNC_TRISTATE		(1 << 9)
+#define   DVO_HSYNC_TRISTATE		(1 << 8)
+#define   DVO_BORDER_ENABLE		(1 << 7)
+#define   DVO_DATA_ORDER_GBRG		(1 << 6)
+#define   DVO_DATA_ORDER_RGGB		(0 << 6)
+#define   DVO_DATA_ORDER_GBRG_ERRATA	(0 << 6)
+#define   DVO_DATA_ORDER_RGGB_ERRATA	(1 << 6)
+#define   DVO_VSYNC_ACTIVE_HIGH		(1 << 4)
+#define   DVO_HSYNC_ACTIVE_HIGH		(1 << 3)
+#define   DVO_BLANK_ACTIVE_HIGH		(1 << 2)
+#define   DVO_OUTPUT_CSTATE_PIXELS	(1 << 1)	/* SDG only */
+#define   DVO_OUTPUT_SOURCE_SIZE_PIXELS	(1 << 0)	/* SDG only */
+#define   DVO_PRESERVE_MASK		(0x7<<24)
+#define DVOA_SRCDIM		0x61124
+#define DVOB_SRCDIM		0x61144
+#define DVOC_SRCDIM		0x61164
+#define   DVO_SRCDIM_HORIZONTAL_SHIFT	12
+#define   DVO_SRCDIM_VERTICAL_SHIFT	0
+
+/* LVDS port control */
+#define LVDS			0x61180
+/*
+ * Enables the LVDS port.  This bit must be set before DPLLs are enabled, as
+ * the DPLL semantics change when the LVDS is assigned to that pipe.
+ */
+#define   LVDS_PORT_EN			(1 << 31)
+/* Selects pipe B for LVDS data.  Must be set on pre-965. */
+#define   LVDS_PIPEB_SELECT		(1 << 30)
+/*
+ * Enables the A0-A2 data pairs and CLKA, containing 18 bits of color data per
+ * pixel.
+ */
+#define   LVDS_A0A2_CLKA_POWER_MASK	(3 << 8)
+#define   LVDS_A0A2_CLKA_POWER_DOWN	(0 << 8)
+#define   LVDS_A0A2_CLKA_POWER_UP	(3 << 8)
+/*
+ * Controls the A3 data pair, which contains the additional LSBs for 24 bit
+ * mode.  Only enabled if LVDS_A0A2_CLKA_POWER_UP also indicates it should be
+ * on.
+ */
+#define   LVDS_A3_POWER_MASK		(3 << 6)
+#define   LVDS_A3_POWER_DOWN		(0 << 6)
+#define   LVDS_A3_POWER_UP		(3 << 6)
+/*
+ * Controls the CLKB pair.  This should only be set when LVDS_B0B3_POWER_UP
+ * is set.
+ */
+#define   LVDS_CLKB_POWER_MASK		(3 << 4)
+#define   LVDS_CLKB_POWER_DOWN		(0 << 4)
+#define   LVDS_CLKB_POWER_UP		(3 << 4)
+/*
+ * Controls the B0-B3 data pairs.  This must be set to match the DPLL p2
+ * setting for whether we are in dual-channel mode.  The B3 pair will
+ * additionally only be powered up when LVDS_A3_POWER_UP is set.
+ */
+#define   LVDS_B0B3_POWER_MASK		(3 << 2)
+#define   LVDS_B0B3_POWER_DOWN		(0 << 2)
+#define   LVDS_B0B3_POWER_UP		(3 << 2)
+
+/* Panel power sequencing */
+#define PP_STATUS	0x61200
+#define   PP_ON		(1 << 31)
+/*
+ * Indicates that all dependencies of the panel are on:
+ *
+ * - PLL enabled
+ * - pipe enabled
+ * - LVDS/DVOB/DVOC on
+ */
+#define   PP_READY		(1 << 30)
+#define   PP_SEQUENCE_NONE	(0 << 28)
+#define   PP_SEQUENCE_ON	(1 << 28)
+#define   PP_SEQUENCE_OFF	(2 << 28)
+#define   PP_SEQUENCE_MASK	0x30000000
+#define PP_CONTROL	0x61204
+#define   POWER_TARGET_ON	(1 << 0)
+#define PP_ON_DELAYS	0x61208
+#define PP_OFF_DELAYS	0x6120c
+#define PP_DIVISOR	0x61210
+
+/* Panel fitting */
+#define PFIT_CONTROL	0x61230
+#define   PFIT_ENABLE		(1 << 31)
+#define   PFIT_PIPE_MASK	(3 << 29)
+#define   PFIT_PIPE_SHIFT	29
+#define   VERT_INTERP_DISABLE	(0 << 10)
+#define   VERT_INTERP_BILINEAR	(1 << 10)
+#define   VERT_INTERP_MASK	(3 << 10)
+#define   VERT_AUTO_SCALE	(1 << 9)
+#define   HORIZ_INTERP_DISABLE	(0 << 6)
+#define   HORIZ_INTERP_BILINEAR	(1 << 6)
+#define   HORIZ_INTERP_MASK	(3 << 6)
+#define   HORIZ_AUTO_SCALE	(1 << 5)
+#define   PANEL_8TO6_DITHER_ENABLE (1 << 3)
+#define PFIT_PGM_RATIOS	0x61234
+#define   PFIT_VERT_SCALE_MASK			0xfff00000
+#define   PFIT_HORIZ_SCALE_MASK			0x0000fff0
+#define PFIT_AUTO_RATIOS 0x61238
+
+/* Backlight control */
+#define BLC_PWM_CTL		0x61254
+#define   BACKLIGHT_MODULATION_FREQ_SHIFT		(17)
+#define BLC_PWM_CTL2		0x61250 /* 965+ only */
+#define   BLM_COMBINATION_MODE (1 << 30)
+/*
+ * This is the most significant 15 bits of the number of backlight cycles in a
+ * complete cycle of the modulated backlight control.
+ *
+ * The actual value is this field multiplied by two.
+ */
+#define   BACKLIGHT_MODULATION_FREQ_MASK		(0x7fff << 17)
+#define   BLM_LEGACY_MODE				(1 << 16)
+/*
+ * This is the number of cycles out of the backlight modulation cycle for which
+ * the backlight is on.
+ *
+ * This field must be no greater than the number of cycles in the complete
+ * backlight modulation cycle.
+ */
+#define   BACKLIGHT_DUTY_CYCLE_SHIFT		(0)
+#define   BACKLIGHT_DUTY_CYCLE_MASK		(0xffff)
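A minimal sketch of combining the two backlight fields per the descriptions above (the frequency field holds half the cycle length; the duty cycle must not exceed the cycle). Illustrative only; the helper name is made up:

static u32 blc_pwm_ctl_value(u32 cycle_len, u32 duty)
{
	u32 freq = (cycle_len / 2) & 0x7fff;	/* field stores length / 2 */

	if (duty > cycle_len)
		duty = cycle_len;	/* duty may not exceed the cycle */

	return (freq << BACKLIGHT_MODULATION_FREQ_SHIFT) |
	       (duty & BACKLIGHT_DUTY_CYCLE_MASK);
}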
+
+/* TV port control */
+#define TV_CTL			0x68000
+/** Enables the TV encoder */
+# define TV_ENC_ENABLE			(1 << 31)
+/** Sources the TV encoder input from pipe B instead of A. */
+# define TV_ENC_PIPEB_SELECT		(1 << 30)
+/** Outputs composite video (DAC A only) */
+# define TV_ENC_OUTPUT_COMPOSITE	(0 << 28)
+/** Outputs SVideo video (DAC B/C) */
+# define TV_ENC_OUTPUT_SVIDEO		(1 << 28)
+/** Outputs Component video (DAC A/B/C) */
+# define TV_ENC_OUTPUT_COMPONENT	(2 << 28)
+/** Outputs Composite and SVideo (DAC A/B/C) */
+# define TV_ENC_OUTPUT_SVIDEO_COMPOSITE	(3 << 28)
+# define TV_TRILEVEL_SYNC		(1 << 21)
+/** Enables slow sync generation (945GM only) */
+# define TV_SLOW_SYNC			(1 << 20)
+/** Selects 4x oversampling for 480i and 576p */
+# define TV_OVERSAMPLE_4X		(0 << 18)
+/** Selects 2x oversampling for 720p and 1080i */
+# define TV_OVERSAMPLE_2X		(1 << 18)
+/** Selects no oversampling for 1080p */
+# define TV_OVERSAMPLE_NONE		(2 << 18)
+/** Selects 8x oversampling */
+# define TV_OVERSAMPLE_8X		(3 << 18)
+/** Selects progressive mode rather than interlaced */
+# define TV_PROGRESSIVE			(1 << 17)
+/** Sets the colorburst to PAL mode.  Required for non-M PAL modes. */
+# define TV_PAL_BURST			(1 << 16)
+/** Field for setting delay of Y compared to C */
+# define TV_YC_SKEW_MASK		(7 << 12)
+/** Enables a fix for 480p/576p standard definition modes on the 915GM only */
+# define TV_ENC_SDP_FIX			(1 << 11)
+/**
+ * Enables a fix for the 915GM only.
+ *
+ * Not sure what it does.
+ */
+# define TV_ENC_C0_FIX			(1 << 10)
+/** Bits that must be preserved by software */
+# define TV_CTL_SAVE			((3 << 8) | (3 << 6))
+# define TV_FUSE_STATE_MASK		(3 << 4)
+/** Read-only state that reports all features enabled */
+# define TV_FUSE_STATE_ENABLED		(0 << 4)
+/** Read-only state that reports that Macrovision is disabled in hardware */
+# define TV_FUSE_STATE_NO_MACROVISION	(1 << 4)
+/** Read-only state that reports that TV-out is disabled in hardware. */
+# define TV_FUSE_STATE_DISABLED		(2 << 4)
+/** Normal operation */
+# define TV_TEST_MODE_NORMAL		(0 << 0)
+/** Encoder test pattern 1 - combo pattern */
+# define TV_TEST_MODE_PATTERN_1		(1 << 0)
+/** Encoder test pattern 2 - full screen vertical 75% color bars */
+# define TV_TEST_MODE_PATTERN_2		(2 << 0)
+/** Encoder test pattern 3 - full screen horizontal 75% color bars */
+# define TV_TEST_MODE_PATTERN_3		(3 << 0)
+/** Encoder test pattern 4 - random noise */
+# define TV_TEST_MODE_PATTERN_4		(4 << 0)
+/** Encoder test pattern 5 - linear color ramps */
+# define TV_TEST_MODE_PATTERN_5		(5 << 0)
+/**
+ * This test mode forces the DACs to 50% of full output.
+ *
+ * This is used for load detection in combination with TVDAC_SENSE_MASK
+ */
+# define TV_TEST_MODE_MONITOR_DETECT	(7 << 0)
+# define TV_TEST_MODE_MASK		(7 << 0)
+
+#define TV_DAC			0x68004
+/**
+ * Reports that DAC state change logic has reported change (RO).
+ *
+ * This gets cleared when TV_DAC_STATE_EN is cleared
+*/
+# define TVDAC_STATE_CHG		(1 << 31)
+# define TVDAC_SENSE_MASK		(7 << 28)
+/** Reports that DAC A voltage is above the detect threshold */
+# define TVDAC_A_SENSE			(1 << 30)
+/** Reports that DAC B voltage is above the detect threshold */
+# define TVDAC_B_SENSE			(1 << 29)
+/** Reports that DAC C voltage is above the detect threshold */
+# define TVDAC_C_SENSE			(1 << 28)
+/**
+ * Enables DAC state detection logic, for load-based TV detection.
+ *
+ * The PLL of the chosen pipe (in TV_CTL) must be running, and the encoder set
+ * to off, for load detection to work.
+ */
+# define TVDAC_STATE_CHG_EN		(1 << 27)
+/** Sets the DAC A sense value to high */
+# define TVDAC_A_SENSE_CTL		(1 << 26)
+/** Sets the DAC B sense value to high */
+# define TVDAC_B_SENSE_CTL		(1 << 25)
+/** Sets the DAC C sense value to high */
+# define TVDAC_C_SENSE_CTL		(1 << 24)
+/** Overrides the ENC_ENABLE and DAC voltage levels */
+# define DAC_CTL_OVERRIDE		(1 << 7)
+/** Sets the slew rate.  Must be preserved in software */
+# define ENC_TVDAC_SLEW_FAST		(1 << 6)
+# define DAC_A_1_3_V			(0 << 4)
+# define DAC_A_1_1_V			(1 << 4)
+# define DAC_A_0_7_V			(2 << 4)
+# define DAC_A_OFF			(3 << 4)
+# define DAC_B_1_3_V			(0 << 2)
+# define DAC_B_1_1_V			(1 << 2)
+# define DAC_B_0_7_V			(2 << 2)
+# define DAC_B_OFF			(3 << 2)
+# define DAC_C_1_3_V			(0 << 0)
+# define DAC_C_1_1_V			(1 << 0)
+# define DAC_C_0_7_V			(2 << 0)
+# define DAC_C_OFF			(3 << 0)
+
+/**
+ * CSC coefficients are stored in a floating point format with 9 bits of
+ * mantissa and 2 or 3 bits of exponent.  The exponent is represented as 2**-n,
+ * where 2-bit exponents are unsigned n, and 3-bit exponents are signed n with
+ * -1 (0x3) being the only legal negative value.
+ */
+#define TV_CSC_Y		0x68010
+# define TV_RY_MASK			0x07ff0000
+# define TV_RY_SHIFT			16
+# define TV_GY_MASK			0x00000fff
+# define TV_GY_SHIFT			0
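A rough decode of this coefficient format, assuming the low nine bits are a 0.9 fixed-point mantissa and the two bits above them the 2**-n exponent (inferred from the 0x07ff field masks, not from documentation; kernel code would avoid floating point, this sketch is for understanding only):

static double tv_csc_decode(u32 field)
{
	u32 mant = field & 0x1ff;	/* 9-bit mantissa */
	u32 n = (field >> 9) & 0x3;	/* 2-bit exponent, weight 2**-n */

	return ((double)mant / 512.0) / (double)(1 << n);
}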
+
+#define TV_CSC_Y2		0x68014
+# define TV_BY_MASK			0x07ff0000
+# define TV_BY_SHIFT			16
+/**
+ * Y attenuation for component video.
+ *
+ * Stored in 1.9 fixed point.
+ */
+# define TV_AY_MASK			0x000003ff
+# define TV_AY_SHIFT			0
+
+#define TV_CSC_U		0x68018
+# define TV_RU_MASK			0x07ff0000
+# define TV_RU_SHIFT			16
+# define TV_GU_MASK			0x000007ff
+# define TV_GU_SHIFT			0
+
+#define TV_CSC_U2		0x6801c
+# define TV_BU_MASK			0x07ff0000
+# define TV_BU_SHIFT			16
+/**
+ * U attenuation for component video.
+ *
+ * Stored in 1.9 fixed point.
+ */
+# define TV_AU_MASK			0x000003ff
+# define TV_AU_SHIFT			0
+
+#define TV_CSC_V		0x68020
+# define TV_RV_MASK			0x0fff0000
+# define TV_RV_SHIFT			16
+# define TV_GV_MASK			0x000007ff
+# define TV_GV_SHIFT			0
+
+#define TV_CSC_V2		0x68024
+# define TV_BV_MASK			0x07ff0000
+# define TV_BV_SHIFT			16
+/**
+ * V attenuation for component video.
+ *
+ * Stored in 1.9 fixed point.
+ */
+# define TV_AV_MASK			0x000007ff
+# define TV_AV_SHIFT			0
+
+#define TV_CLR_KNOBS		0x68028
+/** 2s-complement brightness adjustment */
+# define TV_BRIGHTNESS_MASK		0xff000000
+# define TV_BRIGHTNESS_SHIFT		24
+/** Contrast adjustment, as a 2.6 unsigned floating point number */
+# define TV_CONTRAST_MASK		0x00ff0000
+# define TV_CONTRAST_SHIFT		16
+/** Saturation adjustment, as a 2.6 unsigned floating point number */
+# define TV_SATURATION_MASK		0x0000ff00
+# define TV_SATURATION_SHIFT		8
+/** Hue adjustment, as an integer phase angle in degrees */
+# define TV_HUE_MASK			0x000000ff
+# define TV_HUE_SHIFT			0
+
+#define TV_CLR_LEVEL		0x6802c
+/** Controls the DAC level for black */
+# define TV_BLACK_LEVEL_MASK		0x01ff0000
+# define TV_BLACK_LEVEL_SHIFT		16
+/** Controls the DAC level for blanking */
+# define TV_BLANK_LEVEL_MASK		0x000001ff
+# define TV_BLANK_LEVEL_SHIFT		0
+
+#define TV_H_CTL_1		0x68030
+/** Number of pixels in the hsync. */
+# define TV_HSYNC_END_MASK		0x1fff0000
+# define TV_HSYNC_END_SHIFT		16
+/** Total number of pixels minus one in the line (display and blanking). */
+# define TV_HTOTAL_MASK			0x00001fff
+# define TV_HTOTAL_SHIFT		0
+
+#define TV_H_CTL_2		0x68034
+/** Enables the colorburst (needed for non-component color) */
+# define TV_BURST_ENA			(1 << 31)
+/** Offset of the colorburst from the start of hsync, in pixels minus one. */
+# define TV_HBURST_START_SHIFT		16
+# define TV_HBURST_START_MASK		0x1fff0000
+/** Length of the colorburst */
+# define TV_HBURST_LEN_SHIFT		0
+# define TV_HBURST_LEN_MASK		0x0001fff
+
+#define TV_H_CTL_3		0x68038
+/** End of hblank, measured in pixels minus one from start of hsync */
+# define TV_HBLANK_END_SHIFT		16
+# define TV_HBLANK_END_MASK		0x1fff0000
+/** Start of hblank, measured in pixels minus one from start of hsync */
+# define TV_HBLANK_START_SHIFT		0
+# define TV_HBLANK_START_MASK		0x0001fff
+
+#define TV_V_CTL_1		0x6803c
+/** XXX */
+# define TV_NBR_END_SHIFT		16
+# define TV_NBR_END_MASK		0x07ff0000
+/** XXX */
+# define TV_VI_END_F1_SHIFT		8
+# define TV_VI_END_F1_MASK		0x00003f00
+/** XXX */
+# define TV_VI_END_F2_SHIFT		0
+# define TV_VI_END_F2_MASK		0x0000003f
+
+#define TV_V_CTL_2		0x68040
+/** Length of vsync, in half lines */
+# define TV_VSYNC_LEN_MASK		0x07ff0000
+# define TV_VSYNC_LEN_SHIFT		16
+/** Offset of the start of vsync in field 1, measured in one less than the
+ * number of half lines.
+ */
+# define TV_VSYNC_START_F1_MASK		0x00007f00
+# define TV_VSYNC_START_F1_SHIFT	8
+/**
+ * Offset of the start of vsync in field 2, measured in one less than the
+ * number of half lines.
+ */
+# define TV_VSYNC_START_F2_MASK		0x0000007f
+# define TV_VSYNC_START_F2_SHIFT	0
+
+#define TV_V_CTL_3		0x68044
+/** Enables generation of the equalization signal */
+# define TV_EQUAL_ENA			(1 << 31)
+/** Length of the equalization pulse, in half lines */
+# define TV_VEQ_LEN_MASK		0x007f0000
+# define TV_VEQ_LEN_SHIFT		16
+/** Offset of the start of equalization in field 1, measured in one less than
+ * the number of half lines.
+ */
+# define TV_VEQ_START_F1_MASK		0x0007f00
+# define TV_VEQ_START_F1_SHIFT		8
+/**
+ * Offset of the start of equalization in field 2, measured in one less than
+ * the number of half lines.
+ */
+# define TV_VEQ_START_F2_MASK		0x000007f
+# define TV_VEQ_START_F2_SHIFT		0
+
+#define TV_V_CTL_4		0x68048
+/**
+ * Offset to start of vertical colorburst, measured in one less than the
+ * number of lines from vertical start.
+ */
+# define TV_VBURST_START_F1_MASK	0x003f0000
+# define TV_VBURST_START_F1_SHIFT	16
+/**
+ * Offset to the end of vertical colorburst, measured in one less than the
+ * number of lines from the start of NBR.
+ */
+# define TV_VBURST_END_F1_MASK		0x000000ff
+# define TV_VBURST_END_F1_SHIFT		0
+
+#define TV_V_CTL_5		0x6804c
+/**
+ * Offset to start of vertical colorburst, measured in one less than the
+ * number of lines from vertical start.
+ */
+# define TV_VBURST_START_F2_MASK	0x003f0000
+# define TV_VBURST_START_F2_SHIFT	16
+/**
+ * Offset to the end of vertical colorburst, measured in one less than the
+ * number of lines from the start of NBR.
+ */
+# define TV_VBURST_END_F2_MASK		0x000000ff
+# define TV_VBURST_END_F2_SHIFT		0
+
+#define TV_V_CTL_6		0x68050
+/**
+ * Offset to start of vertical colorburst, measured in one less than the
+ * number of lines from vertical start.
+ */
+# define TV_VBURST_START_F3_MASK	0x003f0000
+# define TV_VBURST_START_F3_SHIFT	16
+/**
+ * Offset to the end of vertical colorburst, measured in one less than the
+ * number of lines from the start of NBR.
+ */
+# define TV_VBURST_END_F3_MASK		0x000000ff
+# define TV_VBURST_END_F3_SHIFT		0
+
+#define TV_V_CTL_7		0x68054
+/**
+ * Offset to start of vertical colorburst, measured in one less than the
+ * number of lines from vertical start.
+ */
+# define TV_VBURST_START_F4_MASK	0x003f0000
+# define TV_VBURST_START_F4_SHIFT	16
+/**
+ * Offset to the end of vertical colorburst, measured in one less than the
+ * number of lines from the start of NBR.
+ */
+# define TV_VBURST_END_F4_MASK		0x000000ff
+# define TV_VBURST_END_F4_SHIFT		0
+
+#define TV_SC_CTL_1		0x68060
+/** Turns on the first subcarrier phase generation DDA */
+# define TV_SC_DDA1_EN			(1 << 31)
+/** Turns on the second subcarrier phase generation DDA */
+# define TV_SC_DDA2_EN			(1 << 30)
+/** Turns on the third subcarrier phase generation DDA */
+# define TV_SC_DDA3_EN			(1 << 29)
+/** Sets the subcarrier DDA to reset frequency every other field */
+# define TV_SC_RESET_EVERY_2		(0 << 24)
+/** Sets the subcarrier DDA to reset frequency every fourth field */
+# define TV_SC_RESET_EVERY_4		(1 << 24)
+/** Sets the subcarrier DDA to reset frequency every eighth field */
+# define TV_SC_RESET_EVERY_8		(2 << 24)
+/** Sets the subcarrier DDA to never reset the frequency */
+# define TV_SC_RESET_NEVER		(3 << 24)
+/** Sets the peak amplitude of the colorburst. */
+# define TV_BURST_LEVEL_MASK		0x00ff0000
+# define TV_BURST_LEVEL_SHIFT		16
+/** Sets the increment of the first subcarrier phase generation DDA */
+# define TV_SCDDA1_INC_MASK		0x00000fff
+# define TV_SCDDA1_INC_SHIFT		0
+
+#define TV_SC_CTL_2		0x68064
+/** Sets the rollover for the second subcarrier phase generation DDA */
+# define TV_SCDDA2_SIZE_MASK		0x7fff0000
+# define TV_SCDDA2_SIZE_SHIFT		16
+/** Sets the increment of the second subcarrier phase generation DDA */
+# define TV_SCDDA2_INC_MASK		0x00007fff
+# define TV_SCDDA2_INC_SHIFT		0
+
+#define TV_SC_CTL_3		0x68068
+/** Sets the rollover for the third subcarrier phase generation DDA */
+# define TV_SCDDA3_SIZE_MASK		0x7fff0000
+# define TV_SCDDA3_SIZE_SHIFT		16
+/** Sets the increment of the third subcarrier phase generation DDA */
+# define TV_SCDDA3_INC_MASK		0x00007fff
+# define TV_SCDDA3_INC_SHIFT		0
+
+#define TV_WIN_POS		0x68070
+/** X coordinate of the display from the start of horizontal active */
+# define TV_XPOS_MASK			0x1fff0000
+# define TV_XPOS_SHIFT			16
+/** Y coordinate of the display from the start of vertical active (NBR) */
+# define TV_YPOS_MASK			0x00000fff
+# define TV_YPOS_SHIFT			0
+
+#define TV_WIN_SIZE		0x68074
+/** Horizontal size of the display window, measured in pixels */
+# define TV_XSIZE_MASK			0x1fff0000
+# define TV_XSIZE_SHIFT			16
+/**
+ * Vertical size of the display window, measured in pixels.
+ *
+ * Must be even for interlaced modes.
+ */
+# define TV_YSIZE_MASK			0x00000fff
+# define TV_YSIZE_SHIFT			0
+
+#define TV_FILTER_CTL_1		0x68080
+/**
+ * Enables automatic scaling calculation.
+ *
+ * If set, the rest of the registers are ignored, and the calculated values can
+ * be read back from the register.
+ */
+# define TV_AUTO_SCALE			(1 << 31)
+/**
+ * Disables the vertical filter.
+ *
+ * This is required on modes more than 1024 pixels wide.
+ */
+# define TV_V_FILTER_BYPASS		(1 << 29)
+/** Enables adaptive vertical filtering */
+# define TV_VADAPT			(1 << 28)
+# define TV_VADAPT_MODE_MASK		(3 << 26)
+/** Selects the least adaptive vertical filtering mode */
+# define TV_VADAPT_MODE_LEAST		(0 << 26)
+/** Selects the moderately adaptive vertical filtering mode */
+# define TV_VADAPT_MODE_MODERATE	(1 << 26)
+/** Selects the most adaptive vertical filtering mode */
+# define TV_VADAPT_MODE_MOST		(3 << 26)
+/**
+ * Sets the horizontal scaling factor.
+ *
+ * This should be the fractional part of the horizontal scaling factor divided
+ * by the oversampling rate.  TV_HSCALE should be less than 1, and set to:
+ *
+ * (src width - 1) / ((oversample * dest width) - 1)
+ */
+# define TV_HSCALE_FRAC_MASK		0x00003fff
+# define TV_HSCALE_FRAC_SHIFT		0
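A sketch of computing this field from the formula above; scaling the fraction by 2**14 to fill the 14-bit field is an assumption based on the 0x3fff mask:

static u32 tv_hscale_frac(u32 src_w, u32 dst_w, u32 oversample)
{
	/* TV_HSCALE = (src width - 1) / ((oversample * dest width) - 1) */
	return (((src_w - 1) << 14) / (oversample * dst_w - 1)) &
		TV_HSCALE_FRAC_MASK;
}

The TV_VSCALE fields below follow the same pattern, with the interlace factor in place of the oversampling rate.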
+
+#define TV_FILTER_CTL_2		0x68084
+/**
+ * Sets the integer part of the 3.15 fixed-point vertical scaling factor.
+ *
+ * TV_VSCALE should be (src height - 1) / ((interlace * dest height) - 1)
+ */
+# define TV_VSCALE_INT_MASK		0x00038000
+# define TV_VSCALE_INT_SHIFT		15
+/**
+ * Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
+ *
+ * \sa TV_VSCALE_INT_MASK
+ */
+# define TV_VSCALE_FRAC_MASK		0x00007fff
+# define TV_VSCALE_FRAC_SHIFT		0
+
+#define TV_FILTER_CTL_3		0x68088
+/**
+ * Sets the integer part of the 3.15 fixed-point vertical scaling factor.
+ *
+ * TV_VSCALE should be (src height - 1) / (1/4 * (dest height - 1))
+ *
+ * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes.
+ */
+# define TV_VSCALE_IP_INT_MASK		0x00038000
+# define TV_VSCALE_IP_INT_SHIFT		15
+/**
+ * Sets the fractional part of the 3.15 fixed-point vertical scaling factor.
+ *
+ * For progressive modes, TV_VSCALE_IP_INT should be set to zeroes.
+ *
+ * \sa TV_VSCALE_IP_INT_MASK
+ */
+# define TV_VSCALE_IP_FRAC_MASK		0x00007fff
+# define TV_VSCALE_IP_FRAC_SHIFT		0
+
+#define TV_CC_CONTROL		0x68090
+# define TV_CC_ENABLE			(1 << 31)
+/**
+ * Specifies which field to send the CC data in.
+ *
+ * CC data is usually sent in field 0.
+ */
+# define TV_CC_FID_MASK			(1 << 27)
+# define TV_CC_FID_SHIFT		27
+/** Sets the horizontal position of the CC data.  Usually 135. */
+# define TV_CC_HOFF_MASK		0x03ff0000
+# define TV_CC_HOFF_SHIFT		16
+/** Sets the vertical position of the CC data.  Usually 21. */
+# define TV_CC_LINE_MASK		0x0000003f
+# define TV_CC_LINE_SHIFT		0
+
+#define TV_CC_DATA		0x68094
+# define TV_CC_RDY			(1 << 31)
+/** Second word of CC data to be transmitted. */
+# define TV_CC_DATA_2_MASK		0x007f0000
+# define TV_CC_DATA_2_SHIFT		16
+/** First word of CC data to be transmitted. */
+# define TV_CC_DATA_1_MASK		0x0000007f
+# define TV_CC_DATA_1_SHIFT		0
+
+#define TV_H_LUMA_0		0x68100
+#define TV_H_LUMA_59		0x681ec
+#define TV_H_CHROMA_0		0x68200
+#define TV_H_CHROMA_59		0x682ec
+#define TV_V_LUMA_0		0x68300
+#define TV_V_LUMA_42		0x683a8
+#define TV_V_CHROMA_0		0x68400
+#define TV_V_CHROMA_42		0x684a8
+
+/* Display & cursor control */
+
+/* Pipe A */
+#define PIPEADSL		0x70000
+#define PIPEACONF		0x70008
+#define   PIPEACONF_ENABLE	(1<<31)
+#define   PIPEACONF_DISABLE	0
+#define   PIPEACONF_DOUBLE_WIDE	(1<<30)
+#define   I965_PIPECONF_ACTIVE	(1<<30)
+#define   PIPEACONF_SINGLE_WIDE	0
+#define   PIPEACONF_PIPE_UNLOCKED 0
+#define   PIPEACONF_PIPE_LOCKED	(1<<25)
+#define   PIPEACONF_PALETTE	0
+#define   PIPEACONF_GAMMA		(1<<24)
+#define   PIPECONF_FORCE_BORDER	(1<<25)
+#define   PIPECONF_PROGRESSIVE	(0 << 21)
+#define   PIPECONF_INTERLACE_W_FIELD_INDICATION	(6 << 21)
+#define   PIPECONF_INTERLACE_FIELD_0_ONLY		(7 << 21)
+#define PIPEASTAT		0x70024
+#define   PIPE_FIFO_UNDERRUN_STATUS		(1UL<<31)
+#define   PIPE_CRC_ERROR_ENABLE			(1UL<<29)
+#define   PIPE_CRC_DONE_ENABLE			(1UL<<28)
+#define   PIPE_GMBUS_EVENT_ENABLE		(1UL<<27)
+#define   PIPE_HOTPLUG_INTERRUPT_ENABLE		(1UL<<26)
+#define   PIPE_VSYNC_INTERRUPT_ENABLE		(1UL<<25)
+#define   PIPE_DISPLAY_LINE_COMPARE_ENABLE	(1UL<<24)
+#define   PIPE_DPST_EVENT_ENABLE		(1UL<<23)
+#define   PIPE_LEGACY_BLC_EVENT_ENABLE		(1UL<<22)
+#define   PIPE_ODD_FIELD_INTERRUPT_ENABLE	(1UL<<21)
+#define   PIPE_EVEN_FIELD_INTERRUPT_ENABLE	(1UL<<20)
+#define   PIPE_HOTPLUG_TV_INTERRUPT_ENABLE	(1UL<<18) /* pre-965 */
+#define   PIPE_START_VBLANK_INTERRUPT_ENABLE	(1UL<<18) /* 965 or later */
+#define   PIPE_VBLANK_INTERRUPT_ENABLE		(1UL<<17)
+#define   PIPE_OVERLAY_UPDATED_ENABLE		(1UL<<16)
+#define   PIPE_CRC_ERROR_INTERRUPT_STATUS	(1UL<<13)
+#define   PIPE_CRC_DONE_INTERRUPT_STATUS	(1UL<<12)
+#define   PIPE_GMBUS_INTERRUPT_STATUS		(1UL<<11)
+#define   PIPE_HOTPLUG_INTERRUPT_STATUS		(1UL<<10)
+#define   PIPE_VSYNC_INTERRUPT_STATUS		(1UL<<9)
+#define   PIPE_DISPLAY_LINE_COMPARE_STATUS	(1UL<<8)
+#define   PIPE_DPST_EVENT_STATUS		(1UL<<7)
+#define   PIPE_LEGACY_BLC_EVENT_STATUS		(1UL<<6)
+#define   PIPE_ODD_FIELD_INTERRUPT_STATUS	(1UL<<5)
+#define   PIPE_EVEN_FIELD_INTERRUPT_STATUS	(1UL<<4)
+#define   PIPE_HOTPLUG_TV_INTERRUPT_STATUS	(1UL<<2) /* pre-965 */
+#define   PIPE_START_VBLANK_INTERRUPT_STATUS	(1UL<<2) /* 965 or later */
+#define   PIPE_VBLANK_INTERRUPT_STATUS		(1UL<<1)
+#define   PIPE_OVERLAY_UPDATED_STATUS		(1UL<<0)
+
+#define DSPARB			0x70030
+#define   DSPARB_CSTART_MASK	(0x7f << 7)
+#define   DSPARB_CSTART_SHIFT	7
+#define   DSPARB_BSTART_MASK	(0x7f)
+#define   DSPARB_BSTART_SHIFT	0
+/*
+ * The two pipe frame counter registers are not synchronized, so
+ * reading a stable value is somewhat tricky. The following code
+ * should work:
+ *
+ *  do {
+ *    high1 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
+ *             PIPE_FRAME_HIGH_SHIFT);
+ *    low1 =  ((INREG(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
+ *             PIPE_FRAME_LOW_SHIFT);
+ *    high2 = ((INREG(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
+ *             PIPE_FRAME_HIGH_SHIFT);
+ *  } while (high1 != high2);
+ *  frame = (high1 << 8) | low1;
+ */
+#define PIPEAFRAMEHIGH          0x70040
+#define   PIPE_FRAME_HIGH_MASK    0x0000ffff
+#define   PIPE_FRAME_HIGH_SHIFT   0
+#define PIPEAFRAMEPIXEL         0x70044
+#define   PIPE_FRAME_LOW_MASK     0xff000000
+#define   PIPE_FRAME_LOW_SHIFT    24
+#define   PIPE_PIXEL_MASK         0x00ffffff
+#define   PIPE_PIXEL_SHIFT        0
+
+/* Cursor A & B regs */
+#define CURACNTR		0x70080
+#define   CURSOR_MODE_DISABLE   0x00
+#define   CURSOR_MODE_64_32B_AX 0x07
+#define   CURSOR_MODE_64_ARGB_AX ((1 << 5) | CURSOR_MODE_64_32B_AX)
+#define   MCURSOR_GAMMA_ENABLE  (1 << 26)
+#define CURABASE		0x70084
+#define CURAPOS			0x70088
+#define   CURSOR_POS_MASK       0x007FF
+#define   CURSOR_POS_SIGN       0x8000
+#define   CURSOR_X_SHIFT        0
+#define   CURSOR_Y_SHIFT        16
+#define CURBCNTR		0x700c0
+#define CURBBASE		0x700c4
+#define CURBPOS			0x700c8
+
+/* Display A control */
+#define DSPACNTR                0x70180
+#define   DISPLAY_PLANE_ENABLE			(1<<31)
+#define   DISPLAY_PLANE_DISABLE			0
+#define   DISPPLANE_GAMMA_ENABLE		(1<<30)
+#define   DISPPLANE_GAMMA_DISABLE		0
+#define   DISPPLANE_PIXFORMAT_MASK		(0xf<<26)
+#define   DISPPLANE_8BPP			(0x2<<26)
+#define   DISPPLANE_15_16BPP			(0x4<<26)
+#define   DISPPLANE_16BPP			(0x5<<26)
+#define   DISPPLANE_32BPP_NO_ALPHA		(0x6<<26)
+#define   DISPPLANE_32BPP			(0x7<<26)
+#define   DISPPLANE_STEREO_ENABLE		(1<<25)
+#define   DISPPLANE_STEREO_DISABLE		0
+#define   DISPPLANE_SEL_PIPE_MASK		(1<<24)
+#define   DISPPLANE_SEL_PIPE_A			0
+#define   DISPPLANE_SEL_PIPE_B			(1<<24)
+#define   DISPPLANE_SRC_KEY_ENABLE		(1<<22)
+#define   DISPPLANE_SRC_KEY_DISABLE		0
+#define   DISPPLANE_LINE_DOUBLE			(1<<20)
+#define   DISPPLANE_NO_LINE_DOUBLE		0
+#define   DISPPLANE_STEREO_POLARITY_FIRST	0
+#define   DISPPLANE_STEREO_POLARITY_SECOND	(1<<18)
+#define DSPAADDR		0x70184
+#define DSPASTRIDE		0x70188
+#define DSPAPOS			0x7018C /* reserved */
+#define DSPASIZE		0x70190
+#define DSPASURF		0x7019C /* 965+ only */
+#define DSPATILEOFF		0x701A4 /* 965+ only */
+
+/* VBIOS flags */
+#define SWF00			0x71410
+#define SWF01			0x71414
+#define SWF02			0x71418
+#define SWF03			0x7141c
+#define SWF04			0x71420
+#define SWF05			0x71424
+#define SWF06			0x71428
+#define SWF10			0x70410
+#define SWF11			0x70414
+#define SWF14			0x71420
+#define SWF30			0x72414
+#define SWF31			0x72418
+#define SWF32			0x7241c
+
+/* Pipe B */
+#define PIPEBDSL		0x71000
+#define PIPEBCONF		0x71008
+#define PIPEBSTAT		0x71024
+#define PIPEBFRAMEHIGH		0x71040
+#define PIPEBFRAMEPIXEL		0x71044
+
+/* Display B control */
+#define DSPBCNTR		0x71180
+#define   DISPPLANE_ALPHA_TRANS_ENABLE		(1<<15)
+#define   DISPPLANE_ALPHA_TRANS_DISABLE		0
+#define   DISPPLANE_SPRITE_ABOVE_DISPLAY	0
+#define   DISPPLANE_SPRITE_ABOVE_OVERLAY	(1)
+#define DSPBADDR		0x71184
+#define DSPBSTRIDE		0x71188
+#define DSPBPOS			0x7118C
+#define DSPBSIZE		0x71190
+#define DSPBSURF		0x7119C
+#define DSPBTILEOFF		0x711A4
+
+/* VBIOS regs */
+#define VGACNTRL		0x71400
+# define VGA_DISP_DISABLE			(1 << 31)
+# define VGA_2X_MODE				(1 << 30)
+# define VGA_PIPE_B_SELECT			(1 << 29)
+
+#endif /* _I915_REG_H_ */
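As an illustration, the frame-counter read sequence from the PIPEAFRAMEHIGH comment above, rewritten against the I915_READ() accessor this series uses; a sketch with a made-up function name, not code from the patch:

static u32 i915_read_frame_count(struct drm_i915_private *dev_priv)
{
	u32 high1, high2, low;

	/* re-read the high half until it is stable across the low read */
	do {
		high1 = (I915_READ(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
			PIPE_FRAME_HIGH_SHIFT;
		low = (I915_READ(PIPEAFRAMEPIXEL) & PIPE_FRAME_LOW_MASK) >>
		      PIPE_FRAME_LOW_SHIFT;
		high2 = (I915_READ(PIPEAFRAMEHIGH) & PIPE_FRAME_HIGH_MASK) >>
			PIPE_FRAME_HIGH_SHIFT;
	} while (high1 != high2);

	return (high1 << 8) | low;
}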

+ 509 - 0
drivers/gpu/drm/i915/i915_suspend.c

@@ -0,0 +1,509 @@
+/*
+ *
+ * Copyright 2008 (c) Intel Corporation
+ *   Jesse Barnes <jbarnes@virtuousgeek.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+
+static bool i915_pipe_enabled(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (pipe == PIPE_A)
+		return (I915_READ(DPLL_A) & DPLL_VCO_ENABLE);
+	else
+		return (I915_READ(DPLL_B) & DPLL_VCO_ENABLE);
+}
+
+static void i915_save_palette(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
+	u32 *array;
+	int i;
+
+	if (!i915_pipe_enabled(dev, pipe))
+		return;
+
+	if (pipe == PIPE_A)
+		array = dev_priv->save_palette_a;
+	else
+		array = dev_priv->save_palette_b;
+
+	for (i = 0; i < 256; i++)
+		array[i] = I915_READ(reg + (i << 2));
+}
+
+static void i915_restore_palette(struct drm_device *dev, enum pipe pipe)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned long reg = (pipe == PIPE_A ? PALETTE_A : PALETTE_B);
+	u32 *array;
+	int i;
+
+	if (!i915_pipe_enabled(dev, pipe))
+		return;
+
+	if (pipe == PIPE_A)
+		array = dev_priv->save_palette_a;
+	else
+		array = dev_priv->save_palette_b;
+
+	for (i = 0; i < 256; i++)
+		I915_WRITE(reg + (i << 2), array[i]);
+}
+
+static u8 i915_read_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE8(index_port, reg);
+	return I915_READ8(data_port);
+}
+
+static u8 i915_read_ar(struct drm_device *dev, u16 st01, u8 reg, u16 palette_enable)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_READ8(st01);
+	I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
+	return I915_READ8(VGA_AR_DATA_READ);
+}
+
+static void i915_write_ar(struct drm_device *dev, u16 st01, u8 reg, u8 val, u16 palette_enable)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_READ8(st01);
+	I915_WRITE8(VGA_AR_INDEX, palette_enable | reg);
+	I915_WRITE8(VGA_AR_DATA_WRITE, val);
+}
+
+static void i915_write_indexed(struct drm_device *dev, u16 index_port, u16 data_port, u8 reg, u8 val)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	I915_WRITE8(index_port, reg);
+	I915_WRITE8(data_port, val);
+}
+
+static void i915_save_vga(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+	u16 cr_index, cr_data, st01;
+
+	/* VGA color palette registers */
+	dev_priv->saveDACMASK = I915_READ8(VGA_DACMASK);
+	/* DACCRX automatically increments during read */
+	I915_WRITE8(VGA_DACRX, 0);
+	/* Read 3 bytes of color data from each index */
+	for (i = 0; i < 256 * 3; i++)
+		dev_priv->saveDACDATA[i] = I915_READ8(VGA_DACDATA);
+
+	/* MSR bits */
+	dev_priv->saveMSR = I915_READ8(VGA_MSR_READ);
+	if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
+		cr_index = VGA_CR_INDEX_CGA;
+		cr_data = VGA_CR_DATA_CGA;
+		st01 = VGA_ST01_CGA;
+	} else {
+		cr_index = VGA_CR_INDEX_MDA;
+		cr_data = VGA_CR_DATA_MDA;
+		st01 = VGA_ST01_MDA;
+	}
+
+	/* CRT controller regs */
+	i915_write_indexed(dev, cr_index, cr_data, 0x11,
+			   i915_read_indexed(dev, cr_index, cr_data, 0x11) &
+			   (~0x80));
+	for (i = 0; i <= 0x24; i++)
+		dev_priv->saveCR[i] =
+			i915_read_indexed(dev, cr_index, cr_data, i);
+	/* Make sure we don't turn off CR group 0 writes */
+	dev_priv->saveCR[0x11] &= ~0x80;
+
+	/* Attribute controller registers */
+	I915_READ8(st01);
+	dev_priv->saveAR_INDEX = I915_READ8(VGA_AR_INDEX);
+	for (i = 0; i <= 0x14; i++)
+		dev_priv->saveAR[i] = i915_read_ar(dev, st01, i, 0);
+	I915_READ8(st01);
+	I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX);
+	I915_READ8(st01);
+
+	/* Graphics controller registers */
+	for (i = 0; i < 9; i++)
+		dev_priv->saveGR[i] =
+			i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i);
+
+	dev_priv->saveGR[0x10] =
+		i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10);
+	dev_priv->saveGR[0x11] =
+		i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11);
+	dev_priv->saveGR[0x18] =
+		i915_read_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18);
+
+	/* Sequencer registers */
+	for (i = 0; i < 8; i++)
+		dev_priv->saveSR[i] =
+			i915_read_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i);
+}
+
+static void i915_restore_vga(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+	u16 cr_index, cr_data, st01;
+
+	/* MSR bits */
+	I915_WRITE8(VGA_MSR_WRITE, dev_priv->saveMSR);
+	if (dev_priv->saveMSR & VGA_MSR_CGA_MODE) {
+		cr_index = VGA_CR_INDEX_CGA;
+		cr_data = VGA_CR_DATA_CGA;
+		st01 = VGA_ST01_CGA;
+	} else {
+		cr_index = VGA_CR_INDEX_MDA;
+		cr_data = VGA_CR_DATA_MDA;
+		st01 = VGA_ST01_MDA;
+	}
+
+	/* Sequencer registers, don't write SR07 */
+	for (i = 0; i < 7; i++)
+		i915_write_indexed(dev, VGA_SR_INDEX, VGA_SR_DATA, i,
+				   dev_priv->saveSR[i]);
+
+	/* CRT controller regs */
+	/* Enable CR group 0 writes */
+	i915_write_indexed(dev, cr_index, cr_data, 0x11, dev_priv->saveCR[0x11]);
+	for (i = 0; i <= 0x24; i++)
+		i915_write_indexed(dev, cr_index, cr_data, i, dev_priv->saveCR[i]);
+
+	/* Graphics controller regs */
+	for (i = 0; i < 9; i++)
+		i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, i,
+				   dev_priv->saveGR[i]);
+
+	i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x10,
+			   dev_priv->saveGR[0x10]);
+	i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x11,
+			   dev_priv->saveGR[0x11]);
+	i915_write_indexed(dev, VGA_GR_INDEX, VGA_GR_DATA, 0x18,
+			   dev_priv->saveGR[0x18]);
+
+	/* Attribute controller registers */
+	I915_READ8(st01); /* switch back to index mode */
+	for (i = 0; i <= 0x14; i++)
+		i915_write_ar(dev, st01, i, dev_priv->saveAR[i], 0);
+	I915_READ8(st01); /* switch back to index mode */
+	I915_WRITE8(VGA_AR_INDEX, dev_priv->saveAR_INDEX | 0x20);
+	I915_READ8(st01);
+
+	/* VGA color palette registers */
+	I915_WRITE8(VGA_DACMASK, dev_priv->saveDACMASK);
+	/* DACWX automatically increments during write */
+	I915_WRITE8(VGA_DACWX, 0);
+	/* Write 3 bytes of color data to each index */
+	for (i = 0; i < 256 * 3; i++)
+		I915_WRITE8(VGA_DACDATA, dev_priv->saveDACDATA[i]);
+}
+
+int i915_save_state(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	pci_read_config_byte(dev->pdev, LBB, &dev_priv->saveLBB);
+
+	/* Display arbitration control */
+	dev_priv->saveDSPARB = I915_READ(DSPARB);
+
+	/* Pipe & plane A info */
+	dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
+	dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
+	dev_priv->saveFPA0 = I915_READ(FPA0);
+	dev_priv->saveFPA1 = I915_READ(FPA1);
+	dev_priv->saveDPLL_A = I915_READ(DPLL_A);
+	if (IS_I965G(dev))
+		dev_priv->saveDPLL_A_MD = I915_READ(DPLL_A_MD);
+	dev_priv->saveHTOTAL_A = I915_READ(HTOTAL_A);
+	dev_priv->saveHBLANK_A = I915_READ(HBLANK_A);
+	dev_priv->saveHSYNC_A = I915_READ(HSYNC_A);
+	dev_priv->saveVTOTAL_A = I915_READ(VTOTAL_A);
+	dev_priv->saveVBLANK_A = I915_READ(VBLANK_A);
+	dev_priv->saveVSYNC_A = I915_READ(VSYNC_A);
+	dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
+
+	dev_priv->saveDSPACNTR = I915_READ(DSPACNTR);
+	dev_priv->saveDSPASTRIDE = I915_READ(DSPASTRIDE);
+	dev_priv->saveDSPASIZE = I915_READ(DSPASIZE);
+	dev_priv->saveDSPAPOS = I915_READ(DSPAPOS);
+	dev_priv->saveDSPAADDR = I915_READ(DSPAADDR);
+	if (IS_I965G(dev)) {
+		dev_priv->saveDSPASURF = I915_READ(DSPASURF);
+		dev_priv->saveDSPATILEOFF = I915_READ(DSPATILEOFF);
+	}
+	i915_save_palette(dev, PIPE_A);
+	dev_priv->savePIPEASTAT = I915_READ(PIPEASTAT);
+
+	/* Pipe & plane B info */
+	dev_priv->savePIPEBCONF = I915_READ(PIPEBCONF);
+	dev_priv->savePIPEBSRC = I915_READ(PIPEBSRC);
+	dev_priv->saveFPB0 = I915_READ(FPB0);
+	dev_priv->saveFPB1 = I915_READ(FPB1);
+	dev_priv->saveDPLL_B = I915_READ(DPLL_B);
+	if (IS_I965G(dev))
+		dev_priv->saveDPLL_B_MD = I915_READ(DPLL_B_MD);
+	dev_priv->saveHTOTAL_B = I915_READ(HTOTAL_B);
+	dev_priv->saveHBLANK_B = I915_READ(HBLANK_B);
+	dev_priv->saveHSYNC_B = I915_READ(HSYNC_B);
+	dev_priv->saveVTOTAL_B = I915_READ(VTOTAL_B);
+	dev_priv->saveVBLANK_B = I915_READ(VBLANK_B);
+	dev_priv->saveVSYNC_B = I915_READ(VSYNC_B);
+	dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
+
+	dev_priv->saveDSPBCNTR = I915_READ(DSPBCNTR);
+	dev_priv->saveDSPBSTRIDE = I915_READ(DSPBSTRIDE);
+	dev_priv->saveDSPBSIZE = I915_READ(DSPBSIZE);
+	dev_priv->saveDSPBPOS = I915_READ(DSPBPOS);
+	dev_priv->saveDSPBADDR = I915_READ(DSPBADDR);
+	if (IS_I965GM(dev) || IS_GM45(dev)) {
+		dev_priv->saveDSPBSURF = I915_READ(DSPBSURF);
+		dev_priv->saveDSPBTILEOFF = I915_READ(DSPBTILEOFF);
+	}
+	i915_save_palette(dev, PIPE_B);
+	dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
+
+	/* CRT state */
+	dev_priv->saveADPA = I915_READ(ADPA);
+
+	/* LVDS state */
+	dev_priv->savePP_CONTROL = I915_READ(PP_CONTROL);
+	dev_priv->savePFIT_PGM_RATIOS = I915_READ(PFIT_PGM_RATIOS);
+	dev_priv->saveBLC_PWM_CTL = I915_READ(BLC_PWM_CTL);
+	if (IS_I965G(dev))
+		dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+	if (IS_MOBILE(dev) && !IS_I830(dev))
+		dev_priv->saveLVDS = I915_READ(LVDS);
+	if (!IS_I830(dev) && !IS_845G(dev))
+		dev_priv->savePFIT_CONTROL = I915_READ(PFIT_CONTROL);
+	dev_priv->savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
+	dev_priv->savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
+	dev_priv->savePP_DIVISOR = I915_READ(PP_DIVISOR);
+
+	/* FIXME: save TV & SDVO state */
+
+	/* FBC state */
+	dev_priv->saveFBC_CFB_BASE = I915_READ(FBC_CFB_BASE);
+	dev_priv->saveFBC_LL_BASE = I915_READ(FBC_LL_BASE);
+	dev_priv->saveFBC_CONTROL2 = I915_READ(FBC_CONTROL2);
+	dev_priv->saveFBC_CONTROL = I915_READ(FBC_CONTROL);
+
+	/* Interrupt state */
+	dev_priv->saveIIR = I915_READ(IIR);
+	dev_priv->saveIER = I915_READ(IER);
+	dev_priv->saveIMR = I915_READ(IMR);
+
+	/* VGA state */
+	dev_priv->saveVGA0 = I915_READ(VGA0);
+	dev_priv->saveVGA1 = I915_READ(VGA1);
+	dev_priv->saveVGA_PD = I915_READ(VGA_PD);
+	dev_priv->saveVGACNTRL = I915_READ(VGACNTRL);
+
+	/* Clock gating state */
+	dev_priv->saveD_STATE = I915_READ(D_STATE);
+	dev_priv->saveCG_2D_DIS = I915_READ(CG_2D_DIS);
+
+	/* Cache mode state */
+	dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
+
+	/* Memory Arbitration state */
+	dev_priv->saveMI_ARB_STATE = I915_READ(MI_ARB_STATE);
+
+	/* Scratch space */
+	for (i = 0; i < 16; i++) {
+		dev_priv->saveSWF0[i] = I915_READ(SWF00 + (i << 2));
+		dev_priv->saveSWF1[i] = I915_READ(SWF10 + (i << 2));
+	}
+	for (i = 0; i < 3; i++)
+		dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
+
+	i915_save_vga(dev);
+
+	return 0;
+}
+
+int i915_restore_state(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	int i;
+
+	pci_write_config_byte(dev->pdev, LBB, dev_priv->saveLBB);
+
+	I915_WRITE(DSPARB, dev_priv->saveDSPARB);
+
+	/* Pipe & plane A info */
+	/* Prime the clock */
+	if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
+		I915_WRITE(DPLL_A, dev_priv->saveDPLL_A &
+			   ~DPLL_VCO_ENABLE);
+		DRM_UDELAY(150);
+	}
+	I915_WRITE(FPA0, dev_priv->saveFPA0);
+	I915_WRITE(FPA1, dev_priv->saveFPA1);
+	/* Actually enable it */
+	I915_WRITE(DPLL_A, dev_priv->saveDPLL_A);
+	DRM_UDELAY(150);
+	if (IS_I965G(dev))
+		I915_WRITE(DPLL_A_MD, dev_priv->saveDPLL_A_MD);
+	DRM_UDELAY(150);
+
+	/* Restore mode */
+	I915_WRITE(HTOTAL_A, dev_priv->saveHTOTAL_A);
+	I915_WRITE(HBLANK_A, dev_priv->saveHBLANK_A);
+	I915_WRITE(HSYNC_A, dev_priv->saveHSYNC_A);
+	I915_WRITE(VTOTAL_A, dev_priv->saveVTOTAL_A);
+	I915_WRITE(VBLANK_A, dev_priv->saveVBLANK_A);
+	I915_WRITE(VSYNC_A, dev_priv->saveVSYNC_A);
+	I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
+
+	/* Restore plane info */
+	I915_WRITE(DSPASIZE, dev_priv->saveDSPASIZE);
+	I915_WRITE(DSPAPOS, dev_priv->saveDSPAPOS);
+	I915_WRITE(PIPEASRC, dev_priv->savePIPEASRC);
+	I915_WRITE(DSPAADDR, dev_priv->saveDSPAADDR);
+	I915_WRITE(DSPASTRIDE, dev_priv->saveDSPASTRIDE);
+	if (IS_I965G(dev)) {
+		I915_WRITE(DSPASURF, dev_priv->saveDSPASURF);
+		I915_WRITE(DSPATILEOFF, dev_priv->saveDSPATILEOFF);
+	}
+
+	I915_WRITE(PIPEACONF, dev_priv->savePIPEACONF);
+
+	i915_restore_palette(dev, PIPE_A);
+	/* Enable the plane */
+	I915_WRITE(DSPACNTR, dev_priv->saveDSPACNTR);
+	I915_WRITE(DSPAADDR, I915_READ(DSPAADDR));
+
+	/* Pipe & plane B info */
+	if (dev_priv->saveDPLL_B & DPLL_VCO_ENABLE) {
+		I915_WRITE(DPLL_B, dev_priv->saveDPLL_B &
+			   ~DPLL_VCO_ENABLE);
+		DRM_UDELAY(150);
+	}
+	I915_WRITE(FPB0, dev_priv->saveFPB0);
+	I915_WRITE(FPB1, dev_priv->saveFPB1);
+	/* Actually enable it */
+	I915_WRITE(DPLL_B, dev_priv->saveDPLL_B);
+	DRM_UDELAY(150);
+	if (IS_I965G(dev))
+		I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
+	DRM_UDELAY(150);
+
+	/* Restore mode */
+	I915_WRITE(HTOTAL_B, dev_priv->saveHTOTAL_B);
+	I915_WRITE(HBLANK_B, dev_priv->saveHBLANK_B);
+	I915_WRITE(HSYNC_B, dev_priv->saveHSYNC_B);
+	I915_WRITE(VTOTAL_B, dev_priv->saveVTOTAL_B);
+	I915_WRITE(VBLANK_B, dev_priv->saveVBLANK_B);
+	I915_WRITE(VSYNC_B, dev_priv->saveVSYNC_B);
+	I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
+
+	/* Restore plane info */
+	I915_WRITE(DSPBSIZE, dev_priv->saveDSPBSIZE);
+	I915_WRITE(DSPBPOS, dev_priv->saveDSPBPOS);
+	I915_WRITE(PIPEBSRC, dev_priv->savePIPEBSRC);
+	I915_WRITE(DSPBADDR, dev_priv->saveDSPBADDR);
+	I915_WRITE(DSPBSTRIDE, dev_priv->saveDSPBSTRIDE);
+	if (IS_I965G(dev)) {
+		I915_WRITE(DSPBSURF, dev_priv->saveDSPBSURF);
+		I915_WRITE(DSPBTILEOFF, dev_priv->saveDSPBTILEOFF);
+	}
+
+	I915_WRITE(PIPEBCONF, dev_priv->savePIPEBCONF);
+
+	i915_restore_palette(dev, PIPE_B);
+	/* Enable the plane */
+	I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
+	I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
+
+	/* CRT state */
+	I915_WRITE(ADPA, dev_priv->saveADPA);
+
+	/* LVDS state */
+	if (IS_I965G(dev))
+		I915_WRITE(BLC_PWM_CTL2, dev_priv->saveBLC_PWM_CTL2);
+	if (IS_MOBILE(dev) && !IS_I830(dev))
+		I915_WRITE(LVDS, dev_priv->saveLVDS);
+	if (!IS_I830(dev) && !IS_845G(dev))
+		I915_WRITE(PFIT_CONTROL, dev_priv->savePFIT_CONTROL);
+
+	I915_WRITE(PFIT_PGM_RATIOS, dev_priv->savePFIT_PGM_RATIOS);
+	I915_WRITE(BLC_PWM_CTL, dev_priv->saveBLC_PWM_CTL);
+	I915_WRITE(PP_ON_DELAYS, dev_priv->savePP_ON_DELAYS);
+	I915_WRITE(PP_OFF_DELAYS, dev_priv->savePP_OFF_DELAYS);
+	I915_WRITE(PP_DIVISOR, dev_priv->savePP_DIVISOR);
+	I915_WRITE(PP_CONTROL, dev_priv->savePP_CONTROL);
+
+	/* FIXME: restore TV & SDVO state */
+
+	/* FBC info */
+	I915_WRITE(FBC_CFB_BASE, dev_priv->saveFBC_CFB_BASE);
+	I915_WRITE(FBC_LL_BASE, dev_priv->saveFBC_LL_BASE);
+	I915_WRITE(FBC_CONTROL2, dev_priv->saveFBC_CONTROL2);
+	I915_WRITE(FBC_CONTROL, dev_priv->saveFBC_CONTROL);
+
+	/* VGA state */
+	I915_WRITE(VGACNTRL, dev_priv->saveVGACNTRL);
+	I915_WRITE(VGA0, dev_priv->saveVGA0);
+	I915_WRITE(VGA1, dev_priv->saveVGA1);
+	I915_WRITE(VGA_PD, dev_priv->saveVGA_PD);
+	DRM_UDELAY(150);
+
+	/* Clock gating state */
+	I915_WRITE(D_STATE, dev_priv->saveD_STATE);
+	I915_WRITE(CG_2D_DIS, dev_priv->saveCG_2D_DIS);
+
+	/* Cache mode state */
+	I915_WRITE(CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
+
+	/* Memory arbitration state */
+	I915_WRITE(MI_ARB_STATE, dev_priv->saveMI_ARB_STATE | 0xffff0000);
+
+	for (i = 0; i < 16; i++) {
+		I915_WRITE(SWF00 + (i << 2), dev_priv->saveSWF0[i]);
+		I915_WRITE(SWF10 + (i << 2), dev_priv->saveSWF1[i]);
+	}
+	for (i = 0; i < 3; i++)
+		I915_WRITE(SWF30 + (i << 2), dev_priv->saveSWF2[i]);
+
+	i915_restore_vga(dev);
+
+	return 0;
+}
+
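For context, a minimal sketch of how these entry points are meant to be wired into the driver's suspend/resume hooks. The i915_drv.c side is not part of this hunk, so the bodies below are illustrative, assuming the standard PCI power-management helpers:

static int i915_suspend(struct drm_device *dev, pm_message_t state)
{
	i915_save_state(dev);

	pci_save_state(dev->pdev);
	if (state.event == PM_EVENT_SUSPEND) {
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}

	return 0;
}

static int i915_resume(struct drm_device *dev)
{
	pci_set_power_state(dev->pdev, PCI_D0);
	pci_restore_state(dev->pdev);
	if (pci_enable_device(dev->pdev))
		return -1;

	i915_restore_state(dev);

	return 0;
}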

+ 15 - 14
drivers/gpu/drm/mga/mga_drv.c

@@ -45,15 +45,16 @@ static struct pci_device_id pciidlist[] = {
 static struct drm_driver driver = {
 	.driver_features =
 	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA |
-	    DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
-	    DRIVER_IRQ_VBL,
+	    DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
 	.dev_priv_size = sizeof(drm_mga_buf_priv_t),
 	.load = mga_driver_load,
 	.unload = mga_driver_unload,
 	.lastclose = mga_driver_lastclose,
 	.dma_quiescent = mga_driver_dma_quiescent,
 	.device_is_agp = mga_driver_device_is_agp,
-	.vblank_wait = mga_driver_vblank_wait,
+	.get_vblank_counter = mga_get_vblank_counter,
+	.enable_vblank = mga_enable_vblank,
+	.disable_vblank = mga_disable_vblank,
 	.irq_preinstall = mga_driver_irq_preinstall,
 	.irq_postinstall = mga_driver_irq_postinstall,
 	.irq_uninstall = mga_driver_irq_uninstall,
@@ -64,20 +65,20 @@ static struct drm_driver driver = {
 	.ioctls = mga_ioctls,
 	.dma_ioctl = mga_dma_buffers,
 	.fops = {
-		 .owner = THIS_MODULE,
-		 .open = drm_open,
-		 .release = drm_release,
-		 .ioctl = drm_ioctl,
-		 .mmap = drm_mmap,
-		 .poll = drm_poll,
-		 .fasync = drm_fasync,
+		.owner = THIS_MODULE,
+		.open = drm_open,
+		.release = drm_release,
+		.ioctl = drm_ioctl,
+		.mmap = drm_mmap,
+		.poll = drm_poll,
+		.fasync = drm_fasync,
 #ifdef CONFIG_COMPAT
-		 .compat_ioctl = mga_compat_ioctl,
+		.compat_ioctl = mga_compat_ioctl,
 #endif
-		 },
+	},
 	.pci_driver = {
-		 .name = DRIVER_NAME,
-		 .id_table = pciidlist,
+		.name = DRIVER_NAME,
+		.id_table = pciidlist,
 	},
 
 	.name = DRIVER_NAME,

+ 5 - 1
drivers/gpu/drm/mga/mga_drv.h

@@ -120,6 +120,7 @@ typedef struct drm_mga_private {
 	u32 clear_cmd;
 	u32 maccess;
 
+	atomic_t vbl_received;          /**< Number of vblanks received. */
 	wait_queue_head_t fence_queue;
 	atomic_t last_fence_retired;
 	u32 next_fence_to_post;
@@ -181,11 +182,14 @@ extern int mga_warp_install_microcode(drm_mga_private_t * dev_priv);
 extern int mga_warp_init(drm_mga_private_t * dev_priv);
 
 				/* mga_irq.c */
+extern int mga_enable_vblank(struct drm_device *dev, int crtc);
+extern void mga_disable_vblank(struct drm_device *dev, int crtc);
+extern u32 mga_get_vblank_counter(struct drm_device *dev, int crtc);
 extern int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence);
 extern int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
 extern irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS);
 extern void mga_driver_irq_preinstall(struct drm_device * dev);
-extern void mga_driver_irq_postinstall(struct drm_device * dev);
+extern int mga_driver_irq_postinstall(struct drm_device *dev);
 extern void mga_driver_irq_uninstall(struct drm_device * dev);
 extern long mga_compat_ioctl(struct file *filp, unsigned int cmd,
 			     unsigned long arg);

+ 53 - 21
drivers/gpu/drm/mga/mga_irq.c

@@ -1,5 +1,6 @@
 /* mga_irq.c -- IRQ handling for mga -*- linux-c -*-
- *
+ */
+/*
  * Copyright (C) The Weather Channel, Inc.  2002.  All Rights Reserved.
  *
  * The Weather Channel (TM) funded Tungsten Graphics to develop the
@@ -35,6 +36,18 @@
 #include "mga_drm.h"
 #include "mga_drv.h"
 
+u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+	const drm_mga_private_t *const dev_priv =
+		(drm_mga_private_t *) dev->dev_private;
+
+	if (crtc != 0)
+		return 0;
+
+	return atomic_read(&dev_priv->vbl_received);
+}
+
 irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
@@ -47,9 +60,8 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
 	/* VBLANK interrupt */
 	if (status & MGA_VLINEPEN) {
 		MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
-		atomic_inc(&dev->vbl_received);
-		DRM_WAKEUP(&dev->vbl_queue);
-		drm_vbl_send_signals(dev);
+		atomic_inc(&dev_priv->vbl_received);
+		drm_handle_vblank(dev, 0);
 		handled = 1;
 	}
 
@@ -58,6 +70,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
 		const u32 prim_start = MGA_READ(MGA_PRIMADDRESS);
 		const u32 prim_end = MGA_READ(MGA_PRIMEND);
 
 		MGA_WRITE(MGA_ICLEAR, MGA_SOFTRAPICLR);
 
 		/* In addition to clearing the interrupt-pending bit, we
@@ -72,28 +85,39 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
 		handled = 1;
 	}
 
-	if (handled) {
+	if (handled)
 		return IRQ_HANDLED;
-	}
 	return IRQ_NONE;
 }
 
-int mga_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
+int mga_enable_vblank(struct drm_device *dev, int crtc)
 {
-	unsigned int cur_vblank;
-	int ret = 0;
+	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
 
-	/* Assume that the user has missed the current sequence number
-	 * by about a day rather than she wants to wait for years
-	 * using vertical blanks...
-	 */
-	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
-		    (((cur_vblank = atomic_read(&dev->vbl_received))
-		      - *sequence) <= (1 << 23)));
+	if (crtc != 0) {
+		DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
+			  crtc);
+		return 0;
+	}
 
-	*sequence = cur_vblank;
+	MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
+	return 0;
+}
 
-	return ret;
+
+{
+	if (crtc != 0) {
+		DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
+			  crtc);
+	}
+
+	/* Do *NOT* disable the vertical refresh interrupt.  MGA doesn't have
+	 * a nice hardware counter that tracks the number of refreshes when
+	 * the interrupt is disabled, and the kernel doesn't know the refresh
+	 * rate to calculate an estimate.
+	 */
+	/* MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN); */
 }
 
 int mga_driver_fence_wait(struct drm_device * dev, unsigned int *sequence)
@@ -125,14 +149,22 @@ void mga_driver_irq_preinstall(struct drm_device * dev)
 	MGA_WRITE(MGA_ICLEAR, ~0);
 }
 
-void mga_driver_irq_postinstall(struct drm_device * dev)
+int mga_driver_irq_postinstall(struct drm_device *dev)
 {
 	drm_mga_private_t *dev_priv = (drm_mga_private_t *) dev->dev_private;
+	int ret;
+
+	ret = drm_vblank_init(dev, 1);
+	if (ret)
+		return ret;
 
 	DRM_INIT_WAITQUEUE(&dev_priv->fence_queue);
 
-	/* Turn on vertical blank interrupt and soft trap interrupt. */
-	MGA_WRITE(MGA_IEN, MGA_VLINEIEN | MGA_SOFTRAPEN);
+	/* Turn on soft trap interrupt.  Vertical blank interrupts are enabled
+	 * in mga_enable_vblank.
+	 */
+	MGA_WRITE(MGA_IEN, MGA_SOFTRAPEN);
+	return 0;
 }
 
 void mga_driver_irq_uninstall(struct drm_device * dev)

+ 1 - 1
drivers/gpu/drm/mga/mga_state.c

@@ -1022,7 +1022,7 @@ static int mga_getparam(struct drm_device *dev, void *data, struct drm_file *fil
 
 	switch (param->param) {
 	case MGA_PARAM_IRQ_NR:
-		value = dev->irq;
+		value = drm_dev_to_irq(dev);
 		break;
 	case MGA_PARAM_CARD_TYPE:
 		value = dev_priv->chipset;

+ 15 - 14
drivers/gpu/drm/r128/r128_drv.c

@@ -43,12 +43,13 @@ static struct pci_device_id pciidlist[] = {
 static struct drm_driver driver = {
 	.driver_features =
 	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
-	    DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
-	    DRIVER_IRQ_VBL,
+	    DRIVER_HAVE_DMA | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
 	.dev_priv_size = sizeof(drm_r128_buf_priv_t),
 	.preclose = r128_driver_preclose,
 	.lastclose = r128_driver_lastclose,
-	.vblank_wait = r128_driver_vblank_wait,
+	.get_vblank_counter = r128_get_vblank_counter,
+	.enable_vblank = r128_enable_vblank,
+	.disable_vblank = r128_disable_vblank,
 	.irq_preinstall = r128_driver_irq_preinstall,
 	.irq_postinstall = r128_driver_irq_postinstall,
 	.irq_uninstall = r128_driver_irq_uninstall,
@@ -59,21 +60,20 @@ static struct drm_driver driver = {
 	.ioctls = r128_ioctls,
 	.dma_ioctl = r128_cce_buffers,
 	.fops = {
-		 .owner = THIS_MODULE,
-		 .open = drm_open,
-		 .release = drm_release,
-		 .ioctl = drm_ioctl,
-		 .mmap = drm_mmap,
-		 .poll = drm_poll,
-		 .fasync = drm_fasync,
+		.owner = THIS_MODULE,
+		.open = drm_open,
+		.release = drm_release,
+		.ioctl = drm_ioctl,
+		.mmap = drm_mmap,
+		.poll = drm_poll,
+		.fasync = drm_fasync,
 #ifdef CONFIG_COMPAT
-		 .compat_ioctl = r128_compat_ioctl,
+		.compat_ioctl = r128_compat_ioctl,
 #endif
 	},
-
 	.pci_driver = {
-		 .name = DRIVER_NAME,
-		 .id_table = pciidlist,
+		.name = DRIVER_NAME,
+		.id_table = pciidlist,
 	},
 
 	.name = DRIVER_NAME,
@@ -87,6 +87,7 @@ static struct drm_driver driver = {
 static int __init r128_init(void)
 {
 	driver.num_ioctls = r128_max_ioctl;
+
 	return drm_init(&driver);
 }
 

+ 7 - 4
drivers/gpu/drm/r128/r128_drv.h

@@ -29,7 +29,7 @@
  *    Rickard E. (Rik) Faith <faith@valinux.com>
  *    Kevin E. Martin <martin@valinux.com>
  *    Gareth Hughes <gareth@valinux.com>
- *    Michel Dänzer <daenzerm@student.ethz.ch>
+ *    Michel Dänzer <daenzerm@student.ethz.ch>
  */
 
 #ifndef __R128_DRV_H__
@@ -97,6 +97,8 @@ typedef struct drm_r128_private {
 	u32 crtc_offset;
 	u32 crtc_offset_cntl;
 
+	atomic_t vbl_received;
+
 	u32 color_fmt;
 	unsigned int front_offset;
 	unsigned int front_pitch;
@@ -149,11 +151,12 @@ extern int r128_wait_ring(drm_r128_private_t * dev_priv, int n);
 extern int r128_do_cce_idle(drm_r128_private_t * dev_priv);
 extern int r128_do_cleanup_cce(struct drm_device * dev);
 
-extern int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
-
+extern int r128_enable_vblank(struct drm_device *dev, int crtc);
+extern void r128_disable_vblank(struct drm_device *dev, int crtc);
+extern u32 r128_get_vblank_counter(struct drm_device *dev, int crtc);
 extern irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS);
 extern void r128_driver_irq_preinstall(struct drm_device * dev);
-extern void r128_driver_irq_postinstall(struct drm_device * dev);
+extern int r128_driver_irq_postinstall(struct drm_device *dev);
 extern void r128_driver_irq_uninstall(struct drm_device * dev);
 extern void r128_driver_lastclose(struct drm_device * dev);
 extern void r128_driver_preclose(struct drm_device * dev,

+ 35 - 20
drivers/gpu/drm/r128/r128_irq.c

@@ -35,6 +35,16 @@
 #include "r128_drm.h"
 #include "r128_drv.h"
 
+u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+	const drm_r128_private_t *dev_priv = dev->dev_private;
+
+	if (crtc != 0)
+		return 0;
+
+	return atomic_read(&dev_priv->vbl_received);
+}
+
 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
 {
 	struct drm_device *dev = (struct drm_device *) arg;
@@ -46,30 +56,38 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
 	/* VBLANK interrupt */
 	if (status & R128_CRTC_VBLANK_INT) {
 		R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
-		atomic_inc(&dev->vbl_received);
-		DRM_WAKEUP(&dev->vbl_queue);
-		drm_vbl_send_signals(dev);
+		atomic_inc(&dev_priv->vbl_received);
+		drm_handle_vblank(dev, 0);
 		return IRQ_HANDLED;
 	}
 	return IRQ_NONE;
 }
 
-int r128_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
+int r128_enable_vblank(struct drm_device *dev, int crtc)
 {
-	unsigned int cur_vblank;
-	int ret = 0;
+	drm_r128_private_t *dev_priv = dev->dev_private;
 
-	/* Assume that the user has missed the current sequence number
-	 * by about a day rather than she wants to wait for years
-	 * using vertical blanks...
-	 */
-	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
-		    (((cur_vblank = atomic_read(&dev->vbl_received))
-		      - *sequence) <= (1 << 23)));
+	if (crtc != 0) {
+		DRM_ERROR("%s:  bad crtc %d\n", __func__, crtc);
+		return -EINVAL;
+	}
 
-	*sequence = cur_vblank;
+	R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN);
+	return 0;
+}
+
+void r128_disable_vblank(struct drm_device *dev, int crtc)
+{
+	if (crtc != 0)
+		DRM_ERROR("%s:  bad crtc %d\n", __func__, crtc);
 
-	return ret;
+	/*
+	 * FIXME: implement proper interrupt disable by using the vblank
+	 * counter register (if available)
+	 *
+	 * R128_WRITE(R128_GEN_INT_CNTL,
+	 *            R128_READ(R128_GEN_INT_CNTL) & ~R128_CRTC_VBLANK_INT_EN);
+	 */
 }
 
 void r128_driver_irq_preinstall(struct drm_device * dev)
@@ -82,12 +100,9 @@ void r128_driver_irq_preinstall(struct drm_device * dev)
 	R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
 }
 
-void r128_driver_irq_postinstall(struct drm_device * dev)
+int r128_driver_irq_postinstall(struct drm_device *dev)
 {
-	drm_r128_private_t *dev_priv = (drm_r128_private_t *) dev->dev_private;
-
-	/* Turn on VBL interrupt */
-	R128_WRITE(R128_GEN_INT_CNTL, R128_CRTC_VBLANK_INT_EN);
+	return drm_vblank_init(dev, 1);
 }
 
 void r128_driver_irq_uninstall(struct drm_device * dev)

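r128_disable_vblank() is left as a stub on purpose: like mga, the driver counts vblanks in software, so masking the interrupt would silently stop the counter. The FIXME sketches the eventual fix; spelled out as a read-modify-write helper (r128_mask_vblank is hypothetical, nothing in this commit defines it), it would amount to:

static void r128_mask_vblank(drm_r128_private_t *dev_priv, int on)
{
	u32 cntl = R128_READ(R128_GEN_INT_CNTL);

	if (on)
		cntl |= R128_CRTC_VBLANK_INT_EN;
	else
		cntl &= ~R128_CRTC_VBLANK_INT_EN;
	R128_WRITE(R128_GEN_INT_CNTL, cntl);
}

Until a hardware frame counter is wired up, enable stays a plain write in r128_enable_vblank() and disable stays a no-op.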
+ 1 - 1
drivers/gpu/drm/r128/r128_state.c

@@ -1629,7 +1629,7 @@ static int r128_getparam(struct drm_device *dev, void *data, struct drm_file *fi
 
 	switch (param->param) {
 	case R128_PARAM_IRQ_NR:
-		value = dev->irq;
+		value = drm_dev_to_irq(dev);
 		break;
 	default:
 		return -EINVAL;

+ 38 - 15
drivers/gpu/drm/radeon/radeon_cp.c

@@ -71,7 +71,8 @@ static u32 RS690_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
 
 static u32 IGP_READ_MCIND(drm_radeon_private_t *dev_priv, int addr)
 {
-	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
 		return RS690_READ_MCIND(dev_priv, addr);
 	else
 		return RS480_READ_MCIND(dev_priv, addr);
@@ -82,7 +83,8 @@ u32 radeon_read_fb_location(drm_radeon_private_t *dev_priv)
 
 	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
 		return R500_READ_MCIND(dev_priv, RV515_MC_FB_LOCATION);
-	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
+	else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+		 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
 		return RS690_READ_MCIND(dev_priv, RS690_MC_FB_LOCATION);
 	else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
 		return R500_READ_MCIND(dev_priv, R520_MC_FB_LOCATION);
@@ -94,7 +96,8 @@ static void radeon_write_fb_location(drm_radeon_private_t *dev_priv, u32 fb_loc)
 {
 	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
 		R500_WRITE_MCIND(RV515_MC_FB_LOCATION, fb_loc);
-	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
+	else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+		 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
 		RS690_WRITE_MCIND(RS690_MC_FB_LOCATION, fb_loc);
 	else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
 		R500_WRITE_MCIND(R520_MC_FB_LOCATION, fb_loc);
@@ -106,7 +109,8 @@ static void radeon_write_agp_location(drm_radeon_private_t *dev_priv, u32 agp_lo
 {
 	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515)
 		R500_WRITE_MCIND(RV515_MC_AGP_LOCATION, agp_loc);
-	else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
+	else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+		 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
 		RS690_WRITE_MCIND(RS690_MC_AGP_LOCATION, agp_loc);
 	else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515)
 		R500_WRITE_MCIND(R520_MC_AGP_LOCATION, agp_loc);
@@ -122,15 +126,17 @@ static void radeon_write_agp_base(drm_radeon_private_t *dev_priv, u64 agp_base)
 	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV515) {
 		R500_WRITE_MCIND(RV515_MC_AGP_BASE, agp_base_lo);
 		R500_WRITE_MCIND(RV515_MC_AGP_BASE_2, agp_base_hi);
-	} else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) {
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+		 ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
 		RS690_WRITE_MCIND(RS690_MC_AGP_BASE, agp_base_lo);
 		RS690_WRITE_MCIND(RS690_MC_AGP_BASE_2, agp_base_hi);
 	} else if ((dev_priv->flags & RADEON_FAMILY_MASK) > CHIP_RV515) {
 		R500_WRITE_MCIND(R520_MC_AGP_BASE, agp_base_lo);
 		R500_WRITE_MCIND(R520_MC_AGP_BASE_2, agp_base_hi);
-	} else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480) {
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
 		RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
-		RADEON_WRITE(RS480_AGP_BASE_2, 0);
+		RADEON_WRITE(RS480_AGP_BASE_2, agp_base_hi);
 	} else {
 		RADEON_WRITE(RADEON_AGP_BASE, agp_base_lo);
 		if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R200)
@@ -347,6 +353,7 @@ static void radeon_cp_load_microcode(drm_radeon_private_t * dev_priv)
 		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R350) ||
 		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV350) ||
 		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
 		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS480)) {
 		DRM_INFO("Loading R300 Microcode\n");
 		for (i = 0; i < 256; i++) {
@@ -356,6 +363,7 @@ static void radeon_cp_load_microcode(drm_radeon_private_t * dev_priv)
 				     R300_cp_microcode[i][0]);
 		}
 	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R420) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_R423) ||
 		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV410)) {
 		DRM_INFO("Loading R400 Microcode\n");
 		for (i = 0; i < 256; i++) {
@@ -364,8 +372,9 @@ static void radeon_cp_load_microcode(drm_radeon_private_t * dev_priv)
 			RADEON_WRITE(RADEON_CP_ME_RAM_DATAL,
 				     R420_cp_microcode[i][0]);
 		}
-	} else if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) {
-		DRM_INFO("Loading RS690 Microcode\n");
+	} else if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+		   ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
+		DRM_INFO("Loading RS690/RS740 Microcode\n");
 		for (i = 0; i < 256; i++) {
 			RADEON_WRITE(RADEON_CP_ME_RAM_DATAH,
 				     RS690_cp_microcode[i][1]);
@@ -626,8 +635,6 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
 		     dev_priv->ring.size_l2qw);
 #endif
 
-	/* Start with assuming that writeback doesn't work */
-	dev_priv->writeback_works = 0;
 
 	/* Initialize the scratch register pointer.  This will cause
 	 * the scratch register values to be written out to memory
@@ -646,8 +653,18 @@ static void radeon_cp_init_ring_buffer(struct drm_device * dev,
 	RADEON_WRITE(RADEON_SCRATCH_UMSK, 0x7);
 
 	/* Turn on bus mastering */
-	tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
-	RADEON_WRITE(RADEON_BUS_CNTL, tmp);
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS400) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740)) {
+		/* rs400, rs690/rs740 */
+		tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RS400_BUS_MASTER_DIS;
+		RADEON_WRITE(RADEON_BUS_CNTL, tmp);
+	} else if (!(((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RV380) ||
+		    ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_R423))) {
+		/* r1xx, r2xx, r300, r(v)350, r420/r481, rs480 */
+		tmp = RADEON_READ(RADEON_BUS_CNTL) & ~RADEON_BUS_MASTER_DIS;
+		RADEON_WRITE(RADEON_BUS_CNTL, tmp);
+	} /* PCIE cards appear not to need this */
 
 	dev_priv->sarea_priv->last_frame = dev_priv->scratch[0] = 0;
 	RADEON_WRITE(RADEON_LAST_FRAME_REG, dev_priv->sarea_priv->last_frame);
@@ -674,6 +691,9 @@ static void radeon_test_writeback(drm_radeon_private_t * dev_priv)
 {
 	u32 tmp;
 
+	/* Start with assuming that writeback doesn't work */
+	dev_priv->writeback_works = 0;
+
 	/* Writeback doesn't seem to work everywhere, test it here and possibly
 	 * enable it if it appears to work
 	 */
@@ -719,7 +739,8 @@ static void radeon_set_igpgart(drm_radeon_private_t * dev_priv, int on)
 			  dev_priv->gart_size);
 
 		temp = IGP_READ_MCIND(dev_priv, RS480_MC_MISC_CNTL);
-		if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)
+		if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+		    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))
 			IGP_WRITE_MCIND(RS480_MC_MISC_CNTL, (RS480_GART_INDEX_REG_EN |
 							     RS690_BLOCK_GFX_D3_EN));
 		else
@@ -812,6 +833,7 @@ static void radeon_set_pcigart(drm_radeon_private_t * dev_priv, int on)
 	u32 tmp;
 
 	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740) ||
 	    (dev_priv->flags & RADEON_IS_IGPGART)) {
 		radeon_set_igpgart(dev_priv, on);
 		return;
@@ -1286,7 +1308,7 @@ static int radeon_do_resume_cp(struct drm_device * dev)
 	radeon_cp_init_ring_buffer(dev, dev_priv);
 
 	radeon_do_engine_reset(dev);
-	radeon_enable_interrupt(dev);
+	radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
 
 	DRM_DEBUG("radeon_do_resume_cp() complete\n");
 
@@ -1708,6 +1730,7 @@ int radeon_driver_load(struct drm_device *dev, unsigned long flags)
 	case CHIP_R300:
 	case CHIP_R350:
 	case CHIP_R420:
+	case CHIP_R423:
 	case CHIP_RV410:
 	case CHIP_RV515:
 	case CHIP_R520:

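Most of the radeon_cp.c churn is mechanical: every `== CHIP_RS690` test grows an `|| == CHIP_RS740` twin, since RS740 shares the RS690 MC/GART layout. The repeated masking idiom is worth restating once; a helper along these lines (hypothetical, the driver open-codes the comparison each time) is all those hunks amount to:

static inline int radeon_is_rs69x(const drm_radeon_private_t *dev_priv)
{
	u32 family = dev_priv->flags & RADEON_FAMILY_MASK;

	return family == CHIP_RS690 || family == CHIP_RS740;
}

With it, e.g. IGP_READ_MCIND() reduces to `radeon_is_rs69x(dev_priv) ? RS690_READ_MCIND(dev_priv, addr) : RS480_READ_MCIND(dev_priv, addr)`.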
+ 28 - 4
drivers/gpu/drm/radeon/radeon_drv.c

@@ -52,6 +52,28 @@ static int dri_library_name(struct drm_device *dev, char *buf)
 		        "r300"));
 }
 
+static int radeon_suspend(struct drm_device *dev, pm_message_t state)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	/* Disable *all* interrupts */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
+		RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
+	RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
+	return 0;
+}
+
+static int radeon_resume(struct drm_device *dev)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	/* Restore interrupt registers */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
+		RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
+	RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
+	return 0;
+}
+
 static struct pci_device_id pciidlist[] = {
 	radeon_PCI_IDS
 };
@@ -59,8 +81,7 @@ static struct pci_device_id pciidlist[] = {
 static struct drm_driver driver = {
 	.driver_features =
 	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_PCI_DMA | DRIVER_SG |
-	    DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED |
-	    DRIVER_IRQ_VBL | DRIVER_IRQ_VBL2,
+	    DRIVER_HAVE_IRQ | DRIVER_HAVE_DMA | DRIVER_IRQ_SHARED,
 	.dev_priv_size = sizeof(drm_radeon_buf_priv_t),
 	.load = radeon_driver_load,
 	.firstopen = radeon_driver_firstopen,
@@ -69,8 +90,11 @@ static struct drm_driver driver = {
 	.postclose = radeon_driver_postclose,
 	.lastclose = radeon_driver_lastclose,
 	.unload = radeon_driver_unload,
-	.vblank_wait = radeon_driver_vblank_wait,
-	.vblank_wait2 = radeon_driver_vblank_wait2,
+	.suspend = radeon_suspend,
+	.resume = radeon_resume,
+	.get_vblank_counter = radeon_get_vblank_counter,
+	.enable_vblank = radeon_enable_vblank,
+	.disable_vblank = radeon_disable_vblank,
 	.dri_library_name = dri_library_name,
 	.irq_preinstall = radeon_driver_irq_preinstall,
 	.irq_postinstall = radeon_driver_irq_postinstall,

+ 46 - 11
drivers/gpu/drm/radeon/radeon_drv.h

@@ -122,9 +122,12 @@ enum radeon_family {
 	CHIP_RV350,
 	CHIP_RV380,
 	CHIP_R420,
+	CHIP_R423,
 	CHIP_RV410,
+	CHIP_RS400,
 	CHIP_RS480,
 	CHIP_RS690,
+	CHIP_RS740,
 	CHIP_RV515,
 	CHIP_R520,
 	CHIP_RV530,
@@ -378,17 +381,17 @@ extern void radeon_mem_release(struct drm_file *file_priv,
 			       struct mem_block *heap);
 
 				/* radeon_irq.c */
+extern void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state);
 extern int radeon_irq_emit(struct drm_device *dev, void *data, struct drm_file *file_priv);
 extern int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_priv);
 
 extern void radeon_do_release(struct drm_device * dev);
-extern int radeon_driver_vblank_wait(struct drm_device * dev,
-				     unsigned int *sequence);
-extern int radeon_driver_vblank_wait2(struct drm_device * dev,
-				      unsigned int *sequence);
+extern u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc);
+extern int radeon_enable_vblank(struct drm_device *dev, int crtc);
+extern void radeon_disable_vblank(struct drm_device *dev, int crtc);
 extern irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS);
 extern void radeon_driver_irq_preinstall(struct drm_device * dev);
-extern void radeon_driver_irq_postinstall(struct drm_device * dev);
+extern int radeon_driver_irq_postinstall(struct drm_device *dev);
 extern void radeon_driver_irq_uninstall(struct drm_device * dev);
 extern void radeon_enable_interrupt(struct drm_device *dev);
 extern int radeon_vblank_crtc_get(struct drm_device *dev);
@@ -397,19 +400,22 @@ extern int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value);
 extern int radeon_driver_load(struct drm_device *dev, unsigned long flags);
 extern int radeon_driver_unload(struct drm_device *dev);
 extern int radeon_driver_firstopen(struct drm_device *dev);
-extern void radeon_driver_preclose(struct drm_device * dev, struct drm_file *file_priv);
-extern void radeon_driver_postclose(struct drm_device * dev, struct drm_file * filp);
+extern void radeon_driver_preclose(struct drm_device *dev,
+				   struct drm_file *file_priv);
+extern void radeon_driver_postclose(struct drm_device *dev,
+				    struct drm_file *file_priv);
 extern void radeon_driver_lastclose(struct drm_device * dev);
-extern int radeon_driver_open(struct drm_device * dev, struct drm_file * filp_priv);
+extern int radeon_driver_open(struct drm_device *dev,
+			      struct drm_file *file_priv);
 extern long radeon_compat_ioctl(struct file *filp, unsigned int cmd,
 				unsigned long arg);
 
 /* r300_cmdbuf.c */
 extern void r300_init_reg_flags(struct drm_device *dev);
 
-extern int r300_do_cp_cmdbuf(struct drm_device * dev,
+extern int r300_do_cp_cmdbuf(struct drm_device *dev,
 			     struct drm_file *file_priv,
-			     drm_radeon_kcmd_buffer_t * cmdbuf);
+			     drm_radeon_kcmd_buffer_t *cmdbuf);
 
 /* Flags for stats.boxes
  */
@@ -434,8 +440,31 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
 #	define RADEON_SCISSOR_1_ENABLE		(1 << 29)
 #	define RADEON_SCISSOR_2_ENABLE		(1 << 30)
 
+/*
+ * PCIE radeons (rv370/rv380, rv410, r423/r430/r480, r5xx)
+ * don't have an explicit bus mastering disable bit.  It's handled
+ * by the PCI D-states.  PMI_BM_DIS disables D-state bus master
+ * handling, not bus mastering itself.
+ */
 #define RADEON_BUS_CNTL			0x0030
+/* r1xx, r2xx, r300, r(v)350, r420/r481, rs480 */
 #	define RADEON_BUS_MASTER_DIS		(1 << 6)
+/* rs400, rs690/rs740 */
+#	define RS400_BUS_MASTER_DIS		(1 << 14)
+#	define RS400_MSI_REARM		        (1 << 20)
+/* see RS480_MSI_REARM in AIC_CNTL for rs480 */
+
+#define RADEON_BUS_CNTL1		0x0034
+#	define RADEON_PMI_BM_DIS		(1 << 2)
+#	define RADEON_PMI_INT_DIS		(1 << 3)
+
+#define RV370_BUS_CNTL			0x004c
+#	define RV370_PMI_BM_DIS		        (1 << 5)
+#	define RV370_PMI_INT_DIS		(1 << 6)
+
+#define RADEON_MSI_REARM_EN		0x0160
+/* rv370/rv380, rv410, r423/r430/r480, r5xx */
+#	define RV370_MSI_REARM_EN		(1 << 0)
 
 #define RADEON_CLOCK_CNTL_DATA		0x000c
 #	define RADEON_PLL_WR_EN			(1 << 7)
@@ -623,6 +652,7 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
 #	define RADEON_SW_INT_TEST		(1 << 25)
 #	define RADEON_SW_INT_TEST_ACK		(1 << 25)
 #	define RADEON_SW_INT_FIRE		(1 << 26)
+#       define R500_DISPLAY_INT_STATUS          (1 << 0)
 
 #define RADEON_HOST_PATH_CNTL		0x0130
 #	define RADEON_HDP_SOFT_RESET		(1 << 26)
@@ -907,6 +937,7 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
 
 #define RADEON_AIC_CNTL			0x01d0
 #	define RADEON_PCIGART_TRANSLATE_EN	(1 << 0)
+#	define RS480_MSI_REARM	                (1 << 3)
 #define RADEON_AIC_STAT			0x01d4
 #define RADEON_AIC_PT_BASE		0x01d8
 #define RADEON_AIC_LO_ADDR		0x01dc
@@ -1116,6 +1147,9 @@ extern int r300_do_cp_cmdbuf(struct drm_device * dev,
 
 #define R200_VAP_PVS_CNTL_1               0x22D0
 
+#define RADEON_CRTC_CRNT_FRAME 0x0214
+#define RADEON_CRTC2_CRNT_FRAME 0x0314
+
 #define R500_D1CRTC_STATUS 0x609c
 #define R500_D2CRTC_STATUS 0x689c
 #define R500_CRTC_V_BLANK (1<<0)
@@ -1200,7 +1234,8 @@ do {								\
 
 #define IGP_WRITE_MCIND(addr, val)				\
 do {									\
-	if ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690)       \
+	if (((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS690) ||   \
+	    ((dev_priv->flags & RADEON_FAMILY_MASK) == CHIP_RS740))      \
 		RS690_WRITE_MCIND(addr, val);				\
 	else								\
 		RS480_WRITE_MCIND(addr, val);				\

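The new CRTC_CRNT_FRAME and D1/D2CRTC_STATUS defines are what let radeon report a true hardware frame count instead of counting interrupts. As a quick illustration of the status register, a probe like the following (radeon_in_vblank is hypothetical; only the register offsets and the R500_CRTC_V_BLANK bit come from the header above) would report whether an r5xx CRTC is currently inside its blanking interval:

static int radeon_in_vblank(drm_radeon_private_t *dev_priv, int crtc)
{
	u32 reg = crtc ? R500_D2CRTC_STATUS : R500_D1CRTC_STATUS;

	return (RADEON_READ(reg) & R500_CRTC_V_BLANK) != 0;
}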
+ 168 - 100
drivers/gpu/drm/radeon/radeon_irq.c

@@ -27,7 +27,7 @@
  *
  * Authors:
  *    Keith Whitwell <keith@tungstengraphics.com>
- *    Michel Dänzer <michel@daenzer.net>
+ *    Michel Dänzer <michel@daenzer.net>
  */
 
 #include "drmP.h"
@@ -35,12 +35,128 @@
 #include "radeon_drm.h"
 #include "radeon_drv.h"
 
-static __inline__ u32 radeon_acknowledge_irqs(drm_radeon_private_t * dev_priv,
-					      u32 mask)
+void radeon_irq_set_state(struct drm_device *dev, u32 mask, int state)
 {
-	u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS) & mask;
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	if (state)
+		dev_priv->irq_enable_reg |= mask;
+	else
+		dev_priv->irq_enable_reg &= ~mask;
+
+	RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
+}
+
+static void r500_vbl_irq_set_state(struct drm_device *dev, u32 mask, int state)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	if (state)
+		dev_priv->r500_disp_irq_reg |= mask;
+	else
+		dev_priv->r500_disp_irq_reg &= ~mask;
+
+	RADEON_WRITE(R500_DxMODE_INT_MASK, dev_priv->r500_disp_irq_reg);
+}
+
+int radeon_enable_vblank(struct drm_device *dev, int crtc)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
+		switch (crtc) {
+		case 0:
+			r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 1);
+			break;
+		case 1:
+			r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 1);
+			break;
+		default:
+			DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
+				  crtc);
+			return -EINVAL;
+		}
+	} else {
+		switch (crtc) {
+		case 0:
+			radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 1);
+			break;
+		case 1:
+			radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 1);
+			break;
+		default:
+			DRM_ERROR("tried to enable vblank on non-existent crtc %d\n",
+				  crtc);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+void radeon_disable_vblank(struct drm_device *dev, int crtc)
+{
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
+		switch (crtc) {
+		case 0:
+			r500_vbl_irq_set_state(dev, R500_D1MODE_INT_MASK, 0);
+			break;
+		case 1:
+			r500_vbl_irq_set_state(dev, R500_D2MODE_INT_MASK, 0);
+			break;
+		default:
+			DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
+				  crtc);
+			break;
+		}
+	} else {
+		switch (crtc) {
+		case 0:
+			radeon_irq_set_state(dev, RADEON_CRTC_VBLANK_MASK, 0);
+			break;
+		case 1:
+			radeon_irq_set_state(dev, RADEON_CRTC2_VBLANK_MASK, 0);
+			break;
+		default:
+			DRM_ERROR("tried to disable vblank on non-existent crtc %d\n",
+				  crtc);
+			break;
+		}
+	}
+}
+
+static inline u32 radeon_acknowledge_irqs(drm_radeon_private_t *dev_priv, u32 *r500_disp_int)
+{
+	u32 irqs = RADEON_READ(RADEON_GEN_INT_STATUS);
+	u32 irq_mask = RADEON_SW_INT_TEST;
+
+	*r500_disp_int = 0;
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
+		/* vbl interrupts in a different place */
+
+		if (irqs & R500_DISPLAY_INT_STATUS) {
+			/* if a display interrupt */
+			u32 disp_irq;
+
+			disp_irq = RADEON_READ(R500_DISP_INTERRUPT_STATUS);
+
+			*r500_disp_int = disp_irq;
+			if (disp_irq & R500_D1_VBLANK_INTERRUPT)
+				RADEON_WRITE(R500_D1MODE_VBLANK_STATUS, R500_VBLANK_ACK);
+			if (disp_irq & R500_D2_VBLANK_INTERRUPT)
+				RADEON_WRITE(R500_D2MODE_VBLANK_STATUS, R500_VBLANK_ACK);
+		}
+		irq_mask |= R500_DISPLAY_INT_STATUS;
+	} else
+		irq_mask |= RADEON_CRTC_VBLANK_STAT | RADEON_CRTC2_VBLANK_STAT;
+
+	irqs &=	irq_mask;
+
 	if (irqs)
 		RADEON_WRITE(RADEON_GEN_INT_STATUS, irqs);
+
 	return irqs;
 }
 
@@ -68,44 +184,33 @@ irqreturn_t radeon_driver_irq_handler(DRM_IRQ_ARGS)
 	drm_radeon_private_t *dev_priv =
 	    (drm_radeon_private_t *) dev->dev_private;
 	u32 stat;
+	u32 r500_disp_int;
 
 	/* Only consider the bits we're interested in - others could be used
 	 * outside the DRM
 	 */
-	stat = radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK |
-						  RADEON_CRTC_VBLANK_STAT |
-						  RADEON_CRTC2_VBLANK_STAT));
+	stat = radeon_acknowledge_irqs(dev_priv, &r500_disp_int);
 	if (!stat)
 		return IRQ_NONE;
 
 	stat &= dev_priv->irq_enable_reg;
 
 	/* SW interrupt */
-	if (stat & RADEON_SW_INT_TEST) {
+	if (stat & RADEON_SW_INT_TEST)
 		DRM_WAKEUP(&dev_priv->swi_queue);
-	}
 
 	/* VBLANK interrupt */
-	if (stat & (RADEON_CRTC_VBLANK_STAT|RADEON_CRTC2_VBLANK_STAT)) {
-		int vblank_crtc = dev_priv->vblank_crtc;
-
-		if ((vblank_crtc &
-		     (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) ==
-		    (DRM_RADEON_VBLANK_CRTC1 | DRM_RADEON_VBLANK_CRTC2)) {
-			if (stat & RADEON_CRTC_VBLANK_STAT)
-				atomic_inc(&dev->vbl_received);
-			if (stat & RADEON_CRTC2_VBLANK_STAT)
-				atomic_inc(&dev->vbl_received2);
-		} else if (((stat & RADEON_CRTC_VBLANK_STAT) &&
-			   (vblank_crtc & DRM_RADEON_VBLANK_CRTC1)) ||
-			   ((stat & RADEON_CRTC2_VBLANK_STAT) &&
-			    (vblank_crtc & DRM_RADEON_VBLANK_CRTC2)))
-			atomic_inc(&dev->vbl_received);
-
-		DRM_WAKEUP(&dev->vbl_queue);
-		drm_vbl_send_signals(dev);
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
+		if (r500_disp_int & R500_D1_VBLANK_INTERRUPT)
+			drm_handle_vblank(dev, 0);
+		if (r500_disp_int & R500_D2_VBLANK_INTERRUPT)
+			drm_handle_vblank(dev, 1);
+	} else {
+		if (stat & RADEON_CRTC_VBLANK_STAT)
+			drm_handle_vblank(dev, 0);
+		if (stat & RADEON_CRTC2_VBLANK_STAT)
+			drm_handle_vblank(dev, 1);
 	}
-
 	return IRQ_HANDLED;
 }
 
@@ -144,54 +249,31 @@ static int radeon_wait_irq(struct drm_device * dev, int swi_nr)
 	return ret;
 }
 
-static int radeon_driver_vblank_do_wait(struct drm_device * dev,
-					unsigned int *sequence, int crtc)
+u32 radeon_get_vblank_counter(struct drm_device *dev, int crtc)
 {
-	drm_radeon_private_t *dev_priv =
-	    (drm_radeon_private_t *) dev->dev_private;
-	unsigned int cur_vblank;
-	int ret = 0;
-	int ack = 0;
-	atomic_t *counter;
+	drm_radeon_private_t *dev_priv = dev->dev_private;
+
 	if (!dev_priv) {
 		DRM_ERROR("called with no initialization\n");
 		return -EINVAL;
 	}
 
-	if (crtc == DRM_RADEON_VBLANK_CRTC1) {
-		counter = &dev->vbl_received;
-		ack |= RADEON_CRTC_VBLANK_STAT;
-	} else if (crtc == DRM_RADEON_VBLANK_CRTC2) {
-		counter = &dev->vbl_received2;
-		ack |= RADEON_CRTC2_VBLANK_STAT;
-	} else
+	if (crtc < 0 || crtc > 1) {
+		DRM_ERROR("Invalid crtc %d\n", crtc);
 		return -EINVAL;
+	}
 
-	radeon_acknowledge_irqs(dev_priv, ack);
-
-	dev_priv->stats.boxes |= RADEON_BOX_WAIT_IDLE;
-
-	/* Assume that the user has missed the current sequence number
-	 * by about a day rather than she wants to wait for years
-	 * using vertical blanks...
-	 */
-	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
-		    (((cur_vblank = atomic_read(counter))
-		      - *sequence) <= (1 << 23)));
-
-	*sequence = cur_vblank;
-
-	return ret;
-}
-
-int radeon_driver_vblank_wait(struct drm_device *dev, unsigned int *sequence)
-{
-	return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC1);
-}
-
-int radeon_driver_vblank_wait2(struct drm_device *dev, unsigned int *sequence)
-{
-	return radeon_driver_vblank_do_wait(dev, sequence, DRM_RADEON_VBLANK_CRTC2);
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690) {
+		if (crtc == 0)
+			return RADEON_READ(R500_D1CRTC_FRAME_COUNT);
+		else
+			return RADEON_READ(R500_D2CRTC_FRAME_COUNT);
+	} else {
+		if (crtc == 0)
+			return RADEON_READ(RADEON_CRTC_CRNT_FRAME);
+		else
+			return RADEON_READ(RADEON_CRTC2_CRNT_FRAME);
+	}
 }
 
 /* Needs the lock as it touches the ring.
@@ -234,46 +316,41 @@ int radeon_irq_wait(struct drm_device *dev, void *data, struct drm_file *file_pr
 	return radeon_wait_irq(dev, irqwait->irq_seq);
 }
 
-void radeon_enable_interrupt(struct drm_device *dev)
-{
-	drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
-
-	dev_priv->irq_enable_reg = RADEON_SW_INT_ENABLE;
-	if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC1)
-		dev_priv->irq_enable_reg |= RADEON_CRTC_VBLANK_MASK;
-
-	if (dev_priv->vblank_crtc & DRM_RADEON_VBLANK_CRTC2)
-		dev_priv->irq_enable_reg |= RADEON_CRTC2_VBLANK_MASK;
-
-	RADEON_WRITE(RADEON_GEN_INT_CNTL, dev_priv->irq_enable_reg);
-	dev_priv->irq_enabled = 1;
-}
-
 /* drm_dma.h hooks
 */
 void radeon_driver_irq_preinstall(struct drm_device * dev)
 {
 	drm_radeon_private_t *dev_priv =
 	    (drm_radeon_private_t *) dev->dev_private;
+	u32 dummy;
 
 	/* Disable *all* interrupts */
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
+		RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
 	RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
 
 	/* Clear bits if they're already high */
-	radeon_acknowledge_irqs(dev_priv, (RADEON_SW_INT_TEST_ACK |
-					   RADEON_CRTC_VBLANK_STAT |
-					   RADEON_CRTC2_VBLANK_STAT));
+	radeon_acknowledge_irqs(dev_priv, &dummy);
 }
 
-void radeon_driver_irq_postinstall(struct drm_device * dev)
+int radeon_driver_irq_postinstall(struct drm_device *dev)
 {
 	drm_radeon_private_t *dev_priv =
 	    (drm_radeon_private_t *) dev->dev_private;
+	int ret;
 
 	atomic_set(&dev_priv->swi_emitted, 0);
 	DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
 
-	radeon_enable_interrupt(dev);
+	ret = drm_vblank_init(dev, 2);
+	if (ret)
+		return ret;
+
+	dev->max_vblank_count = 0x001fffff;
+
+	radeon_irq_set_state(dev, RADEON_SW_INT_ENABLE, 1);
+
+	return 0;
 }
 
 void radeon_driver_irq_uninstall(struct drm_device * dev)
@@ -285,6 +362,8 @@ void radeon_driver_irq_uninstall(struct drm_device * dev)
 
 	dev_priv->irq_enabled = 0;
 
+	if ((dev_priv->flags & RADEON_FAMILY_MASK) >= CHIP_RS690)
+		RADEON_WRITE(R500_DxMODE_INT_MASK, 0);
 	/* Disable *all* interrupts */
 	RADEON_WRITE(RADEON_GEN_INT_CNTL, 0);
 }
@@ -293,18 +372,8 @@ void radeon_driver_irq_uninstall(struct drm_device * dev)
 int radeon_vblank_crtc_get(struct drm_device *dev)
 {
 	drm_radeon_private_t *dev_priv = (drm_radeon_private_t *) dev->dev_private;
-	u32 flag;
-	u32 value;
-
-	flag = RADEON_READ(RADEON_GEN_INT_CNTL);
-	value = 0;
-
-	if (flag & RADEON_CRTC_VBLANK_MASK)
-		value |= DRM_RADEON_VBLANK_CRTC1;
 
-	if (flag & RADEON_CRTC2_VBLANK_MASK)
-		value |= DRM_RADEON_VBLANK_CRTC2;
-	return value;
+	return dev_priv->vblank_crtc;
 }
 
 int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value)
@@ -315,6 +384,5 @@ int radeon_vblank_crtc_set(struct drm_device *dev, int64_t value)
 		return -EINVAL;
 	}
 	dev_priv->vblank_crtc = (unsigned int)value;
-	radeon_enable_interrupt(dev);
 	return 0;
 }

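Two details in radeon_driver_irq_postinstall() carry the weight here: drm_vblank_init(dev, 2) registers both CRTCs with the core, and max_vblank_count tells the core that the hardware frame counter wraps at 21 bits, so vblanks missed while the interrupt was off can still be credited. The reconciliation is plain modular arithmetic; a sketch of the idea (the arithmetic only, not the DRM core's actual function):

static u32 vblank_delta(u32 cur, u32 last, u32 max_vblank_count)
{
	/* counts missed frames correctly across a counter wrap,
	 * e.g. cur = 0x000005, last = 0x1ffffa -> delta = 11
	 */
	return (cur - last) & max_vblank_count;
}

This only works because 0x001fffff is of the form 2^n - 1, which is why the value is set explicitly rather than left at the default.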
+ 1 - 1
drivers/gpu/drm/radeon/radeon_state.c

@@ -2997,7 +2997,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
 		value = GET_SCRATCH(2);
 		break;
 	case RADEON_PARAM_IRQ_NR:
-		value = dev->irq;
+		value = drm_dev_to_irq(dev);
 		break;
 	case RADEON_PARAM_GART_BASE:
 		value = dev_priv->gart_vm_start;

+ 5 - 5
drivers/gpu/drm/sis/sis_mm.c

@@ -41,7 +41,7 @@
 #define AGP_TYPE 1
 
 
-#if defined(CONFIG_FB_SIS)
+#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
 /* fb management via fb device */
 
 #define SIS_MM_ALIGN_SHIFT 0
@@ -57,7 +57,7 @@ static void *sis_sman_mm_allocate(void *private, unsigned long size,
 	if (req.size == 0)
 		return NULL;
 	else
-		return (void *)~req.offset;
+		return (void *)(unsigned long)~req.offset;
 }
 
 static void sis_sman_mm_free(void *private, void *ref)
@@ -75,12 +75,12 @@ static unsigned long sis_sman_mm_offset(void *private, void *ref)
 	return ~((unsigned long)ref);
 }
 
-#else /* CONFIG_FB_SIS */
+#else /* CONFIG_FB_SIS[_MODULE] */
 
 #define SIS_MM_ALIGN_SHIFT 4
 #define SIS_MM_ALIGN_MASK ( (1 << SIS_MM_ALIGN_SHIFT) - 1)
 
-#endif /* CONFIG_FB_SIS */
+#endif /* CONFIG_FB_SIS[_MODULE] */
 
 static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file_priv)
 {
@@ -89,7 +89,7 @@ static int sis_fb_init(struct drm_device *dev, void *data, struct drm_file *file
 	int ret;
 
 	mutex_lock(&dev->struct_mutex);
-#if defined(CONFIG_FB_SIS)
+#if defined(CONFIG_FB_SIS) || defined(CONFIG_FB_SIS_MODULE)
 	{
 		struct drm_sman_mm sman_mm;
 		sman_mm.private = (void *)0xFFFFFFFF;

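The sis_mm.c changes are build fixes rather than behavior changes: the #if now also covers sisfb built as a module, and the pointer cast is widened for 64-bit targets. req.offset is 32 bits wide there, and casting a 32-bit integer straight to void * draws -Wint-to-pointer-cast from gcc on LP64, so the value goes through unsigned long (pointer-sized) first. A standalone illustration, not driver code:

#include <stdint.h>

static void *as_handle_bad(uint32_t offset)
{
	return (void *)~offset;			/* warns on LP64: different size */
}

static void *as_handle_good(uint32_t offset)
{
	return (void *)(unsigned long)~offset;	/* widened first: clean */
}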
+ 14 - 12
drivers/gpu/drm/via/via_drv.c

@@ -40,11 +40,13 @@ static struct pci_device_id pciidlist[] = {
 static struct drm_driver driver = {
 	.driver_features =
 	    DRIVER_USE_AGP | DRIVER_USE_MTRR | DRIVER_HAVE_IRQ |
-	    DRIVER_IRQ_SHARED | DRIVER_IRQ_VBL,
+	    DRIVER_IRQ_SHARED,
 	.load = via_driver_load,
 	.unload = via_driver_unload,
 	.context_dtor = via_final_context,
-	.vblank_wait = via_driver_vblank_wait,
+	.get_vblank_counter = via_get_vblank_counter,
+	.enable_vblank = via_enable_vblank,
+	.disable_vblank = via_disable_vblank,
 	.irq_preinstall = via_driver_irq_preinstall,
 	.irq_postinstall = via_driver_irq_postinstall,
 	.irq_uninstall = via_driver_irq_uninstall,
@@ -59,17 +61,17 @@ static struct drm_driver driver = {
 	.get_reg_ofs = drm_core_get_reg_ofs,
 	.ioctls = via_ioctls,
 	.fops = {
-		 .owner = THIS_MODULE,
-		 .open = drm_open,
-		 .release = drm_release,
-		 .ioctl = drm_ioctl,
-		 .mmap = drm_mmap,
-		 .poll = drm_poll,
-		 .fasync = drm_fasync,
-	},
+		.owner = THIS_MODULE,
+		.open = drm_open,
+		.release = drm_release,
+		.ioctl = drm_ioctl,
+		.mmap = drm_mmap,
+		.poll = drm_poll,
+		.fasync = drm_fasync,
+		},
 	.pci_driver = {
-		 .name = DRIVER_NAME,
-		 .id_table = pciidlist,
+		.name = DRIVER_NAME,
+		.id_table = pciidlist,
 	},
 
 	.name = DRIVER_NAME,

+ 10 - 6
drivers/gpu/drm/via/via_drv.h

@@ -75,6 +75,7 @@ typedef struct drm_via_private {
 	struct timeval last_vblank;
 	int last_vblank_valid;
 	unsigned usec_per_vblank;
+	atomic_t vbl_received;
 	drm_via_state_t hc_state;
 	char pci_buf[VIA_PCI_BUF_SIZE];
 	const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
@@ -130,21 +131,24 @@ extern int via_init_context(struct drm_device * dev, int context);
 extern int via_final_context(struct drm_device * dev, int context);
 
 extern int via_do_cleanup_map(struct drm_device * dev);
-extern int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence);
+extern u32 via_get_vblank_counter(struct drm_device *dev, int crtc);
+extern int via_enable_vblank(struct drm_device *dev, int crtc);
+extern void via_disable_vblank(struct drm_device *dev, int crtc);
 
 extern irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS);
 extern void via_driver_irq_preinstall(struct drm_device * dev);
-extern void via_driver_irq_postinstall(struct drm_device * dev);
+extern int via_driver_irq_postinstall(struct drm_device *dev);
 extern void via_driver_irq_uninstall(struct drm_device * dev);
 
 extern int via_dma_cleanup(struct drm_device * dev);
 extern void via_init_command_verifier(void);
 extern int via_driver_dma_quiescent(struct drm_device * dev);
-extern void via_init_futex(drm_via_private_t * dev_priv);
-extern void via_cleanup_futex(drm_via_private_t * dev_priv);
-extern void via_release_futex(drm_via_private_t * dev_priv, int context);
+extern void via_init_futex(drm_via_private_t *dev_priv);
+extern void via_cleanup_futex(drm_via_private_t *dev_priv);
+extern void via_release_futex(drm_via_private_t *dev_priv, int context);
 
-extern void via_reclaim_buffers_locked(struct drm_device *dev, struct drm_file *file_priv);
+extern void via_reclaim_buffers_locked(struct drm_device *dev,
+				       struct drm_file *file_priv);
 extern void via_lastclose(struct drm_device *dev);
 
 extern void via_dmablit_handler(struct drm_device *dev, int engine, int from_irq);

+ 59 - 46
drivers/gpu/drm/via/via_irq.c

@@ -43,7 +43,7 @@
 #define VIA_REG_INTERRUPT       0x200
 
 /* VIA_REG_INTERRUPT */
-#define VIA_IRQ_GLOBAL          (1 << 31)
+#define VIA_IRQ_GLOBAL	  (1 << 31)
 #define VIA_IRQ_VBLANK_ENABLE   (1 << 19)
 #define VIA_IRQ_VBLANK_PENDING  (1 << 3)
 #define VIA_IRQ_HQV0_ENABLE     (1 << 11)
@@ -68,16 +68,15 @@
 
 static maskarray_t via_pro_group_a_irqs[] = {
 	{VIA_IRQ_HQV0_ENABLE, VIA_IRQ_HQV0_PENDING, 0x000003D0, 0x00008010,
-	 0x00000000},
+	 0x00000000 },
 	{VIA_IRQ_HQV1_ENABLE, VIA_IRQ_HQV1_PENDING, 0x000013D0, 0x00008010,
-	 0x00000000},
+	 0x00000000 },
 	{VIA_IRQ_DMA0_TD_ENABLE, VIA_IRQ_DMA0_TD_PENDING, VIA_PCI_DMA_CSR0,
 	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
 	{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
 	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008},
 };
-static int via_num_pro_group_a =
-    sizeof(via_pro_group_a_irqs) / sizeof(maskarray_t);
+static int via_num_pro_group_a = ARRAY_SIZE(via_pro_group_a_irqs);
 static int via_irqmap_pro_group_a[] = {0, 1, -1, 2, -1, 3};
 
 static maskarray_t via_unichrome_irqs[] = {
@@ -86,14 +85,24 @@ static maskarray_t via_unichrome_irqs[] = {
 	{VIA_IRQ_DMA1_TD_ENABLE, VIA_IRQ_DMA1_TD_PENDING, VIA_PCI_DMA_CSR1,
 	 VIA_DMA_CSR_TA | VIA_DMA_CSR_TD, 0x00000008}
 };
-static int via_num_unichrome = sizeof(via_unichrome_irqs) / sizeof(maskarray_t);
+static int via_num_unichrome = ARRAY_SIZE(via_unichrome_irqs);
 static int via_irqmap_unichrome[] = {-1, -1, -1, 0, -1, 1};
 
+
 static unsigned time_diff(struct timeval *now, struct timeval *then)
 {
 	return (now->tv_usec >= then->tv_usec) ?
-	    now->tv_usec - then->tv_usec :
-	    1000000 - (then->tv_usec - now->tv_usec);
+		now->tv_usec - then->tv_usec :
+		1000000 - (then->tv_usec - now->tv_usec);
+}
+
+u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
+{
+	drm_via_private_t *dev_priv = dev->dev_private;
+	if (crtc != 0)
+		return 0;
+
+	return atomic_read(&dev_priv->vbl_received);
 }
 
 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
@@ -108,23 +117,22 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
 
 	status = VIA_READ(VIA_REG_INTERRUPT);
 	if (status & VIA_IRQ_VBLANK_PENDING) {
-		atomic_inc(&dev->vbl_received);
-		if (!(atomic_read(&dev->vbl_received) & 0x0F)) {
+		atomic_inc(&dev_priv->vbl_received);
+		if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
 			do_gettimeofday(&cur_vblank);
 			if (dev_priv->last_vblank_valid) {
 				dev_priv->usec_per_vblank =
-				    time_diff(&cur_vblank,
-					      &dev_priv->last_vblank) >> 4;
+					time_diff(&cur_vblank,
+						  &dev_priv->last_vblank) >> 4;
 			}
 			dev_priv->last_vblank = cur_vblank;
 			dev_priv->last_vblank_valid = 1;
 		}
-		if (!(atomic_read(&dev->vbl_received) & 0xFF)) {
+		if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
 			DRM_DEBUG("US per vblank is: %u\n",
 				  dev_priv->usec_per_vblank);
 		}
-		DRM_WAKEUP(&dev->vbl_queue);
-		drm_vbl_send_signals(dev);
+		drm_handle_vblank(dev, 0);
 		handled = 1;
 	}
 
@@ -145,6 +153,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
 	/* Acknowledge interrupts */
 	VIA_WRITE(VIA_REG_INTERRUPT, status);
 
+
 	if (handled)
 		return IRQ_HANDLED;
 	else
@@ -163,31 +172,34 @@ static __inline__ void viadrv_acknowledge_irqs(drm_via_private_t * dev_priv)
 	}
 }
 
-int via_driver_vblank_wait(struct drm_device * dev, unsigned int *sequence)
+int via_enable_vblank(struct drm_device *dev, int crtc)
 {
-	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
-	unsigned int cur_vblank;
-	int ret = 0;
+	drm_via_private_t *dev_priv = dev->dev_private;
+	u32 status;
 
-	DRM_DEBUG("\n");
-	if (!dev_priv) {
-		DRM_ERROR("called with no initialization\n");
+	if (crtc != 0) {
+		DRM_ERROR("%s:  bad crtc %d\n", __func__, crtc);
 		return -EINVAL;
 	}
 
-	viadrv_acknowledge_irqs(dev_priv);
+	status = VIA_READ(VIA_REG_INTERRUPT);
+	VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_VBLANK_ENABLE);
+
+	VIA_WRITE8(0x83d4, 0x11);
+	VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
 
-	/* Assume that the user has missed the current sequence number
-	 * by about a day rather than she wants to wait for years
-	 * using vertical blanks...
-	 */
+	return 0;
+}
+
+void via_disable_vblank(struct drm_device *dev, int crtc)
+{
+	drm_via_private_t *dev_priv = dev->dev_private;
 
-	DRM_WAIT_ON(ret, dev->vbl_queue, 3 * DRM_HZ,
-		    (((cur_vblank = atomic_read(&dev->vbl_received)) -
-		      *sequence) <= (1 << 23)));
+	VIA_WRITE8(0x83d4, 0x11);
+	VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) & ~0x30);
 
-	*sequence = cur_vblank;
-	return ret;
+	if (crtc != 0)
+		DRM_ERROR("%s:  bad crtc %d\n", __func__, crtc);
 }
 
 static int
@@ -239,6 +251,7 @@ via_driver_irq_wait(struct drm_device * dev, unsigned int irq, int force_sequenc
 	return ret;
 }
 
+
 /*
  * drm_dma.h hooks
  */
@@ -292,23 +305,25 @@ void via_driver_irq_preinstall(struct drm_device * dev)
 	}
 }
 
-void via_driver_irq_postinstall(struct drm_device * dev)
+int via_driver_irq_postinstall(struct drm_device *dev)
 {
 	drm_via_private_t *dev_priv = (drm_via_private_t *) dev->dev_private;
 	u32 status;
 
-	DRM_DEBUG("\n");
-	if (dev_priv) {
-		status = VIA_READ(VIA_REG_INTERRUPT);
-		VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
-			  | dev_priv->irq_enable_mask);
+	DRM_DEBUG("via_driver_irq_postinstall\n");
+	if (!dev_priv)
+		return -EINVAL;
 
-		/* Some magic, oh for some data sheets ! */
+	drm_vblank_init(dev, 1);
+	status = VIA_READ(VIA_REG_INTERRUPT);
+	VIA_WRITE(VIA_REG_INTERRUPT, status | VIA_IRQ_GLOBAL
+		  | dev_priv->irq_enable_mask);
 
-		VIA_WRITE8(0x83d4, 0x11);
-		VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
+	/* Some magic, oh for some data sheets ! */
+	VIA_WRITE8(0x83d4, 0x11);
+	VIA_WRITE8(0x83d5, VIA_READ8(0x83d5) | 0x30);
 
-	}
+	return 0;
 }
 
 void via_driver_irq_uninstall(struct drm_device * dev)
@@ -339,9 +354,6 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
 	drm_via_irq_t *cur_irq = dev_priv->via_irqs;
 	int force_sequence;
 
-	if (!dev->irq)
-		return -EINVAL;
-
 	if (irqwait->request.irq >= dev_priv->num_irqs) {
 		DRM_ERROR("Trying to wait on unknown irq %d\n",
 			  irqwait->request.irq);
@@ -352,7 +364,8 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
 
 	switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
 	case VIA_IRQ_RELATIVE:
-		irqwait->request.sequence += atomic_read(&cur_irq->irq_received);
+		irqwait->request.sequence +=
+			atomic_read(&cur_irq->irq_received);
 		irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
 	case VIA_IRQ_ABSOLUTE:
 		break;

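Beyond moving the counter into dev_priv, the VIA handler keeps its refresh-period estimate: one timestamp every 16 vblanks, with the elapsed window divided by 16 via the >> 4. Restated as a standalone sketch (the struct and function names are made up; time_diff() and do_gettimeofday() are the ones used in the handler above):

struct vbl_estimator {
	struct timeval last;		/* start of the 16-frame window */
	int valid;
	unsigned usec_per_vblank;
};

static void vbl_sample(struct vbl_estimator *e, unsigned vbl_count)
{
	struct timeval now;

	if (vbl_count & 0x0F)		/* act on every 16th vblank only */
		return;

	do_gettimeofday(&now);
	if (e->valid)
		e->usec_per_vblank = time_diff(&now, &e->last) >> 4;
	e->last = now;
	e->valid = 1;
}

At 60 Hz the 16-frame window is about 266667 us, so the estimate settles near 16667 us per vblank.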
+ 1 - 2
drivers/gpu/drm/via/via_mm.c

@@ -93,8 +93,7 @@ int via_final_context(struct drm_device *dev, int context)
 	/* Last context, perform cleanup */
 	if (dev->ctx_count == 1 && dev->dev_private) {
 		DRM_DEBUG("Last Context\n");
-		if (dev->irq)
-			drm_irq_uninstall(dev);
+		drm_irq_uninstall(dev);
 		via_cleanup_futex(dev_priv);
 		via_do_cleanup_map(dev);
 	}

Some files were not shown because too many files changed in this diff