
Merge branch 'drm-vmware-next' into drm-core-next

* drm-vmware-next: (26 commits)
  vmwgfx: Minor cleanups
  vmwgfx: Bump driver minor to advertise support for new ioctls.
  vmwgfx: Be more strict with fb depths when using screen objects
  vmwgfx: Handle device surface memory limit
  vmwgfx: Make sure we always have a user-space handle to use for objects that are backing kms framebuffers.
  vmwgfx: Optimize the command submission resource list
  vmwgfx: Fix up query processing
  vmwgfx: Allow reference and unreference of NULL fence objects.
  vmwgfx: minor dmabuf utilities cleanup
  vmwgfx: Disallow user space to send present and readback commands
  vmwgfx: Add present and readback ioctls
  vmwgfx: Place overlays in GMR area if we can
  vmwgfx: Drop 3D Legacy Display Unit support
  vmwgfx: Require HWV8 for 3d support
  vmwgfx: Add screen object support
  vmwgfx: Add dmabuf helper functions for pinning
  vmwgfx: Refactor common display unit functions to shared file
  vmwgfx: Expand the command checker to cover screen object commands
  vmwgfx: Break out dirty submission code
  vmwgfx: Break out execbuf command processing
  ...
Dave Airlie 13 years ago
parent
commit
5383053627

+ 1 - 0
drivers/gpu/drm/ttm/ttm_bo.c

@@ -1295,6 +1295,7 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
 
 	return ret;
 }
+EXPORT_SYMBOL(ttm_bo_create);
 
 static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
 					unsigned mem_type, bool allow_errors)
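Note on this hunk: exporting ttm_bo_create makes the helper callable from modular drivers such as vmwgfx.ko. A minimal, hedged sketch of a modular caller, assuming the TTM API of this kernel generation (size, placement and alignment values are illustrative only):

    /* Sketch: allocate and validate a small buffer object from a module. */
    struct ttm_buffer_object *bo;
    int ret;

    ret = ttm_bo_create(bdev, 4096, ttm_bo_type_device,
                        &vmw_vram_sys_placement, 0, 0,
                        false, NULL, &bo);
    if (unlikely(ret != 0))
            return ret;        /* allocation or first validation failed */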

+ 1 - 1
drivers/gpu/drm/vmwgfx/Makefile

@@ -5,6 +5,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
 	    vmwgfx_fb.o vmwgfx_ioctl.o vmwgfx_resource.o vmwgfx_buffer.o \
 	    vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
 	    vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
-	    vmwgfx_fence.o
+	    vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o

+ 181 - 78
drivers/gpu/drm/vmwgfx/svga3d_reg.h

@@ -57,7 +57,8 @@ typedef enum {
    SVGA3D_HWVERSION_WS6_B1    = SVGA3D_MAKE_HWVERSION(1, 1),
    SVGA3D_HWVERSION_FUSION_11 = SVGA3D_MAKE_HWVERSION(1, 4),
    SVGA3D_HWVERSION_WS65_B1   = SVGA3D_MAKE_HWVERSION(2, 0),
-   SVGA3D_HWVERSION_CURRENT   = SVGA3D_HWVERSION_WS65_B1,
+   SVGA3D_HWVERSION_WS8_B1    = SVGA3D_MAKE_HWVERSION(2, 1),
+   SVGA3D_HWVERSION_CURRENT   = SVGA3D_HWVERSION_WS8_B1,
 } SVGA3dHardwareVersion;
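The new SVGA3D_HWVERSION_WS8_B1 entry is what backs the "Require HWV8 for 3d support" patch in the list above. A hedged sketch of how a guest driver can gate 3D on it (the helper name is illustrative; fifo_mem is assumed to be the mapped FIFO):

    /* Sketch: refuse 3D unless the host advertises HWV8 (2.1) or newer. */
    static bool vmw_fifo_have_hwv8(u32 __iomem *fifo_mem)
    {
            u32 hwversion = ioread32(fifo_mem + SVGA_FIFO_3D_HWVERSION);

            return hwversion >= SVGA3D_HWVERSION_WS8_B1;
    }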
 
 /*
@@ -67,7 +68,8 @@ typedef enum {
 typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
 #define SVGA3D_NUM_CLIPPLANES                   6
 #define SVGA3D_MAX_SIMULTANEOUS_RENDER_TARGETS  8
-
+#define SVGA3D_MAX_CONTEXT_IDS                  256
+#define SVGA3D_MAX_SURFACE_IDS                  (32 * 1024)
 
 /*
  * Surface formats.
@@ -79,76 +81,91 @@ typedef uint32 SVGA3dBool; /* 32-bit Bool definition */
  */
 
 typedef enum SVGA3dSurfaceFormat {
-   SVGA3D_FORMAT_INVALID = 0,
+   SVGA3D_FORMAT_INVALID               = 0,
 
-   SVGA3D_X8R8G8B8       = 1,
-   SVGA3D_A8R8G8B8       = 2,
+   SVGA3D_X8R8G8B8                     = 1,
+   SVGA3D_A8R8G8B8                     = 2,
 
-   SVGA3D_R5G6B5         = 3,
-   SVGA3D_X1R5G5B5       = 4,
-   SVGA3D_A1R5G5B5       = 5,
-   SVGA3D_A4R4G4B4       = 6,
+   SVGA3D_R5G6B5                       = 3,
+   SVGA3D_X1R5G5B5                     = 4,
+   SVGA3D_A1R5G5B5                     = 5,
+   SVGA3D_A4R4G4B4                     = 6,
 
-   SVGA3D_Z_D32          = 7,
-   SVGA3D_Z_D16          = 8,
-   SVGA3D_Z_D24S8        = 9,
-   SVGA3D_Z_D15S1        = 10,
+   SVGA3D_Z_D32                        = 7,
+   SVGA3D_Z_D16                        = 8,
+   SVGA3D_Z_D24S8                      = 9,
+   SVGA3D_Z_D15S1                      = 10,
 
-   SVGA3D_LUMINANCE8            = 11,
-   SVGA3D_LUMINANCE4_ALPHA4     = 12,
-   SVGA3D_LUMINANCE16           = 13,
-   SVGA3D_LUMINANCE8_ALPHA8     = 14,
+   SVGA3D_LUMINANCE8                   = 11,
+   SVGA3D_LUMINANCE4_ALPHA4            = 12,
+   SVGA3D_LUMINANCE16                  = 13,
+   SVGA3D_LUMINANCE8_ALPHA8            = 14,
 
-   SVGA3D_DXT1           = 15,
-   SVGA3D_DXT2           = 16,
-   SVGA3D_DXT3           = 17,
-   SVGA3D_DXT4           = 18,
-   SVGA3D_DXT5           = 19,
+   SVGA3D_DXT1                         = 15,
+   SVGA3D_DXT2                         = 16,
+   SVGA3D_DXT3                         = 17,
+   SVGA3D_DXT4                         = 18,
+   SVGA3D_DXT5                         = 19,
 
-   SVGA3D_BUMPU8V8       = 20,
-   SVGA3D_BUMPL6V5U5     = 21,
-   SVGA3D_BUMPX8L8V8U8   = 22,
-   SVGA3D_BUMPL8V8U8     = 23,
+   SVGA3D_BUMPU8V8                     = 20,
+   SVGA3D_BUMPL6V5U5                   = 21,
+   SVGA3D_BUMPX8L8V8U8                 = 22,
+   SVGA3D_BUMPL8V8U8                   = 23,
 
-   SVGA3D_ARGB_S10E5     = 24,   /* 16-bit floating-point ARGB */
-   SVGA3D_ARGB_S23E8     = 25,   /* 32-bit floating-point ARGB */
+   SVGA3D_ARGB_S10E5                   = 24,   /* 16-bit floating-point ARGB */
+   SVGA3D_ARGB_S23E8                   = 25,   /* 32-bit floating-point ARGB */
 
-   SVGA3D_A2R10G10B10    = 26,
+   SVGA3D_A2R10G10B10                  = 26,
 
    /* signed formats */
-   SVGA3D_V8U8           = 27,
-   SVGA3D_Q8W8V8U8       = 28,
-   SVGA3D_CxV8U8         = 29,
+   SVGA3D_V8U8                         = 27,
+   SVGA3D_Q8W8V8U8                     = 28,
+   SVGA3D_CxV8U8                       = 29,
 
    /* mixed formats */
-   SVGA3D_X8L8V8U8       = 30,
-   SVGA3D_A2W10V10U10    = 31,
+   SVGA3D_X8L8V8U8                     = 30,
+   SVGA3D_A2W10V10U10                  = 31,
 
-   SVGA3D_ALPHA8         = 32,
+   SVGA3D_ALPHA8                       = 32,
 
    /* Single- and dual-component floating point formats */
-   SVGA3D_R_S10E5        = 33,
-   SVGA3D_R_S23E8        = 34,
-   SVGA3D_RG_S10E5       = 35,
-   SVGA3D_RG_S23E8       = 36,
+   SVGA3D_R_S10E5                      = 33,
+   SVGA3D_R_S23E8                      = 34,
+   SVGA3D_RG_S10E5                     = 35,
+   SVGA3D_RG_S23E8                     = 36,
 
    /*
    * Any surface can be used as a buffer object, but SVGA3D_BUFFER is
    * the most efficient format to use when creating new surfaces
    * expressly for index or vertex data.
    */
-   SVGA3D_BUFFER         = 37,
 
-   SVGA3D_Z_D24X8        = 38,
+   SVGA3D_BUFFER                       = 37,
+
+   SVGA3D_Z_D24X8                      = 38,
 
-   SVGA3D_V16U16         = 39,
+   SVGA3D_V16U16                       = 39,
 
-   SVGA3D_G16R16         = 40,
-   SVGA3D_A16B16G16R16   = 41,
+   SVGA3D_G16R16                       = 40,
+   SVGA3D_A16B16G16R16                 = 41,
 
    /* Packed Video formats */
-   SVGA3D_UYVY           = 42,
-   SVGA3D_YUY2           = 43,
+   SVGA3D_UYVY                         = 42,
+   SVGA3D_YUY2                         = 43,
+
+   /* Planar video formats */
+   SVGA3D_NV12                         = 44,
+
+   /* Video format with alpha */
+   SVGA3D_AYUV                         = 45,
+
+   SVGA3D_BC4_UNORM                    = 108,
+   SVGA3D_BC5_UNORM                    = 111,
+
+   /* Advanced D3D9 depth formats. */
+   SVGA3D_Z_DF16                       = 118,
+   SVGA3D_Z_DF24                       = 119,
+   SVGA3D_Z_D24S8_INT                  = 120,
 
    SVGA3D_FORMAT_MAX
 } SVGA3dSurfaceFormat;
@@ -414,9 +431,19 @@ typedef enum {
    SVGA3D_RS_SRCBLENDALPHA             = 94,    /* SVGA3dBlendOp */
    SVGA3D_RS_DSTBLENDALPHA             = 95,    /* SVGA3dBlendOp */
    SVGA3D_RS_BLENDEQUATIONALPHA        = 96,    /* SVGA3dBlendEquation */
+   SVGA3D_RS_TRANSPARENCYANTIALIAS     = 97,    /* SVGA3dTransparencyAntialiasType */
+   SVGA3D_RS_LINEAA                    = 98,    /* SVGA3dBool */
+   SVGA3D_RS_LINEWIDTH                 = 99,    /* float */
    SVGA3D_RS_MAX
 } SVGA3dRenderStateName;
 
+typedef enum {
+   SVGA3D_TRANSPARENCYANTIALIAS_NORMAL            = 0,
+   SVGA3D_TRANSPARENCYANTIALIAS_ALPHATOCOVERAGE   = 1,
+   SVGA3D_TRANSPARENCYANTIALIAS_SUPERSAMPLE       = 2,
+   SVGA3D_TRANSPARENCYANTIALIAS_MAX
+} SVGA3dTransparencyAntialiasType;
+
 typedef enum {
    SVGA3D_VERTEXMATERIAL_NONE     = 0,    /* Use the value in the current material */
    SVGA3D_VERTEXMATERIAL_DIFFUSE  = 1,    /* Use the value in the diffuse component */
@@ -728,10 +755,10 @@ typedef enum {
    SVGA3D_TEX_FILTER_NEAREST        = 1,
    SVGA3D_TEX_FILTER_LINEAR         = 2,
    SVGA3D_TEX_FILTER_ANISOTROPIC    = 3,
-   SVGA3D_TEX_FILTER_FLATCUBIC      = 4, // Deprecated, not implemented
-   SVGA3D_TEX_FILTER_GAUSSIANCUBIC  = 5, // Deprecated, not implemented
-   SVGA3D_TEX_FILTER_PYRAMIDALQUAD  = 6, // Not currently implemented
-   SVGA3D_TEX_FILTER_GAUSSIANQUAD   = 7, // Not currently implemented
+   SVGA3D_TEX_FILTER_FLATCUBIC      = 4, /* Deprecated, not implemented */
+   SVGA3D_TEX_FILTER_GAUSSIANCUBIC  = 5, /* Deprecated, not implemented */
+   SVGA3D_TEX_FILTER_PYRAMIDALQUAD  = 6, /* Not currently implemented */
+   SVGA3D_TEX_FILTER_GAUSSIANQUAD   = 7, /* Not currently implemented */
    SVGA3D_TEX_FILTER_MAX
 } SVGA3dTextureFilter;
 
@@ -799,19 +826,19 @@ typedef enum {
 
 typedef enum {
    SVGA3D_DECLUSAGE_POSITION     = 0,
-   SVGA3D_DECLUSAGE_BLENDWEIGHT,       //  1
-   SVGA3D_DECLUSAGE_BLENDINDICES,      //  2
-   SVGA3D_DECLUSAGE_NORMAL,            //  3
-   SVGA3D_DECLUSAGE_PSIZE,             //  4
-   SVGA3D_DECLUSAGE_TEXCOORD,          //  5
-   SVGA3D_DECLUSAGE_TANGENT,           //  6
-   SVGA3D_DECLUSAGE_BINORMAL,          //  7
-   SVGA3D_DECLUSAGE_TESSFACTOR,        //  8
-   SVGA3D_DECLUSAGE_POSITIONT,         //  9
-   SVGA3D_DECLUSAGE_COLOR,             // 10
-   SVGA3D_DECLUSAGE_FOG,               // 11
-   SVGA3D_DECLUSAGE_DEPTH,             // 12
-   SVGA3D_DECLUSAGE_SAMPLE,            // 13
+   SVGA3D_DECLUSAGE_BLENDWEIGHT,       /*  1 */
+   SVGA3D_DECLUSAGE_BLENDINDICES,      /*  2 */
+   SVGA3D_DECLUSAGE_NORMAL,            /*  3 */
+   SVGA3D_DECLUSAGE_PSIZE,             /*  4 */
+   SVGA3D_DECLUSAGE_TEXCOORD,          /*  5 */
+   SVGA3D_DECLUSAGE_TANGENT,           /*  6 */
+   SVGA3D_DECLUSAGE_BINORMAL,          /*  7 */
+   SVGA3D_DECLUSAGE_TESSFACTOR,        /*  8 */
+   SVGA3D_DECLUSAGE_POSITIONT,         /*  9 */
+   SVGA3D_DECLUSAGE_COLOR,             /* 10 */
+   SVGA3D_DECLUSAGE_FOG,               /* 11 */
+   SVGA3D_DECLUSAGE_DEPTH,             /* 12 */
+   SVGA3D_DECLUSAGE_SAMPLE,            /* 13 */
    SVGA3D_DECLUSAGE_MAX
 } SVGA3dDeclUsage;
 
@@ -819,10 +846,10 @@ typedef enum {
    SVGA3D_DECLMETHOD_DEFAULT     = 0,
    SVGA3D_DECLMETHOD_PARTIALU,
    SVGA3D_DECLMETHOD_PARTIALV,
-   SVGA3D_DECLMETHOD_CROSSUV,          // Normal
+   SVGA3D_DECLMETHOD_CROSSUV,          /* Normal */
    SVGA3D_DECLMETHOD_UV,
-   SVGA3D_DECLMETHOD_LOOKUP,           // Lookup a displacement map
-   SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, // Lookup a pre-sampled displacement map
+   SVGA3D_DECLMETHOD_LOOKUP,           /* Lookup a displacement map */
+   SVGA3D_DECLMETHOD_LOOKUPPRESAMPLED, /* Lookup a pre-sampled displacement map */
 } SVGA3dDeclMethod;
 
 typedef enum {
@@ -930,7 +957,6 @@ typedef enum {
 } SVGA3dCubeFace;
 
 typedef enum {
-   SVGA3D_SHADERTYPE_COMPILED_DX8               = 0,
    SVGA3D_SHADERTYPE_VS                         = 1,
    SVGA3D_SHADERTYPE_PS                         = 2,
    SVGA3D_SHADERTYPE_MAX
@@ -968,11 +994,17 @@ typedef enum {
 } SVGA3dTransferType;
 
 /*
- * The maximum number vertex arrays we're guaranteed to support in
+ * The maximum number of vertex arrays we're guaranteed to support in
  * SVGA_3D_CMD_DRAWPRIMITIVES.
  */
 #define SVGA3D_MAX_VERTEX_ARRAYS   32
 
+/*
+ * The maximum number of primitive ranges we're guaranteed to support
+ * in SVGA_3D_CMD_DRAWPRIMITIVES.
+ */
+#define SVGA3D_MAX_DRAW_PRIMITIVE_RANGES 32
+
 /*
  * Identifiers for commands in the command FIFO.
  *
@@ -990,7 +1022,7 @@ typedef enum {
 #define SVGA_3D_CMD_LEGACY_BASE            1000
 #define SVGA_3D_CMD_BASE                   1040
 
-#define SVGA_3D_CMD_SURFACE_DEFINE         SVGA_3D_CMD_BASE + 0
+#define SVGA_3D_CMD_SURFACE_DEFINE         SVGA_3D_CMD_BASE + 0     /* Deprecated */
 #define SVGA_3D_CMD_SURFACE_DESTROY        SVGA_3D_CMD_BASE + 1
 #define SVGA_3D_CMD_SURFACE_COPY           SVGA_3D_CMD_BASE + 2
 #define SVGA_3D_CMD_SURFACE_STRETCHBLT     SVGA_3D_CMD_BASE + 3
@@ -1008,7 +1040,7 @@ typedef enum {
 #define SVGA_3D_CMD_SETVIEWPORT            SVGA_3D_CMD_BASE + 15
 #define SVGA_3D_CMD_SETCLIPPLANE           SVGA_3D_CMD_BASE + 16
 #define SVGA_3D_CMD_CLEAR                  SVGA_3D_CMD_BASE + 17
-#define SVGA_3D_CMD_PRESENT                SVGA_3D_CMD_BASE + 18    // Deprecated
+#define SVGA_3D_CMD_PRESENT                SVGA_3D_CMD_BASE + 18    /* Deprecated */
 #define SVGA_3D_CMD_SHADER_DEFINE          SVGA_3D_CMD_BASE + 19
 #define SVGA_3D_CMD_SHADER_DESTROY         SVGA_3D_CMD_BASE + 20
 #define SVGA_3D_CMD_SET_SHADER             SVGA_3D_CMD_BASE + 21
@@ -1018,9 +1050,13 @@ typedef enum {
 #define SVGA_3D_CMD_BEGIN_QUERY            SVGA_3D_CMD_BASE + 25
 #define SVGA_3D_CMD_END_QUERY              SVGA_3D_CMD_BASE + 26
 #define SVGA_3D_CMD_WAIT_FOR_QUERY         SVGA_3D_CMD_BASE + 27
-#define SVGA_3D_CMD_PRESENT_READBACK       SVGA_3D_CMD_BASE + 28    // Deprecated
+#define SVGA_3D_CMD_PRESENT_READBACK       SVGA_3D_CMD_BASE + 28    /* Deprecated */
 #define SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN SVGA_3D_CMD_BASE + 29
-#define SVGA_3D_CMD_MAX                    SVGA_3D_CMD_BASE + 30
+#define SVGA_3D_CMD_SURFACE_DEFINE_V2      SVGA_3D_CMD_BASE + 30
+#define SVGA_3D_CMD_GENERATE_MIPMAPS       SVGA_3D_CMD_BASE + 31
+#define SVGA_3D_CMD_ACTIVATE_SURFACE       SVGA_3D_CMD_BASE + 40
+#define SVGA_3D_CMD_DEACTIVATE_SURFACE     SVGA_3D_CMD_BASE + 41
+#define SVGA_3D_CMD_MAX                    SVGA_3D_CMD_BASE + 42
 
 #define SVGA_3D_CMD_FUTURE_MAX             2000
 
@@ -1031,9 +1067,9 @@ typedef enum {
 typedef struct {
    union {
       struct {
-         uint16  function;       // SVGA3dFogFunction
-         uint8   type;           // SVGA3dFogType
-         uint8   base;           // SVGA3dFogBase
+         uint16  function;       /* SVGA3dFogFunction */
+         uint8   type;           /* SVGA3dFogType */
+         uint8   base;           /* SVGA3dFogBase */
       };
       uint32     uintValue;
    };
@@ -1109,6 +1145,8 @@ typedef enum {
    SVGA3D_SURFACE_HINT_RENDERTARGET    = (1 << 6),
    SVGA3D_SURFACE_HINT_DEPTHSTENCIL    = (1 << 7),
    SVGA3D_SURFACE_HINT_WRITEONLY       = (1 << 8),
+   SVGA3D_SURFACE_MASKABLE_ANTIALIAS   = (1 << 9),
+   SVGA3D_SURFACE_AUTOGENMIPMAPS       = (1 << 10),
 } SVGA3dSurfaceFlags;
 
 typedef
@@ -1121,6 +1159,12 @@ struct {
    uint32                      sid;
    SVGA3dSurfaceFlags          surfaceFlags;
    SVGA3dSurfaceFormat         format;
+   /*
+    * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
+    * structures must have the same value of numMipLevels field.
+    * Otherwise, all but the first SVGA3dSurfaceFace structures must have the
+    * numMipLevels set to 0.
+    */
    SVGA3dSurfaceFace           face[SVGA3D_MAX_SURFACE_FACES];
    /*
     * Followed by an SVGA3dSize structure for each mip level in each face.
@@ -1133,6 +1177,31 @@ struct {
     */
 } SVGA3dCmdDefineSurface;       /* SVGA_3D_CMD_SURFACE_DEFINE */
 
+typedef
+struct {
+   uint32                      sid;
+   SVGA3dSurfaceFlags          surfaceFlags;
+   SVGA3dSurfaceFormat         format;
+   /*
+    * If surfaceFlags has SVGA3D_SURFACE_CUBEMAP bit set, all SVGA3dSurfaceFace
+    * structures must have the same value of numMipLevels field.
+    * Otherwise, all but the first SVGA3dSurfaceFace structures must have the
+    * numMipLevels set to 0.
+    */
+   SVGA3dSurfaceFace           face[SVGA3D_MAX_SURFACE_FACES];
+   uint32                      multisampleCount;
+   SVGA3dTextureFilter         autogenFilter;
+   /*
+    * Followed by an SVGA3dSize structure for each mip level in each face.
+    *
+    * A note on surface sizes: Sizes are always specified in pixels,
+    * even if the true surface size is not a multiple of the minimum
+    * block size of the surface's format. For example, a 3x3x1 DXT1
+    * compressed texture would actually be stored as a 4x4x1 image in
+    * memory.
+    */
+} SVGA3dCmdDefineSurface_v2;     /* SVGA_3D_CMD_SURFACE_DEFINE_V2 */
+
 typedef
 struct {
    uint32               sid;
@@ -1474,10 +1543,12 @@ struct {
     * SVGA3dCmdDrawPrimitives structure. In order,
     * they are:
     *
-    * 1. SVGA3dVertexDecl, quantity 'numVertexDecls'
-    * 2. SVGA3dPrimitiveRange, quantity 'numRanges'
+    * 1. SVGA3dVertexDecl, quantity 'numVertexDecls', but no more than
+    *    SVGA3D_MAX_VERTEX_ARRAYS;
+    * 2. SVGA3dPrimitiveRange, quantity 'numRanges', but no more than
+    *    SVGA3D_MAX_DRAW_PRIMITIVE_RANGES;
     * 3. Optionally, SVGA3dVertexDivisor, quantity 'numVertexDecls' (contains
-    *    the frequency divisor for this the corresponding vertex decl)
+    *    the frequency divisor for the corresponding vertex decl).
     */
 } SVGA3dCmdDrawPrimitives;      /* SVGA_3D_CMD_DRAWPRIMITIVES */
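For one vertex declaration and one primitive range, the trailing-array layout described above collapses to a fixed struct; a hedged sketch (no vertex divisors appended):

    /* Sketch: in-memory layout of a minimal DRAWPRIMITIVES submission. */
    struct {
            SVGA3dCmdHeader         header; /* id = SVGA_3D_CMD_DRAWPRIMITIVES */
            SVGA3dCmdDrawPrimitives body;   /* numVertexDecls = numRanges = 1 */
            SVGA3dVertexDecl        decl;
            SVGA3dPrimitiveRange    range;
    } cmd;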
 
@@ -1671,6 +1742,12 @@ struct {
    /* Clipping: zero or more SVGASignedRects follow */
 } SVGA3dCmdBlitSurfaceToScreen;         /* SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN */
 
+typedef
+struct {
+   uint32               sid;
+   SVGA3dTextureFilter  filter;
+} SVGA3dCmdGenerateMipmaps;             /* SVGA_3D_CMD_GENERATE_MIPMAPS */
+
 
 /*
  * Capability query index.
@@ -1774,6 +1851,32 @@ typedef enum {
    SVGA3D_DEVCAP_SURFACEFMT_A16B16G16R16           = 67,
    SVGA3D_DEVCAP_SURFACEFMT_UYVY                   = 68,
    SVGA3D_DEVCAP_SURFACEFMT_YUY2                   = 69,
+   SVGA3D_DEVCAP_MULTISAMPLE_NONMASKABLESAMPLES    = 70,
+   SVGA3D_DEVCAP_MULTISAMPLE_MASKABLESAMPLES       = 71,
+   SVGA3D_DEVCAP_ALPHATOCOVERAGE                   = 72,
+   SVGA3D_DEVCAP_SUPERSAMPLE                       = 73,
+   SVGA3D_DEVCAP_AUTOGENMIPMAPS                    = 74,
+   SVGA3D_DEVCAP_SURFACEFMT_NV12                   = 75,
+   SVGA3D_DEVCAP_SURFACEFMT_AYUV                   = 76,
+
+   /*
+    * This is the maximum number of SVGA context IDs that the guest
+    * can define using SVGA_3D_CMD_CONTEXT_DEFINE.
+    */
+   SVGA3D_DEVCAP_MAX_CONTEXT_IDS                   = 77,
+
+   /*
+    * This is the maximum number of SVGA surface IDs that the guest
+    * can define using SVGA_3D_CMD_SURFACE_DEFINE*.
+    */
+   SVGA3D_DEVCAP_MAX_SURFACE_IDS                   = 78,
+
+   SVGA3D_DEVCAP_SURFACEFMT_Z_DF16                 = 79,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_DF24                 = 80,
+   SVGA3D_DEVCAP_SURFACEFMT_Z_D24S8_INT            = 81,
+
+   SVGA3D_DEVCAP_SURFACEFMT_BC4_UNORM              = 82,
+   SVGA3D_DEVCAP_SURFACEFMT_BC5_UNORM              = 83,
 
    /*
     * Don't add new caps into the previous section; the values in this

+ 1 - 1
drivers/gpu/drm/vmwgfx/svga_escape.h

@@ -75,7 +75,7 @@
  */
 
 #define SVGA_ESCAPE_VMWARE_HINT               0x00030000
-#define SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN    0x00030001  // Deprecated
+#define SVGA_ESCAPE_VMWARE_HINT_FULLSCREEN    0x00030001  /* Deprecated */
 
 typedef
 struct {

+ 11 - 11
drivers/gpu/drm/vmwgfx/svga_overlay.h

@@ -38,9 +38,9 @@
  * Video formats we support
  */
 
-#define VMWARE_FOURCC_YV12 0x32315659 // 'Y' 'V' '1' '2'
-#define VMWARE_FOURCC_YUY2 0x32595559 // 'Y' 'U' 'Y' '2'
-#define VMWARE_FOURCC_UYVY 0x59565955 // 'U' 'Y' 'V' 'Y'
+#define VMWARE_FOURCC_YV12 0x32315659 /* 'Y' 'V' '1' '2' */
+#define VMWARE_FOURCC_YUY2 0x32595559 /* 'Y' 'U' 'Y' '2' */
+#define VMWARE_FOURCC_UYVY 0x59565955 /* 'U' 'Y' 'V' 'Y' */
 
 typedef enum {
    SVGA_OVERLAY_FORMAT_INVALID = 0,
@@ -68,7 +68,7 @@ struct SVGAEscapeVideoSetRegs {
       uint32 streamId;
    } header;
 
-   // May include zero or more items.
+   /* May include zero or more items. */
    struct {
       uint32 registerId;
       uint32 value;
@@ -134,12 +134,12 @@ struct {
  */
 
 static inline bool
-VMwareVideoGetAttributes(const SVGAOverlayFormat format,    // IN
-                         uint32 *width,                     // IN / OUT
-                         uint32 *height,                    // IN / OUT
-                         uint32 *size,                      // OUT
-                         uint32 *pitches,                   // OUT (optional)
-                         uint32 *offsets)                   // OUT (optional)
+VMwareVideoGetAttributes(const SVGAOverlayFormat format,    /* IN */
+                         uint32 *width,                     /* IN / OUT */
+                         uint32 *height,                    /* IN / OUT */
+                         uint32 *size,                      /* OUT */
+                         uint32 *pitches,                   /* OUT (optional) */
+                         uint32 *offsets)                   /* OUT (optional) */
 {
     int tmp;
 
@@ -198,4 +198,4 @@ VMwareVideoGetAttributes(const SVGAOverlayFormat format,    // IN
     return true;
 }
 
-#endif // _SVGA_OVERLAY_H_
+#endif /* _SVGA_OVERLAY_H_ */

+ 166 - 54
drivers/gpu/drm/vmwgfx/svga_reg.h

@@ -276,7 +276,7 @@ enum {
  * possible.
  */
 #define SVGA_GMR_NULL         ((uint32) -1)
-#define SVGA_GMR_FRAMEBUFFER  ((uint32) -2)  // Guest Framebuffer (GFB)
+#define SVGA_GMR_FRAMEBUFFER  ((uint32) -2)  /* Guest Framebuffer (GFB) */
 
 typedef
 struct SVGAGuestMemDescriptor {
@@ -317,13 +317,35 @@ struct SVGAGMRImageFormat {
       struct {
          uint32 bitsPerPixel : 8;
          uint32 colorDepth   : 8;
-         uint32 reserved     : 16;  // Must be zero
+         uint32 reserved     : 16;  /* Must be zero */
       };
 
       uint32 value;
    };
 } SVGAGMRImageFormat;
 
+typedef
+struct SVGAGuestImage {
+   SVGAGuestPtr         ptr;
+
+   /*
+    * A note on interpretation of pitch: This value of pitch is the
+    * number of bytes between vertically adjacent image
+    * blocks. Normally this is the number of bytes between the first
+    * pixel of two adjacent scanlines. With compressed textures,
+    * however, this may represent the number of bytes between
+    * compression blocks rather than between rows of pixels.
+    *
+    * XXX: Compressed textures currently must be tightly packed in guest memory.
+    *
+    * If the image is 1-dimensional, pitch is ignored.
+    *
+    * If 'pitch' is zero, the SVGA3D device calculates a pitch value
+    * assuming each row of blocks is tightly packed.
+    */
+   uint32 pitch;
+} SVGAGuestImage;
+
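A hedged example of filling the new struct for a tightly packed 32bpp image placed in a GMR ('gmr_id' and 'width' are placeholders, not values from this patch):

    SVGAGuestImage img;

    img.ptr.gmrId  = gmr_id;     /* guest memory region holding the pixels */
    img.ptr.offset = 0;          /* byte offset within that region */
    img.pitch      = width * 4;  /* tightly packed 32bpp scanlines */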
 /*
  * SVGAColorBGRX --
  *
@@ -339,7 +361,7 @@ struct SVGAColorBGRX {
          uint32 b : 8;
          uint32 g : 8;
          uint32 r : 8;
-         uint32 x : 8;  // Unused
+         uint32 x : 8;  /* Unused */
       };
 
       uint32 value;
@@ -395,16 +417,16 @@ struct SVGASignedPoint {
 #define SVGA_CAP_NONE               0x00000000
 #define SVGA_CAP_RECT_COPY          0x00000002
 #define SVGA_CAP_CURSOR             0x00000020
-#define SVGA_CAP_CURSOR_BYPASS      0x00000040   // Legacy (Use Cursor Bypass 3 instead)
-#define SVGA_CAP_CURSOR_BYPASS_2    0x00000080   // Legacy (Use Cursor Bypass 3 instead)
+#define SVGA_CAP_CURSOR_BYPASS      0x00000040   /* Legacy (Use Cursor Bypass 3 instead) */
+#define SVGA_CAP_CURSOR_BYPASS_2    0x00000080   /* Legacy (Use Cursor Bypass 3 instead) */
 #define SVGA_CAP_8BIT_EMULATION     0x00000100
 #define SVGA_CAP_ALPHA_CURSOR       0x00000200
 #define SVGA_CAP_3D                 0x00004000
 #define SVGA_CAP_EXTENDED_FIFO      0x00008000
-#define SVGA_CAP_MULTIMON           0x00010000   // Legacy multi-monitor support
+#define SVGA_CAP_MULTIMON           0x00010000   /* Legacy multi-monitor support */
 #define SVGA_CAP_PITCHLOCK          0x00020000
 #define SVGA_CAP_IRQMASK            0x00040000
-#define SVGA_CAP_DISPLAY_TOPOLOGY   0x00080000   // Legacy multi-monitor support
+#define SVGA_CAP_DISPLAY_TOPOLOGY   0x00080000   /* Legacy multi-monitor support */
 #define SVGA_CAP_GMR                0x00100000
 #define SVGA_CAP_TRACES             0x00200000
 #define SVGA_CAP_GMR2               0x00400000
@@ -453,7 +475,7 @@ enum {
 
    SVGA_FIFO_CAPABILITIES = 4,
    SVGA_FIFO_FLAGS,
-   // Valid with SVGA_FIFO_CAP_FENCE:
+   /* Valid with SVGA_FIFO_CAP_FENCE: */
    SVGA_FIFO_FENCE,
 
    /*
@@ -466,32 +488,46 @@ enum {
     * extended FIFO.
     */
 
-   // Valid if exists (i.e. if extended FIFO enabled):
+   /* Valid if exists (i.e. if extended FIFO enabled): */
    SVGA_FIFO_3D_HWVERSION,       /* See SVGA3dHardwareVersion in svga3d_reg.h */
-   // Valid with SVGA_FIFO_CAP_PITCHLOCK:
+   /* Valid with SVGA_FIFO_CAP_PITCHLOCK: */
    SVGA_FIFO_PITCHLOCK,
 
-   // Valid with SVGA_FIFO_CAP_CURSOR_BYPASS_3:
+   /* Valid with SVGA_FIFO_CAP_CURSOR_BYPASS_3: */
    SVGA_FIFO_CURSOR_ON,          /* Cursor bypass 3 show/hide register */
    SVGA_FIFO_CURSOR_X,           /* Cursor bypass 3 x register */
    SVGA_FIFO_CURSOR_Y,           /* Cursor bypass 3 y register */
    SVGA_FIFO_CURSOR_COUNT,       /* Incremented when any of the other 3 change */
    SVGA_FIFO_CURSOR_LAST_UPDATED,/* Last time the host updated the cursor */
 
-   // Valid with SVGA_FIFO_CAP_RESERVE:
+   /* Valid with SVGA_FIFO_CAP_RESERVE: */
    SVGA_FIFO_RESERVED,           /* Bytes past NEXT_CMD with real contents */
 
    /*
-    * Valid with SVGA_FIFO_CAP_SCREEN_OBJECT:
+    * Valid with SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2:
     *
     * By default this is SVGA_ID_INVALID, to indicate that the cursor
     * coordinates are specified relative to the virtual root. If this
     * is set to a specific screen ID, cursor position is reinterpreted
-    * as a signed offset relative to that screen's origin. This is the
-    * only way to place the cursor on a non-rooted screen.
+    * as a signed offset relative to that screen's origin.
     */
    SVGA_FIFO_CURSOR_SCREEN_ID,
 
+   /*
+    * Valid with SVGA_FIFO_CAP_DEAD
+    *
+    * An arbitrary value written by the host, drivers should not use it.
+    */
+   SVGA_FIFO_DEAD,
+
+   /*
+    * Valid with SVGA_FIFO_CAP_3D_HWVERSION_REVISED:
+    *
+    * Contains 3D HWVERSION (see SVGA3dHardwareVersion in svga3d_reg.h)
+    * on platforms that can enforce graphics resource limits.
+    */
+   SVGA_FIFO_3D_HWVERSION_REVISED,
+
    /*
     * XXX: The gap here, up until SVGA_FIFO_3D_CAPS, can be used for new
     * registers, but this must be done carefully and with judicious use of
@@ -530,7 +566,7 @@ enum {
     * sets SVGA_FIFO_MIN high enough to leave room for them.
     */
 
-   // Valid if register exists:
+   /* Valid if register exists: */
    SVGA_FIFO_GUEST_3D_HWVERSION, /* Guest driver's 3D version */
    SVGA_FIFO_FENCE_GOAL,         /* Matching target for SVGA_IRQFLAG_FENCE_GOAL */
    SVGA_FIFO_BUSY,               /* See "FIFO Synchronization Registers" */
@@ -731,6 +767,37 @@ enum {
  *
  *       - When a screen is resized, either using Screen Object commands or
  *         legacy multimon registers, its contents are preserved.
+ *
+ * SVGA_FIFO_CAP_GMR2 --
+ *
+ *    Provides new commands to define and remap guest memory regions (GMR).
+ *
+ *    New 2D commands:
+ *       DEFINE_GMR2, REMAP_GMR2.
+ *
+ * SVGA_FIFO_CAP_3D_HWVERSION_REVISED --
+ *
+ *    Indicates new register SVGA_FIFO_3D_HWVERSION_REVISED exists.
+ *    This register may replace SVGA_FIFO_3D_HWVERSION on platforms
+ *    that enforce graphics resource limits.  This allows the platform
+ *    to clear SVGA_FIFO_3D_HWVERSION and disable 3D in legacy guest
+ *    drivers that do not limit their resources.
+ *
+ *    Note this is an alias to SVGA_FIFO_CAP_GMR2 because these indicators
+ *    are codependent (and thus we use a single capability bit).
+ *
+ * SVGA_FIFO_CAP_SCREEN_OBJECT_2 --
+ *
+ *    Modifies the DEFINE_SCREEN command to include a guest provided
+ *    backing store in GMR memory and the bytesPerLine for the backing
+ *    store.  This capability requires the use of a backing store when
+ *    creating screen objects.  However if SVGA_FIFO_CAP_SCREEN_OBJECT
+ *    is present then backing stores are optional.
+ *
+ * SVGA_FIFO_CAP_DEAD --
+ *
+ *    Drivers should not use this cap bit.  This cap bit can not be
+ *    reused since some hosts already expose it.
  */
  */
 
 #define SVGA_FIFO_CAP_NONE                  0
 #define SVGA_FIFO_CAP_ESCAPE            (1<<5)
 #define SVGA_FIFO_CAP_ESCAPE            (1<<5)
 #define SVGA_FIFO_CAP_RESERVE           (1<<6)
 #define SVGA_FIFO_CAP_SCREEN_OBJECT     (1<<7)
+#define SVGA_FIFO_CAP_3D_HWVERSION_REVISED  SVGA_FIFO_CAP_GMR2
+#define SVGA_FIFO_CAP_SCREEN_OBJECT_2   (1<<9)
+#define SVGA_FIFO_CAP_DEAD              (1<<10)
 
 
 
 /*
 
 
 #define SVGA_FIFO_FLAG_NONE                 0
 #define SVGA_FIFO_FLAG_ACCELFRONT       (1<<0)
-#define SVGA_FIFO_FLAG_RESERVED        (1<<31) // Internal use only
+#define SVGA_FIFO_FLAG_RESERVED        (1<<31) /* Internal use only */
 
 /*
  * FIFO reservation sentinel value
@@ -785,22 +856,22 @@ enum {
    SVGA_VIDEO_DATA_OFFSET,
    SVGA_VIDEO_FORMAT,
    SVGA_VIDEO_COLORKEY,
-   SVGA_VIDEO_SIZE,          // Deprecated
+   SVGA_VIDEO_SIZE,          /* Deprecated */
    SVGA_VIDEO_WIDTH,
    SVGA_VIDEO_HEIGHT,
    SVGA_VIDEO_SRC_X,
    SVGA_VIDEO_SRC_Y,
    SVGA_VIDEO_SRC_WIDTH,
    SVGA_VIDEO_SRC_HEIGHT,
-   SVGA_VIDEO_DST_X,         // Signed int32
-   SVGA_VIDEO_DST_Y,         // Signed int32
+   SVGA_VIDEO_DST_X,         /* Signed int32 */
+   SVGA_VIDEO_DST_Y,         /* Signed int32 */
    SVGA_VIDEO_DST_WIDTH,
    SVGA_VIDEO_DST_HEIGHT,
    SVGA_VIDEO_PITCH_1,
    SVGA_VIDEO_PITCH_2,
    SVGA_VIDEO_PITCH_3,
-   SVGA_VIDEO_DATA_GMRID,    // Optional, defaults to SVGA_GMR_FRAMEBUFFER
-   SVGA_VIDEO_DST_SCREEN_ID, // Optional, defaults to virtual coords (SVGA_ID_INVALID)
+   SVGA_VIDEO_DATA_GMRID,    /* Optional, defaults to SVGA_GMR_FRAMEBUFFER */
+   SVGA_VIDEO_DST_SCREEN_ID, /* Optional, defaults to virtual coords (SVGA_ID_INVALID) */
    SVGA_VIDEO_NUM_REGS
 };
 
@@ -851,15 +922,51 @@ typedef struct SVGAOverlayUnit {
  *    compatibility. New flags can be added, and the struct may grow,
  *    but existing fields must retain their meaning.
  *
+ *    Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2 are required fields of
+ *    a SVGAGuestPtr that is used to back the screen contents.  This
+ *    memory must come from the GFB.  The guest is not allowed to
+ *    access the memory and doing so will have undefined results.  The
+ *    backing store is required to be page aligned and the size is
+ *    padded to the next page boundary.  The number of pages is:
+ *       (bytesPerLine * size.width * 4 + PAGE_SIZE - 1) / PAGE_SIZE
+ *
+ *    The pitch in the backingStore is required to be at least large
+ *    enough to hold a 32bpp scanline.  It is recommended that the
+ *    driver pad bytesPerLine for a potential performance win.
+ *
+ *    The cloneCount field is treated as a hint from the guest that
+ *    the user wants this display to be cloned, cloneCount times.  A
+ *    value of zero means no cloning should happen.
  */
 
-#define SVGA_SCREEN_HAS_ROOT    (1 << 0)  // Screen is present in the virtual coord space
-#define SVGA_SCREEN_IS_PRIMARY  (1 << 1)  // Guest considers this screen to be 'primary'
-#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2)   // Guest is running a fullscreen app here
+#define SVGA_SCREEN_MUST_BE_SET     (1 << 0) /* Must be set or results undefined */
+#define SVGA_SCREEN_HAS_ROOT SVGA_SCREEN_MUST_BE_SET /* Deprecated */
+#define SVGA_SCREEN_IS_PRIMARY      (1 << 1) /* Guest considers this screen to be 'primary' */
+#define SVGA_SCREEN_FULLSCREEN_HINT (1 << 2) /* Guest is running a fullscreen app here */
+
+/*
+ * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2.  When the screen is
+ * deactivated the base layer is defined to lose all contents and
+ * become black.  When a screen is deactivated the backing store is
+ * optional.  When set, backingPtr and bytesPerLine are ignored.
+ */
+#define SVGA_SCREEN_DEACTIVATE  (1 << 3)
+
+/*
+ * Added with SVGA_FIFO_CAP_SCREEN_OBJECT_2.  When this flag is set
+ * the screen contents are output as all black to the user, though the
+ * base layer contents are preserved.  The screen base layer can still
+ * be read and written to as normal, but no visible effect is seen by
+ * the user.  When the flag is changed the screen will be blanked or
+ * redrawn to the current contents as needed without any extra
+ * commands from the driver.  This flag only has an effect when the
+ * screen is not deactivated.
+ */
+#define SVGA_SCREEN_BLANKING (1 << 4)
 
 typedef
 struct SVGAScreenObject {
-   uint32 structSize;   // sizeof(SVGAScreenObject)
+   uint32 structSize;   /* sizeof(SVGAScreenObject) */
    uint32 id;
    uint32 flags;
    struct {
@@ -869,7 +976,14 @@ struct SVGAScreenObject {
    struct {
       int32 x;
       int32 y;
-   } root;              // Only used if SVGA_SCREEN_HAS_ROOT is set.
+   } root;
+
+   /*
+    * Added and required by SVGA_FIFO_CAP_SCREEN_OBJECT_2, optional
+    * with SVGA_FIFO_CAP_SCREEN_OBJECT.
+    */
+   SVGAGuestImage backingStore;
+   uint32 cloneCount;
 } SVGAScreenObject;
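A hedged sketch of a SCREEN_OBJECT_2-style definition with a GMR-backed store ('gmr_id' is a placeholder; a real driver embeds this in an SVGA_CMD_DEFINE_SCREEN FIFO command, and the nested field names are assumed from the full header):

    /* Sketch: a 640x480 primary screen backed by guest memory. */
    SVGAScreenObject screen = {
            .structSize   = sizeof(SVGAScreenObject),
            .id           = 0,
            .flags        = SVGA_SCREEN_MUST_BE_SET | SVGA_SCREEN_IS_PRIMARY,
            .size         = { .width = 640, .height = 480 },
            .root         = { .x = 0, .y = 0 },
            .backingStore = {
                    .ptr   = { .gmrId = gmr_id, .offset = 0 },
                    .pitch = 640 * 4, /* at least one 32bpp scanline */
            },
            .cloneCount   = 0,
    };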
 
 
@@ -944,7 +1058,7 @@ typedef enum {
  */
 
 typedef
-struct {
+struct SVGAFifoCmdUpdate {
    uint32 x;
    uint32 y;
    uint32 width;
@@ -963,7 +1077,7 @@ struct {
  */
 
 typedef
-struct {
+struct SVGAFifoCmdRectCopy {
    uint32 srcX;
    uint32 srcY;
    uint32 destX;
@@ -987,14 +1101,14 @@ struct {
  */
 
 typedef
-struct {
-   uint32 id;             // Reserved, must be zero.
+struct SVGAFifoCmdDefineCursor {
+   uint32 id;             /* Reserved, must be zero. */
    uint32 hotspotX;
    uint32 hotspotY;
    uint32 width;
    uint32 height;
-   uint32 andMaskDepth;   // Value must be 1 or equal to BITS_PER_PIXEL
-   uint32 xorMaskDepth;   // Value must be 1 or equal to BITS_PER_PIXEL
+   uint32 andMaskDepth;   /* Value must be 1 or equal to BITS_PER_PIXEL */
+   uint32 xorMaskDepth;   /* Value must be 1 or equal to BITS_PER_PIXEL */
    /*
     * Followed by scanline data for AND mask, then XOR mask.
     * Each scanline is padded to a 32-bit boundary.
@@ -1016,8 +1130,8 @@ struct {
  */
 
 typedef
-struct {
-   uint32 id;             // Reserved, must be zero.
+struct SVGAFifoCmdDefineAlphaCursor {
+   uint32 id;             /* Reserved, must be zero. */
    uint32 hotspotX;
    uint32 hotspotY;
    uint32 width;
@@ -1039,7 +1153,7 @@ struct {
  */
 
 typedef
-struct {
+struct SVGAFifoCmdUpdateVerbose {
    uint32 x;
    uint32 y;
    uint32 width;
@@ -1064,13 +1178,13 @@ struct {
 #define  SVGA_ROP_COPY                    0x03
 
 typedef
-struct {
-   uint32 color;     // In the same format as the GFB
+struct SVGAFifoCmdFrontRopFill {
+   uint32 color;     /* In the same format as the GFB */
    uint32 x;
    uint32 y;
    uint32 width;
    uint32 height;
-   uint32 rop;       // Must be SVGA_ROP_COPY
+   uint32 rop;       /* Must be SVGA_ROP_COPY */
 } SVGAFifoCmdFrontRopFill;
 
 
@@ -1107,7 +1221,7 @@ struct {
  */
 
 typedef
-struct {
+struct SVGAFifoCmdEscape {
    uint32 nsid;
    uint32 size;
    /* followed by 'size' bytes of data */
@@ -1137,12 +1251,12 @@ struct {
  *    registers (SVGA_REG_NUM_GUEST_DISPLAYS, SVGA_REG_DISPLAY_*).
  *
  * Availability:
- *    SVGA_FIFO_CAP_SCREEN_OBJECT
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
  */
 
 typedef
 struct {
-   SVGAScreenObject screen;   // Variable-length according to version
+   SVGAScreenObject screen;   /* Variable-length according to version */
 } SVGAFifoCmdDefineScreen;
 
 
@@ -1153,7 +1267,7 @@ struct {
  *    re-use.
  *
  * Availability:
- *    SVGA_FIFO_CAP_SCREEN_OBJECT
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
  */
 
 typedef
@@ -1206,7 +1320,7 @@ struct {
  *    GMRFB.
  *
  * Availability:
- *    SVGA_FIFO_CAP_SCREEN_OBJECT
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
  */
 
 typedef
@@ -1243,7 +1357,7 @@ struct {
  *    SVGA_CMD_ANNOTATION_* commands for details.
  *
  * Availability:
- *    SVGA_FIFO_CAP_SCREEN_OBJECT
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
  */
 
 typedef
@@ -1291,7 +1405,7 @@ struct {
  *    the time any subsequent FENCE commands are reached.
  *
  * Availability:
- *    SVGA_FIFO_CAP_SCREEN_OBJECT
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
  */
 
 typedef
@@ -1326,7 +1440,7 @@ struct {
  *    user's display is being remoted over a network connection.
  *
  * Availability:
- *    SVGA_FIFO_CAP_SCREEN_OBJECT
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
  */
 
 typedef
@@ -1358,7 +1472,7 @@ struct {
  *    undefined.
  *
  * Availability:
- *    SVGA_FIFO_CAP_SCREEN_OBJECT
+ *    SVGA_FIFO_CAP_SCREEN_OBJECT or SVGA_FIFO_CAP_SCREEN_OBJECT_2
  */
 
 typedef
@@ -1381,8 +1495,7 @@ typedef
 struct {
    uint32 gmrId;
    uint32 numPages;
-}
-SVGAFifoCmdDefineGMR2;
+} SVGAFifoCmdDefineGMR2;
 
 
 /*
@@ -1424,8 +1537,8 @@ typedef
 struct {
    uint32 gmrId;
    SVGARemapGMR2Flags flags;
-	uint32 offsetPages; /* offset in pages to begin remap */
-	uint32 numPages; /* number of pages to remap */
+   uint32 offsetPages; /* offset in pages to begin remap */
+   uint32 numPages; /* number of pages to remap */
    /*
     * Followed by additional data depending on SVGARemapGMR2Flags.
     *
@@ -1434,7 +1547,6 @@ struct {
     * (according to flag SVGA_REMAP_GMR2_PPN64) follows.  If flag
     * SVGA_REMAP_GMR2_SINGLE_PPN is set, array contains a single entry.
     */
-}
-SVGAFifoCmdRemapGMR2;
+} SVGAFifoCmdRemapGMR2;
 
 #endif

+ 47 - 0
drivers/gpu/drm/vmwgfx/vmwgfx_buffer.c

@@ -42,6 +42,10 @@ static uint32_t sys_placement_flags = TTM_PL_FLAG_SYSTEM |
 static uint32_t gmr_placement_flags = VMW_PL_FLAG_GMR |
 	TTM_PL_FLAG_CACHED;
 
+static uint32_t gmr_ne_placement_flags = VMW_PL_FLAG_GMR |
+	TTM_PL_FLAG_CACHED |
+	TTM_PL_FLAG_NO_EVICT;
+
 struct ttm_placement vmw_vram_placement = {
 	.fpfn = 0,
 	.lpfn = 0,
@@ -56,6 +60,11 @@ static uint32_t vram_gmr_placement_flags[] = {
 	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
 };
 
+static uint32_t gmr_vram_placement_flags[] = {
+	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED,
+	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED
+};
+
 struct ttm_placement vmw_vram_gmr_placement = {
 	.fpfn = 0,
 	.lpfn = 0,
@@ -65,6 +74,20 @@ struct ttm_placement vmw_vram_gmr_placement = {
 	.busy_placement = &gmr_placement_flags
 };
 
+static uint32_t vram_gmr_ne_placement_flags[] = {
+	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT,
+	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED | TTM_PL_FLAG_NO_EVICT
+};
+
+struct ttm_placement vmw_vram_gmr_ne_placement = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.num_placement = 2,
+	.placement = vram_gmr_ne_placement_flags,
+	.num_busy_placement = 1,
+	.busy_placement = &gmr_ne_placement_flags
+};
+
 struct ttm_placement vmw_vram_sys_placement = {
 	.fpfn = 0,
 	.lpfn = 0,
@@ -92,6 +115,30 @@ struct ttm_placement vmw_sys_placement = {
 	.busy_placement = &sys_placement_flags
 };
 
+static uint32_t evictable_placement_flags[] = {
+	TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
+	TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
+	VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED
+};
+
+struct ttm_placement vmw_evictable_placement = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.num_placement = 3,
+	.placement = evictable_placement_flags,
+	.num_busy_placement = 1,
+	.busy_placement = &sys_placement_flags
+};
+
+struct ttm_placement vmw_srf_placement = {
+	.fpfn = 0,
+	.lpfn = 0,
+	.num_placement = 1,
+	.num_busy_placement = 2,
+	.placement = &gmr_placement_flags,
+	.busy_placement = gmr_vram_placement_flags
+};
+
 struct vmw_ttm_backend {
 	struct ttm_backend backend;
 	struct page **pages;

+ 322 - 0
drivers/gpu/drm/vmwgfx/vmwgfx_dmabuf.c

@@ -0,0 +1,322 @@
+/**************************************************************************
+ *
+ * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "ttm/ttm_placement.h"
+
+#include "drmP.h"
+#include "vmwgfx_drv.h"
+
+
+/**
+ * vmw_dmabuf_to_placement - Validate a buffer to placement.
+ *
+ * @dev_priv:  Driver private.
+ * @buf:  DMA buffer to move.
+ * @placement:  The TTM placement to validate @buf against.
+ * @interruptible:  Use interruptible wait.
+ *
+ * May only be called by the current master since it assumes that the
+ * master lock is the current master's lock.
+ * This function takes the master's lock in write mode.
+ * Flushes and unpins the query bo to avoid failures.
+ *
+ * Returns
+ *  -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_dmabuf_to_placement(struct vmw_private *dev_priv,
+			    struct vmw_dma_buffer *buf,
+			    struct ttm_placement *placement,
+			    bool interruptible)
+{
+	struct vmw_master *vmaster = dev_priv->active_master;
+	struct ttm_buffer_object *bo = &buf->base;
+	int ret;
+
+	ret = ttm_write_lock(&vmaster->lock, interruptible);
+	if (unlikely(ret != 0))
+		return ret;
+
+	vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+
+	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
+	if (unlikely(ret != 0))
+		goto err;
+
+	ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+
+	ttm_bo_unreserve(bo);
+
+err:
+	ttm_write_unlock(&vmaster->lock);
+	return ret;
+}
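A hedged usage sketch for the helper above (the placement is chosen purely for illustration):

    /* Sketch: validate a buffer into plain VRAM, interruptibly. */
    ret = vmw_dmabuf_to_placement(dev_priv, buf,
                                  &vmw_vram_placement, true);
    if (unlikely(ret != 0))
            DRM_ERROR("Failed to validate buffer placement.\n");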
+
+/**
+ * vmw_dmabuf_to_vram_or_gmr - Move a buffer to vram or gmr.
+ *
+ * May only be called by the current master since it assumes that the
+ * master lock is the current master's lock.
+ * This function takes the master's lock in write mode.
+ * Flushes and unpins the query bo if @pin == true to avoid failures.
+ *
+ * @dev_priv:  Driver private.
+ * @buf:  DMA buffer to move.
+ * @pin:  Pin buffer if true.
+ * @interruptible:  Use interruptible wait.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
+			      struct vmw_dma_buffer *buf,
+			      bool pin, bool interruptible)
+{
+	struct vmw_master *vmaster = dev_priv->active_master;
+	struct ttm_buffer_object *bo = &buf->base;
+	struct ttm_placement *placement;
+	int ret;
+
+	ret = ttm_write_lock(&vmaster->lock, interruptible);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (pin)
+		vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+
+	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
+	if (unlikely(ret != 0))
+		goto err;
+
+	/**
+	 * Put BO in VRAM if there is space, otherwise as a GMR.
+	 * If there is no space in VRAM and GMR ids are all used up,
+	 * start evicting GMRs to make room. If the DMA buffer can't be
+	 * used as a GMR, this will return -ENOMEM.
+	 */
+
+	if (pin)
+		placement = &vmw_vram_gmr_ne_placement;
+	else
+		placement = &vmw_vram_gmr_placement;
+
+	ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+	if (likely(ret == 0) || ret == -ERESTARTSYS)
+		goto err_unreserve;
+
+
+	/**
+	 * If that failed, try VRAM again, this time evicting
+	 * previous contents.
+	 */
+
+	if (pin)
+		placement = &vmw_vram_ne_placement;
+	else
+		placement = &vmw_vram_placement;
+
+	ret = ttm_bo_validate(bo, placement, interruptible, false, false);
+
+err_unreserve:
+	ttm_bo_unreserve(bo);
+err:
+	ttm_write_unlock(&vmaster->lock);
+	return ret;
+}
+
+/**
+ * vmw_dmabuf_to_vram - Move a buffer to vram.
+ *
+ * May only be called by the current master since it assumes that the
+ * master lock is the current master's lock.
+ * This function takes the master's lock in write mode.
+ *
+ * @dev_priv:  Driver private.
+ * @buf:  DMA buffer to move.
+ * @pin:  Pin buffer in vram if true.
+ * @interruptible:  Use interruptible wait.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
+		       struct vmw_dma_buffer *buf,
+		       bool pin, bool interruptible)
+{
+	struct ttm_placement *placement;
+
+	if (pin)
+		placement = &vmw_vram_ne_placement;
+	else
+		placement = &vmw_vram_placement;
+
+	return vmw_dmabuf_to_placement(dev_priv, buf,
+				       placement,
+				       interruptible);
+}
+
+/**
+ * vmw_dmabuf_to_start_of_vram - Move a buffer to start of vram.
+ *
+ * May only be called by the current master since it assumes that the
+ * master lock is the current master's lock.
+ * This function takes the master's lock in write mode.
+ * Flushes and unpins the query bo if @pin == true to avoid failures.
+ *
+ * @dev_priv:  Driver private.
+ * @buf:  DMA buffer to move.
+ * @pin:  Pin buffer in vram if true.
+ * @interruptible:  Use interruptible wait.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_dmabuf_to_start_of_vram(struct vmw_private *dev_priv,
+				struct vmw_dma_buffer *buf,
+				bool pin, bool interruptible)
+{
+	struct vmw_master *vmaster = dev_priv->active_master;
+	struct ttm_buffer_object *bo = &buf->base;
+	struct ttm_placement placement;
+	int ret = 0;
+
+	if (pin)
+		placement = vmw_vram_ne_placement;
+	else
+		placement = vmw_vram_placement;
+	placement.lpfn = bo->num_pages;
+
+	ret = ttm_write_lock(&vmaster->lock, interruptible);
+	if (unlikely(ret != 0))
+		return ret;
+
+	if (pin)
+		vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
+
+	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
+	if (unlikely(ret != 0))
+		goto err_unlock;
+
+	/* Is this buffer already in vram but not at the start of it? */
+	if (bo->mem.mem_type == TTM_PL_VRAM &&
+	    bo->mem.start < bo->num_pages &&
+	    bo->mem.start > 0)
+		(void) ttm_bo_validate(bo, &vmw_sys_placement, false,
+				       false, false);
+
+	ret = ttm_bo_validate(bo, &placement, interruptible, false, false);
+
+	/* For some reason we didn't end up at the start of vram */
+	WARN_ON(ret == 0 && bo->offset != 0);
+
+	ttm_bo_unreserve(bo);
+err_unlock:
+	ttm_write_unlock(&vmaster->lock);
+
+	return ret;
+}
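
The lpfn clamp above is what forces the buffer to offset zero: a buffer of exactly num_pages pages can only be placed inside [0, num_pages), so its only legal start is page 0. A sketch of the general TTM idiom (illustrative values, not patch code):

	/* Hedged sketch: restrict a placement to the first N pages of VRAM. */
	struct ttm_placement placement = vmw_vram_ne_placement;

	placement.fpfn = 0;			/* lowest allowed page frame */
	placement.lpfn = bo->num_pages;		/* one past the highest allowed */
	ret = ttm_bo_validate(bo, &placement, false, false, false);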
+
+
+/**
+ * vmw_dmabuf_unpin - Unpin the given buffer without moving it.
+ *
+ * May only be called by the current master since it assumes that the
+ * master lock is the current master's lock.
+ * This function takes the master's lock in write mode.
+ *
+ * @dev_priv:  Driver private.
+ * @buf:  DMA buffer to unpin.
+ * @interruptible:  Use interruptible wait.
+ *
+ * Returns
+ * -ERESTARTSYS if interrupted by a signal.
+ */
+int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
+		     struct vmw_dma_buffer *buf,
+		     bool interruptible)
+{
+	/*
+	 * We could in theory early out if the buffer is
+	 * unpinned but we need to lock and reserve the buffer
+	 * anyway, so we don't gain much by that.
+	 */
+	return vmw_dmabuf_to_placement(dev_priv, buf,
+				       &vmw_evictable_placement,
+				       interruptible);
+}
+
+
+/**
+ * vmw_bo_get_guest_ptr - Get the guest ptr representing the current placement
+ * of a buffer.
+ *
+ * @bo: Pointer to a struct ttm_buffer_object. Must be pinned or reserved.
+ * @ptr: SVGAGuestPtr returning the result.
+ */
+void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *bo,
+			  SVGAGuestPtr *ptr)
+{
+	if (bo->mem.mem_type == TTM_PL_VRAM) {
+		ptr->gmrId = SVGA_GMR_FRAMEBUFFER;
+		ptr->offset = bo->offset;
+	} else {
+		ptr->gmrId = bo->mem.start;
+		ptr->offset = 0;
+	}
+}
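
A typical consumer of the returned SVGAGuestPtr is a GMRFB definition; a hedged sketch (the helper name is hypothetical; the SVGAFifoCmdDefineGMRFB.ptr field is from the SVGA headers):

	/* Sketch: point the device's GMRFB at a reserved or pinned buffer. */
	static void example_fill_gmrfb(struct ttm_buffer_object *bo,
				       SVGAFifoCmdDefineGMRFB *cmd)
	{
		/* VRAM yields SVGA_GMR_FRAMEBUFFER plus a byte offset;
		 * a real GMR yields its id with offset 0. */
		vmw_bo_get_guest_ptr(bo, &cmd->ptr);
	}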
+
+
+/**
+ * vmw_bo_pin - Pin or unpin a buffer object without moving it.
+ *
+ * @bo: The buffer object. Must be reserved, and present either in VRAM
+ * or GMR memory.
+ * @pin: Whether to pin or unpin.
+ *
+ */
+void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin)
+{
+	uint32_t pl_flags;
+	struct ttm_placement placement;
+	uint32_t old_mem_type = bo->mem.mem_type;
+	int ret;
+
+	BUG_ON(!atomic_read(&bo->reserved));
+	BUG_ON(old_mem_type != TTM_PL_VRAM &&
+	       old_mem_type != VMW_PL_FLAG_GMR);
+
+	pl_flags = TTM_PL_FLAG_VRAM | VMW_PL_FLAG_GMR | TTM_PL_FLAG_CACHED;
+	if (pin)
+		pl_flags |= TTM_PL_FLAG_NO_EVICT;
+
+	memset(&placement, 0, sizeof(placement));
+	placement.num_placement = 1;
+	placement.placement = &pl_flags;
+
+	ret = ttm_bo_validate(bo, &placement, false, true, true);
+
+	BUG_ON(ret != 0 || bo->mem.mem_type != old_mem_type);
+}
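
Because the single placement matches the buffer's current memory type, ttm_bo_validate() performs no move here; it only rewrites the accepted flags, effectively toggling TTM_PL_FLAG_NO_EVICT. A hedged usage sketch (the caller must hold the reservation, as the kernel-doc requires):

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (likely(ret == 0)) {
		vmw_bo_pin(bo, true);	/* pin in place, no move */
		/* ... short critical section ... */
		vmw_bo_pin(bo, false);	/* unpin */
		ttm_bo_unreserve(bo);
	}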

+ 136 - 21
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c

@@ -94,6 +94,12 @@
 #define DRM_IOCTL_VMW_FENCE_UNREF				\
 	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_FENCE_UNREF,		\
 		 struct drm_vmw_fence_arg)
+#define DRM_IOCTL_VMW_PRESENT					\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT,		\
+		 struct drm_vmw_present_arg)
+#define DRM_IOCTL_VMW_PRESENT_READBACK				\
+	DRM_IOW(DRM_COMMAND_BASE + DRM_VMW_PRESENT_READBACK,	\
+		 struct drm_vmw_present_readback_arg)
 
 
 /**
  * The core DRM version of this macro doesn't account for
@@ -146,6 +152,13 @@ static struct drm_ioctl_desc vmw_ioctls[] = {
 		      DRM_AUTH | DRM_UNLOCKED),
 	VMW_IOCTL_DEF(VMW_GET_3D_CAP, vmw_get_cap_3d_ioctl,
 		      DRM_AUTH | DRM_UNLOCKED),
+
+	/* These allow direct access to the framebuffers; mark as master only. */
+	VMW_IOCTL_DEF(VMW_PRESENT, vmw_present_ioctl,
+		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
+	VMW_IOCTL_DEF(VMW_PRESENT_READBACK,
+		      vmw_present_readback_ioctl,
+		      DRM_MASTER | DRM_AUTH | DRM_UNLOCKED),
 };
 
 static struct pci_device_id vmw_pci_id_list[] = {
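
The flag combination requested above is enforced by the DRM core before the handler runs. Roughly, as a simplified sketch of the core's checks in this era (an assumption about drm_ioctl(), not verbatim kernel code):

	/* Sketch: what DRM_AUTH | DRM_MASTER ask the DRM core to verify. */
	if ((flags & DRM_AUTH) && !file_priv->authenticated)
		return -EACCES;	/* client not authenticated against the master */
	if ((flags & DRM_MASTER) && !file_priv->is_master)
		return -EACCES;	/* only the current master may present/readback */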
@@ -200,6 +213,72 @@ static void vmw_print_capabilities(uint32_t capabilities)
 		DRM_INFO("  Screen Object 2.\n");
 		DRM_INFO("  Screen Object 2.\n");
 }
 }
 
 
+
+/**
+ * vmw_dummy_query_bo_prepare - Initialize a query result structure at
+ * the start of a buffer object.
+ *
+ * @dev_priv: The device private structure.
+ *
+ * This function will idle the buffer using an uninterruptible wait, then
+ * map the first page and initialize a pending occlusion query result structure.
+ * Finally it will unmap the buffer.
+ *
+ * TODO: Since we're only mapping a single page, we should optimize the map
+ * to use kmap_atomic / iomap_atomic.
+ */
+static void vmw_dummy_query_bo_prepare(struct vmw_private *dev_priv)
+{
+	struct ttm_bo_kmap_obj map;
+	volatile SVGA3dQueryResult *result;
+	bool dummy;
+	int ret;
+	struct ttm_bo_device *bdev = &dev_priv->bdev;
+	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
+
+	ttm_bo_reserve(bo, false, false, false, 0);
+	spin_lock(&bdev->fence_lock);
+	ret = ttm_bo_wait(bo, false, false, false, TTM_USAGE_READWRITE);
+	spin_unlock(&bdev->fence_lock);
+	if (unlikely(ret != 0))
+		(void) vmw_fallback_wait(dev_priv, false, true, 0, false,
+					 10*HZ);
+
+	ret = ttm_bo_kmap(bo, 0, 1, &map);
+	if (likely(ret == 0)) {
+		result = ttm_kmap_obj_virtual(&map, &dummy);
+		result->totalSize = sizeof(*result);
+		result->state = SVGA3D_QUERYSTATE_PENDING;
+		result->result32 = 0xff;
+		ttm_bo_kunmap(&map);
+	} else
+		DRM_ERROR("Dummy query buffer map failed.\n");
+	ttm_bo_unreserve(bo);
+}
+
+
+/**
+ * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
+ *
+ * @dev_priv: A device private structure.
+ *
+ * This function creates a small buffer object that holds the query
+ * result for dummy queries emitted as query barriers.
+ * No interruptible waits are done within this function.
+ *
+ * Returns an error if bo creation fails.
+ */
+static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
+{
+	return ttm_bo_create(&dev_priv->bdev,
+			     PAGE_SIZE,
+			     ttm_bo_type_device,
+			     &vmw_vram_sys_placement,
+			     0, 0, false, NULL,
+			     &dev_priv->dummy_query_bo);
+}
+
+
 static int vmw_request_device(struct vmw_private *dev_priv)
 {
 	int ret;
@@ -210,12 +289,29 @@ static int vmw_request_device(struct vmw_private *dev_priv)
 		return ret;
 	}
 	vmw_fence_fifo_up(dev_priv->fman);
+	ret = vmw_dummy_query_bo_create(dev_priv);
+	if (unlikely(ret != 0))
+		goto out_no_query_bo;
+	vmw_dummy_query_bo_prepare(dev_priv);
 
 
 	return 0;
+
+out_no_query_bo:
+	vmw_fence_fifo_down(dev_priv->fman);
+	vmw_fifo_release(dev_priv, &dev_priv->fifo);
+	return ret;
 }
 
 static void vmw_release_device(struct vmw_private *dev_priv)
 {
+	/*
+	 * Previous destructions should've released
+	 * the pinned bo.
+	 */
+
+	BUG_ON(dev_priv->pinned_bo != NULL);
+
+	ttm_bo_unref(&dev_priv->dummy_query_bo);
 	vmw_fence_fifo_down(dev_priv->fman);
 	vmw_fifo_release(dev_priv, &dev_priv->fifo);
 }
@@ -306,6 +402,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	init_waitqueue_head(&dev_priv->fifo_queue);
 	dev_priv->fence_queue_waiters = 0;
 	atomic_set(&dev_priv->fifo_queue_waiters, 0);
+	INIT_LIST_HEAD(&dev_priv->surface_lru);
+	dev_priv->used_memory_size = 0;
 
 
 	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
 	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
@@ -326,6 +424,10 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 
 	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
 
+	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
+	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
+	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
+	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
 	if (dev_priv->capabilities & SVGA_CAP_GMR) {
 		dev_priv->max_gmr_descriptors =
 			vmw_read(dev_priv,
@@ -338,13 +440,15 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
 		dev_priv->memory_size =
 			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
+		dev_priv->memory_size -= dev_priv->vram_size;
+	} else {
+		/*
+		 * An arbitrary limit of 512MiB on surface
+		 * memory. But all HWV8 hardware supports GMR2.
+		 */
+		dev_priv->memory_size = 512*1024*1024;
 	}
 
-	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
-	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
-	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
-	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);
-
 	mutex_unlock(&dev_priv->hw_mutex);
 
 	vmw_print_capabilities(dev_priv->capabilities);
@@ -358,8 +462,8 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
 		DRM_INFO("Max number of GMR pages is %u\n",
 			 (unsigned)dev_priv->max_gmr_pages);
-		DRM_INFO("Max dedicated hypervisor graphics memory is %u\n",
-			 (unsigned)dev_priv->memory_size);
+		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
+			 (unsigned)dev_priv->memory_size / 1024);
 	}
 	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
 		 dev_priv->vram_start, dev_priv->vram_size / 1024);
@@ -451,22 +555,30 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	dev_priv->fman = vmw_fence_manager_init(dev_priv);
 	if (unlikely(dev_priv->fman == NULL))
 		goto out_no_fman;
+
+	/* Need to start the fifo to check if we can do screen objects */
+	ret = vmw_3d_resource_inc(dev_priv, true);
+	if (unlikely(ret != 0))
+		goto out_no_fifo;
+	vmw_kms_save_vga(dev_priv);
+
+	/* Start kms and overlay systems, needs fifo. */
 	ret = vmw_kms_init(dev_priv);
 	if (unlikely(ret != 0))
 		goto out_no_kms;
 	vmw_overlay_init(dev_priv);
+
+	/* 3D Depends on Screen Objects being used. */
+	DRM_INFO("Detected %sdevice 3D availability.\n",
+		 vmw_fifo_have_3d(dev_priv) ?
+		 "" : "no ");
+
+	/* We might be done with the fifo now */
 	if (dev_priv->enable_fb) {
-		ret = vmw_3d_resource_inc(dev_priv, false);
-		if (unlikely(ret != 0))
-			goto out_no_fifo;
-		vmw_kms_save_vga(dev_priv);
 		vmw_fb_init(dev_priv);
-		DRM_INFO("%s", vmw_fifo_have_3d(dev_priv) ?
-			 "Detected device 3D availability.\n" :
-			 "Detected no device 3D availability.\n");
 	} else {
-		DRM_INFO("Delayed 3D detection since we're not "
-			 "running the device in SVGA mode yet.\n");
+		vmw_kms_restore_vga(dev_priv);
+		vmw_3d_resource_dec(dev_priv, true);
 	}
 
 	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
@@ -483,15 +595,17 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 	return 0;
 
 out_no_irq:
-	if (dev_priv->enable_fb) {
+	if (dev_priv->enable_fb)
 		vmw_fb_close(dev_priv);
+	vmw_overlay_close(dev_priv);
+	vmw_kms_close(dev_priv);
+out_no_kms:
+	/* We still have a 3D resource reference held */
+	if (dev_priv->enable_fb) {
 		vmw_kms_restore_vga(dev_priv);
 		vmw_3d_resource_dec(dev_priv, false);
 	}
 out_no_fifo:
-	vmw_overlay_close(dev_priv);
-	vmw_kms_close(dev_priv);
-out_no_kms:
 	vmw_fence_manager_takedown(dev_priv->fman);
 out_no_fman:
 	if (dev_priv->stealth)
@@ -771,7 +885,7 @@ static void vmw_master_drop(struct drm_device *dev,
 
 	vmw_fp->locked_master = drm_master_get(file_priv->master);
 	ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
-	vmw_kms_idle_workqueues(vmaster);
+	vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
 
 
 	if (unlikely((ret != 0))) {
 		DRM_ERROR("Unable to lock TTM at VT switch.\n");
@@ -823,6 +937,7 @@ static int vmwgfx_pm_notifier(struct notifier_block *nb, unsigned long val,
 		 * This empties VRAM and unbinds all GMR bindings.
 		 * Buffer contents are moved to swappable memory.
 		 */
+		vmw_execbuf_release_pinned_bo(dev_priv, false, 0);
 		ttm_bo_swapout_all(&dev_priv->bdev);
 
 		break;

+ 106 - 9
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h

@@ -40,9 +40,9 @@
 #include "ttm/ttm_module.h"
 #include "vmwgfx_fence.h"
 
-#define VMWGFX_DRIVER_DATE "20110901"
+#define VMWGFX_DRIVER_DATE "20110927"
 #define VMWGFX_DRIVER_MAJOR 2
-#define VMWGFX_DRIVER_MINOR 0
+#define VMWGFX_DRIVER_MINOR 1
 #define VMWGFX_DRIVER_PATCHLEVEL 0
 #define VMWGFX_FILE_PAGE_OFFSET 0x00100000
 #define VMWGFX_FIFO_STATIC_SIZE (1024*1024)
@@ -79,9 +79,11 @@ struct vmw_resource {
 	int id;
 	enum ttm_object_type res_type;
 	bool avail;
+	void (*remove_from_lists) (struct vmw_resource *res);
 	void (*hw_destroy) (struct vmw_resource *res);
 	void (*res_free) (struct vmw_resource *res);
-	bool on_validate_list;
+	struct list_head validate_head;
+	struct list_head query_head; /* Protected by the cmdbuf mutex */
 	/* TODO is a generic snooper needed? */
 #if 0
 	void (*snoop)(struct vmw_resource *res,
@@ -97,8 +99,12 @@ struct vmw_cursor_snooper {
 	uint32_t *image;
 };
 
+struct vmw_framebuffer;
+struct vmw_surface_offset;
+
 struct vmw_surface {
 	struct vmw_resource res;
+	struct list_head lru_head; /* Protected by the resource lock */
 	uint32_t flags;
 	uint32_t format;
 	uint32_t mip_levels[DRM_VMW_MAX_SURFACE_FACES];
@@ -109,6 +115,9 @@ struct vmw_surface {
 
 	/* TODO so far just an extra pointer */
 	struct vmw_cursor_snooper snooper;
+	struct ttm_buffer_object *backup;
+	struct vmw_surface_offset *offsets;
+	uint32_t backup_size;
 };
 
 struct vmw_marker_queue {
@@ -139,6 +148,8 @@ struct vmw_sw_context{
 	struct ida bo_list;
 	uint32_t last_cid;
 	bool cid_valid;
+	bool kernel; /**< is the call made from the kernel */
+	struct vmw_resource *cur_ctx;
 	uint32_t last_sid;
 	uint32_t sid_translation;
 	bool sid_valid;
@@ -150,8 +161,12 @@ struct vmw_sw_context{
 	uint32_t cur_val_buf;
 	uint32_t *cmd_bounce;
 	uint32_t cmd_bounce_size;
-	struct vmw_resource *resources[VMWGFX_MAX_VALIDATIONS];
-	uint32_t num_ref_resources;
+	struct list_head resource_list;
+	uint32_t fence_flags;
+	struct list_head query_list;
+	struct ttm_buffer_object *cur_query_bo;
+	uint32_t cur_query_cid;
+	bool query_cid_valid;
 };
 
 struct vmw_legacy_display;
@@ -216,6 +231,7 @@ struct vmw_private {
 
 	void *fb_info;
 	struct vmw_legacy_display *ldu_priv;
+	struct vmw_screen_object_display *sou_priv;
 	struct vmw_overlay *overlay_priv;
 
 	/*
@@ -290,6 +306,26 @@ struct vmw_private {
 
 	struct mutex release_mutex;
 	uint32_t num_3d_resources;
+
+	/*
+	 * Query processing. These members
+	 * are protected by the cmdbuf mutex.
+	 */
+
+	struct ttm_buffer_object *dummy_query_bo;
+	struct ttm_buffer_object *pinned_bo;
+	uint32_t query_cid;
+	bool dummy_query_bo_pinned;
+
+	/*
+	 * Surface swapping. The "surface_lru" list is protected by the
+	 * resource lock in order to be able to destroy a surface and take
+	 * it off the lru atomically. "used_memory_size" is currently
+	 * protected by the cmdbuf mutex for simplicity.
+	 */
+
+	struct list_head surface_lru;
+	uint32_t used_memory_size;
 };
 
 static inline struct vmw_private *vmw_priv(struct drm_device *dev)
@@ -369,6 +405,8 @@ extern int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
 extern int vmw_surface_check(struct vmw_private *dev_priv,
 			     struct ttm_object_file *tfile,
 			     uint32_t handle, int *id);
+extern int vmw_surface_validate(struct vmw_private *dev_priv,
+				struct vmw_surface *srf);
 extern void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo);
 extern int vmw_dmabuf_init(struct vmw_private *dev_priv,
 			   struct vmw_dma_buffer *vmw_bo,
@@ -384,10 +422,6 @@ extern uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
 extern void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo);
 extern int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
 				  uint32_t id, struct vmw_dma_buffer **out);
-extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
-				       struct vmw_dma_buffer *bo);
-extern int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
-				struct vmw_dma_buffer *bo);
 extern int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 				  struct drm_file *file_priv);
 extern int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
@@ -396,7 +430,30 @@ extern int vmw_user_stream_lookup(struct vmw_private *dev_priv,
 				  struct ttm_object_file *tfile,
 				  uint32_t *inout_id,
 				  struct vmw_resource **out);
+extern void vmw_resource_unreserve(struct list_head *list);
 
 
+/**
+ * DMA buffer helper routines - vmwgfx_dmabuf.c
+ */
+extern int vmw_dmabuf_to_placement(struct vmw_private *vmw_priv,
+				   struct vmw_dma_buffer *bo,
+				   struct ttm_placement *placement,
+				   bool interruptible);
+extern int vmw_dmabuf_to_vram(struct vmw_private *dev_priv,
+			      struct vmw_dma_buffer *buf,
+			      bool pin, bool interruptible);
+extern int vmw_dmabuf_to_vram_or_gmr(struct vmw_private *dev_priv,
+				     struct vmw_dma_buffer *buf,
+				     bool pin, bool interruptible);
+extern int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
+				       struct vmw_dma_buffer *bo,
+				       bool pin, bool interruptible);
+extern int vmw_dmabuf_unpin(struct vmw_private *vmw_priv,
+			    struct vmw_dma_buffer *bo,
+			    bool interruptible);
+extern void vmw_bo_get_guest_ptr(const struct ttm_buffer_object *buf,
+				 SVGAGuestPtr *ptr);
+extern void vmw_bo_pin(struct ttm_buffer_object *bo, bool pin);
 
 
 /**
  * Misc Ioctl functionality - vmwgfx_ioctl.c
@@ -406,6 +463,10 @@ extern int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 			      struct drm_file *file_priv);
 extern int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
 				struct drm_file *file_priv);
+extern int vmw_present_ioctl(struct drm_device *dev, void *data,
+			     struct drm_file *file_priv);
+extern int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
+				      struct drm_file *file_priv);
 
 
 /**
  * Fifo utilities - vmwgfx_fifo.c
@@ -422,6 +483,8 @@ extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
 extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
 extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
 extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
+extern int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
+				     uint32_t cid);
 
 
 /**
  * TTM glue - vmwgfx_ttm_glue.c
@@ -439,7 +502,10 @@ extern struct ttm_placement vmw_vram_placement;
 extern struct ttm_placement vmw_vram_ne_placement;
 extern struct ttm_placement vmw_vram_sys_placement;
 extern struct ttm_placement vmw_vram_gmr_placement;
+extern struct ttm_placement vmw_vram_gmr_ne_placement;
 extern struct ttm_placement vmw_sys_placement;
+extern struct ttm_placement vmw_evictable_placement;
+extern struct ttm_placement vmw_srf_placement;
 extern struct ttm_bo_driver vmw_bo_driver;
 extern int vmw_dma_quiescent(struct drm_device *dev);
 
@@ -449,6 +515,24 @@ extern int vmw_dma_quiescent(struct drm_device *dev);
 
 extern int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 			     struct drm_file *file_priv);
+extern int vmw_execbuf_process(struct drm_file *file_priv,
+			       struct vmw_private *dev_priv,
+			       void __user *user_commands,
+			       void *kernel_commands,
+			       uint32_t command_size,
+			       uint64_t throttle_us,
+			       struct drm_vmw_fence_rep __user
+			       *user_fence_rep);
+
+extern void
+vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
+			      bool only_on_cid_match, uint32_t cid);
+
+extern int vmw_execbuf_fence_commands(struct drm_file *file_priv,
+				      struct vmw_private *dev_priv,
+				      struct vmw_fence_obj **p_fence,
+				      uint32_t *p_handle);
+
 
 
 /**
  * IRQs and waiting - vmwgfx_irq.c
@@ -520,6 +604,19 @@ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
 				uint32_t pitch,
 				uint32_t height);
 u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc);
+int vmw_kms_present(struct vmw_private *dev_priv,
+		    struct drm_file *file_priv,
+		    struct vmw_framebuffer *vfb,
+		    struct vmw_surface *surface,
+		    uint32_t sid, int32_t destX, int32_t destY,
+		    struct drm_vmw_rect *clips,
+		    uint32_t num_clips);
+int vmw_kms_readback(struct vmw_private *dev_priv,
+		     struct drm_file *file_priv,
+		     struct vmw_framebuffer *vfb,
+		     struct drm_vmw_fence_rep __user *user_fence_rep,
+		     struct drm_vmw_rect *clips,
+		     uint32_t num_clips);
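
These are the kms-side workers behind the new present and readback ioctls; a hedged sketch of the expected plumbing (argument gathering omitted, variable names assumed):

	/* Sketch: vmw_present_ioctl() is expected to end up here, roughly. */
	ret = vmw_kms_present(dev_priv, file_priv, vfb, surface, sid,
			      destX, destY, clips, num_clips);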
 
 
 /**
  * Overlay control - vmwgfx_overlay.c

+ 561 - 105
drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c

@@ -44,28 +44,64 @@ static int vmw_cmd_ok(struct vmw_private *dev_priv,
 	return 0;
 }
 
-
-static int vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
-					 struct vmw_resource **p_res)
+static void vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
+					  struct vmw_resource **p_res)
 {
-	int ret = 0;
 	struct vmw_resource *res = *p_res;
 
-	if (!res->on_validate_list) {
-		if (sw_context->num_ref_resources >= VMWGFX_MAX_VALIDATIONS) {
-			DRM_ERROR("Too many resources referenced in "
-				  "command stream.\n");
-			ret = -ENOMEM;
-			goto out;
-		}
-		sw_context->resources[sw_context->num_ref_resources++] = res;
-		res->on_validate_list = true;
-		return 0;
+	if (list_empty(&res->validate_head)) {
+		list_add_tail(&res->validate_head, &sw_context->resource_list);
+		*p_res = NULL;
+	} else
+		vmw_resource_unreference(p_res);
+}
+
+/**
+ * vmw_bo_to_validate_list - add a bo to a validate list
+ *
+ * @sw_context: The software context used for this command submission batch.
+ * @bo: The buffer object to add.
+ * @fence_flags: Fence flags to be or'ed with any other fence flags for
+ * this buffer on this submission batch.
+ * @p_val_node: If non-NULL Will be updated with the validate node number
+ * on return.
+ *
+ * Returns -EINVAL if the limit of number of buffer objects per command
+ * submission is reached.
+ */
+static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
+				   struct ttm_buffer_object *bo,
+				   uint32_t fence_flags,
+				   uint32_t *p_val_node)
+{
+	uint32_t val_node;
+	struct ttm_validate_buffer *val_buf;
+
+	val_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
+
+	if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
+		DRM_ERROR("Max number of DMA buffers per submission"
+			  " exceeded.\n");
+		return -EINVAL;
 	}
 
-out:
-	vmw_resource_unreference(p_res);
-	return ret;
+	val_buf = &sw_context->val_bufs[val_node];
+	if (unlikely(val_node == sw_context->cur_val_buf)) {
+		val_buf->new_sync_obj_arg = NULL;
+		val_buf->bo = ttm_bo_reference(bo);
+		val_buf->usage = TTM_USAGE_READWRITE;
+		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
+		++sw_context->cur_val_buf;
+	}
+
+	val_buf->new_sync_obj_arg = (void *)
+		((unsigned long) val_buf->new_sync_obj_arg | fence_flags);
+	sw_context->fence_flags |= fence_flags;
+
+	if (p_val_node)
+		*p_val_node = val_node;
+
+	return 0;
 }
 
 static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
@@ -94,7 +130,10 @@ static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
 
 	sw_context->last_cid = cmd->cid;
 	sw_context->cid_valid = true;
-	return vmw_resource_to_validate_list(sw_context, &ctx);
+	sw_context->cur_ctx = ctx;
+	vmw_resource_to_validate_list(sw_context, &ctx);
+
+	return 0;
 }
 
 static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
@@ -114,7 +153,8 @@ static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
 		return 0;
 	}
 
-	ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
+	ret = vmw_user_surface_lookup_handle(dev_priv,
+					     sw_context->tfile,
 					     *sid, &srf);
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("Could not find or use surface 0x%08x "
@@ -124,13 +164,23 @@ static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
 		return ret;
 	}
 
+	ret = vmw_surface_validate(dev_priv, srf);
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Could not validate surface.\n");
+		vmw_surface_unreference(&srf);
+		return ret;
+	}
+
 	sw_context->last_sid = *sid;
 	sw_context->sid_valid = true;
 	sw_context->sid_translation = srf->res.id;
 	*sid = sw_context->sid_translation;
 
 	res = &srf->res;
-	return vmw_resource_to_validate_list(sw_context, &res);
+	vmw_resource_to_validate_list(sw_context, &res);
+
+	return 0;
 }
 
 
@@ -197,6 +247,12 @@ static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
 	} *cmd;
 
 	cmd = container_of(header, struct vmw_sid_cmd, header);
+
+	if (unlikely(!sw_context->kernel)) {
+		DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
+		return -EPERM;
+	}
+
 	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
 }
 
@@ -209,10 +265,179 @@ static int vmw_cmd_present_check(struct vmw_private *dev_priv,
 		SVGA3dCmdPresent body;
 	} *cmd;
 
+
 	cmd = container_of(header, struct vmw_sid_cmd, header);
+
+	if (unlikely(!sw_context->kernel)) {
+		DRM_ERROR("Kernel only SVGA3d command: %u.\n", cmd->header.id);
+		return -EPERM;
+	}
+
 	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
 }
 
+/**
+ * vmw_query_bo_switch_prepare - Prepare to switch pinned buffer for queries.
+ *
+ * @dev_priv: The device private structure.
+ * @cid: The hardware context for the next query.
+ * @new_query_bo: The new buffer holding query results.
+ * @sw_context: The software context used for this command submission.
+ *
+ * This function checks whether @new_query_bo is suitable for holding
+ * query results, and if another buffer currently is pinned for query
+ * results. If so, the function prepares the state of @sw_context for
+ * switching pinned buffers after successful submission of the current
+ * command batch. It also checks whether we're using a new query context.
+ * In that case, it makes sure we emit a query barrier for the old
+ * context before the current query buffer is fenced.
+ */
+static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv,
+				       uint32_t cid,
+				       struct ttm_buffer_object *new_query_bo,
+				       struct vmw_sw_context *sw_context)
+{
+	int ret;
+	bool add_cid = false;
+	uint32_t cid_to_add;
+
+	if (unlikely(new_query_bo != sw_context->cur_query_bo)) {
+
+		if (unlikely(new_query_bo->num_pages > 4)) {
+			DRM_ERROR("Query buffer too large.\n");
+			return -EINVAL;
+		}
+
+		if (unlikely(sw_context->cur_query_bo != NULL)) {
+			BUG_ON(!sw_context->query_cid_valid);
+			add_cid = true;
+			cid_to_add = sw_context->cur_query_cid;
+			ret = vmw_bo_to_validate_list(sw_context,
+						      sw_context->cur_query_bo,
+						      DRM_VMW_FENCE_FLAG_EXEC,
+						      NULL);
+			if (unlikely(ret != 0))
+				return ret;
+		}
+		sw_context->cur_query_bo = new_query_bo;
+
+		ret = vmw_bo_to_validate_list(sw_context,
+					      dev_priv->dummy_query_bo,
+					      DRM_VMW_FENCE_FLAG_EXEC,
+					      NULL);
+		if (unlikely(ret != 0))
+			return ret;
+
+	}
+
+	if (unlikely(cid != sw_context->cur_query_cid &&
+		     sw_context->query_cid_valid)) {
+		add_cid = true;
+		cid_to_add = sw_context->cur_query_cid;
+	}
+
+	sw_context->cur_query_cid = cid;
+	sw_context->query_cid_valid = true;
+
+	if (add_cid) {
+		struct vmw_resource *ctx = sw_context->cur_ctx;
+
+		if (list_empty(&ctx->query_head))
+			list_add_tail(&ctx->query_head,
+				      &sw_context->query_list);
+		ret = vmw_bo_to_validate_list(sw_context,
+					      dev_priv->dummy_query_bo,
+					      DRM_VMW_FENCE_FLAG_EXEC,
+					      NULL);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+	return 0;
+}
+
+
+/**
+ * vmw_query_bo_switch_commit - Finalize switching pinned query buffer
+ *
+ * @dev_priv: The device private structure.
+ * @sw_context: The software context used for this command submission batch.
+ *
+ * This function will check if we're switching query buffers, and will then,
+ * if no other query waits are issued in this command submission batch,
+ * issue a dummy occlusion query wait used as a query barrier. When the fence
+ * object following that query wait has signaled, we are sure that all
+ * preceding queries have finished, and the old query buffer can be unpinned.
+ * However, since both the new query buffer and the old one are fenced with
+ * that fence, we can do an asynchronous unpin now, and be sure that the
+ * old query buffer won't be moved until the fence has signaled.
+ *
+ * As mentioned above, both the new - and old query buffers need to be fenced
+ * using a sequence emitted *after* calling this function.
+ */
+static void vmw_query_bo_switch_commit(struct vmw_private *dev_priv,
+				     struct vmw_sw_context *sw_context)
+{
+
+	struct vmw_resource *ctx, *next_ctx;
+	int ret;
+
+	/*
+	 * The validate list should still hold references to all
+	 * contexts here.
+	 */
+
+	list_for_each_entry_safe(ctx, next_ctx, &sw_context->query_list,
+				 query_head) {
+		list_del_init(&ctx->query_head);
+
+		BUG_ON(list_empty(&ctx->validate_head));
+
+		ret = vmw_fifo_emit_dummy_query(dev_priv, ctx->id);
+
+		if (unlikely(ret != 0))
+			DRM_ERROR("Out of fifo space for dummy query.\n");
+	}
+
+	if (dev_priv->pinned_bo != sw_context->cur_query_bo) {
+		if (dev_priv->pinned_bo) {
+			vmw_bo_pin(dev_priv->pinned_bo, false);
+			ttm_bo_unref(&dev_priv->pinned_bo);
+		}
+
+		vmw_bo_pin(sw_context->cur_query_bo, true);
+
+		/*
+		 * We pin also the dummy_query_bo buffer so that we
+		 * don't need to validate it when emitting
+		 * dummy queries in context destroy paths.
+		 */
+
+		vmw_bo_pin(dev_priv->dummy_query_bo, true);
+		dev_priv->dummy_query_bo_pinned = true;
+
+		dev_priv->query_cid = sw_context->cur_query_cid;
+		dev_priv->pinned_bo =
+			ttm_bo_reference(sw_context->cur_query_bo);
+	}
+}
+
+/**
+ * vmw_query_switch_backoff - clear query barrier list
+ * @sw_context: The sw context used for this submission batch.
+ *
+ * This function is used as part of an error path, where a previously
+ * set up list of query barriers needs to be cleared.
+ *
+ */
+static void vmw_query_switch_backoff(struct vmw_sw_context *sw_context)
+{
+	struct list_head *list, *next;
+
+	list_for_each_safe(list, next, &sw_context->query_list) {
+		list_del_init(list);
+	}
+}
+
 static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 				   struct vmw_sw_context *sw_context,
 				   SVGAGuestPtr *ptr,
@@ -222,8 +447,6 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 	struct ttm_buffer_object *bo;
 	uint32_t handle = ptr->gmrId;
 	struct vmw_relocation *reloc;
-	uint32_t cur_validate_node;
-	struct ttm_validate_buffer *val_buf;
 	int ret;
 
 	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
@@ -243,23 +466,11 @@ static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
 	reloc = &sw_context->relocs[sw_context->cur_reloc++];
 	reloc->location = ptr;
 
-	cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
-	if (unlikely(cur_validate_node >= VMWGFX_MAX_VALIDATIONS)) {
-		DRM_ERROR("Max number of DMA buffers per submission"
-			  " exceeded.\n");
-		ret = -EINVAL;
+	ret = vmw_bo_to_validate_list(sw_context, bo, DRM_VMW_FENCE_FLAG_EXEC,
+				      &reloc->index);
+	if (unlikely(ret != 0))
 		goto out_no_reloc;
-	}
 
 
-	reloc->index = cur_validate_node;
-	if (unlikely(cur_validate_node == sw_context->cur_val_buf)) {
-		val_buf = &sw_context->val_bufs[cur_validate_node];
-		val_buf->bo = ttm_bo_reference(bo);
-		val_buf->usage = TTM_USAGE_READWRITE;
-		val_buf->new_sync_obj_arg = (void *) DRM_VMW_FENCE_FLAG_EXEC;
-		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
-		++sw_context->cur_val_buf;
-	}
 	*vmw_bo_p = vmw_bo;
 	return 0;
 
@@ -291,8 +502,11 @@ static int vmw_cmd_end_query(struct vmw_private *dev_priv,
 	if (unlikely(ret != 0))
 		return ret;
 
+	ret = vmw_query_bo_switch_prepare(dev_priv, cmd->q.cid,
+					  &vmw_bo->base, sw_context);
+
 	vmw_dmabuf_unreference(&vmw_bo);
-	return 0;
+	return ret;
 }
 
 static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
@@ -305,6 +519,7 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
 		SVGA3dCmdWaitForQuery q;
 	} *cmd;
 	int ret;
+	struct vmw_resource *ctx;
 
 
 	cmd = container_of(header, struct vmw_query_cmd, header);
 	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
@@ -318,6 +533,16 @@ static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
 		return ret;
 
 	vmw_dmabuf_unreference(&vmw_bo);
+
+	/*
+	 * This wait will act as a barrier for previous waits for this
+	 * context.
+	 */
+
+	ctx = sw_context->cur_ctx;
+	if (!list_empty(&ctx->query_head))
+		list_del_init(&ctx->query_head);
+
 	return 0;
 }
 
@@ -350,6 +575,13 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 		goto out_no_reloc;
 	}
 
+	ret = vmw_surface_validate(dev_priv, srf);
+	if (unlikely(ret != 0)) {
+		if (ret != -ERESTARTSYS)
+			DRM_ERROR("Could not validate surface.\n");
+		goto out_no_validate;
+	}
+
 	/*
 	 * Patch command stream with device SID.
 	 */
@@ -359,8 +591,12 @@ static int vmw_cmd_dma(struct vmw_private *dev_priv,
 	vmw_dmabuf_unreference(&vmw_bo);
 
 	res = &srf->res;
-	return vmw_resource_to_validate_list(sw_context, &res);
+	vmw_resource_to_validate_list(sw_context, &res);
 
 
+	return 0;
+
+out_no_validate:
+	vmw_surface_unreference(&srf);
 out_no_reloc:
 	vmw_dmabuf_unreference(&vmw_bo);
 	return ret;
@@ -450,6 +686,71 @@ static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
 	return 0;
 }
 
+static int vmw_cmd_check_define_gmrfb(struct vmw_private *dev_priv,
+				      struct vmw_sw_context *sw_context,
+				      void *buf)
+{
+	struct vmw_dma_buffer *vmw_bo;
+	int ret;
+
+	struct {
+		uint32_t header;
+		SVGAFifoCmdDefineGMRFB body;
+	} *cmd = buf;
+
+	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
+				      &cmd->body.ptr,
+				      &vmw_bo);
+	if (unlikely(ret != 0))
+		return ret;
+
+	vmw_dmabuf_unreference(&vmw_bo);
+
+	return ret;
+}
+
+static int vmw_cmd_check_not_3d(struct vmw_private *dev_priv,
+				struct vmw_sw_context *sw_context,
+				void *buf, uint32_t *size)
+{
+	uint32_t size_remaining = *size;
+	uint32_t cmd_id;
+
+	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
+	switch (cmd_id) {
+	case SVGA_CMD_UPDATE:
+		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdUpdate);
+		break;
+	case SVGA_CMD_DEFINE_GMRFB:
+		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdDefineGMRFB);
+		break;
+	case SVGA_CMD_BLIT_GMRFB_TO_SCREEN:
+		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitGMRFBToScreen);
+		break;
+	case SVGA_CMD_BLIT_SCREEN_TO_GMRFB:
+		*size = sizeof(uint32_t) + sizeof(SVGAFifoCmdBlitScreenToGMRFB);
+		break;
+	default:
+		DRM_ERROR("Unsupported SVGA command: %u.\n", cmd_id);
+		return -EINVAL;
+	}
+
+	if (*size > size_remaining) {
+		DRM_ERROR("Invalid SVGA command (size mismatch):"
+			  " %u.\n", cmd_id);
+		return -EINVAL;
+	}
+
+	if (unlikely(!sw_context->kernel)) {
+		DRM_ERROR("Kernel only SVGA command: %u.\n", cmd_id);
+		return -EPERM;
+	}
+
+	if (cmd_id == SVGA_CMD_DEFINE_GMRFB)
+		return vmw_cmd_check_define_gmrfb(dev_priv, sw_context, buf);
+
+	return 0;
+}
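
Each checker fills in *size so the caller can walk a packed command buffer; a sketch of that consuming loop (a simplified form of vmw_cmd_check_all() shown further below):

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}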
 
 
 typedef int (*vmw_cmd_func) (struct vmw_private *,
 			     struct vmw_sw_context *,
@@ -502,11 +803,11 @@ static int vmw_cmd_check(struct vmw_private *dev_priv,
 	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
 	int ret;
 
-	cmd_id = ((uint32_t *)buf)[0];
-	if (cmd_id == SVGA_CMD_UPDATE) {
-		*size = 5 << 2;
-		return 0;
-	}
+	cmd_id = le32_to_cpu(((uint32_t *)buf)[0]);
+	/* Handle any non-3D commands. */
+	if (unlikely(cmd_id < SVGA_CMD_MAX))
+		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
+
 
 
 	cmd_id = le32_to_cpu(header->id);
 	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);
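
The routing rule above relies on the SVGA command numbering: plain fifo command ids sit below SVGA_CMD_MAX, while 3D commands start far above it, so one range check dispatches each packet. Illustrative sketch (the id ordering is an assumption taken from the SVGA headers):

	cmd_id = le32_to_cpu(((uint32_t *) buf)[0]);
	if (cmd_id < SVGA_CMD_MAX)	/* e.g. SVGA_CMD_UPDATE */
		return vmw_cmd_check_not_3d(dev_priv, sw_context, buf, size);
	/* otherwise fall through to the SVGA3dCmdHeader path */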
@@ -531,9 +832,9 @@ out_err:
 
 static int vmw_cmd_check_all(struct vmw_private *dev_priv,
 			     struct vmw_sw_context *sw_context,
+			     void *buf,
 			     uint32_t size)
 {
-	void *buf = sw_context->cmd_bounce;
 	int32_t cur_size = size;
 	int ret;
 
@@ -582,7 +883,7 @@ static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
 static void vmw_clear_validations(struct vmw_sw_context *sw_context)
 {
 	struct ttm_validate_buffer *entry, *next;
-	uint32_t i = sw_context->num_ref_resources;
+	struct vmw_resource *res, *res_next;
 
 
 	/*
 	 * Drop references to DMA buffers held during command submission.
@@ -599,9 +900,11 @@ static void vmw_clear_validations(struct vmw_sw_context *sw_context)
 	/*
 	 * Drop references to resources held during command submission.
 	 */
-	while (i-- > 0) {
-		sw_context->resources[i]->on_validate_list = false;
-		vmw_resource_unreference(&sw_context->resources[i]);
+	vmw_resource_unreserve(&sw_context->resource_list);
+	list_for_each_entry_safe(res, res_next, &sw_context->resource_list,
+				 validate_head) {
+		list_del_init(&res->validate_head);
+		vmw_resource_unreference(&res);
 	}
 }
 
@@ -610,6 +913,16 @@ static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
 {
 	int ret;
 
+
+	/*
+	 * Don't validate pinned buffers.
+	 */
+
+	if (bo == dev_priv->pinned_bo ||
+	    (bo == dev_priv->dummy_query_bo &&
+	     dev_priv->dummy_query_bo_pinned))
+		return 0;
+
 	/**
 	 * Put BO in VRAM if there is space, otherwise as a GMR.
 	 * If there is no space in VRAM and GMR ids are all used up,
@@ -681,6 +994,9 @@ static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
  * Creates a fence object and submits a command stream marker.
  * If this fails for some reason, we sync the fifo and return NULL.
  * It is then safe to fence buffers with a NULL pointer.
+ *
+ * If @p_handle is not NULL @file_priv must also not be NULL. Creates
+ * a userspace handle if @p_handle is not NULL, otherwise not.
  */
  */
 
 int vmw_execbuf_fence_commands(struct drm_file *file_priv,
 	int ret;
 	int ret;
 	bool synced = false;
 
+	BUG_ON(p_handle != NULL && file_priv == NULL);
 
 
 	ret = vmw_fifo_send_fence(dev_priv, &sequence);
 	ret = vmw_fifo_send_fence(dev_priv, &sequence);
 	if (unlikely(ret != 0)) {
 	return 0;
 	return 0;
 }
 
-		      struct drm_file *file_priv)
+int vmw_execbuf_process(struct drm_file *file_priv,
+			struct vmw_private *dev_priv,
+			void __user *user_commands,
+			void *kernel_commands,
+			uint32_t command_size,
+			uint64_t throttle_us,
+			struct drm_vmw_fence_rep __user *user_fence_rep)
 {
 {
-	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
-	struct drm_vmw_fence_rep fence_rep;
-	struct drm_vmw_fence_rep __user *user_fence_rep;
-	int ret;
-	void *user_cmd;
-	void *cmd;
 	struct vmw_sw_context *sw_context = &dev_priv->ctx;
 	struct vmw_sw_context *sw_context = &dev_priv->ctx;
+	struct drm_vmw_fence_rep fence_rep;
 	struct vmw_fence_obj *fence;
 	struct vmw_fence_obj *fence;
 	uint32_t handle;
+	int ret;
 
 
-	/*
-	 * This will allow us to extend the ioctl argument while
-	 * maintaining backwards compatibility:
-	 * We take different code paths depending on the value of
-	 * arg->version.
-	 */
-
-	if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
-		DRM_ERROR("Incorrect execbuf version.\n");
-		DRM_ERROR("You're running outdated experimental "
-			  "vmwgfx user-space drivers.");
-		return -EINVAL;
-	}
-
-	ret = ttm_read_lock(&vmaster->lock, true);
+	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
 	if (unlikely(ret != 0))
 	if (unlikely(ret != 0))
+		return -ERESTARTSYS;
 
 
-	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
-	if (unlikely(ret != 0)) {
-		ret = -ERESTARTSYS;
-		goto out_no_cmd_mutex;
-	}
+	if (kernel_commands == NULL) {
+		sw_context->kernel = false;
 
 
-	ret = vmw_resize_cmd_bounce(sw_context, arg->command_size);
-	if (unlikely(ret != 0))
-		goto out_unlock;
+		ret = vmw_resize_cmd_bounce(sw_context, command_size);
+		if (unlikely(ret != 0))
+			goto out_unlock;
 
 
-	user_cmd = (void __user *)(unsigned long)arg->commands;
-	ret = copy_from_user(sw_context->cmd_bounce,
-			     user_cmd, arg->command_size);
 
 
-	if (unlikely(ret != 0)) {
-		ret = -EFAULT;
-		DRM_ERROR("Failed copying commands.\n");
-		goto out_unlock;
-	}
+		ret = copy_from_user(sw_context->cmd_bounce,
+				     user_commands, command_size);
+
+		if (unlikely(ret != 0)) {
+			ret = -EFAULT;
+			DRM_ERROR("Failed copying commands.\n");
+			goto out_unlock;
+		}
+		kernel_commands = sw_context->cmd_bounce;
+	} else
+		sw_context->kernel = true;
 
 
 	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
 	sw_context->cid_valid = false;
 	sw_context->sid_valid = false;
 	sw_context->cur_reloc = 0;
 	sw_context->cur_val_buf = 0;
-	sw_context->num_ref_resources = 0;
+	sw_context->fence_flags = 0;
+	INIT_LIST_HEAD(&sw_context->query_list);
+	INIT_LIST_HEAD(&sw_context->resource_list);
+	sw_context->cur_query_bo = dev_priv->pinned_bo;
+	sw_context->cur_query_cid = dev_priv->query_cid;
+	sw_context->query_cid_valid = (dev_priv->pinned_bo != NULL);
 
 
 	INIT_LIST_HEAD(&sw_context->validate_nodes);
 
-	ret = vmw_cmd_check_all(dev_priv, sw_context, arg->command_size);
+	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
+				command_size);
 	if (unlikely(ret != 0))
 		goto out_err;
 
@@ -795,26 +1105,25 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 
 	vmw_apply_relocations(sw_context);
 
-	if (arg->throttle_us) {
+	if (throttle_us) {
 		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
-				   arg->throttle_us);
+				   throttle_us);
 
 
 		if (unlikely(ret != 0))
 			goto out_throttle;
 	}
 
-	cmd = vmw_fifo_reserve(dev_priv, arg->command_size);
+	cmd = vmw_fifo_reserve(dev_priv, command_size);
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Failed reserving fifo space for commands.\n");
 		ret = -ENOMEM;
-		goto out_err;
+		goto out_throttle;
 	}
 
-	memcpy(cmd, sw_context->cmd_bounce, arg->command_size);
-	vmw_fifo_commit(dev_priv, arg->command_size);
+	memcpy(cmd, kernel_commands, command_size);
+	vmw_fifo_commit(dev_priv, command_size);
 
 
-	user_fence_rep = (struct drm_vmw_fence_rep __user *)
-		(unsigned long)arg->fence_rep;
+	vmw_query_bo_switch_commit(dev_priv, sw_context);
 	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
 					 &fence,
 					 (user_fence_rep) ? &handle : NULL);
@@ -831,7 +1140,6 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 				    (void *) fence);
 
 	vmw_clear_validations(sw_context);
-	mutex_unlock(&dev_priv->cmdbuf_mutex);
 
 
 	if (user_fence_rep) {
 		fence_rep.error = ret;
@@ -868,17 +1176,165 @@ int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
 	if (likely(fence != NULL))
 		vmw_fence_obj_unreference(&fence);
 
-	vmw_kms_cursor_post_execbuf(dev_priv);
-	ttm_read_unlock(&vmaster->lock);
+	mutex_unlock(&dev_priv->cmdbuf_mutex);
 	return 0;
+
 out_err:
 	vmw_free_relocations(sw_context);
 out_throttle:
+	vmw_query_switch_backoff(sw_context);
 	ttm_eu_backoff_reservation(&sw_context->validate_nodes);
 	vmw_clear_validations(sw_context);
 out_unlock:
 	mutex_unlock(&dev_priv->cmdbuf_mutex);
-out_no_cmd_mutex:
+	return ret;
+}
+
+/**
+ * vmw_execbuf_unpin_panic - Idle the fifo and unpin the query buffer.
+ *
+ * @dev_priv: The device private structure.
+ *
+ * This function is called to idle the fifo and unpin the query buffer
+ * if the normal way to do this hits an error, which should typically be
+ * extremely rare.
+ */
+static void vmw_execbuf_unpin_panic(struct vmw_private *dev_priv)
+{
+	DRM_ERROR("Can't unpin query buffer. Trying to recover.\n");
+
+	(void) vmw_fallback_wait(dev_priv, false, true, 0, false, 10*HZ);
+	vmw_bo_pin(dev_priv->pinned_bo, false);
+	vmw_bo_pin(dev_priv->dummy_query_bo, false);
+	dev_priv->dummy_query_bo_pinned = false;
+}
+
+
+/**
+ * vmw_execbuf_release_pinned_bo - Flush queries and unpin the pinned
+ * query bo.
+ *
+ * @dev_priv: The device private structure.
+ * @only_on_cid_match: Only flush and unpin if the current active query cid
+ * matches @cid.
+ * @cid: Optional context id to match.
+ *
+ * This function should be used to unpin the pinned query bo, or
+ * as a query barrier when we need to make sure that all queries have
+ * finished before the next fifo command. (For example on hardware
+ * context destructions where the hardware may otherwise leak unfinished
+ * queries).
+ *
+ * This function does not return any failure codes, but makes attempts
+ * to do safe unpinning in case of errors.
+ *
+ * The function will synchronize on the previous query barrier, and will
+ * thus not finish until that barrier has executed.
+ */
+void vmw_execbuf_release_pinned_bo(struct vmw_private *dev_priv,
+				   bool only_on_cid_match, uint32_t cid)
+{
+	int ret = 0;
+	struct list_head validate_list;
+	struct ttm_validate_buffer pinned_val, query_val;
+	struct vmw_fence_obj *fence;
+
+	mutex_lock(&dev_priv->cmdbuf_mutex);
+
+	if (dev_priv->pinned_bo == NULL)
+		goto out_unlock;
+
+	if (only_on_cid_match && cid != dev_priv->query_cid)
+		goto out_unlock;
+
+	INIT_LIST_HEAD(&validate_list);
+
+	pinned_val.new_sync_obj_arg = (void *)(unsigned long)
+		DRM_VMW_FENCE_FLAG_EXEC;
+	pinned_val.bo = ttm_bo_reference(dev_priv->pinned_bo);
+	list_add_tail(&pinned_val.head, &validate_list);
+
+	query_val.new_sync_obj_arg = pinned_val.new_sync_obj_arg;
+	query_val.bo = ttm_bo_reference(dev_priv->dummy_query_bo);
+	list_add_tail(&query_val.head, &validate_list);
+
+	do {
+		ret = ttm_eu_reserve_buffers(&validate_list);
+	} while (ret == -ERESTARTSYS);
+
+	if (unlikely(ret != 0)) {
+		vmw_execbuf_unpin_panic(dev_priv);
+		goto out_no_reserve;
+	}
+
+	ret = vmw_fifo_emit_dummy_query(dev_priv, dev_priv->query_cid);
+	if (unlikely(ret != 0)) {
+		vmw_execbuf_unpin_panic(dev_priv);
+		goto out_no_emit;
+	}
+
+	vmw_bo_pin(dev_priv->pinned_bo, false);
+	vmw_bo_pin(dev_priv->dummy_query_bo, false);
+	dev_priv->dummy_query_bo_pinned = false;
+
+	(void) vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+	ttm_eu_fence_buffer_objects(&validate_list, (void *) fence);
+
+	ttm_bo_unref(&query_val.bo);
+	ttm_bo_unref(&pinned_val.bo);
+	ttm_bo_unref(&dev_priv->pinned_bo);
+
+out_unlock:
+	mutex_unlock(&dev_priv->cmdbuf_mutex);
+	return;
+
+out_no_emit:
+	ttm_eu_backoff_reservation(&validate_list);
+out_no_reserve:
+	ttm_bo_unref(&query_val.bo);
+	ttm_bo_unref(&pinned_val.bo);
+	ttm_bo_unref(&dev_priv->pinned_bo);
+	mutex_unlock(&dev_priv->cmdbuf_mutex);
+}
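
A hedged usage note: the cid-matching form is what a hardware context destructor would use as a query barrier (illustrative call; the ctx variable is assumed):

	/* Flush and unpin only if the dying context owns the pinned query bo. */
	vmw_execbuf_release_pinned_bo(dev_priv, true, ctx->id);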
+
+
+int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
+	int ret;
+
+	/*
+	 * This will allow us to extend the ioctl argument while
+	 * maintaining backwards compatibility:
+	 * We take different code paths depending on the value of
+	 * arg->version.
+	 */
+
+	if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
+		DRM_ERROR("Incorrect execbuf version.\n");
+		DRM_ERROR("You're running outdated experimental "
+			  "vmwgfx user-space drivers.\n");
+		return -EINVAL;
+	}
+
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = vmw_execbuf_process(file_priv, dev_priv,
+				  (void __user *)(unsigned long)arg->commands,
+				  NULL, arg->command_size, arg->throttle_us,
+				  (void __user *)(unsigned long)arg->fence_rep);
+
+	if (unlikely(ret != 0))
+		goto out_unlock;
+
+	vmw_kms_cursor_post_execbuf(dev_priv);
+
+out_unlock:
 	ttm_read_unlock(&vmaster->lock);
 	ttm_read_unlock(&vmaster->lock);
 	return ret;
 	return ret;
 }
 }
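For orientation, a minimal user-space submission against this interface could look as follows (a sketch assuming a libdrm-style wrapper; fd, cmd_buf and cmd_size are placeholders):

	struct drm_vmw_fence_rep fence_rep;
	struct drm_vmw_execbuf_arg arg = {
		.commands     = (unsigned long)cmd_buf,
		.command_size = cmd_size,
		.throttle_us  = 0,
		.version      = DRM_VMW_EXECBUF_VERSION, /* checked above */
		.fence_rep    = (unsigned long)&fence_rep,
	};

	ret = drmCommandWrite(fd, DRM_VMW_EXECBUF, &arg, sizeof(arg));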

+ 2 - 54
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c

@@ -592,58 +592,6 @@ int vmw_fb_close(struct vmw_private *vmw_priv)
 	return 0;
 	return 0;
 }
 }
 
 
-int vmw_dmabuf_from_vram(struct vmw_private *vmw_priv,
-			 struct vmw_dma_buffer *vmw_bo)
-{
-	struct ttm_buffer_object *bo = &vmw_bo->base;
-	int ret = 0;
-
-	ret = ttm_bo_reserve(bo, false, false, false, 0);
-	if (unlikely(ret != 0))
-		return ret;
-
-	ret = ttm_bo_validate(bo, &vmw_sys_placement, false, false, false);
-	ttm_bo_unreserve(bo);
-
-	return ret;
-}
-
-int vmw_dmabuf_to_start_of_vram(struct vmw_private *vmw_priv,
-				struct vmw_dma_buffer *vmw_bo)
-{
-	struct ttm_buffer_object *bo = &vmw_bo->base;
-	struct ttm_placement ne_placement = vmw_vram_ne_placement;
-	int ret = 0;
-
-	ne_placement.lpfn = bo->num_pages;
-
-	/* interuptable? */
-	ret = ttm_write_lock(&vmw_priv->active_master->lock, false);
-	if (unlikely(ret != 0))
-		return ret;
-
-	ret = ttm_bo_reserve(bo, false, false, false, 0);
-	if (unlikely(ret != 0))
-		goto err_unlock;
-
-	if (bo->mem.mem_type == TTM_PL_VRAM &&
-	    bo->mem.start < bo->num_pages &&
-	    bo->mem.start > 0)
-		(void) ttm_bo_validate(bo, &vmw_sys_placement, false,
-				       false, false);
-
-	ret = ttm_bo_validate(bo, &ne_placement, false, false, false);
-
-	/* Could probably bug on */
-	WARN_ON(bo->offset != 0);
-
-	ttm_bo_unreserve(bo);
-err_unlock:
-	ttm_write_unlock(&vmw_priv->active_master->lock);
-
-	return ret;
-}
-
 int vmw_fb_off(struct vmw_private *vmw_priv)
 int vmw_fb_off(struct vmw_private *vmw_priv)
 {
 {
 	struct fb_info *info;
 	struct fb_info *info;
@@ -665,7 +613,7 @@ int vmw_fb_off(struct vmw_private *vmw_priv)
 	par->bo_ptr = NULL;
 	par->bo_ptr = NULL;
 	ttm_bo_kunmap(&par->map);
 	ttm_bo_kunmap(&par->map);
 
 
-	vmw_dmabuf_from_vram(vmw_priv, par->vmw_bo);
+	vmw_dmabuf_unpin(vmw_priv, par->vmw_bo, false);
 
 
 	return 0;
 	return 0;
 }
 }
@@ -691,7 +639,7 @@ int vmw_fb_on(struct vmw_private *vmw_priv)
 	/* Make sure that all overlays are stopped when we take over */
 	/* Make sure that all overlays are stopped when we take over */
 	vmw_overlay_stop_all(vmw_priv);
 	vmw_overlay_stop_all(vmw_priv);
 
 
-	ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo);
+	ret = vmw_dmabuf_to_start_of_vram(vmw_priv, par->vmw_bo, true, false);
 	if (unlikely(ret != 0)) {
 	if (unlikely(ret != 0)) {
 		DRM_ERROR("could not move buffer to start of VRAM\n");
 		DRM_ERROR("could not move buffer to start of VRAM\n");
 		goto err_no_buffer;
 		goto err_no_buffer;

+ 8 - 1
drivers/gpu/drm/vmwgfx/vmwgfx_fence.c

@@ -177,6 +177,9 @@ out_unlock:
 
 
 struct vmw_fence_obj *vmw_fence_obj_reference(struct vmw_fence_obj *fence)
 struct vmw_fence_obj *vmw_fence_obj_reference(struct vmw_fence_obj *fence)
 {
 {
+	if (unlikely(fence == NULL))
+		return NULL;
+
 	kref_get(&fence->kref);
 	kref_get(&fence->kref);
 	return fence;
 	return fence;
 }
 }
@@ -191,8 +194,12 @@ struct vmw_fence_obj *vmw_fence_obj_reference(struct vmw_fence_obj *fence)
 void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
 void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
 {
 {
 	struct vmw_fence_obj *fence = *fence_p;
 	struct vmw_fence_obj *fence = *fence_p;
-	struct vmw_fence_manager *fman = fence->fman;
+	struct vmw_fence_manager *fman;
+
+	if (unlikely(fence == NULL))
+		return;
 
 
+	fman = fence->fman;
 	*fence_p = NULL;
 	*fence_p = NULL;
 	spin_lock_irq(&fman->lock);
 	spin_lock_irq(&fman->lock);
 	BUG_ON(atomic_read(&fence->kref.refcount) == 0);
 	BUG_ON(atomic_read(&fence->kref.refcount) == 0);
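Together these two hunks make NULL a valid argument, so callers can take and drop fence references unconditionally; a sketch:

	struct vmw_fence_obj *fence = vmw_fence_obj_reference(maybe_fence);

	/* ... fence may legitimately be NULL here ... */

	vmw_fence_obj_unreference(&fence); /* no-op when fence == NULL */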

+ 72 - 1
drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c

@@ -45,7 +45,11 @@ bool vmw_fifo_have_3d(struct vmw_private *dev_priv)
 	if (hwversion == 0)
 	if (hwversion == 0)
 		return false;
 		return false;
 
 
-	if (hwversion < SVGA3D_HWVERSION_WS65_B1)
+	if (hwversion < SVGA3D_HWVERSION_WS8_B1)
+		return false;
+
+	/* Non-Screen Object path does not support surfaces */
+	if (!dev_priv->sou_priv)
 		return false;
 		return false;
 
 
 	return true;
 	return true;
@@ -277,6 +281,16 @@ static int vmw_fifo_wait(struct vmw_private *dev_priv,
 	return ret;
 	return ret;
 }
 }
 
 
+/**
+ * vmw_fifo_reserve - Reserve @bytes of fifo command space.
+ *
+ * This function returns NULL (error) in two cases:
+ * if it times out waiting for fifo space, or if @bytes is larger than
+ * the available fifo space.
+ *
+ * Returns:
+ *   Pointer to the reserved fifo space, or NULL on error (possible
+ *   hardware hang).
+ */
 void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
 void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
 {
 {
 	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
 	struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
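The reserve/commit pairing the new comment documents follows the usual pattern (a sketch; the cmd struct stands in for any fifo command):

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL))
		return -ENOMEM; /* fifo full, or the wait timed out */

	/* ... fill in *cmd ... */

	vmw_fifo_commit(dev_priv, sizeof(*cmd));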
@@ -491,3 +505,60 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
 out_err:
 out_err:
 	return ret;
 	return ret;
 }
 }
+
+/**
+ * vmw_fifo_emit_dummy_query - emits a dummy query to the fifo.
+ *
+ * @dev_priv: The device private structure.
+ * @cid: The hardware context id used for the query.
+ *
+ * This function is used to emit a dummy occlusion query with
+ * no primitives rendered between query begin and query end.
+ * It's used to provide a query barrier, in order to know that when
+ * this query is finished, all preceding queries are also finished.
+ *
+ * A query result structure must have been initialized at the start
+ * of the dev_priv->dummy_query_bo buffer object, and that buffer object
+ * must be either reserved or pinned when this function is called.
+ *
+ * Returns -ENOMEM on failure to reserve fifo space.
+ */
+int vmw_fifo_emit_dummy_query(struct vmw_private *dev_priv,
+			      uint32_t cid)
+{
+	/*
+	 * A query wait without a preceding query end will
+	 * actually finish all queries for this cid
+	 * without writing to the query result structure.
+	 */
+
+	struct ttm_buffer_object *bo = dev_priv->dummy_query_bo;
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdWaitForQuery body;
+	} *cmd;
+
+	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
+
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Out of fifo space for dummy query.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = SVGA_3D_CMD_WAIT_FOR_QUERY;
+	cmd->header.size = sizeof(cmd->body);
+	cmd->body.cid = cid;
+	cmd->body.type = SVGA3D_QUERYTYPE_OCCLUSION;
+
+	if (bo->mem.mem_type == TTM_PL_VRAM) {
+		cmd->body.guestResult.gmrId = SVGA_GMR_FRAMEBUFFER;
+		cmd->body.guestResult.offset = bo->offset;
+	} else {
+		cmd->body.guestResult.gmrId = bo->mem.start;
+		cmd->body.guestResult.offset = 0;
+	}
+
+	vmw_fifo_commit(dev_priv, sizeof(*cmd));
+
+	return 0;
+}

+ 172 - 0
drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c

@@ -27,6 +27,7 @@
 
 
 #include "vmwgfx_drv.h"
 #include "vmwgfx_drv.h"
 #include "vmwgfx_drm.h"
 #include "vmwgfx_drm.h"
+#include "vmwgfx_kms.h"
 
 
 int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 int vmw_getparam_ioctl(struct drm_device *dev, void *data,
 		       struct drm_file *file_priv)
 		       struct drm_file *file_priv)
@@ -110,3 +111,174 @@ int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
 
 
 	return ret;
 	return ret;
 }
 }
+
+int vmw_present_ioctl(struct drm_device *dev, void *data,
+		      struct drm_file *file_priv)
+{
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct drm_vmw_present_arg *arg =
+		(struct drm_vmw_present_arg *)data;
+	struct vmw_surface *surface;
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
+	struct drm_vmw_rect __user *clips_ptr;
+	struct drm_vmw_rect *clips = NULL;
+	struct drm_mode_object *obj;
+	struct vmw_framebuffer *vfb;
+	uint32_t num_clips;
+	int ret;
+
+	num_clips = arg->num_clips;
+	clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
+
+	if (unlikely(num_clips == 0))
+		return 0;
+
+	if (clips_ptr == NULL) {
+		DRM_ERROR("Argument clips_ptr must be specified.\n");
+		ret = -EINVAL;
+		goto out_clips;
+	}
+
+	clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+	if (clips == NULL) {
+		DRM_ERROR("Failed to allocate clip rect list.\n");
+		ret = -ENOMEM;
+		goto out_clips;
+	}
+
+	ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
+	if (ret) {
+		DRM_ERROR("Failed to copy clip rects from userspace.\n");
+		goto out_no_copy;
+	}
+
+	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+	if (unlikely(ret != 0)) {
+		ret = -ERESTARTSYS;
+		goto out_no_mode_mutex;
+	}
+
+	obj = drm_mode_object_find(dev, arg->fb_id, DRM_MODE_OBJECT_FB);
+	if (!obj) {
+		DRM_ERROR("Invalid framebuffer id.\n");
+		ret = -EINVAL;
+		goto out_no_fb;
+	}
+
+	vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj));
+	if (!vfb->dmabuf) {
+		DRM_ERROR("Framebuffer not dmabuf backed.\n");
+		ret = -EINVAL;
+		goto out_no_fb;
+	}
+
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (unlikely(ret != 0))
+		goto out_no_ttm_lock;
+
+	ret = vmw_user_surface_lookup_handle(dev_priv, tfile, arg->sid,
+					     &surface);
+	if (ret)
+		goto out_no_surface;
+
+	ret = vmw_kms_present(dev_priv, file_priv,
+			      vfb, surface, arg->sid,
+			      arg->dest_x, arg->dest_y,
+			      clips, num_clips);
+
+	/* vmw_user_surface_lookup takes one ref, and so does new_fb */
+	vmw_surface_unreference(&surface);
+
+out_no_surface:
+	ttm_read_unlock(&vmaster->lock);
+out_no_ttm_lock:
+out_no_fb:
+	mutex_unlock(&dev->mode_config.mutex);
+out_no_mode_mutex:
+out_no_copy:
+	kfree(clips);
+out_clips:
+	return ret;
+}
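From user space the new ioctl would be driven roughly like this (a hedged sketch assuming a libdrm-style wrapper; fb_id and surface_handle are placeholders):

	struct drm_vmw_rect clip = { .x = 0, .y = 0, .w = 640, .h = 480 };
	struct drm_vmw_present_arg arg = {
		.fb_id     = fb_id,            /* dmabuf-backed kms fb */
		.sid       = surface_handle,   /* source surface */
		.dest_x    = 0,
		.dest_y    = 0,
		.num_clips = 1,
		.clips_ptr = (unsigned long)&clip,
	};

	ret = drmCommandWrite(fd, DRM_VMW_PRESENT, &arg, sizeof(arg));

vmw_present_readback_ioctl below is driven the same way, with a drm_vmw_present_readback_arg and an optional fence_rep pointer.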
+
+int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
+			       struct drm_file *file_priv)
+{
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct drm_vmw_present_readback_arg *arg =
+		(struct drm_vmw_present_readback_arg *)data;
+	struct drm_vmw_fence_rep __user *user_fence_rep =
+		(struct drm_vmw_fence_rep __user *)
+		(unsigned long)arg->fence_rep;
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
+	struct drm_vmw_rect __user *clips_ptr;
+	struct drm_vmw_rect *clips = NULL;
+	struct drm_mode_object *obj;
+	struct vmw_framebuffer *vfb;
+	uint32_t num_clips;
+	int ret;
+
+	num_clips = arg->num_clips;
+	clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
+
+	if (unlikely(num_clips == 0))
+		return 0;
+
+	if (clips_ptr == NULL) {
+		DRM_ERROR("Argument clips_ptr must be specified.\n");
+		ret = -EINVAL;
+		goto out_clips;
+	}
+
+	clips = kzalloc(num_clips * sizeof(*clips), GFP_KERNEL);
+	if (clips == NULL) {
+		DRM_ERROR("Failed to allocate clip rect list.\n");
+		ret = -ENOMEM;
+		goto out_clips;
+	}
+
+	ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
+	if (ret) {
+		DRM_ERROR("Failed to copy clip rects from userspace.\n");
+		goto out_no_copy;
+	}
+
+	ret = mutex_lock_interruptible(&dev->mode_config.mutex);
+	if (unlikely(ret != 0)) {
+		ret = -ERESTARTSYS;
+		goto out_no_mode_mutex;
+	}
+
+	obj = drm_mode_object_find(dev, arg->fb_id, DRM_MODE_OBJECT_FB);
+	if (!obj) {
+		DRM_ERROR("Invalid framebuffer id.\n");
+		ret = -EINVAL;
+		goto out_no_fb;
+	}
+
+	vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj));
+	if (!vfb->dmabuf) {
+		DRM_ERROR("Framebuffer not dmabuf backed.\n");
+		ret = -EINVAL;
+		goto out_no_fb;
+	}
+
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (unlikely(ret != 0))
+		goto out_no_ttm_lock;
+
+	ret = vmw_kms_readback(dev_priv, file_priv,
+			       vfb, user_fence_rep,
+			       clips, num_clips);
+
+	ttm_read_unlock(&vmaster->lock);
+out_no_ttm_lock:
+out_no_fb:
+	mutex_unlock(&dev->mode_config.mutex);
+out_no_mode_mutex:
+out_no_copy:
+	kfree(clips);
+out_clips:
+	return ret;
+}

+ 641 - 200
drivers/gpu/drm/vmwgfx/vmwgfx_kms.c

@@ -27,12 +27,10 @@
 
 
 #include "vmwgfx_kms.h"
 #include "vmwgfx_kms.h"
 
 
+
 /* Might need a hrtimer here? */
 /* Might need a hrtimer here? */
 #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
 #define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)
 
 
-static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb);
-static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb);
-
 void vmw_display_unit_cleanup(struct vmw_display_unit *du)
 void vmw_display_unit_cleanup(struct vmw_display_unit *du)
 {
 {
 	if (du->cursor_surface)
 	if (du->cursor_surface)
@@ -329,41 +327,10 @@ struct vmw_framebuffer_surface {
 	struct vmw_framebuffer base;
 	struct vmw_framebuffer base;
 	struct vmw_surface *surface;
 	struct vmw_surface *surface;
 	struct vmw_dma_buffer *buffer;
 	struct vmw_dma_buffer *buffer;
-	struct delayed_work d_work;
-	struct mutex work_lock;
-	bool present_fs;
 	struct list_head head;
 	struct list_head head;
 	struct drm_master *master;
 	struct drm_master *master;
 };
 };
 
 
-/**
- * vmw_kms_idle_workqueues - Flush workqueues on this master
- *
- * @vmaster - Pointer identifying the master, for the surfaces of which
- * we idle the dirty work queues.
- *
- * This function should be called with the ttm lock held in exclusive mode
- * to idle all dirty work queues before the fifo is taken down.
- *
- * The work task may actually requeue itself, but after the flush returns we're
- * sure that there's nothing to present, since the ttm lock is held in
- * exclusive mode, so the fifo will never get used.
- */
-
-void vmw_kms_idle_workqueues(struct vmw_master *vmaster)
-{
-	struct vmw_framebuffer_surface *entry;
-
-	mutex_lock(&vmaster->fb_surf_mutex);
-	list_for_each_entry(entry, &vmaster->fb_surf, head) {
-		if (cancel_delayed_work_sync(&entry->d_work))
-			(void) entry->d_work.work.func(&entry->d_work.work);
-
-		(void) cancel_delayed_work_sync(&entry->d_work);
-	}
-	mutex_unlock(&vmaster->fb_surf_mutex);
-}
-
 void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
 void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
 {
 {
 	struct vmw_framebuffer_surface *vfbs =
 	struct vmw_framebuffer_surface *vfbs =
@@ -375,64 +342,69 @@ void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
 	list_del(&vfbs->head);
 	list_del(&vfbs->head);
 	mutex_unlock(&vmaster->fb_surf_mutex);
 	mutex_unlock(&vmaster->fb_surf_mutex);
 
 
-	cancel_delayed_work_sync(&vfbs->d_work);
 	drm_master_put(&vfbs->master);
 	drm_master_put(&vfbs->master);
 	drm_framebuffer_cleanup(framebuffer);
 	drm_framebuffer_cleanup(framebuffer);
 	vmw_surface_unreference(&vfbs->surface);
 	vmw_surface_unreference(&vfbs->surface);
+	ttm_base_object_unref(&vfbs->base.user_obj);
 
 
 	kfree(vfbs);
 	kfree(vfbs);
 }
 }
 
 
-static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
+static int do_surface_dirty_sou(struct vmw_private *dev_priv,
+				struct drm_file *file_priv,
+				struct vmw_framebuffer *framebuffer,
+				struct vmw_surface *surf,
+				unsigned flags, unsigned color,
+				struct drm_clip_rect *clips,
+				unsigned num_clips, int inc)
 {
 {
-	struct delayed_work *d_work =
-		container_of(work, struct delayed_work, work);
-	struct vmw_framebuffer_surface *vfbs =
-		container_of(d_work, struct vmw_framebuffer_surface, d_work);
-	struct vmw_surface *surf = vfbs->surface;
-	struct drm_framebuffer *framebuffer = &vfbs->base.base;
-	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+	int left = clips->x2, right = clips->x1;
+	int top = clips->y2, bottom = clips->y1;
+	size_t fifo_size;
+	int i, ret;
 
 
 	struct {
 	struct {
 		SVGA3dCmdHeader header;
 		SVGA3dCmdHeader header;
-		SVGA3dCmdPresent body;
-		SVGA3dCopyRect cr;
+		SVGA3dCmdBlitSurfaceToScreen body;
 	} *cmd;
 	} *cmd;
 
 
-	/**
-	 * Strictly we should take the ttm_lock in read mode before accessing
-	 * the fifo, to make sure the fifo is present and up. However,
-	 * instead we flush all workqueues under the ttm lock in exclusive mode
-	 * before taking down the fifo.
-	 */
-	mutex_lock(&vfbs->work_lock);
-	if (!vfbs->present_fs)
-		goto out_unlock;
-
-	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-	if (unlikely(cmd == NULL))
-		goto out_resched;
-
-	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
-	cmd->header.size = cpu_to_le32(sizeof(cmd->body) + sizeof(cmd->cr));
-	cmd->body.sid = cpu_to_le32(surf->res.id);
-	cmd->cr.x = cpu_to_le32(0);
-	cmd->cr.y = cpu_to_le32(0);
-	cmd->cr.srcx = cmd->cr.x;
-	cmd->cr.srcy = cmd->cr.y;
-	cmd->cr.w = cpu_to_le32(framebuffer->width);
-	cmd->cr.h = cpu_to_le32(framebuffer->height);
-	vfbs->present_fs = false;
-	vmw_fifo_commit(dev_priv, sizeof(*cmd));
-out_resched:
-	/**
-	 * Will not re-add if already pending.
-	 */
-	schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
-out_unlock:
-	mutex_unlock(&vfbs->work_lock);
-}
 
 
+	fifo_size = sizeof(*cmd);
+	cmd = kzalloc(fifo_size, GFP_KERNEL);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Temporary fifo memory alloc failed.\n");
+		return -ENOMEM;
+	}
+
+	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
+	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
+
+	cmd->body.srcImage.sid = cpu_to_le32(framebuffer->user_handle);
+	cmd->body.destScreenId = SVGA_ID_INVALID; /* virtual coords */
+
+	for (i = 0; i < num_clips; i++, clips += inc) {
+		left = min_t(int, left, (int)clips->x1);
+		right = max_t(int, right, (int)clips->x2);
+		top = min_t(int, top, (int)clips->y1);
+		bottom = max_t(int, bottom, (int)clips->y2);
+	}
+
+	cmd->body.srcRect.left = left;
+	cmd->body.srcRect.right = right;
+	cmd->body.srcRect.top = top;
+	cmd->body.srcRect.bottom = bottom;
+
+	cmd->body.destRect.left = left;
+	cmd->body.destRect.right = right;
+	cmd->body.destRect.top = top;
+	cmd->body.destRect.bottom = bottom;
+
+	ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, fifo_size,
+				  0, NULL);
+	kfree(cmd);
+
+	return ret;
+}
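Note that the loop collapses all clips into a single bounding blit; a worked example with illustrative numbers:

	/*
	 * clips (10,10)-(20,20) and (100,50)-(120,80)
	 *   => srcRect == destRect == (10,10)-(120,80)
	 * destScreenId == SVGA_ID_INVALID makes the device interpret the
	 * rect in virtual coordinates spanning all screen objects.
	 */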
 
 
 int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 				  struct drm_file *file_priv,
 				  struct drm_file *file_priv,
@@ -446,42 +418,19 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 		vmw_framebuffer_to_vfbs(framebuffer);
 		vmw_framebuffer_to_vfbs(framebuffer);
 	struct vmw_surface *surf = vfbs->surface;
 	struct vmw_surface *surf = vfbs->surface;
 	struct drm_clip_rect norect;
 	struct drm_clip_rect norect;
-	SVGA3dCopyRect *cr;
-	int i, inc = 1;
-	int ret;
-
-	struct {
-		SVGA3dCmdHeader header;
-		SVGA3dCmdPresent body;
-		SVGA3dCopyRect cr;
-	} *cmd;
+	int ret, inc = 1;
 
 
 	if (unlikely(vfbs->master != file_priv->master))
 	if (unlikely(vfbs->master != file_priv->master))
 		return -EINVAL;
 		return -EINVAL;
 
 
+	/* Require ScreenObject support for 3D */
+	if (!dev_priv->sou_priv)
+		return -EINVAL;
+
 	ret = ttm_read_lock(&vmaster->lock, true);
 	ret = ttm_read_lock(&vmaster->lock, true);
 	if (unlikely(ret != 0))
 	if (unlikely(ret != 0))
 		return ret;
 		return ret;
 
 
-	if (!num_clips ||
-	    !(dev_priv->fifo.capabilities &
-	      SVGA_FIFO_CAP_SCREEN_OBJECT)) {
-		int ret;
-
-		mutex_lock(&vfbs->work_lock);
-		vfbs->present_fs = true;
-		ret = schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
-		mutex_unlock(&vfbs->work_lock);
-		if (ret) {
-			/**
-			 * No work pending, Force immediate present.
-			 */
-			vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
-		}
-		ttm_read_unlock(&vmaster->lock);
-		return 0;
-	}
-
 	if (!num_clips) {
 	if (!num_clips) {
 		num_clips = 1;
 		num_clips = 1;
 		clips = &norect;
 		clips = &norect;
@@ -493,29 +442,10 @@ int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
 		inc = 2; /* skip source rects */
 		inc = 2; /* skip source rects */
 	}
 	}
 
 
-	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
-	if (unlikely(cmd == NULL)) {
-		DRM_ERROR("Fifo reserve failed.\n");
-		ttm_read_unlock(&vmaster->lock);
-		return -ENOMEM;
-	}
-
-	memset(cmd, 0, sizeof(*cmd));
-
-	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
-	cmd->header.size = cpu_to_le32(sizeof(cmd->body) + num_clips * sizeof(cmd->cr));
-	cmd->body.sid = cpu_to_le32(surf->res.id);
-
-	for (i = 0, cr = &cmd->cr; i < num_clips; i++, cr++, clips += inc) {
-		cr->x = cpu_to_le16(clips->x1);
-		cr->y = cpu_to_le16(clips->y1);
-		cr->srcx = cr->x;
-		cr->srcy = cr->y;
-		cr->w = cpu_to_le16(clips->x2 - clips->x1);
-		cr->h = cpu_to_le16(clips->y2 - clips->y1);
-	}
+	ret = do_surface_dirty_sou(dev_priv, file_priv, &vfbs->base, surf,
+				   flags, color,
+				   clips, num_clips, inc);
 
 
-	vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
 	ttm_read_unlock(&vmaster->lock);
 	ttm_read_unlock(&vmaster->lock);
 	return 0;
 	return 0;
 }
 }
@@ -540,6 +470,10 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
 	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	int ret;
 	int ret;
 
 
+	/* 3D is only supported on HWv8 hosts, which support screen objects */
+	if (!dev_priv->sou_priv)
+		return -ENOSYS;
+
 	/*
 	/*
 	 * Sanity checks.
 	 * Sanity checks.
 	 */
 	 */
@@ -602,14 +536,11 @@ static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
 	vfbs->base.base.depth = mode_cmd->depth;
 	vfbs->base.base.depth = mode_cmd->depth;
 	vfbs->base.base.width = mode_cmd->width;
 	vfbs->base.base.width = mode_cmd->width;
 	vfbs->base.base.height = mode_cmd->height;
 	vfbs->base.base.height = mode_cmd->height;
-	vfbs->base.pin = &vmw_surface_dmabuf_pin;
-	vfbs->base.unpin = &vmw_surface_dmabuf_unpin;
 	vfbs->surface = surface;
 	vfbs->surface = surface;
+	vfbs->base.user_handle = mode_cmd->handle;
 	vfbs->master = drm_master_get(file_priv->master);
 	vfbs->master = drm_master_get(file_priv->master);
-	mutex_init(&vfbs->work_lock);
 
 
 	mutex_lock(&vmaster->fb_surf_mutex);
 	mutex_lock(&vmaster->fb_surf_mutex);
-	INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
 	list_add_tail(&vfbs->head, &vmaster->fb_surf);
 	list_add_tail(&vfbs->head, &vmaster->fb_surf);
 	mutex_unlock(&vmaster->fb_surf_mutex);
 	mutex_unlock(&vmaster->fb_surf_mutex);
 
 
@@ -644,48 +575,34 @@ void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
 
 
 	drm_framebuffer_cleanup(framebuffer);
 	drm_framebuffer_cleanup(framebuffer);
 	vmw_dmabuf_unreference(&vfbd->buffer);
 	vmw_dmabuf_unreference(&vfbd->buffer);
+	ttm_base_object_unref(&vfbd->base.user_obj);
 
 
 	kfree(vfbd);
 	kfree(vfbd);
 }
 }
 
 
-int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
-				 struct drm_file *file_priv,
-				 unsigned flags, unsigned color,
-				 struct drm_clip_rect *clips,
-				 unsigned num_clips)
+static int do_dmabuf_dirty_ldu(struct vmw_private *dev_priv,
+			       struct vmw_framebuffer *framebuffer,
+			       struct vmw_dma_buffer *buffer,
+			       unsigned flags, unsigned color,
+			       struct drm_clip_rect *clips,
+			       unsigned num_clips, int increment)
 {
 {
-	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
-	struct vmw_master *vmaster = vmw_master(file_priv->master);
-	struct drm_clip_rect norect;
-	int ret;
+	size_t fifo_size;
+	int i;
+
 	struct {
 	struct {
 		uint32_t header;
 		uint32_t header;
 		SVGAFifoCmdUpdate body;
 		SVGAFifoCmdUpdate body;
 	} *cmd;
 	} *cmd;
-	int i, increment = 1;
-
-	ret = ttm_read_lock(&vmaster->lock, true);
-	if (unlikely(ret != 0))
-		return ret;
 
 
-	if (!num_clips) {
-		num_clips = 1;
-		clips = &norect;
-		norect.x1 = norect.y1 = 0;
-		norect.x2 = framebuffer->width;
-		norect.y2 = framebuffer->height;
-	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
-		num_clips /= 2;
-		increment = 2;
-	}
-
-	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
+	fifo_size = sizeof(*cmd) * num_clips;
+	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
 	if (unlikely(cmd == NULL)) {
 	if (unlikely(cmd == NULL)) {
 		DRM_ERROR("Fifo reserve failed.\n");
 		DRM_ERROR("Fifo reserve failed.\n");
-		ttm_read_unlock(&vmaster->lock);
 		return -ENOMEM;
 		return -ENOMEM;
 	}
 	}
 
 
+	memset(cmd, 0, fifo_size);
 	for (i = 0; i < num_clips; i++, clips += increment) {
 	for (i = 0; i < num_clips; i++, clips += increment) {
 		cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
 		cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
 		cmd[i].body.x = cpu_to_le32(clips->x1);
 		cmd[i].body.x = cpu_to_le32(clips->x1);
@@ -694,57 +611,117 @@ int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
 		cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
 		cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
 	}
 	}
 
 
-	vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);
-	ttm_read_unlock(&vmaster->lock);
-
+	vmw_fifo_commit(dev_priv, fifo_size);
 	return 0;
 	return 0;
 }
 }
 
 
-static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
-	.destroy = vmw_framebuffer_dmabuf_destroy,
-	.dirty = vmw_framebuffer_dmabuf_dirty,
-	.create_handle = vmw_framebuffer_create_handle,
-};
-
-static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb)
+static int do_dmabuf_dirty_sou(struct drm_file *file_priv,
+			       struct vmw_private *dev_priv,
+			       struct vmw_framebuffer *framebuffer,
+			       struct vmw_dma_buffer *buffer,
+			       unsigned flags, unsigned color,
+			       struct drm_clip_rect *clips,
+			       unsigned num_clips, int increment)
 {
 {
-	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
-	struct vmw_framebuffer_surface *vfbs =
-		vmw_framebuffer_to_vfbs(&vfb->base);
-	unsigned long size = vfbs->base.base.pitch * vfbs->base.base.height;
-	int ret;
+	size_t fifo_size;
+	int i, ret;
+
+	struct {
+		uint32_t header;
+		SVGAFifoCmdDefineGMRFB body;
+	} *cmd;
+	struct {
+		uint32_t header;
+		SVGAFifoCmdBlitGMRFBToScreen body;
+	} *blits;
 
 
-	vfbs->buffer = kzalloc(sizeof(*vfbs->buffer), GFP_KERNEL);
-	if (unlikely(vfbs->buffer == NULL))
+	fifo_size = sizeof(*cmd) + sizeof(*blits) * num_clips;
+	cmd = kmalloc(fifo_size, GFP_KERNEL);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed to allocate temporary cmd buffer.\n");
 		return -ENOMEM;
 		return -ENOMEM;
+	}
 
 
-	vmw_overlay_pause_all(dev_priv);
-	ret = vmw_dmabuf_init(dev_priv, vfbs->buffer, size,
-			       &vmw_vram_ne_placement,
-			       false, &vmw_dmabuf_bo_free);
-	vmw_overlay_resume_all(dev_priv);
-	if (unlikely(ret != 0))
-		vfbs->buffer = NULL;
+	memset(cmd, 0, fifo_size);
+	cmd->header = SVGA_CMD_DEFINE_GMRFB;
+	cmd->body.format.bitsPerPixel = framebuffer->base.bits_per_pixel;
+	cmd->body.format.colorDepth = framebuffer->base.depth;
+	cmd->body.format.reserved = 0;
+	cmd->body.bytesPerLine = framebuffer->base.pitch;
+	cmd->body.ptr.gmrId = framebuffer->user_handle;
+	cmd->body.ptr.offset = 0;
+
+	blits = (void *)&cmd[1];
+	for (i = 0; i < num_clips; i++, clips += increment) {
+		blits[i].header = SVGA_CMD_BLIT_GMRFB_TO_SCREEN;
+		blits[i].body.srcOrigin.x = clips->x1;
+		blits[i].body.srcOrigin.y = clips->y1;
+		blits[i].body.destRect.left = clips->x1;
+		blits[i].body.destRect.top = clips->y1;
+		blits[i].body.destRect.right = clips->x2;
+		blits[i].body.destRect.bottom = clips->y2;
+	}
+
+	ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
+				  fifo_size, 0, NULL);
+
+	kfree(cmd);
 
 
 	return ret;
 	return ret;
 }
 }
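The temporary buffer handed to vmw_execbuf_process is thus one DEFINE_GMRFB command followed by num_clips blits; schematically (not literal memory contents):

	/*
	 * +---------------------------+ <- cmd
	 * | SVGA_CMD_DEFINE_GMRFB     |
	 * +---------------------------+ <- blits == (void *)&cmd[1]
	 * | SVGA_CMD_BLIT_GMRFB_TO_.. | x num_clips
	 * +---------------------------+
	 */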
 
 
-static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb)
+int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
+				 struct drm_file *file_priv,
+				 unsigned flags, unsigned color,
+				 struct drm_clip_rect *clips,
+				 unsigned num_clips)
 {
 {
-	struct ttm_buffer_object *bo;
-	struct vmw_framebuffer_surface *vfbs =
-		vmw_framebuffer_to_vfbs(&vfb->base);
+	struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
+	struct vmw_master *vmaster = vmw_master(file_priv->master);
+	struct vmw_framebuffer_dmabuf *vfbd =
+		vmw_framebuffer_to_vfbd(framebuffer);
+	struct vmw_dma_buffer *dmabuf = vfbd->buffer;
+	struct drm_clip_rect norect;
+	int ret, increment = 1;
 
 
-	if (unlikely(vfbs->buffer == NULL))
-		return 0;
+	ret = ttm_read_lock(&vmaster->lock, true);
+	if (unlikely(ret != 0))
+		return ret;
 
 
-	bo = &vfbs->buffer->base;
-	ttm_bo_unref(&bo);
-	vfbs->buffer = NULL;
+	if (!num_clips) {
+		num_clips = 1;
+		clips = &norect;
+		norect.x1 = norect.y1 = 0;
+		norect.x2 = framebuffer->width;
+		norect.y2 = framebuffer->height;
+	} else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
+		num_clips /= 2;
+		increment = 2;
+	}
 
 
-	return 0;
+	if (dev_priv->ldu_priv) {
+		ret = do_dmabuf_dirty_ldu(dev_priv, &vfbd->base, dmabuf,
+					  flags, color,
+					  clips, num_clips, increment);
+	} else {
+		ret = do_dmabuf_dirty_sou(file_priv, dev_priv, &vfbd->base,
+					  dmabuf, flags, color,
+					  clips, num_clips, increment);
+	}
+
+	ttm_read_unlock(&vmaster->lock);
+	return ret;
 }
 }
 
 
+static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
+	.destroy = vmw_framebuffer_dmabuf_destroy,
+	.dirty = vmw_framebuffer_dmabuf_dirty,
+	.create_handle = vmw_framebuffer_create_handle,
+};
+
+/**
+ * Pin the dma buffer to the start of VRAM.
+ */
 static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
 static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
 {
 {
 	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
 	struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
@@ -752,10 +729,12 @@ static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
 		vmw_framebuffer_to_vfbd(&vfb->base);
 		vmw_framebuffer_to_vfbd(&vfb->base);
 	int ret;
 	int ret;
 
 
+	/* This code should not be used with screen objects */
+	BUG_ON(dev_priv->sou_priv);
 
 
 	vmw_overlay_pause_all(dev_priv);
 	vmw_overlay_pause_all(dev_priv);
 
 
-	ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);
+	ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer, true, false);
 
 
 	vmw_overlay_resume_all(dev_priv);
 	vmw_overlay_resume_all(dev_priv);
 
 
@@ -775,7 +754,7 @@ static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
 		return 0;
 		return 0;
 	}
 	}
 
 
-	return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer);
+	return vmw_dmabuf_unpin(dev_priv, vfbd->buffer, false);
 }
 }
 
 
 static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
 static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
@@ -797,6 +776,33 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
 		return -EINVAL;
 		return -EINVAL;
 	}
 	}
 
 
+	/* Limited framebuffer color depth support for screen objects */
+	if (dev_priv->sou_priv) {
+		switch (mode_cmd->depth) {
+		case 32:
+		case 24:
+			/* Only support 32 bpp for 32 and 24 depth fbs */
+			if (mode_cmd->bpp == 32)
+				break;
+
+			DRM_ERROR("Invalid color depth/bpp: %d %d\n",
+				  mode_cmd->depth, mode_cmd->bpp);
+			return -EINVAL;
+		case 16:
+		case 15:
+			/* Only support 16 bpp for 16 and 15 depth fbs */
+			if (mode_cmd->bpp == 16)
+				break;
+
+			DRM_ERROR("Invalid color depth/bpp: %d %d\n",
+				  mode_cmd->depth, mode_cmd->bpp);
+			return -EINVAL;
+		default:
+			DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
+			return -EINVAL;
+		}
+	}
+
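In other words, only the depth/bpp pairs 24/32, 32/32, 15/16 and 16/16 pass. A mode_cmd accepted under screen objects might look like this (values purely illustrative; bo_handle is a placeholder):

	struct drm_mode_fb_cmd mode_cmd = {
		.width  = 1024,
		.height = 768,
		.pitch  = 1024 * 4,
		.bpp    = 32,
		.depth  = 24,       /* 24-depth fbs must use 32 bpp */
		.handle = bo_handle,
	};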
 	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
 	vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
 	if (!vfbd) {
 	if (!vfbd) {
 		ret = -ENOMEM;
 		ret = -ENOMEM;
@@ -818,9 +824,13 @@ static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
 	vfbd->base.base.depth = mode_cmd->depth;
 	vfbd->base.base.depth = mode_cmd->depth;
 	vfbd->base.base.width = mode_cmd->width;
 	vfbd->base.base.width = mode_cmd->width;
 	vfbd->base.base.height = mode_cmd->height;
 	vfbd->base.base.height = mode_cmd->height;
-	vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
-	vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
+	if (!dev_priv->sou_priv) {
+		vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
+		vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
+	}
+	vfbd->base.dmabuf = true;
 	vfbd->buffer = dmabuf;
 	vfbd->buffer = dmabuf;
+	vfbd->base.user_handle = mode_cmd->handle;
 	*out = &vfbd->base;
 	*out = &vfbd->base;
 
 
 	return 0;
 	return 0;
@@ -846,6 +856,7 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
 	struct vmw_framebuffer *vfb = NULL;
 	struct vmw_framebuffer *vfb = NULL;
 	struct vmw_surface *surface = NULL;
 	struct vmw_surface *surface = NULL;
 	struct vmw_dma_buffer *bo = NULL;
 	struct vmw_dma_buffer *bo = NULL;
+	struct ttm_base_object *user_obj;
 	u64 required_size;
 	u64 required_size;
 	int ret;
 	int ret;
 
 
@@ -861,6 +872,21 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
 		return NULL;
 		return NULL;
 	}
 	}
 
 
+	/*
+	 * Take a reference on the user object of the resource
+	 * backing the kms fb. This ensures that user-space handle
+	 * lookups on that resource will always work as long as
+	 * it's registered with a kms framebuffer. This is important,
+	 * since vmw_execbuf_process identifies resources in the
+	 * command stream using user-space handles.
+	 */
+
+	user_obj = ttm_base_object_lookup(tfile, mode_cmd->handle);
+	if (unlikely(user_obj == NULL)) {
+		DRM_ERROR("Could not locate requested kms frame buffer.\n");
+		return ERR_PTR(-ENOENT);
+	}
+
 	/**
 	/**
 	 * End conditioned code.
 	 * End conditioned code.
 	 */
 	 */
@@ -881,8 +907,10 @@ static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
 
 
 	if (ret) {
 	if (ret) {
 		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
 		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
+		ttm_base_object_unref(&user_obj);
 		return ERR_PTR(ret);
 		return ERR_PTR(ret);
-	}
+	} else
+		vfb->user_obj = user_obj;
 	return &vfb->base;
 	return &vfb->base;
 
 
 try_dmabuf:
 try_dmabuf:
@@ -902,8 +930,10 @@ try_dmabuf:
 
 
 	if (ret) {
 	if (ret) {
 		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
 		DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
+		ttm_base_object_unref(&user_obj);
 		return ERR_PTR(ret);
 		return ERR_PTR(ret);
-	}
+	} else
+		vfb->user_obj = user_obj;
 
 
 	return &vfb->base;
 	return &vfb->base;
 
 
@@ -911,6 +941,7 @@ err_not_scanout:
 	DRM_ERROR("surface not marked as scanout\n");
 	DRM_ERROR("surface not marked as scanout\n");
 	/* vmw_user_surface_lookup takes one ref */
 	/* vmw_user_surface_lookup takes one ref */
 	vmw_surface_unreference(&surface);
 	vmw_surface_unreference(&surface);
+	ttm_base_object_unref(&user_obj);
 
 
 	return ERR_PTR(-EINVAL);
 	return ERR_PTR(-EINVAL);
 }
 }
@@ -919,6 +950,175 @@ static struct drm_mode_config_funcs vmw_kms_funcs = {
 	.fb_create = vmw_kms_fb_create,
 	.fb_create = vmw_kms_fb_create,
 };
 };
 
 
+int vmw_kms_present(struct vmw_private *dev_priv,
+		    struct drm_file *file_priv,
+		    struct vmw_framebuffer *vfb,
+		    struct vmw_surface *surface,
+		    uint32_t sid,
+		    int32_t destX, int32_t destY,
+		    struct drm_vmw_rect *clips,
+		    uint32_t num_clips)
+{
+	size_t fifo_size;
+	int i, ret;
+
+	struct {
+		SVGA3dCmdHeader header;
+		SVGA3dCmdBlitSurfaceToScreen body;
+	} *cmd;
+	SVGASignedRect *blits;
+
+	BUG_ON(surface == NULL);
+	BUG_ON(!clips || !num_clips);
+
+	fifo_size = sizeof(*cmd) + sizeof(SVGASignedRect) * num_clips;
+	cmd = kmalloc(fifo_size, GFP_KERNEL);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed to allocate temporary fifo memory.\n");
+		return -ENOMEM;
+	}
+
+	memset(cmd, 0, fifo_size);
+
+	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN);
+	cmd->header.size = cpu_to_le32(fifo_size - sizeof(cmd->header));
+
+	cmd->body.srcImage.sid = sid;
+	cmd->body.destScreenId = SVGA_ID_INVALID; /* virtual coords */
+
+	cmd->body.srcRect.left = 0;
+	cmd->body.srcRect.right = surface->sizes[0].width;
+	cmd->body.srcRect.top = 0;
+	cmd->body.srcRect.bottom = surface->sizes[0].height;
+
+	cmd->body.destRect.left = destX;
+	cmd->body.destRect.right = destX + surface->sizes[0].width;
+	cmd->body.destRect.top = destY;
+	cmd->body.destRect.bottom = destY + surface->sizes[0].height;
+
+	blits = (SVGASignedRect *)&cmd[1];
+	for (i = 0; i < num_clips; i++) {
+		blits[i].left   = clips[i].x;
+		blits[i].right  = clips[i].x + clips[i].w;
+		blits[i].top    = clips[i].y;
+		blits[i].bottom = clips[i].y + clips[i].h;
+	}
+
+	ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd,
+				  fifo_size, 0, NULL);
+
+	kfree(cmd);
+
+	return ret;
+}
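The header size is computed so that it covers the body plus the trailing rect array; spelled out:

	/*
	 * fifo_size   = sizeof(header) + sizeof(body)
	 *             + num_clips * sizeof(SVGASignedRect)
	 * header.size = fifo_size - sizeof(header)
	 */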
+
+int vmw_kms_readback(struct vmw_private *dev_priv,
+		     struct drm_file *file_priv,
+		     struct vmw_framebuffer *vfb,
+		     struct drm_vmw_fence_rep __user *user_fence_rep,
+		     struct drm_vmw_rect *clips,
+		     uint32_t num_clips)
+{
+	struct vmw_framebuffer_dmabuf *vfbd =
+		vmw_framebuffer_to_vfbd(&vfb->base);
+	struct vmw_dma_buffer *dmabuf = vfbd->buffer;
+	struct vmw_display_unit *units[VMWGFX_NUM_DISPLAY_UNITS];
+	struct drm_crtc *crtc;
+	size_t fifo_size;
+	int i, k, ret, num_units, blits_pos;
+
+	struct {
+		uint32_t header;
+		SVGAFifoCmdDefineGMRFB body;
+	} *cmd;
+	struct {
+		uint32_t header;
+		SVGAFifoCmdBlitScreenToGMRFB body;
+	} *blits;
+
+	num_units = 0;
+	list_for_each_entry(crtc, &dev_priv->dev->mode_config.crtc_list, head) {
+		if (crtc->fb != &vfb->base)
+			continue;
+		units[num_units++] = vmw_crtc_to_du(crtc);
+	}
+
+	BUG_ON(dmabuf == NULL);
+	BUG_ON(!clips || !num_clips);
+
+	/* worst-case fifo size: every clip visible on every unit */
+	fifo_size = sizeof(*cmd) + sizeof(*blits) * num_clips * num_units;
+	cmd = kmalloc(fifo_size, GFP_KERNEL);
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Failed to allocate temporary fifo memory.\n");
+		return -ENOMEM;
+	}
+
+	memset(cmd, 0, fifo_size);
+	cmd->header = SVGA_CMD_DEFINE_GMRFB;
+	cmd->body.format.bitsPerPixel = vfb->base.bits_per_pixel;
+	cmd->body.format.colorDepth = vfb->base.depth;
+	cmd->body.format.reserved = 0;
+	cmd->body.bytesPerLine = vfb->base.pitch;
+	cmd->body.ptr.gmrId = vfb->user_handle;
+	cmd->body.ptr.offset = 0;
+
+	blits = (void *)&cmd[1];
+	blits_pos = 0;
+	for (i = 0; i < num_units; i++) {
+		struct drm_vmw_rect *c = clips;
+		for (k = 0; k < num_clips; k++, c++) {
+			/* transform clip coords to crtc-origin-based coords */
+			int clip_x1 = c->x - units[i]->crtc.x;
+			int clip_x2 = c->x - units[i]->crtc.x + c->w;
+			int clip_y1 = c->y - units[i]->crtc.y;
+			int clip_y2 = c->y - units[i]->crtc.y + c->h;
+			int dest_x = c->x;
+			int dest_y = c->y;
+
+			/* compensate for clipping: if the clip starts
+			 * left of or above the crtc, shift the
+			 * destination by the clipped-off amount.
+			 */
+			if (clip_x1 < 0)
+				dest_x += -clip_x1;
+			if (clip_y1 < 0)
+				dest_y += -clip_y1;
+
+			/* clip */
+			clip_x1 = max(clip_x1, 0);
+			clip_y1 = max(clip_y1, 0);
+			clip_x2 = min(clip_x2, units[i]->crtc.mode.hdisplay);
+			clip_y2 = min(clip_y2, units[i]->crtc.mode.vdisplay);
+
+			/* and cull any rects that miss the crtc */
+			if (clip_x1 >= units[i]->crtc.mode.hdisplay ||
+			    clip_y1 >= units[i]->crtc.mode.vdisplay ||
+			    clip_x2 <= 0 || clip_y2 <= 0)
+				continue;
+
+			blits[blits_pos].header = SVGA_CMD_BLIT_SCREEN_TO_GMRFB;
+			blits[blits_pos].body.srcScreenId = units[i]->unit;
+			blits[blits_pos].body.destOrigin.x = dest_x;
+			blits[blits_pos].body.destOrigin.y = dest_y;
+
+			blits[blits_pos].body.srcRect.left = clip_x1;
+			blits[blits_pos].body.srcRect.top = clip_y1;
+			blits[blits_pos].body.srcRect.right = clip_x2;
+			blits[blits_pos].body.srcRect.bottom = clip_y2;
+			blits_pos++;
+		}
+	}
+	/* recompute the exact fifo size from the number of blits generated */
+	fifo_size = sizeof(*cmd) + sizeof(*blits) * blits_pos;
+
+	ret = vmw_execbuf_process(file_priv, dev_priv, NULL, cmd, fifo_size,
+				  0, user_fence_rep);
+
+	kfree(cmd);
+
+	return ret;
+}
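A worked example of the per-crtc transform above (illustrative numbers): for a crtc at (640, 0) in 800x600 mode and a clip (600, 100, 100x50):

	/*
	 * clip_x1 = 600 - 640 = -40  ->  dest_x = 600 + 40 = 640
	 * after clamping: srcRect = (0, 100)-(60, 150),
	 *                 destOrigin = (640, 100)
	 */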
+
 int vmw_kms_init(struct vmw_private *dev_priv)
 int vmw_kms_init(struct vmw_private *dev_priv)
 {
 {
 	struct drm_device *dev = dev_priv->dev;
 	struct drm_device *dev = dev_priv->dev;
@@ -932,7 +1132,9 @@ int vmw_kms_init(struct vmw_private *dev_priv)
 	dev->mode_config.max_width = 8192;
 	dev->mode_config.max_width = 8192;
 	dev->mode_config.max_height = 8192;
 	dev->mode_config.max_height = 8192;
 
 
-	ret = vmw_kms_init_legacy_display_system(dev_priv);
+	ret = vmw_kms_init_screen_object_display(dev_priv);
+	if (ret) /* Fall back to the legacy display system */
+		(void)vmw_kms_init_legacy_display_system(dev_priv);
 
 
 	return 0;
 	return 0;
 }
 }
@@ -1103,3 +1305,242 @@ u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
 {
 {
 	return 0;
 	return 0;
 }
 }
+
+
+/*
+ * Small shared kms functions.
+ */
+
+int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
+			 struct drm_vmw_rect *rects)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct vmw_display_unit *du;
+	struct drm_connector *con;
+
+	mutex_lock(&dev->mode_config.mutex);
+
+#if 0
+	{
+		unsigned int i;
+
+		DRM_INFO("%s: new layout ", __func__);
+		for (i = 0; i < num; i++)
+			DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
+				 rects[i].w, rects[i].h);
+		DRM_INFO("\n");
+	}
+#endif
+
+	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
+		du = vmw_connector_to_du(con);
+		if (num > du->unit) {
+			du->pref_width = rects[du->unit].w;
+			du->pref_height = rects[du->unit].h;
+			du->pref_active = true;
+		} else {
+			du->pref_width = 800;
+			du->pref_height = 600;
+			du->pref_active = false;
+		}
+		con->status = vmw_du_connector_detect(con, true);
+	}
+
+	mutex_unlock(&dev->mode_config.mutex);
+
+	return 0;
+}
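Callers pass one rect per display unit; e.g. a side-by-side dual-head layout (a sketch, values illustrative):

	struct drm_vmw_rect rects[] = {
		{ .x = 0,    .y = 0, .w = 1024, .h = 768 }, /* unit 0 */
		{ .x = 1024, .y = 0, .w = 1024, .h = 768 }, /* unit 1 */
	};

	vmw_du_update_layout(dev_priv, ARRAY_SIZE(rects), rects);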
+
+void vmw_du_crtc_save(struct drm_crtc *crtc)
+{
+}
+
+void vmw_du_crtc_restore(struct drm_crtc *crtc)
+{
+}
+
+void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
+			   u16 *r, u16 *g, u16 *b,
+			   uint32_t start, uint32_t size)
+{
+	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
+	int i;
+
+	for (i = 0; i < size; i++) {
+		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i,
+			  r[i], g[i], b[i]);
+		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
+		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
+		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
+	}
+}
+
+void vmw_du_connector_dpms(struct drm_connector *connector, int mode)
+{
+}
+
+void vmw_du_connector_save(struct drm_connector *connector)
+{
+}
+
+void vmw_du_connector_restore(struct drm_connector *connector)
+{
+}
+
+enum drm_connector_status
+vmw_du_connector_detect(struct drm_connector *connector, bool force)
+{
+	uint32_t num_displays;
+	struct drm_device *dev = connector->dev;
+	struct vmw_private *dev_priv = vmw_priv(dev);
+
+	mutex_lock(&dev_priv->hw_mutex);
+	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
+	mutex_unlock(&dev_priv->hw_mutex);
+
+	return ((vmw_connector_to_du(connector)->unit < num_displays) ?
+		connector_status_connected : connector_status_disconnected);
+}
+
+static struct drm_display_mode vmw_kms_connector_builtin[] = {
+	/* 640x480@60Hz */
+	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
+		   752, 800, 0, 480, 489, 492, 525, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 800x600@60Hz */
+	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
+		   968, 1056, 0, 600, 601, 605, 628, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1024x768@60Hz */
+	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
+		   1184, 1344, 0, 768, 771, 777, 806, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1152x864@75Hz */
+	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
+		   1344, 1600, 0, 864, 865, 868, 900, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1280x768@60Hz */
+	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
+		   1472, 1664, 0, 768, 771, 778, 798, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1280x800@60Hz */
+	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
+		   1480, 1680, 0, 800, 803, 809, 831, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
+	/* 1280x960@60Hz */
+	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
+		   1488, 1800, 0, 960, 961, 964, 1000, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1280x1024@60Hz */
+	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
+		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1360x768@60Hz */
+	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
+		   1536, 1792, 0, 768, 771, 777, 795, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1400x1050@60Hz */
+	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
+		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1440x900@60Hz */
+	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
+		   1672, 1904, 0, 900, 903, 909, 934, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1600x1200@60Hz */
+	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
+		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
+		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1680x1050@60Hz */
+	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
+		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1792x1344@60Hz */
+	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
+		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1856x1392@60Hz */
+	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
+		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1920x1200@60Hz */
+	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
+		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 1920x1440@60Hz */
+	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
+		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* 2560x1600@60Hz */
+	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
+		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
+		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
+	/* Terminate */
+	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
+};
+
+int vmw_du_connector_fill_modes(struct drm_connector *connector,
+				uint32_t max_width, uint32_t max_height)
+{
+	struct vmw_display_unit *du = vmw_connector_to_du(connector);
+	struct drm_device *dev = connector->dev;
+	struct vmw_private *dev_priv = vmw_priv(dev);
+	struct drm_display_mode *mode = NULL;
+	struct drm_display_mode *bmode;
+	struct drm_display_mode prefmode = { DRM_MODE("preferred",
+		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
+	};
+	int i;
+
+	/* Add preferred mode */
+	{
+		mode = drm_mode_duplicate(dev, &prefmode);
+		if (!mode)
+			return 0;
+		mode->hdisplay = du->pref_width;
+		mode->vdisplay = du->pref_height;
+		mode->vrefresh = drm_mode_vrefresh(mode);
+		if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
+					       mode->vdisplay)) {
+			drm_mode_probed_add(connector, mode);
+
+			if (du->pref_mode) {
+				list_del_init(&du->pref_mode->head);
+				drm_mode_destroy(dev, du->pref_mode);
+			}
+
+			du->pref_mode = mode;
+		}
+	}
+
+	for (i = 0; vmw_kms_connector_builtin[i].type != 0; i++) {
+		bmode = &vmw_kms_connector_builtin[i];
+		if (bmode->hdisplay > max_width ||
+		    bmode->vdisplay > max_height)
+			continue;
+
+		if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2,
+						bmode->vdisplay))
+			continue;
+
+		mode = drm_mode_duplicate(dev, bmode);
+		if (!mode)
+			return 0;
+		mode->vrefresh = drm_mode_vrefresh(mode);
+
+		drm_mode_probed_add(connector, mode);
+	}
+
+	drm_mode_connector_list_update(connector);
+
+	return 1;
+}
+
+int vmw_du_connector_set_property(struct drm_connector *connector,
+				  struct drm_property *property,
+				  uint64_t val)
+{
+	return 0;
+}

+ 43 - 1
drivers/gpu/drm/vmwgfx/vmwgfx_kms.h

@@ -31,6 +31,8 @@
 #include "drmP.h"
 #include "drmP.h"
 #include "vmwgfx_drv.h"
 #include "vmwgfx_drv.h"
 
 
+#define VMWGFX_NUM_DISPLAY_UNITS 8
+
 
 
 #define vmw_framebuffer_to_vfb(x) \
 #define vmw_framebuffer_to_vfb(x) \
 	container_of(x, struct vmw_framebuffer, base)
 	container_of(x, struct vmw_framebuffer, base)
@@ -45,6 +47,9 @@ struct vmw_framebuffer {
 	struct drm_framebuffer base;
 	struct drm_framebuffer base;
 	int (*pin)(struct vmw_framebuffer *fb);
 	int (*pin)(struct vmw_framebuffer *fb);
 	int (*unpin)(struct vmw_framebuffer *fb);
 	int (*unpin)(struct vmw_framebuffer *fb);
+	bool dmabuf;
+	struct ttm_base_object *user_obj;
+	uint32_t user_handle;
 };
 };
 
 
 
 
@@ -83,22 +88,59 @@ struct vmw_display_unit {
 	int hotspot_y;
 	int hotspot_y;
 
 
 	unsigned unit;
 	unsigned unit;
+
+	/*
+	 * Preferred mode tracking.
+	 */
+	unsigned pref_width;
+	unsigned pref_height;
+	bool pref_active;
+	struct drm_display_mode *pref_mode;
 };
 };
 
 
+#define vmw_crtc_to_du(x) \
+	container_of(x, struct vmw_display_unit, crtc)
+#define vmw_connector_to_du(x) \
+	container_of(x, struct vmw_display_unit, connector)
+
+
 /*
 /*
  * Shared display unit functions - vmwgfx_kms.c
  * Shared display unit functions - vmwgfx_kms.c
  */
  */
 void vmw_display_unit_cleanup(struct vmw_display_unit *du);
 void vmw_display_unit_cleanup(struct vmw_display_unit *du);
+void vmw_du_crtc_save(struct drm_crtc *crtc);
+void vmw_du_crtc_restore(struct drm_crtc *crtc);
+void vmw_du_crtc_gamma_set(struct drm_crtc *crtc,
+			   u16 *r, u16 *g, u16 *b,
+			   uint32_t start, uint32_t size);
 int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
 int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
 			   uint32_t handle, uint32_t width, uint32_t height);
 			   uint32_t handle, uint32_t width, uint32_t height);
 int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
 int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y);
+void vmw_du_connector_dpms(struct drm_connector *connector, int mode);
+void vmw_du_connector_save(struct drm_connector *connector);
+void vmw_du_connector_restore(struct drm_connector *connector);
+enum drm_connector_status
+vmw_du_connector_detect(struct drm_connector *connector, bool force);
+int vmw_du_connector_fill_modes(struct drm_connector *connector,
+				uint32_t max_width, uint32_t max_height);
+int vmw_du_connector_set_property(struct drm_connector *connector,
+				  struct drm_property *property,
+				  uint64_t val);
+int vmw_du_update_layout(struct vmw_private *dev_priv, unsigned num,
+			 struct drm_vmw_rect *rects);
 
 
 /*
 /*
  * Legacy display unit functions - vmwgfx_ldu.c
  * Legacy display unit functions - vmwgfx_ldu.c
  */
  */
 int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv);
 int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv);
 int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv);
 int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv);
-int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num,
+
+/*
+ * Screen Objects display functions - vmwgfx_scrn.c
+ */
+int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv);
+int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv);
+int vmw_kms_sou_update_layout(struct vmw_private *dev_priv, unsigned num,
 			      struct drm_vmw_rect *rects);
 			      struct drm_vmw_rect *rects);
 
 
 #endif
 #endif

+ 17 - 256
drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c

@@ -27,7 +27,6 @@
 
 
 #include "vmwgfx_kms.h"
 #include "vmwgfx_kms.h"
 
 
-#define VMWGFX_LDU_NUM_DU 8
 
 
 #define vmw_crtc_to_ldu(x) \
 #define vmw_crtc_to_ldu(x) \
 	container_of(x, struct vmw_legacy_display_unit, base.crtc)
 	container_of(x, struct vmw_legacy_display_unit, base.crtc)
@@ -51,11 +50,6 @@ struct vmw_legacy_display {
 struct vmw_legacy_display_unit {
 struct vmw_legacy_display_unit {
 	struct vmw_display_unit base;
 	struct vmw_display_unit base;
 
 
-	unsigned pref_width;
-	unsigned pref_height;
-	bool pref_active;
-	struct drm_display_mode *pref_mode;
-
 	struct list_head active;
 	struct list_head active;
 };
 };
 
 
@@ -71,29 +65,6 @@ static void vmw_ldu_destroy(struct vmw_legacy_display_unit *ldu)
  * Legacy Display Unit CRTC functions
  * Legacy Display Unit CRTC functions
  */
  */
 
 
-static void vmw_ldu_crtc_save(struct drm_crtc *crtc)
-{
-}
-
-static void vmw_ldu_crtc_restore(struct drm_crtc *crtc)
-{
-}
-
-static void vmw_ldu_crtc_gamma_set(struct drm_crtc *crtc,
-				   u16 *r, u16 *g, u16 *b,
-				   uint32_t start, uint32_t size)
-{
-	struct vmw_private *dev_priv = vmw_priv(crtc->dev);
-	int i;
-
-	for (i = 0; i < size; i++) {
-		DRM_DEBUG("%d r/g/b = 0x%04x / 0x%04x / 0x%04x\n", i, r[i], g[i], b[i]);
-		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 0, r[i] >> 8);
-		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 1, g[i] >> 8);
-		vmw_write(dev_priv, SVGA_PALETTE_BASE + i * 3 + 2, b[i] >> 8);
-	}
-}
-
 static void vmw_ldu_crtc_destroy(struct drm_crtc *crtc)
 static void vmw_ldu_crtc_destroy(struct drm_crtc *crtc)
 {
 {
 	vmw_ldu_destroy(vmw_crtc_to_ldu(crtc));
 	vmw_ldu_destroy(vmw_crtc_to_ldu(crtc));
@@ -301,15 +272,16 @@ static int vmw_ldu_crtc_set_config(struct drm_mode_set *set)
 }
 }
 
 
 static struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
 static struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
-	.save = vmw_ldu_crtc_save,
-	.restore = vmw_ldu_crtc_restore,
+	.save = vmw_du_crtc_save,
+	.restore = vmw_du_crtc_restore,
 	.cursor_set = vmw_du_crtc_cursor_set,
 	.cursor_set = vmw_du_crtc_cursor_set,
 	.cursor_move = vmw_du_crtc_cursor_move,
 	.cursor_move = vmw_du_crtc_cursor_move,
-	.gamma_set = vmw_ldu_crtc_gamma_set,
+	.gamma_set = vmw_du_crtc_gamma_set,
 	.destroy = vmw_ldu_crtc_destroy,
 	.destroy = vmw_ldu_crtc_destroy,
 	.set_config = vmw_ldu_crtc_set_config,
 	.set_config = vmw_ldu_crtc_set_config,
 };
 };
 
 
+
 /*
 /*
  * Legacy Display Unit encoder functions
  * Legacy Display Unit encoder functions
  */
  */
@@ -327,190 +299,18 @@ static struct drm_encoder_funcs vmw_legacy_encoder_funcs = {
  * Legacy Display Unit connector functions
  * Legacy Display Unit connector functions
  */
  */
 
 
-static void vmw_ldu_connector_dpms(struct drm_connector *connector, int mode)
-{
-}
-
-static void vmw_ldu_connector_save(struct drm_connector *connector)
-{
-}
-
-static void vmw_ldu_connector_restore(struct drm_connector *connector)
-{
-}
-
-static enum drm_connector_status
-	vmw_ldu_connector_detect(struct drm_connector *connector,
-				 bool force)
-{
-	uint32_t num_displays;
-	struct drm_device *dev = connector->dev;
-	struct vmw_private *dev_priv = vmw_priv(dev);
-
-	mutex_lock(&dev_priv->hw_mutex);
-	num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
-	mutex_unlock(&dev_priv->hw_mutex);
-
-	return ((vmw_connector_to_ldu(connector)->base.unit < num_displays) ?
-		connector_status_connected : connector_status_disconnected);
-}
-
-static const struct drm_display_mode vmw_ldu_connector_builtin[] = {
-	/* 640x480@60Hz */
-	{ DRM_MODE("640x480", DRM_MODE_TYPE_DRIVER, 25175, 640, 656,
-		   752, 800, 0, 480, 489, 492, 525, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 800x600@60Hz */
-	{ DRM_MODE("800x600", DRM_MODE_TYPE_DRIVER, 40000, 800, 840,
-		   968, 1056, 0, 600, 601, 605, 628, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1024x768@60Hz */
-	{ DRM_MODE("1024x768", DRM_MODE_TYPE_DRIVER, 65000, 1024, 1048,
-		   1184, 1344, 0, 768, 771, 777, 806, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 1152x864@75Hz */
-	{ DRM_MODE("1152x864", DRM_MODE_TYPE_DRIVER, 108000, 1152, 1216,
-		   1344, 1600, 0, 864, 865, 868, 900, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1280x768@60Hz */
-	{ DRM_MODE("1280x768", DRM_MODE_TYPE_DRIVER, 79500, 1280, 1344,
-		   1472, 1664, 0, 768, 771, 778, 798, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1280x800@60Hz */
-	{ DRM_MODE("1280x800", DRM_MODE_TYPE_DRIVER, 83500, 1280, 1352,
-		   1480, 1680, 0, 800, 803, 809, 831, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_NVSYNC) },
-	/* 1280x960@60Hz */
-	{ DRM_MODE("1280x960", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1376,
-		   1488, 1800, 0, 960, 961, 964, 1000, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1280x1024@60Hz */
-	{ DRM_MODE("1280x1024", DRM_MODE_TYPE_DRIVER, 108000, 1280, 1328,
-		   1440, 1688, 0, 1024, 1025, 1028, 1066, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1360x768@60Hz */
-	{ DRM_MODE("1360x768", DRM_MODE_TYPE_DRIVER, 85500, 1360, 1424,
-		   1536, 1792, 0, 768, 771, 777, 795, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1440x1050@60Hz */
-	{ DRM_MODE("1400x1050", DRM_MODE_TYPE_DRIVER, 121750, 1400, 1488,
-		   1632, 1864, 0, 1050, 1053, 1057, 1089, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1440x900@60Hz */
-	{ DRM_MODE("1440x900", DRM_MODE_TYPE_DRIVER, 106500, 1440, 1520,
-		   1672, 1904, 0, 900, 903, 909, 934, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1600x1200@60Hz */
-	{ DRM_MODE("1600x1200", DRM_MODE_TYPE_DRIVER, 162000, 1600, 1664,
-		   1856, 2160, 0, 1200, 1201, 1204, 1250, 0,
-		   DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1680x1050@60Hz */
-	{ DRM_MODE("1680x1050", DRM_MODE_TYPE_DRIVER, 146250, 1680, 1784,
-		   1960, 2240, 0, 1050, 1053, 1059, 1089, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1792x1344@60Hz */
-	{ DRM_MODE("1792x1344", DRM_MODE_TYPE_DRIVER, 204750, 1792, 1920,
-		   2120, 2448, 0, 1344, 1345, 1348, 1394, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1853x1392@60Hz */
-	{ DRM_MODE("1856x1392", DRM_MODE_TYPE_DRIVER, 218250, 1856, 1952,
-		   2176, 2528, 0, 1392, 1393, 1396, 1439, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1920x1200@60Hz */
-	{ DRM_MODE("1920x1200", DRM_MODE_TYPE_DRIVER, 193250, 1920, 2056,
-		   2256, 2592, 0, 1200, 1203, 1209, 1245, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 1920x1440@60Hz */
-	{ DRM_MODE("1920x1440", DRM_MODE_TYPE_DRIVER, 234000, 1920, 2048,
-		   2256, 2600, 0, 1440, 1441, 1444, 1500, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* 2560x1600@60Hz */
-	{ DRM_MODE("2560x1600", DRM_MODE_TYPE_DRIVER, 348500, 2560, 2752,
-		   3032, 3504, 0, 1600, 1603, 1609, 1658, 0,
-		   DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) },
-	/* Terminate */
-	{ DRM_MODE("", 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0) },
-};
-
-static int vmw_ldu_connector_fill_modes(struct drm_connector *connector,
-					uint32_t max_width, uint32_t max_height)
-{
-	struct vmw_legacy_display_unit *ldu = vmw_connector_to_ldu(connector);
-	struct drm_device *dev = connector->dev;
-	struct vmw_private *dev_priv = vmw_priv(dev);
-	struct drm_display_mode *mode = NULL;
-	struct drm_display_mode prefmode = { DRM_MODE("preferred",
-		DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
-		0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-		DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
-	};
-	int i;
-
-	/* Add preferred mode */
-	{
-		mode = drm_mode_duplicate(dev, &prefmode);
-		if (!mode)
-			return 0;
-		mode->hdisplay = ldu->pref_width;
-		mode->vdisplay = ldu->pref_height;
-		mode->vrefresh = drm_mode_vrefresh(mode);
-		if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2,
-					       mode->vdisplay)) {
-			drm_mode_probed_add(connector, mode);
-
-			if (ldu->pref_mode) {
-				list_del_init(&ldu->pref_mode->head);
-				drm_mode_destroy(dev, ldu->pref_mode);
-			}
-
-			ldu->pref_mode = mode;
-		}
-	}
-
-	for (i = 0; vmw_ldu_connector_builtin[i].type != 0; i++) {
-		const struct drm_display_mode *bmode;
-
-		bmode = &vmw_ldu_connector_builtin[i];
-		if (bmode->hdisplay > max_width ||
-		    bmode->vdisplay > max_height)
-			continue;
-
-		if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2,
-						bmode->vdisplay))
-			continue;
-
-		mode = drm_mode_duplicate(dev, bmode);
-		if (!mode)
-			return 0;
-		mode->vrefresh = drm_mode_vrefresh(mode);
-
-		drm_mode_probed_add(connector, mode);
-	}
-
-	drm_mode_connector_list_update(connector);
-
-	return 1;
-}
-
-static int vmw_ldu_connector_set_property(struct drm_connector *connector,
-					  struct drm_property *property,
-					  uint64_t val)
-{
-	return 0;
-}
-
 static void vmw_ldu_connector_destroy(struct drm_connector *connector)
 {
 	vmw_ldu_destroy(vmw_connector_to_ldu(connector));
 }
 
 static struct drm_connector_funcs vmw_legacy_connector_funcs = {
-	.dpms = vmw_ldu_connector_dpms,
-	.save = vmw_ldu_connector_save,
-	.restore = vmw_ldu_connector_restore,
-	.detect = vmw_ldu_connector_detect,
-	.fill_modes = vmw_ldu_connector_fill_modes,
-	.set_property = vmw_ldu_connector_set_property,
+	.dpms = vmw_du_connector_dpms,
+	.save = vmw_du_connector_save,
+	.restore = vmw_du_connector_restore,
+	.detect = vmw_du_connector_detect,
+	.fill_modes = vmw_du_connector_fill_modes,
+	.set_property = vmw_du_connector_set_property,
 	.destroy = vmw_ldu_connector_destroy,
 };
 
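
The six vmw_ldu_connector_* callbacks removed above give way to shared vmw_du_connector_* helpers living in the refactored common display-unit file. Their bodies are not part of the visible diff; judging from the removed LDU versions, the shared detect helper presumably differs only in operating on the common vmw_display_unit base. A minimal sketch under that assumption (vmw_connector_to_du() is a hypothetical accessor, not shown in this diff):

	enum drm_connector_status
	vmw_du_connector_detect(struct drm_connector *connector, bool force)
	{
		uint32_t num_displays;
		struct drm_device *dev = connector->dev;
		struct vmw_private *dev_priv = vmw_priv(dev);
		struct vmw_display_unit *du = vmw_connector_to_du(connector);

		mutex_lock(&dev_priv->hw_mutex);
		num_displays = vmw_read(dev_priv, SVGA_REG_NUM_DISPLAYS);
		mutex_unlock(&dev_priv->hw_mutex);

		/* Same rule as the removed LDU version: a unit is connected
		 * while its index is below the host's display count. */
		return ((du->unit < num_displays) ?
			connector_status_connected :
			connector_status_disconnected);
	}
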
@@ -533,14 +333,14 @@ static int vmw_ldu_init(struct vmw_private *dev_priv, unsigned unit)
 
 	INIT_LIST_HEAD(&ldu->active);
 
-	ldu->pref_active = (unit == 0);
-	ldu->pref_width = 800;
-	ldu->pref_height = 600;
-	ldu->pref_mode = NULL;
+	ldu->base.pref_active = (unit == 0);
+	ldu->base.pref_width = 800;
+	ldu->base.pref_height = 600;
+	ldu->base.pref_mode = NULL;
 
 	drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
 			   DRM_MODE_CONNECTOR_LVDS);
-	connector->status = vmw_ldu_connector_detect(connector, true);
+	connector->status = vmw_du_connector_detect(connector, true);
 
 	drm_encoder_init(dev, encoder, &vmw_legacy_encoder_funcs,
 			 DRM_MODE_ENCODER_LVDS);
@@ -583,9 +383,9 @@ int vmw_kms_init_legacy_display_system(struct vmw_private *dev_priv)
 	drm_mode_create_dirty_info_property(dev_priv->dev);
 
 	if (dev_priv->capabilities & SVGA_CAP_MULTIMON) {
-		for (i = 0; i < VMWGFX_LDU_NUM_DU; ++i)
+		for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
 			vmw_ldu_init(dev_priv, i);
-		ret = drm_vblank_init(dev, VMWGFX_LDU_NUM_DU);
+		ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
 	} else {
 		/* for old hardware without multimon only enable one display */
 		vmw_ldu_init(dev_priv, 0);
@@ -609,42 +409,3 @@ int vmw_kms_close_legacy_display_system(struct vmw_private *dev_priv)
 
 	return 0;
 }
-
-int vmw_kms_ldu_update_layout(struct vmw_private *dev_priv, unsigned num,
-			      struct drm_vmw_rect *rects)
-{
-	struct drm_device *dev = dev_priv->dev;
-	struct vmw_legacy_display_unit *ldu;
-	struct drm_connector *con;
-	int i;
-
-	mutex_lock(&dev->mode_config.mutex);
-
-#if 0
-	DRM_INFO("%s: new layout ", __func__);
-	for (i = 0; i < (int)num; i++)
-		DRM_INFO("(%i, %i %ux%u) ", rects[i].x, rects[i].y,
-			 rects[i].w, rects[i].h);
-	DRM_INFO("\n");
-#else
-	(void)i;
-#endif
-
-	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
-		ldu = vmw_connector_to_ldu(con);
-		if (num > ldu->base.unit) {
-			ldu->pref_width = rects[ldu->base.unit].w;
-			ldu->pref_height = rects[ldu->base.unit].h;
-			ldu->pref_active = true;
-		} else {
-			ldu->pref_width = 800;
-			ldu->pref_height = 600;
-			ldu->pref_active = false;
-		}
-		con->status = vmw_ldu_connector_detect(con, true);
-	}
-
-	mutex_unlock(&dev->mode_config.mutex);
-
-	return 0;
-}
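
With the preferred-size fields promoted onto the shared base (see the ldu->base.pref_* renames above), the generic replacement for this layout update presumably walks the connector list the same way the removed function did. A sketch of the core loop under that assumption, again using the hypothetical vmw_connector_to_du() accessor:

	list_for_each_entry(con, &dev->mode_config.connector_list, head) {
		struct vmw_display_unit *du = vmw_connector_to_du(con);

		if (num > du->unit) {
			/* This unit appears in the new GUI layout. */
			du->pref_width = rects[du->unit].w;
			du->pref_height = rects[du->unit].h;
			du->pref_active = true;
		} else {
			/* Not in the layout: fall back to 800x600, inactive. */
			du->pref_width = 800;
			du->pref_height = 600;
			du->pref_active = false;
		}
		con->status = vmw_du_connector_detect(con, true);
	}
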

+ 89 - 99
drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c

@@ -86,48 +86,6 @@ static inline void fill_flush(struct vmw_escape_video_flush *cmd,
 	cmd->flush.streamId = stream_id;
 }
 
-/**
- * Pin or unpin a buffer in vram.
- *
- * @dev_priv:  Driver private.
- * @buf:  DMA buffer to pin or unpin.
- * @pin:  Pin buffer in vram if true.
- * @interruptible:  Use interruptible wait.
- *
- * Takes the current masters ttm lock in read.
- *
- * Returns
- * -ERESTARTSYS if interrupted by a signal.
- */
-static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
-				  struct vmw_dma_buffer *buf,
-				  bool pin, bool interruptible)
-{
-	struct ttm_buffer_object *bo = &buf->base;
-	struct ttm_placement *overlay_placement = &vmw_vram_placement;
-	int ret;
-
-	ret = ttm_read_lock(&dev_priv->active_master->lock, interruptible);
-	if (unlikely(ret != 0))
-		return ret;
-
-	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
-	if (unlikely(ret != 0))
-		goto err;
-
-	if (pin)
-		overlay_placement = &vmw_vram_ne_placement;
-
-	ret = ttm_bo_validate(bo, overlay_placement, interruptible, false, false);
-
-	ttm_bo_unreserve(bo);
-
-err:
-	ttm_read_unlock(&dev_priv->active_master->lock);
-
-	return ret;
-}
-
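
The overlay-private pin helper above is superseded by the generic dmabuf pinning helpers added in vmwgfx_dmabuf.c (vmw_dmabuf_to_vram(), vmw_dmabuf_to_vram_or_gmr() and vmw_dmabuf_unpin(), used further down). Their exact bodies are not shown in this diff; assuming they keep the removed helper's ttm_read_lock/reserve/validate pattern, the unpin path might look roughly like:

	int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
			     struct vmw_dma_buffer *buf,
			     bool interruptible)
	{
		struct ttm_buffer_object *bo = &buf->base;
		int ret;

		ret = ttm_read_lock(&dev_priv->active_master->lock, interruptible);
		if (unlikely(ret != 0))
			return ret;

		ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
		if (unlikely(ret != 0))
			goto err;

		/* Re-validate against plain VRAM placement, dropping NO_EVICT. */
		ret = ttm_bo_validate(bo, &vmw_vram_placement,
				      interruptible, false, false);

		ttm_bo_unreserve(bo);
	err:
		ttm_read_unlock(&dev_priv->active_master->lock);
		return ret;
	}
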
 /**
  * Send put command to hw.
  *
@@ -139,68 +97,80 @@ static int vmw_overlay_send_put(struct vmw_private *dev_priv,
 				struct drm_vmw_control_stream_arg *arg,
 				bool interruptible)
 {
+	struct vmw_escape_video_flush *flush;
+	size_t fifo_size;
+	bool have_so = dev_priv->sou_priv ? true : false;
+	int i, num_items;
+	SVGAGuestPtr ptr;
+
 	struct {
 		struct vmw_escape_header escape;
 		struct {
-			struct {
-				uint32_t cmdType;
-				uint32_t streamId;
-			} header;
-			struct {
-				uint32_t registerId;
-				uint32_t value;
-			} items[SVGA_VIDEO_PITCH_3 + 1];
-		} body;
-		struct vmw_escape_video_flush flush;
+			uint32_t cmdType;
+			uint32_t streamId;
+		} header;
 	} *cmds;
-	uint32_t offset;
-	int i, ret;
+	struct {
+		uint32_t registerId;
+		uint32_t value;
+	} *items;
 
-	for (;;) {
-		cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
-		if (cmds)
-			break;
+	/* the SVGA_VIDEO_* defines are indices, so we need the last index + 1 */
+	if (have_so)
+		num_items = SVGA_VIDEO_DST_SCREEN_ID + 1;
+	else
+		num_items = SVGA_VIDEO_PITCH_3 + 1;
 
-		ret = vmw_fallback_wait(dev_priv, false, true, 0,
-					interruptible, 3*HZ);
-		if (interruptible && ret == -ERESTARTSYS)
-			return ret;
-		else
-			BUG_ON(ret != 0);
+	fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items;
+
+	cmds = vmw_fifo_reserve(dev_priv, fifo_size);
+	/* hardware has hung, can't do anything here */
+	if (!cmds)
+		return -ENOMEM;
+
+	items = (typeof(items))&cmds[1];
+	flush = (struct vmw_escape_video_flush *)&items[num_items];
+
+	/* the escape body holds the item-sized header plus the items */
+	fill_escape(&cmds->escape, sizeof(*items) * (num_items + 1));
+
+	cmds->header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
+	cmds->header.streamId = arg->stream_id;
+
+	/* the IDs are neatly numbered */
+	for (i = 0; i < num_items; i++)
+		items[i].registerId = i;
+
+	vmw_bo_get_guest_ptr(&buf->base, &ptr);
+	ptr.offset += arg->offset;
+
+	items[SVGA_VIDEO_ENABLED].value     = true;
+	items[SVGA_VIDEO_FLAGS].value       = arg->flags;
+	items[SVGA_VIDEO_DATA_OFFSET].value = ptr.offset;
+	items[SVGA_VIDEO_FORMAT].value      = arg->format;
+	items[SVGA_VIDEO_COLORKEY].value    = arg->color_key;
+	items[SVGA_VIDEO_SIZE].value        = arg->size;
+	items[SVGA_VIDEO_WIDTH].value       = arg->width;
+	items[SVGA_VIDEO_HEIGHT].value      = arg->height;
+	items[SVGA_VIDEO_SRC_X].value       = arg->src.x;
+	items[SVGA_VIDEO_SRC_Y].value       = arg->src.y;
+	items[SVGA_VIDEO_SRC_WIDTH].value   = arg->src.w;
+	items[SVGA_VIDEO_SRC_HEIGHT].value  = arg->src.h;
+	items[SVGA_VIDEO_DST_X].value       = arg->dst.x;
+	items[SVGA_VIDEO_DST_Y].value       = arg->dst.y;
+	items[SVGA_VIDEO_DST_WIDTH].value   = arg->dst.w;
+	items[SVGA_VIDEO_DST_HEIGHT].value  = arg->dst.h;
+	items[SVGA_VIDEO_PITCH_1].value     = arg->pitch[0];
+	items[SVGA_VIDEO_PITCH_2].value     = arg->pitch[1];
+	items[SVGA_VIDEO_PITCH_3].value     = arg->pitch[2];
+	if (have_so) {
+		items[SVGA_VIDEO_DATA_GMRID].value    = ptr.gmrId;
+		items[SVGA_VIDEO_DST_SCREEN_ID].value = SVGA_ID_INVALID;
 	}
 
-	fill_escape(&cmds->escape, sizeof(cmds->body));
-	cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
-	cmds->body.header.streamId = arg->stream_id;
-
-	for (i = 0; i <= SVGA_VIDEO_PITCH_3; i++)
-		cmds->body.items[i].registerId = i;
-
-	offset = buf->base.offset + arg->offset;
-
-	cmds->body.items[SVGA_VIDEO_ENABLED].value     = true;
-	cmds->body.items[SVGA_VIDEO_FLAGS].value       = arg->flags;
-	cmds->body.items[SVGA_VIDEO_DATA_OFFSET].value = offset;
-	cmds->body.items[SVGA_VIDEO_FORMAT].value      = arg->format;
-	cmds->body.items[SVGA_VIDEO_COLORKEY].value    = arg->color_key;
-	cmds->body.items[SVGA_VIDEO_SIZE].value        = arg->size;
-	cmds->body.items[SVGA_VIDEO_WIDTH].value       = arg->width;
-	cmds->body.items[SVGA_VIDEO_HEIGHT].value      = arg->height;
-	cmds->body.items[SVGA_VIDEO_SRC_X].value       = arg->src.x;
-	cmds->body.items[SVGA_VIDEO_SRC_Y].value       = arg->src.y;
-	cmds->body.items[SVGA_VIDEO_SRC_WIDTH].value   = arg->src.w;
-	cmds->body.items[SVGA_VIDEO_SRC_HEIGHT].value  = arg->src.h;
-	cmds->body.items[SVGA_VIDEO_DST_X].value       = arg->dst.x;
-	cmds->body.items[SVGA_VIDEO_DST_Y].value       = arg->dst.y;
-	cmds->body.items[SVGA_VIDEO_DST_WIDTH].value   = arg->dst.w;
-	cmds->body.items[SVGA_VIDEO_DST_HEIGHT].value  = arg->dst.h;
-	cmds->body.items[SVGA_VIDEO_PITCH_1].value     = arg->pitch[0];
-	cmds->body.items[SVGA_VIDEO_PITCH_2].value     = arg->pitch[1];
-	cmds->body.items[SVGA_VIDEO_PITCH_3].value     = arg->pitch[2];
-
-	fill_flush(&cmds->flush, arg->stream_id);
+	fill_flush(flush, arg->stream_id);
 
-	vmw_fifo_commit(dev_priv, sizeof(*cmds));
+	vmw_fifo_commit(dev_priv, fifo_size);
 
 	return 0;
 }
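
The reworked vmw_overlay_send_put() reserves one variable-sized FIFO packet and carves it into a header, a register-item array and a trailing flush. The escape body size passed to fill_escape() counts the two-uint32 header as one extra item-sized entry, hence num_items + 1. The arithmetic, spelled out (illustrative only, using the struct layouts above):

	/* Each item is two uint32s (8 bytes); the header has the same size. */
	size_t item_sz   = 2 * sizeof(uint32_t);
	size_t body_sz   = item_sz * (num_items + 1);	/* escape body */
	size_t fifo_size = sizeof(struct vmw_escape_header) + body_sz +
			   sizeof(struct vmw_escape_video_flush);
	/* ... which equals the function's sizeof(*cmds) + sizeof(*flush) +
	 * sizeof(*items) * num_items, since *cmds is escape + header. */
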
@@ -247,6 +217,25 @@ static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
 	return 0;
 }
 
+/**
+ * Move a buffer to vram or gmr if @pin is set, else unpin the buffer.
+ *
+ * With the introduction of screen objects buffers could now be
+ * used with GMRs instead of being locked to vram.
+ */
+static int vmw_overlay_move_buffer(struct vmw_private *dev_priv,
+				   struct vmw_dma_buffer *buf,
+				   bool pin, bool inter)
+{
+	if (!pin)
+		return vmw_dmabuf_unpin(dev_priv, buf, inter);
+
+	if (!dev_priv->sou_priv)
+		return vmw_dmabuf_to_vram(dev_priv, buf, true, inter);
+
+	return vmw_dmabuf_to_vram_or_gmr(dev_priv, buf, true, inter);
+}
+
 /**
  * Stop or pause a stream.
  *
@@ -279,8 +268,8 @@ static int vmw_overlay_stop(struct vmw_private *dev_priv,
 			return ret;
 
 		/* We just remove the NO_EVICT flag so no -ENOMEM */
-		ret = vmw_dmabuf_pin_in_vram(dev_priv, stream->buf, false,
-					     interruptible);
+		ret = vmw_overlay_move_buffer(dev_priv, stream->buf, false,
+					      interruptible);
 		if (interruptible && ret == -ERESTARTSYS)
 			return ret;
 		else
@@ -342,7 +331,7 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
 	/* We don't start the old stream if we are interrupted.
 	 * Might return -ENOMEM if it can't fit the buffer in vram.
 	 */
-	ret = vmw_dmabuf_pin_in_vram(dev_priv, buf, true, interruptible);
+	ret = vmw_overlay_move_buffer(dev_priv, buf, true, interruptible);
 	if (ret)
 		return ret;
 
@@ -351,7 +340,8 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
 		/* This one needs to happen no matter what. We only remove
 		 * the NO_EVICT flag so this is safe from -ENOMEM.
 		 */
-		BUG_ON(vmw_dmabuf_pin_in_vram(dev_priv, buf, false, false) != 0);
+		BUG_ON(vmw_overlay_move_buffer(dev_priv, buf, false, false)
+		       != 0);
 		return ret;
 	}
 

Diff file suppressed because it is too large
+ 764 - 75
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c


+ 566 - 0
drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c

@@ -0,0 +1,566 @@
+/**************************************************************************
+ *
+ * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+
+#include "vmwgfx_kms.h"
+
+
+#define vmw_crtc_to_sou(x) \
+	container_of(x, struct vmw_screen_object_unit, base.crtc)
+#define vmw_encoder_to_sou(x) \
+	container_of(x, struct vmw_screen_object_unit, base.encoder)
+#define vmw_connector_to_sou(x) \
+	container_of(x, struct vmw_screen_object_unit, base.connector)
+
+struct vmw_screen_object_display {
+	struct list_head active;
+
+	unsigned num_active;
+	unsigned last_num_active;
+
+	struct vmw_framebuffer *fb;
+};
+
+/**
+ * Display unit using screen objects.
+ */
+struct vmw_screen_object_unit {
+	struct vmw_display_unit base;
+
+	unsigned long buffer_size; /**< Size of allocated buffer */
+	struct vmw_dma_buffer *buffer; /**< Backing store buffer */
+
+	bool defined;
+
+	struct list_head active;
+};
+
+static void vmw_sou_destroy(struct vmw_screen_object_unit *sou)
+{
+	list_del_init(&sou->active);
+	vmw_display_unit_cleanup(&sou->base);
+	kfree(sou);
+}
+
+
+/*
+ * Screen Object Display Unit CRTC functions
+ */
+
+static void vmw_sou_crtc_destroy(struct drm_crtc *crtc)
+{
+	vmw_sou_destroy(vmw_crtc_to_sou(crtc));
+}
+
+static int vmw_sou_del_active(struct vmw_private *vmw_priv,
+			      struct vmw_screen_object_unit *sou)
+{
+	struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
+	if (list_empty(&sou->active))
+		return 0;
+
+	/* Must init otherwise list_empty(&sou->active) will not work. */
+	list_del_init(&sou->active);
+	if (--(ld->num_active) == 0) {
+		BUG_ON(!ld->fb);
+		if (ld->fb->unpin)
+			ld->fb->unpin(ld->fb);
+		ld->fb = NULL;
+	}
+
+	return 0;
+}
+
+static int vmw_sou_add_active(struct vmw_private *vmw_priv,
+			      struct vmw_screen_object_unit *sou,
+			      struct vmw_framebuffer *vfb)
+{
+	struct vmw_screen_object_display *ld = vmw_priv->sou_priv;
+	struct vmw_screen_object_unit *entry;
+	struct list_head *at;
+
+	BUG_ON(!ld->num_active && ld->fb);
+	if (vfb != ld->fb) {
+		if (ld->fb && ld->fb->unpin)
+			ld->fb->unpin(ld->fb);
+		if (vfb->pin)
+			vfb->pin(vfb);
+		ld->fb = vfb;
+	}
+
+	if (!list_empty(&sou->active))
+		return 0;
+
+	at = &ld->active;
+	list_for_each_entry(entry, &ld->active, active) {
+		if (entry->base.unit > sou->base.unit)
+			break;
+
+		at = &entry->active;
+	}
+
+	list_add(&sou->active, at);
+
+	ld->num_active++;
+
+	return 0;
+}
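
vmw_sou_add_active() below keeps the display's active list sorted by unit number: the scan remembers the last entry with a smaller unit and inserts after it. The same pattern in isolation, annotated:

	struct list_head *at = &ld->active;	/* insert after head by default */
	struct vmw_screen_object_unit *entry;

	list_for_each_entry(entry, &ld->active, active) {
		if (entry->base.unit > sou->base.unit)
			break;			/* first larger unit: stop */
		at = &entry->active;		/* last smaller-or-equal entry */
	}
	list_add(&sou->active, at);		/* list stays ordered by unit */
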
+
+/**
+ * Send the fifo command to create a screen.
+ */
+static int vmw_sou_fifo_create(struct vmw_private *dev_priv,
+			       struct vmw_screen_object_unit *sou,
+			       uint32_t x, uint32_t y,
+			       struct drm_display_mode *mode)
+{
+	size_t fifo_size;
+
+	struct {
+		struct {
+			uint32_t cmdType;
+		} header;
+		SVGAScreenObject obj;
+	} *cmd;
+
+	BUG_ON(!sou->buffer);
+
+	fifo_size = sizeof(*cmd);
+	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
+	/* The hardware has hung, nothing we can do about it here. */
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Fifo reserve failed.\n");
+		return -ENOMEM;
+	}
+
+	memset(cmd, 0, fifo_size);
+	cmd->header.cmdType = SVGA_CMD_DEFINE_SCREEN;
+	cmd->obj.structSize = sizeof(SVGAScreenObject);
+	cmd->obj.id = sou->base.unit;
+	cmd->obj.flags = SVGA_SCREEN_HAS_ROOT |
+		(sou->base.unit == 0 ? SVGA_SCREEN_IS_PRIMARY : 0);
+	cmd->obj.size.width = mode->hdisplay;
+	cmd->obj.size.height = mode->vdisplay;
+	cmd->obj.root.x = x;
+	cmd->obj.root.y = y;
+
+	/* Ok to assume that buffer is pinned in vram */
+	vmw_bo_get_guest_ptr(&sou->buffer->base, &cmd->obj.backingStore.ptr);
+	cmd->obj.backingStore.pitch = mode->hdisplay * 4;
+
+	vmw_fifo_commit(dev_priv, fifo_size);
+
+	sou->defined = true;
+
+	return 0;
+}
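
The device forces the backing store to 32 bits per pixel, which is where the "* 4" factors in this file come from. For a common mode the screen-object numbers work out as follows (purely illustrative):

	/* 1024x768 screen object at the forced 4 bytes per pixel: */
	uint32_t pitch = 1024 * 4;		/* 4096 bytes per scanline */
	size_t size    = 1024 * 768 * 4;	/* 3 MiB backing store, the size
						 * vmw_sou_crtc_set_config() hands
						 * to vmw_sou_backing_alloc() */
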
+
+/**
+ * Send the fifo command to destroy a screen.
+ */
+static int vmw_sou_fifo_destroy(struct vmw_private *dev_priv,
+				struct vmw_screen_object_unit *sou)
+{
+	size_t fifo_size;
+	int ret;
+
+	struct {
+		struct {
+			uint32_t cmdType;
+		} header;
+		SVGAFifoCmdDestroyScreen body;
+	} *cmd;
+
+	/* no need to do anything */
+	if (unlikely(!sou->defined))
+		return 0;
+
+	fifo_size = sizeof(*cmd);
+	cmd = vmw_fifo_reserve(dev_priv, fifo_size);
+	/* the hardware has hung, nothing we can do about it here */
+	if (unlikely(cmd == NULL)) {
+		DRM_ERROR("Fifo reserve failed.\n");
+		return -ENOMEM;
+	}
+
+	memset(cmd, 0, fifo_size);
+	cmd->header.cmdType = SVGA_CMD_DESTROY_SCREEN;
+	cmd->body.screenId = sou->base.unit;
+
+	vmw_fifo_commit(dev_priv, fifo_size);
+
+	/* Force sync */
+	ret = vmw_fallback_wait(dev_priv, false, true, 0, false, 3*HZ);
+	if (unlikely(ret != 0))
+		DRM_ERROR("Failed to sync with HW");
+	else
+		sou->defined = false;
+
+	return ret;
+}
+
+/**
+ * Free the backing store.
+ */
+static void vmw_sou_backing_free(struct vmw_private *dev_priv,
+				 struct vmw_screen_object_unit *sou)
+{
+	struct ttm_buffer_object *bo;
+
+	if (unlikely(sou->buffer == NULL))
+		return;
+
+	bo = &sou->buffer->base;
+	ttm_bo_unref(&bo);
+	sou->buffer = NULL;
+	sou->buffer_size = 0;
+}
+
+/**
+ * Allocate the backing store for the buffer.
+ */
+static int vmw_sou_backing_alloc(struct vmw_private *dev_priv,
+				 struct vmw_screen_object_unit *sou,
+				 unsigned long size)
+{
+	int ret;
+
+	if (sou->buffer_size == size)
+		return 0;
+
+	if (sou->buffer)
+		vmw_sou_backing_free(dev_priv, sou);
+
+	sou->buffer = kzalloc(sizeof(*sou->buffer), GFP_KERNEL);
+	if (unlikely(sou->buffer == NULL))
+		return -ENOMEM;
+
+	/* After we have alloced the backing store we might not be able
+	 * to resume the overlays; this is preferred to failing to alloc.
+	 */
+	vmw_overlay_pause_all(dev_priv);
+	ret = vmw_dmabuf_init(dev_priv, sou->buffer, size,
+			      &vmw_vram_ne_placement,
+			      false, &vmw_dmabuf_bo_free);
+	vmw_overlay_resume_all(dev_priv);
+
+	if (unlikely(ret != 0))
+		sou->buffer = NULL; /* vmw_dmabuf_init frees on error */
+	else
+		sou->buffer_size = size;
+
+	return ret;
+}
+
+static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
+{
+	struct vmw_private *dev_priv;
+	struct vmw_screen_object_unit *sou;
+	struct drm_connector *connector;
+	struct drm_display_mode *mode;
+	struct drm_encoder *encoder;
+	struct vmw_framebuffer *vfb;
+	struct drm_framebuffer *fb;
+	struct drm_crtc *crtc;
+	int ret = 0;
+
+	if (!set)
+		return -EINVAL;
+
+	if (!set->crtc)
+		return -EINVAL;
+
+	/* get the sou */
+	crtc = set->crtc;
+	sou = vmw_crtc_to_sou(crtc);
+	vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL;
+	dev_priv = vmw_priv(crtc->dev);
+
+	if (set->num_connectors > 1) {
+		DRM_ERROR("to many connectors\n");
+		return -EINVAL;
+	}
+
+	if (set->num_connectors == 1 &&
+	    set->connectors[0] != &sou->base.connector) {
+		DRM_ERROR("connector doesn't match %p %p\n",
+			set->connectors[0], &sou->base.connector);
+		return -EINVAL;
+	}
+
+	/* sou only supports one fb active at a time */
+	if (dev_priv->sou_priv->fb && vfb &&
+	    !(dev_priv->sou_priv->num_active == 1 &&
+	      !list_empty(&sou->active)) &&
+	    dev_priv->sou_priv->fb != vfb) {
+		DRM_ERROR("Multiple framebuffers not supported\n");
+		return -EINVAL;
+	}
+
+	/* since they always map one to one these are safe */
+	connector = &sou->base.connector;
+	encoder = &sou->base.encoder;
+
+	/* should we turn the crtc off */
+	if (set->num_connectors == 0 || !set->mode || !set->fb) {
+		ret = vmw_sou_fifo_destroy(dev_priv, sou);
+		/* the hardware has hung, don't do anything more */
+		if (unlikely(ret != 0))
+			return ret;
+
+		connector->encoder = NULL;
+		encoder->crtc = NULL;
+		crtc->fb = NULL;
+		crtc->x = 0;
+		crtc->y = 0;
+
+		vmw_sou_del_active(dev_priv, sou);
+
+		vmw_sou_backing_free(dev_priv, sou);
+
+		return 0;
+	}
+
+
+	/* we now know we want to set a mode */
+	mode = set->mode;
+	fb = set->fb;
+
+	if (set->x + mode->hdisplay > fb->width ||
+	    set->y + mode->vdisplay > fb->height) {
+		DRM_ERROR("set outside of framebuffer\n");
+		return -EINVAL;
+	}
+
+	vmw_fb_off(dev_priv);
+
+	if (mode->hdisplay != crtc->mode.hdisplay ||
+	    mode->vdisplay != crtc->mode.vdisplay) {
+		/* no need to check if depth is different, because backing
+		 * store depth is forced to 4 by the device.
+		 */
+
+		ret = vmw_sou_fifo_destroy(dev_priv, sou);
+		/* the hardware has hung, don't do anything more */
+		if (unlikely(ret != 0))
+			return ret;
+
+		vmw_sou_backing_free(dev_priv, sou);
+	}
+
+	if (!sou->buffer) {
+		/* forced to depth 4 by the device */
+		size_t size = mode->hdisplay * mode->vdisplay * 4;
+		ret = vmw_sou_backing_alloc(dev_priv, sou, size);
+		if (unlikely(ret != 0))
+			return ret;
+	}
+
+	ret = vmw_sou_fifo_create(dev_priv, sou, set->x, set->y, mode);
+	if (unlikely(ret != 0)) {
+		/*
+		 * We are in a bit of a situation here: the hardware has
+		 * hung and we may or may not have a buffer hanging off
+		 * the screen object. The best thing to do is nothing at
+		 * all if we were already defined; if not, just turn the
+		 * crtc off. Not what userspace wants, but it will have
+		 * to cope.
+		 */
+		if (sou->defined)
+			return ret;
+
+		connector->encoder = NULL;
+		encoder->crtc = NULL;
+		crtc->fb = NULL;
+		crtc->x = 0;
+		crtc->y = 0;
+
+		return ret;
+	}
+
+	vmw_sou_add_active(dev_priv, sou, vfb);
+
+	connector->encoder = encoder;
+	encoder->crtc = crtc;
+	crtc->mode = *mode;
+	crtc->fb = fb;
+	crtc->x = set->x;
+	crtc->y = set->y;
+
+	return 0;
+}
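
From userspace this set_config path is reached through the ordinary KMS modeset ioctl. A hedged libdrm sketch (fd and the ids are placeholders): passing a real mode and fb programs the screen object, while an empty set takes the "turn the crtc off" branch above:

	#include <xf86drmMode.h>

	/* Enable: one connector, a mode and a framebuffer. */
	drmModeSetCrtc(fd, crtc_id, fb_id, 0, 0, &connector_id, 1, &mode);

	/* Disable: no fb, mode or connectors -> vmw_sou_fifo_destroy() path. */
	drmModeSetCrtc(fd, crtc_id, 0, 0, 0, NULL, 0, NULL);
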
+
+static struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
+	.save = vmw_du_crtc_save,
+	.restore = vmw_du_crtc_restore,
+	.cursor_set = vmw_du_crtc_cursor_set,
+	.cursor_move = vmw_du_crtc_cursor_move,
+	.gamma_set = vmw_du_crtc_gamma_set,
+	.destroy = vmw_sou_crtc_destroy,
+	.set_config = vmw_sou_crtc_set_config,
+};
+
+/*
+ * Screen Object Display Unit encoder functions
+ */
+
+static void vmw_sou_encoder_destroy(struct drm_encoder *encoder)
+{
+	vmw_sou_destroy(vmw_encoder_to_sou(encoder));
+}
+
+static struct drm_encoder_funcs vmw_screen_object_encoder_funcs = {
+	.destroy = vmw_sou_encoder_destroy,
+};
+
+/*
+ * Screen Object Display Unit connector functions
+ */
+
+static void vmw_sou_connector_destroy(struct drm_connector *connector)
+{
+	vmw_sou_destroy(vmw_connector_to_sou(connector));
+}
+
+static struct drm_connector_funcs vmw_legacy_connector_funcs = {
+	.dpms = vmw_du_connector_dpms,
+	.save = vmw_du_connector_save,
+	.restore = vmw_du_connector_restore,
+	.detect = vmw_du_connector_detect,
+	.fill_modes = vmw_du_connector_fill_modes,
+	.set_property = vmw_du_connector_set_property,
+	.destroy = vmw_sou_connector_destroy,
+};
+
+static int vmw_sou_init(struct vmw_private *dev_priv, unsigned unit)
+{
+	struct vmw_screen_object_unit *sou;
+	struct drm_device *dev = dev_priv->dev;
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+	struct drm_crtc *crtc;
+
+	sou = kzalloc(sizeof(*sou), GFP_KERNEL);
+	if (!sou)
+		return -ENOMEM;
+
+	sou->base.unit = unit;
+	crtc = &sou->base.crtc;
+	encoder = &sou->base.encoder;
+	connector = &sou->base.connector;
+
+	INIT_LIST_HEAD(&sou->active);
+
+	sou->base.pref_active = (unit == 0);
+	sou->base.pref_width = 800;
+	sou->base.pref_height = 600;
+	sou->base.pref_mode = NULL;
+
+	drm_connector_init(dev, connector, &vmw_legacy_connector_funcs,
+			   DRM_MODE_CONNECTOR_LVDS);
+	connector->status = vmw_du_connector_detect(connector, true);
+
+	drm_encoder_init(dev, encoder, &vmw_screen_object_encoder_funcs,
+			 DRM_MODE_ENCODER_LVDS);
+	drm_mode_connector_attach_encoder(connector, encoder);
+	encoder->possible_crtcs = (1 << unit);
+	encoder->possible_clones = 0;
+
+	drm_crtc_init(dev, crtc, &vmw_screen_object_crtc_funcs);
+
+	drm_mode_crtc_set_gamma_size(crtc, 256);
+
+	drm_connector_attach_property(connector,
+				      dev->mode_config.dirty_info_property,
+				      1);
+
+	return 0;
+}
+
+int vmw_kms_init_screen_object_display(struct vmw_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	int i;
+	int ret;
+
+	if (dev_priv->sou_priv) {
+		DRM_INFO("sou system already on\n");
+		return -EINVAL;
+	}
+
+	if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_SCREEN_OBJECT_2)) {
+		DRM_INFO("Not using screen objects,"
+			 " missing cap SCREEN_OBJECT_2\n");
+		return -ENOSYS;
+	}
+
+	ret = -ENOMEM;
+	dev_priv->sou_priv = kmalloc(sizeof(*dev_priv->sou_priv), GFP_KERNEL);
+	if (unlikely(!dev_priv->sou_priv))
+		goto err_no_mem;
+
+	INIT_LIST_HEAD(&dev_priv->sou_priv->active);
+	dev_priv->sou_priv->num_active = 0;
+	dev_priv->sou_priv->last_num_active = 0;
+	dev_priv->sou_priv->fb = NULL;
+
+	ret = drm_vblank_init(dev, VMWGFX_NUM_DISPLAY_UNITS);
+	if (unlikely(ret != 0))
+		goto err_free;
+
+	ret = drm_mode_create_dirty_info_property(dev_priv->dev);
+	if (unlikely(ret != 0))
+		goto err_vblank_cleanup;
+
+	for (i = 0; i < VMWGFX_NUM_DISPLAY_UNITS; ++i)
+		vmw_sou_init(dev_priv, i);
+
+	DRM_INFO("Screen objects system initialized\n");
+
+	return 0;
+
+err_vblank_cleanup:
+	drm_vblank_cleanup(dev);
+err_free:
+	kfree(dev_priv->sou_priv);
+err_no_mem:
+	return ret;
+}
+
+int vmw_kms_close_screen_object_display(struct vmw_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+
+	drm_vblank_cleanup(dev);
+	if (!dev_priv->sou_priv)
+		return -ENOSYS;
+
+	if (!list_empty(&dev_priv->sou_priv->active))
+		DRM_ERROR("Still have active outputs when unloading driver");
+
+	kfree(dev_priv->sou_priv);
+
+	return 0;
+}

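
vmw_kms_init_screen_object_display() returns -ENOSYS when the SCREEN_OBJECT_2 FIFO capability is missing, which suggests the caller falls back to the legacy display system. A plausible call site (an assumption; the actual caller is not part of the visible diff):

	ret = vmw_kms_init_screen_object_display(dev_priv);
	if (ret)	/* screen objects unavailable or init failed */
		ret = vmw_kms_init_legacy_display_system(dev_priv);
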
+ 63 - 0
include/drm/vmwgfx_drm.h

@@ -52,6 +52,8 @@
 #define DRM_VMW_FENCE_SIGNALED       15
 #define DRM_VMW_FENCE_UNREF          16
 #define DRM_VMW_FENCE_EVENT          17
+#define DRM_VMW_PRESENT              18
+#define DRM_VMW_PRESENT_READBACK     19
 
 
 /*************************************************************************/
@@ -681,5 +683,66 @@ struct drm_vmw_fence_arg {
 };
 
 
+/*************************************************************************/
+/**
+ * DRM_VMW_PRESENT
+ *
+ * Executes an SVGA present on a given fb for a given surface. The surface
+ * is placed on the framebuffer. Cliprects are given relative to the given
+ * point (the point designated by dest_{x|y}).
+ *
+ */
+
+/**
+ * struct drm_vmw_present_arg
+ * @fb_id: framebuffer id to present / read back from.
+ * @sid: Surface id to present from.
+ * @dest_x: X placement coordinate for surface.
+ * @dest_y: Y placement coordinate for surface.
+ * @clips_ptr: Pointer to an array of clip rects cast to an uint64_t.
+ * @num_clips: Number of cliprects given relative to the framebuffer origin,
+ * in the same coordinate space as the frame buffer.
+ * @pad64: Unused 64-bit padding.
+ *
+ * Input argument to the DRM_VMW_PRESENT ioctl.
+ */
+
+struct drm_vmw_present_arg {
+	uint32_t fb_id;
+	uint32_t sid;
+	int32_t dest_x;
+	int32_t dest_y;
+	uint64_t clips_ptr;
+	uint32_t num_clips;
+	uint32_t pad64;
+};
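
A hedged userspace sketch of driving the new present ioctl through libdrm's drmCommandWrite(); the clip format is assumed to be struct drm_vmw_rect as used elsewhere in this API, and fd, fb_id and surface_id are placeholders:

	#include <stdint.h>
	#include <xf86drm.h>	/* drmCommandWrite() */

	struct drm_vmw_rect clip = { .x = 0, .y = 0, .w = 640, .h = 480 };
	struct drm_vmw_present_arg arg = {
		.fb_id     = fb_id,		/* placeholder framebuffer id */
		.sid       = surface_id,	/* placeholder surface id */
		.clips_ptr = (uint64_t)(unsigned long)&clip,
		.num_clips = 1,
	};

	int ret = drmCommandWrite(fd, DRM_VMW_PRESENT, &arg, sizeof(arg));
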
+
+
+/*************************************************************************/
+/**
+ * DRM_VMW_PRESENT_READBACK
+ *
+ * Executes an SVGA present readback from a given fb to the dma buffer
+ * currently bound as the fb. If there is no dma buffer bound to the fb,
+ * an error will be returned.
+ *
+ */
+
+/**
+ * struct drm_vmw_present_readback_arg
+ * @fb_id: fb_id to present / read back from.
+ * @num_clips: Number of cliprects.
+ * @clips_ptr: Pointer to an array of clip rects cast to an uint64_t.
+ * @fence_rep: Pointer to a struct drm_vmw_fence_rep, cast to an uint64_t.
+ * If this member is NULL, then the ioctl should not return a fence.
+ */
+
+struct drm_vmw_present_readback_arg {
+	 uint32_t fb_id;
+	 uint32_t num_clips;
+	 uint64_t clips_ptr;
+	 uint64_t fence_rep;
+};
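
Readback goes the same way; when a fence is wanted, fence_rep carries a pointer to a struct drm_vmw_fence_rep (defined earlier in this header) for the kernel to fill in. Continuing the placeholder sketch above:

	struct drm_vmw_fence_rep fence_rep;	/* filled in by the kernel */
	struct drm_vmw_present_readback_arg rb = {
		.fb_id     = fb_id,		/* placeholder framebuffer id */
		.num_clips = 1,
		.clips_ptr = (uint64_t)(unsigned long)&clip,
		.fence_rep = (uint64_t)(unsigned long)&fence_rep,
	};

	ret = drmCommandWrite(fd, DRM_VMW_PRESENT_READBACK, &rb, sizeof(rb));
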
+
 
 #endif

Some files were not shown because too many files changed in this diff