@@ -690,7 +690,7 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
 {
 	u32 p1, p2, m1, m2, vco, bestn, bestm1, bestm2, bestp1, bestp2;
 	u32 m, n, fastclk;
-	u32 updrate, minupdate, fracbits, p;
+	u32 updrate, minupdate, p;
 	unsigned long bestppm, ppm, absppm;
 	int dotclk, flag;
 
@@ -701,7 +701,6 @@ vlv_find_best_dpll(const intel_limit_t *limit, struct drm_crtc *crtc,
 	fastclk = dotclk / (2*100);
 	updrate = 0;
 	minupdate = 19200;
-	fracbits = 1;
 	n = p = p1 = p2 = m = m1 = m2 = vco = bestn = 0;
 	bestm1 = bestm2 = bestp1 = bestp2 = 0;
 
@@ -1877,7 +1876,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
 	return 0;
 
 err_unpin:
-	i915_gem_object_unpin(obj);
+	i915_gem_object_unpin_from_display_plane(obj);
 err_interruptible:
 	dev_priv->mm.interruptible = true;
 	return ret;
@@ -1886,7 +1885,7 @@ err_interruptible:
 void intel_unpin_fb_obj(struct drm_i915_gem_object *obj)
 {
 	i915_gem_object_unpin_fence(obj);
-	i915_gem_object_unpin(obj);
+	i915_gem_object_unpin_from_display_plane(obj);
 }
 
 /* Computes the linear offset to the base tile and adjusts x, y. bytes per pixel
@@ -2598,7 +2597,7 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 	int pipe = intel_crtc->pipe;
-	u32 reg, temp, i;
+	u32 reg, temp, i, j;
 
 	/* Train 1: umask FDI RX Interrupt symbol_lock and bit_lock bit
 	   for train result */
@@ -2614,97 +2613,99 @@ static void ivb_manual_fdi_link_train(struct drm_crtc *crtc)
 	DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
 		      I915_READ(FDI_RX_IIR(pipe)));
 
-	/* enable CPU FDI TX and PCH FDI RX */
-	reg = FDI_TX_CTL(pipe);
-	temp = I915_READ(reg);
-	temp &= ~FDI_DP_PORT_WIDTH_MASK;
-	temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
-	temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
-	temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
-	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
-	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
-	temp |= FDI_COMPOSITE_SYNC;
-	I915_WRITE(reg, temp | FDI_TX_ENABLE);
-
-	I915_WRITE(FDI_RX_MISC(pipe),
-		   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
-
-	reg = FDI_RX_CTL(pipe);
-	temp = I915_READ(reg);
-	temp &= ~FDI_LINK_TRAIN_AUTO;
-	temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-	temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
-	temp |= FDI_COMPOSITE_SYNC;
-	I915_WRITE(reg, temp | FDI_RX_ENABLE);
+	/* Try each vswing and preemphasis setting twice before moving on */
+	for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
+		/* disable first in case we need to retry */
+		reg = FDI_TX_CTL(pipe);
+		temp = I915_READ(reg);
+		temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
+		temp &= ~FDI_TX_ENABLE;
+		I915_WRITE(reg, temp);
 
-	POSTING_READ(reg);
-	udelay(150);
+		reg = FDI_RX_CTL(pipe);
+		temp = I915_READ(reg);
+		temp &= ~FDI_LINK_TRAIN_AUTO;
+		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+		temp &= ~FDI_RX_ENABLE;
+		I915_WRITE(reg, temp);
 
-	for (i = 0; i < 4; i++) {
+		/* enable CPU FDI TX and PCH FDI RX */
 		reg = FDI_TX_CTL(pipe);
 		temp = I915_READ(reg);
+		temp &= ~FDI_DP_PORT_WIDTH_MASK;
+		temp |= FDI_DP_PORT_WIDTH(intel_crtc->config.fdi_lanes);
+		temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
 		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
-		temp |= snb_b_fdi_train_param[i];
-		I915_WRITE(reg, temp);
+		temp |= snb_b_fdi_train_param[j/2];
+		temp |= FDI_COMPOSITE_SYNC;
+		I915_WRITE(reg, temp | FDI_TX_ENABLE);
 
-		POSTING_READ(reg);
-		udelay(500);
+		I915_WRITE(FDI_RX_MISC(pipe),
+			   FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
 
-		reg = FDI_RX_IIR(pipe);
+		reg = FDI_RX_CTL(pipe);
 		temp = I915_READ(reg);
-		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
-
-		if (temp & FDI_RX_BIT_LOCK ||
-		    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
-			I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
-			DRM_DEBUG_KMS("FDI train 1 done, level %i.\n", i);
-			break;
-		}
-	}
-	if (i == 4)
-		DRM_ERROR("FDI train 1 fail!\n");
+		temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
+		temp |= FDI_COMPOSITE_SYNC;
+		I915_WRITE(reg, temp | FDI_RX_ENABLE);
 
-	/* Train 2 */
-	reg = FDI_TX_CTL(pipe);
-	temp = I915_READ(reg);
-	temp &= ~FDI_LINK_TRAIN_NONE_IVB;
-	temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
-	temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
-	temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
-	I915_WRITE(reg, temp);
+		POSTING_READ(reg);
+		udelay(1); /* should be 0.5us */
 
-	reg = FDI_RX_CTL(pipe);
-	temp = I915_READ(reg);
-	temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
-	temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
-	I915_WRITE(reg, temp);
+		for (i = 0; i < 4; i++) {
+			reg = FDI_RX_IIR(pipe);
+			temp = I915_READ(reg);
+			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
 
-	POSTING_READ(reg);
-	udelay(150);
+			if (temp & FDI_RX_BIT_LOCK ||
+			    (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
+				I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+				DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
+					      i);
+				break;
+			}
+			udelay(1); /* should be 0.5us */
+		}
+		if (i == 4) {
+			DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
+			continue;
+		}
 
-	for (i = 0; i < 4; i++) {
+		/* Train 2 */
 		reg = FDI_TX_CTL(pipe);
 		temp = I915_READ(reg);
-		temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
-		temp |= snb_b_fdi_train_param[i];
+		temp &= ~FDI_LINK_TRAIN_NONE_IVB;
+		temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
+		I915_WRITE(reg, temp);
+
+		reg = FDI_RX_CTL(pipe);
+		temp = I915_READ(reg);
+		temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+		temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
 		I915_WRITE(reg, temp);
 
 		POSTING_READ(reg);
-		udelay(500);
+		udelay(2); /* should be 1.5us */
 
-		reg = FDI_RX_IIR(pipe);
-		temp = I915_READ(reg);
-		DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+		for (i = 0; i < 4; i++) {
+			reg = FDI_RX_IIR(pipe);
+			temp = I915_READ(reg);
+			DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
 
-		if (temp & FDI_RX_SYMBOL_LOCK) {
-			I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
-			DRM_DEBUG_KMS("FDI train 2 done, level %i.\n", i);
-			break;
+			if (temp & FDI_RX_SYMBOL_LOCK ||
+			    (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
+				I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
+				DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
+					      i);
+				goto train_done;
+			}
+			udelay(2); /* should be 1.5us */
 		}
+		if (i == 4)
+			DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
 	}
-	if (i == 4)
-		DRM_ERROR("FDI train 2 fail!\n");
 
+train_done:
 	DRM_DEBUG_KMS("FDI train done.\n");
 }
 
@@ -4423,13 +4424,10 @@ static void vlv_update_pll(struct intel_crtc *crtc)
 	int pipe = crtc->pipe;
 	u32 dpll, mdiv;
 	u32 bestn, bestm1, bestm2, bestp1, bestp2;
-	bool is_hdmi;
 	u32 coreclk, reg_val, dpll_md;
 
 	mutex_lock(&dev_priv->dpio_lock);
 
-	is_hdmi = intel_pipe_has_type(&crtc->base, INTEL_OUTPUT_HDMI);
-
 	bestn = crtc->config.dpll.n;
 	bestm1 = crtc->config.dpll.m1;
 	bestm2 = crtc->config.dpll.m2;
@@ -5934,11 +5932,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
 	struct intel_ddi_plls *plls = &dev_priv->ddi_plls;
 	struct intel_crtc *crtc;
 	unsigned long irqflags;
-	uint32_t val, pch_hpd_mask;
-
-	pch_hpd_mask = SDE_PORTB_HOTPLUG_CPT | SDE_PORTC_HOTPLUG_CPT;
-	if (!(dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE))
-		pch_hpd_mask |= SDE_PORTD_HOTPLUG_CPT | SDE_CRT_HOTPLUG_CPT;
+	uint32_t val;
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
 		WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n",
@@ -5964,7 +5958,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
 	WARN((val & ~DE_PCH_EVENT_IVB) != val,
 	     "Unexpected DEIMR bits enabled: 0x%x\n", val);
 	val = I915_READ(SDEIMR);
-	WARN((val & ~pch_hpd_mask) != val,
+	WARN((val | SDE_HOTPLUG_MASK_CPT) != 0xffffffff,
 	     "Unexpected SDEIMR bits enabled: 0x%x\n", val);
 	spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
 }
@@ -6035,16 +6029,21 @@ void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 		    LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
 		return;
 
+	/* Make sure we're not on PC8 state before disabling PC8, otherwise
+	 * we'll hang the machine! */
+	dev_priv->uncore.funcs.force_wake_get(dev_priv);
+
 	if (val & LCPLL_POWER_DOWN_ALLOW) {
 		val &= ~LCPLL_POWER_DOWN_ALLOW;
 		I915_WRITE(LCPLL_CTL, val);
+		POSTING_READ(LCPLL_CTL);
 	}
 
 	val = I915_READ(D_COMP);
 	val |= D_COMP_COMP_FORCE;
 	val &= ~D_COMP_COMP_DISABLE;
 	I915_WRITE(D_COMP, val);
-	I915_READ(D_COMP);
+	POSTING_READ(D_COMP);
 
 	val = I915_READ(LCPLL_CTL);
 	val &= ~LCPLL_PLL_DISABLE;
@@ -6062,6 +6061,168 @@ void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
 					LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
 			DRM_ERROR("Switching back to LCPLL failed\n");
 	}
+
+	dev_priv->uncore.funcs.force_wake_put(dev_priv);
+}
+
+void hsw_enable_pc8_work(struct work_struct *__work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(to_delayed_work(__work), struct drm_i915_private,
+			     pc8.enable_work);
+	struct drm_device *dev = dev_priv->dev;
+	uint32_t val;
+
+	if (dev_priv->pc8.enabled)
+		return;
+
+	DRM_DEBUG_KMS("Enabling package C8+\n");
+
+	dev_priv->pc8.enabled = true;
+
+	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+		val = I915_READ(SOUTH_DSPCLK_GATE_D);
+		val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
+		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+	}
+
+	lpt_disable_clkout_dp(dev);
+	hsw_pc8_disable_interrupts(dev);
+	hsw_disable_lcpll(dev_priv, true, true);
+}
+
+static void __hsw_enable_package_c8(struct drm_i915_private *dev_priv)
+{
+	WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
+	WARN(dev_priv->pc8.disable_count < 1,
+	     "pc8.disable_count: %d\n", dev_priv->pc8.disable_count);
+
+	dev_priv->pc8.disable_count--;
+	if (dev_priv->pc8.disable_count != 0)
+		return;
+
+	schedule_delayed_work(&dev_priv->pc8.enable_work,
+			      msecs_to_jiffies(i915_pc8_timeout));
+}
+
+static void __hsw_disable_package_c8(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	uint32_t val;
+
+	WARN_ON(!mutex_is_locked(&dev_priv->pc8.lock));
+	WARN(dev_priv->pc8.disable_count < 0,
+	     "pc8.disable_count: %d\n", dev_priv->pc8.disable_count);
+
+	dev_priv->pc8.disable_count++;
+	if (dev_priv->pc8.disable_count != 1)
+		return;
+
+	cancel_delayed_work_sync(&dev_priv->pc8.enable_work);
+	if (!dev_priv->pc8.enabled)
+		return;
+
+	DRM_DEBUG_KMS("Disabling package C8+\n");
+
+	hsw_restore_lcpll(dev_priv);
+	hsw_pc8_restore_interrupts(dev);
+	lpt_init_pch_refclk(dev);
+
+	if (dev_priv->pch_id == INTEL_PCH_LPT_LP_DEVICE_ID_TYPE) {
+		val = I915_READ(SOUTH_DSPCLK_GATE_D);
+		val |= PCH_LP_PARTITION_LEVEL_DISABLE;
+		I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+	}
+
+	intel_prepare_ddi(dev);
+	i915_gem_init_swizzling(dev);
+	mutex_lock(&dev_priv->rps.hw_lock);
+	gen6_update_ring_freq(dev);
+	mutex_unlock(&dev_priv->rps.hw_lock);
+	dev_priv->pc8.enabled = false;
+}
+
+void hsw_enable_package_c8(struct drm_i915_private *dev_priv)
+{
+	mutex_lock(&dev_priv->pc8.lock);
+	__hsw_enable_package_c8(dev_priv);
+	mutex_unlock(&dev_priv->pc8.lock);
+}
+
+void hsw_disable_package_c8(struct drm_i915_private *dev_priv)
+{
+	mutex_lock(&dev_priv->pc8.lock);
+	__hsw_disable_package_c8(dev_priv);
+	mutex_unlock(&dev_priv->pc8.lock);
+}
+
+static bool hsw_can_enable_package_c8(struct drm_i915_private *dev_priv)
+{
+	struct drm_device *dev = dev_priv->dev;
+	struct intel_crtc *crtc;
+	uint32_t val;
+
+	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
+		if (crtc->base.enabled)
+			return false;
+
+	/* This case is still possible since we have the i915.disable_power_well
+	 * parameter and also the KVMr or something else might be requesting the
+	 * power well. */
+	val = I915_READ(HSW_PWR_WELL_DRIVER);
+	if (val != 0) {
+		DRM_DEBUG_KMS("Not enabling PC8: power well on\n");
+		return false;
+	}
+
+	return true;
+}
+
+/* Since we're called from modeset_global_resources there's no way to
+ * symmetrically increase and decrease the refcount, so we use
+ * dev_priv->pc8.requirements_met to track whether we already have the refcount
+ * or not.
+ */
+static void hsw_update_package_c8(struct drm_device *dev)
+{
+	struct drm_i915_private *dev_priv = dev->dev_private;
+	bool allow;
+
+	if (!i915_enable_pc8)
+		return;
+
+	mutex_lock(&dev_priv->pc8.lock);
+
+	allow = hsw_can_enable_package_c8(dev_priv);
+
+	if (allow == dev_priv->pc8.requirements_met)
+		goto done;
+
+	dev_priv->pc8.requirements_met = allow;
+
+	if (allow)
+		__hsw_enable_package_c8(dev_priv);
+	else
+		__hsw_disable_package_c8(dev_priv);
+
+done:
+	mutex_unlock(&dev_priv->pc8.lock);
+}
+
+static void hsw_package_c8_gpu_idle(struct drm_i915_private *dev_priv)
+{
+	if (!dev_priv->pc8.gpu_idle) {
+		dev_priv->pc8.gpu_idle = true;
+		hsw_enable_package_c8(dev_priv);
+	}
+}
+
+static void hsw_package_c8_gpu_busy(struct drm_i915_private *dev_priv)
+{
+	if (dev_priv->pc8.gpu_idle) {
+		dev_priv->pc8.gpu_idle = false;
+		hsw_disable_package_c8(dev_priv);
+	}
 }
 
 static void haswell_modeset_global_resources(struct drm_device *dev)
@@ -6079,6 +6240,8 @@ static void haswell_modeset_global_resources(struct drm_device *dev)
 	}
 
 	intel_set_power_well(dev, enable);
+
+	hsw_update_package_c8(dev);
 }
 
 static int haswell_crtc_mode_set(struct drm_crtc *crtc,
@@ -6759,7 +6922,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 			if (intel_crtc->cursor_bo != obj)
 				i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
 		} else
-			i915_gem_object_unpin(intel_crtc->cursor_bo);
+			i915_gem_object_unpin_from_display_plane(intel_crtc->cursor_bo);
 		drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
 	}
 
@@ -6774,7 +6937,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
 
 	return 0;
 fail_unpin:
-	i915_gem_object_unpin(obj);
+	i915_gem_object_unpin_from_display_plane(obj);
 fail_locked:
 	mutex_unlock(&dev->struct_mutex);
 fail:
@@ -7310,13 +7473,19 @@ static void intel_decrease_pllclock(struct drm_crtc *crtc)
 
 void intel_mark_busy(struct drm_device *dev)
 {
-	i915_update_gfx_val(dev->dev_private);
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	hsw_package_c8_gpu_busy(dev_priv);
+	i915_update_gfx_val(dev_priv);
 }
 
 void intel_mark_idle(struct drm_device *dev)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
 
+	hsw_package_c8_gpu_idle(dev_priv);
+
 	if (!i915_powersave)
 		return;
 
@@ -8891,6 +9060,9 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,
 		drm_mode_debug_printmodeline(set->mode);
 		config->mode_changed = true;
 	}
+
+	DRM_DEBUG_KMS("computed changes for [CRTC:%d], mode_changed=%d, fb_changed=%d\n",
+		      set->crtc->base.id, config->mode_changed, config->fb_changed);
 }
 
 static int
@@ -8901,14 +9073,13 @@ intel_modeset_stage_output_state(struct drm_device *dev,
 	struct drm_crtc *new_crtc;
 	struct intel_connector *connector;
 	struct intel_encoder *encoder;
-	int count, ro;
+	int ro;
 
 	/* The upper layers ensure that we either disable a crtc or have a list
 	 * of connectors. For paranoia, double-check this. */
 	WARN_ON(!set->fb && (set->num_connectors != 0));
 	WARN_ON(set->fb && (set->num_connectors == 0));
 
-	count = 0;
 	list_for_each_entry(connector, &dev->mode_config.connector_list,
 			    base.head) {
 		/* Otherwise traverse passed in connector list and get encoders
@@ -8942,7 +9113,6 @@ intel_modeset_stage_output_state(struct drm_device *dev,
 	/* connector->new_encoder is now updated for all connectors. */
 
 	/* Update crtc of enabled connectors. */
-	count = 0;
 	list_for_each_entry(connector, &dev->mode_config.connector_list,
 			    base.head) {
 		if (!connector->new_encoder)
@@ -10114,6 +10284,17 @@ void i915_redisable_vga(struct drm_device *dev)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	u32 vga_reg = i915_vgacntrl_reg(dev);
 
+	/* This function can be called both from intel_modeset_setup_hw_state or
+	 * at a very early point in our resume sequence, where the power well
+	 * structures are not yet restored. Since this function is at a very
+	 * paranoid "someone might have enabled VGA while we were not looking"
+	 * level, just check if the power well is enabled instead of trying to
+	 * follow the "don't touch the power well if we don't need it" policy
+	 * the rest of the driver uses. */
+	if (HAS_POWER_WELL(dev) &&
+	    (I915_READ(HSW_PWR_WELL_DRIVER) & HSW_PWR_WELL_STATE_ENABLED) == 0)
+		return;
+
 	if (I915_READ(vga_reg) != VGA_DISP_DISABLE) {
 		DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
 		i915_disable_vga(dev);
@@ -10302,7 +10483,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_crtc *crtc;
-	struct intel_crtc *intel_crtc;
 
 	/*
 	 * Interrupts and polling as the first thing to avoid creating havoc.
@@ -10326,7 +10506,6 @@ void intel_modeset_cleanup(struct drm_device *dev)
 		if (!crtc->fb)
 			continue;
 
-		intel_crtc = to_intel_crtc(crtc);
 		intel_increase_pllclock(crtc);
 	}
 