intel_uncore.c

/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 2

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	u32 gt_thread_status_mask;

	if (IS_HASWELL(dev_priv->dev))
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
	else
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;

	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((I915_READ_NOTRACE(GEN6_GT_THREAD_STATUS_REG) &
				gt_thread_status_mask) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE, 0);
	POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */
}

static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	I915_WRITE_NOTRACE(FORCEWAKE, 1);
	POSTING_READ(ECOBUS); /* something from same cacheline, but !FORCEWAKE */

	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK) & 1),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:snb */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_MT */
	POSTING_READ(ECOBUS);
}

static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
{
	u32 forcewake_ack;

	if (IS_HASWELL(dev_priv->dev))
		forcewake_ack = FORCEWAKE_ACK_HSW;
	else
		forcewake_ack = FORCEWAKE_MT_ACK;

	if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	POSTING_READ(ECOBUS);

	if (wait_for_atomic((I915_READ_NOTRACE(forcewake_ack) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:ivb,hsw */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
	if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
		 "MMIO read or write has been dropped %x\n", gtfifodbg))
		I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
}

static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	POSTING_READ(ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}

static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE_MT, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	POSTING_READ(ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}
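
/* On gen6/7, MMIO writes go through a small GT FIFO; if it overflows, the
 * access is silently dropped (see the GTFIFODBG check above). Keep a few
 * entries in reserve and spin until space frees up before writing.
 */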
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}

static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_VLV */
	POSTING_READ(FORCEWAKE_ACK_VLV);
}

static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
{
	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");

	if (wait_for_atomic((I915_READ_NOTRACE(FORCEWAKE_ACK_MEDIA_VLV) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");

	/* WaRsForcewakeWaitTC0:vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE_VLV, _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	I915_WRITE_NOTRACE(FORCEWAKE_MEDIA_VLV,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}
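
/* Clear out any unclaimed-register flag left set by the BIOS, so that
 * later checks only report accesses made while i915 owned the hardware.
 */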
void intel_uncore_early_sanitize(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
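
/* Select the forcewake implementation for this platform. Valleyview and
 * Haswell have a single choice each; Ivybridge is probed at runtime since
 * the BIOS may or may not have enabled multi-threaded forcewake.
 */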
void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
	} else if (IS_HASWELL(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
		dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		mutex_lock(&dev->struct_mutex);
		__gen6_gt_force_wake_mt_get(dev_priv);
		ecobus = I915_READ_NOTRACE(ECOBUS);
		__gen6_gt_force_wake_mt_put(dev_priv);
		mutex_unlock(&dev->struct_mutex);

		if (ecobus & FORCEWAKE_MT_ENABLE) {
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_mt_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_mt_put;
		} else {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_put;
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			__gen6_gt_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put =
			__gen6_gt_force_wake_put;
	}
}
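
/* Return the forcewake hardware to a known state and turn off RC6 before
 * hardware init starts relying on register access.
 */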
void intel_uncore_sanitize(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_VALLEYVIEW(dev)) {
		vlv_force_wake_reset(dev_priv);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		__gen6_gt_force_wake_reset(dev_priv);
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
			__gen6_gt_force_wake_mt_reset(dev_priv);
	}

	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);
}

/*
 * Generally this is called implicitly by the register read function. However,
 * if some sequence requires the GT to not power down then this function should
 * be called at the beginning of the sequence followed by a call to
 * gen6_gt_force_wake_put() at the end of the sequence.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (dev_priv->uncore.forcewake_count++ == 0)
		dev_priv->uncore.funcs.force_wake_get(dev_priv);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/*
 * see gen6_gt_force_wake_get()
 */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (--dev_priv->uncore.forcewake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	((HAS_FORCE_WAKE((dev_priv)->dev)) && \
	 ((reg) < 0x40000) && \
	 ((reg) != FORCEWAKE))

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	I915_WRITE_NOTRACE(MI_MODE, 0);
}

static void
hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
{
	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unknown unclaimed register before writing to %x\n",
			  reg);
		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

static void
hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
{
	if (HAS_FPGA_DBG_UNCLAIMED(dev_priv->dev) &&
	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed write to %x\n", reg);
		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}
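
/* Generate i915_read{8,16,32,64}(). Registers below 0x40000 live in the GT
 * power well, so a temporary forcewake reference is taken around the read
 * when no one already holds one. Every read is traced.
 */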
#define __i915_read(x, y) \
u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
	unsigned long irqflags; \
	u##x val = 0; \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	if (IS_GEN5(dev_priv->dev)) \
		ilk_dummy_write(dev_priv); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_get(dev_priv); \
		val = read##y(dev_priv->regs + reg); \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_put(dev_priv); \
	} else { \
		val = read##y(dev_priv->regs + reg); \
	} \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val)); \
	return val; \
}

__i915_read(8, b)
__i915_read(16, w)
__i915_read(32, l)
__i915_read(64, q)
#undef __i915_read
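
/* Generate i915_write{8,16,32,64}(). Writes to GT power well registers
 * wait for free GT FIFO entries rather than taking forcewake, and the
 * unclaimed-register flag is checked before and after the access on
 * platforms that have it.
 */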
#define __i915_write(x, y) \
void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
	unsigned long irqflags; \
	u32 __fifo_ret = 0; \
	trace_i915_reg_rw(true, reg, val, sizeof(val)); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	if (IS_GEN5(dev_priv->dev)) \
		ilk_dummy_write(dev_priv); \
	hsw_unclaimed_reg_clear(dev_priv, reg); \
	write##y(val, dev_priv->regs + reg); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_check(dev_priv, reg); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
}

__i915_write(8, b)
__i915_write(16, w)
__i915_write(32, l)
__i915_write(64, q)
#undef __i915_write

static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
};
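
/* Let userspace read a register, but only if its offset matches a
 * whitelist entry valid for the running hardware generation.
 */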
int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	int i;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (entry->offset == reg->offset &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	switch (entry->size) {
	case 8:
		reg->val = I915_READ64(reg->offset);
		break;
	case 4:
		reg->val = I915_READ(reg->offset);
		break;
	case 2:
		reg->val = I915_READ16(reg->offset);
		break;
	case 1:
		reg->val = I915_READ8(reg->offset);
		break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}
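
/* Gen2 reset: toggle the reset bits in DSTATE (plus DEBUG_RESET on
 * 830/845); not possible at all on 85x.
 */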
static int i8xx_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_I85X(dev))
		return -ENODEV;

	I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	if (IS_I830(dev) || IS_845G(dev)) {
		I915_WRITE(DEBUG_RESET_I830,
			   DEBUG_RESET_DISPLAY |
			   DEBUG_RESET_RENDER |
			   DEBUG_RESET_FULL);
		POSTING_READ(DEBUG_RESET_I830);
		msleep(1);

		I915_WRITE(DEBUG_RESET_I830, 0);
		POSTING_READ(DEBUG_RESET_I830);
	}

	msleep(1);

	I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	return 0;
}
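
/* Gen4 reset is driven through the GDRST byte in PCI config space; the
 * hardware clears the reset-enable bit once the reset completes.
 */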
static int i965_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int i965_do_reset(struct drm_device *dev)
{
	int ret;

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0). Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	pci_write_config_byte(dev->pdev, I965_GDRST, 0);

	return 0;
}
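
/* Gen5 reset goes through the graphics domain soft reset register in the
 * MCHBAR mirror, resetting the render and media domains in turn.
 */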
static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 gdrst;
	int ret;

	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	gdrst &= ~GRDOM_MASK;
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	gdrst &= ~GRDOM_MASK;
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}
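
/* Gen6/7 use a single full soft-domain reset via GEN6_GDRST. The reset
 * also clobbers forcewake state, so restore or release it afterwards
 * while still holding uncore.lock.
 */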
static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	unsigned long irqflags;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	/* If reset with a user forcewake, try to restore, otherwise turn it off */
	if (dev_priv->uncore.forcewake_count)
		dev_priv->uncore.funcs.force_wake_get(dev_priv);
	else
		dev_priv->uncore.funcs.force_wake_put(dev_priv);

	/* Restore fifo count */
	dev_priv->uncore.fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
	return ret;
}
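
/* Dispatch to the generation-specific reset routine, if one exists. */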
int intel_gpu_reset(struct drm_device *dev)
{
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6: return gen6_do_reset(dev);
	case 5: return ironlake_do_reset(dev);
	case 4: return i965_do_reset(dev);
	case 2: return i8xx_do_reset(dev);
	default: return -ENODEV;
	}
}
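
/* Forget a pending unclaimed-register error without reporting it. */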
void intel_uncore_clear_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}
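
/* Report and clear an unclaimed register access noticed just before an
 * interrupt is handled.
 */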
void intel_uncore_check_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
	    (I915_READ_NOTRACE(FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		I915_WRITE_NOTRACE(FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}