intel_uncore.c

/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 2

#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	u32 gt_thread_status_mask;

	if (IS_HASWELL(dev_priv->dev))
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
	else
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;

	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
}

static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE, 1);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:snb */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
}

static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
{
	u32 forcewake_ack;

	if (IS_HASWELL(dev_priv->dev) || IS_GEN8(dev_priv->dev))
		forcewake_ack = FORCEWAKE_ACK_HSW;
	else
		forcewake_ack = FORCEWAKE_MT_ACK;

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:ivb,hsw */
	if (INTEL_INFO(dev_priv->dev)->gen < 8)
		__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
		 "MMIO read or write has been dropped %x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
}

static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}

static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}

static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}

static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_VLV */
	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}

static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
{
	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");

	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_MEDIA_VLV) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");

	/* WaRsForcewakeWaitTC0:vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}

static void gen6_force_wake_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), uncore.force_wake_work.work);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (--dev_priv->uncore.forcewake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

static void intel_uncore_forcewake_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_VALLEYVIEW(dev)) {
		vlv_force_wake_reset(dev_priv);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		__gen6_gt_force_wake_reset(dev_priv);
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
			__gen6_gt_force_wake_mt_reset(dev_priv);
	}
}

void intel_uncore_early_sanitize(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	if (IS_HASWELL(dev) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}

	intel_uncore_forcewake_reset(dev);
}

void intel_uncore_sanitize(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg_val;

	intel_uncore_forcewake_reset(dev);

	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);

	/* Turn off power gating; needed especially on BIOS-less systems */
	if (IS_VALLEYVIEW(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		reg_val = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS);

		if (reg_val & (RENDER_PWRGT | MEDIA_PWRGT | DISP2D_PWRGT))
			vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, 0x0);

		mutex_unlock(&dev_priv->rps.hw_lock);
	}
}

/*
 * Generally this is called implicitly by the register read function. However,
 * if some sequence requires the GT to not power down then this function should
 * be called at the beginning of the sequence followed by a call to
 * gen6_gt_force_wake_put() at the end of the sequence.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (dev_priv->uncore.forcewake_count++ == 0)
		dev_priv->uncore.funcs.force_wake_get(dev_priv);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/*
 * see gen6_gt_force_wake_get()
 */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (--dev_priv->uncore.forcewake_count == 0) {
		dev_priv->uncore.forcewake_count++;
		mod_delayed_work(dev_priv->wq,
				 &dev_priv->uncore.force_wake_work,
				 1);
	}
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
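
/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * file): a driver sequence that must keep the GT awake across several MMIO
 * accesses would bracket them with the reference-counted helpers above,
 * roughly as
 *
 *	gen6_gt_force_wake_get(dev_priv);
 *	val = I915_READ(GEN6_RPSTAT1);	(example register in the forcewaked range)
 *	...
 *	gen6_gt_force_wake_put(dev_priv);
 *
 * Note that the put side does not drop the hardware wake immediately: it
 * keeps the last reference and schedules gen6_force_wake_work() via a
 * delayed work item, so short back-to-back sequences avoid toggling
 * forcewake in hardware.
 */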

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	 ((reg) < 0x40000 && (reg) != FORCEWAKE)

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
{
	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_ERROR("Unknown unclaimed register before writing to %x\n",
			  reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

static void
hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
{
	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_ERROR("Unclaimed write to %x\n", reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

#define REG_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen4_read(x) \
static u##x \
gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_get(dev_priv); \
		val = __raw_i915_read##x(dev_priv, reg); \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_put(dev_priv); \
	} else { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} \
	REG_READ_FOOTER; \
}

__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)
__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen4_read(8)
__gen4_read(16)
__gen4_read(32)
__gen4_read(64)

#undef __gen6_read
#undef __gen5_read
#undef __gen4_read
#undef REG_READ_FOOTER
#undef REG_READ_HEADER

#define REG_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define __gen4_write(x) \
static void \
gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
}

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	hsw_unclaimed_reg_clear(dev_priv, reg); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_check(dev_priv, reg); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
}

static const u32 gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (reg == gen8_shadowed_regs[i])
			return true;

	return false;
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	bool __needs_put = !is_gen8_shadowed(dev_priv, reg); \
	REG_WRITE_HEADER; \
	if (__needs_put) { \
		dev_priv->uncore.funcs.force_wake_get(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (__needs_put) { \
		dev_priv->uncore.funcs.force_wake_put(dev_priv); \
	} \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
}

__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen4_write(8)
__gen4_write(16)
__gen4_write(32)
__gen4_write(64)

#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __gen5_write
#undef __gen4_write
#undef REG_WRITE_HEADER

void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_DELAYED_WORK(&dev_priv->uncore.force_wake_work,
			  gen6_force_wake_work);

	if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
	} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
		dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		mutex_lock(&dev->struct_mutex);
		__gen6_gt_force_wake_mt_get(dev_priv);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		__gen6_gt_force_wake_mt_put(dev_priv);
		mutex_unlock(&dev->struct_mutex);

		if (ecobus & FORCEWAKE_MT_ENABLE) {
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_mt_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_mt_put;
		} else {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_put;
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			__gen6_gt_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put =
			__gen6_gt_force_wake_put;
	}

	switch (INTEL_INFO(dev)->gen) {
	default:
		dev_priv->uncore.funcs.mmio_writeb = gen8_write8;
		dev_priv->uncore.funcs.mmio_writew = gen8_write16;
		dev_priv->uncore.funcs.mmio_writel = gen8_write32;
		dev_priv->uncore.funcs.mmio_writeq = gen8_write64;
		dev_priv->uncore.funcs.mmio_readb = gen6_read8;
		dev_priv->uncore.funcs.mmio_readw = gen6_read16;
		dev_priv->uncore.funcs.mmio_readl = gen6_read32;
		dev_priv->uncore.funcs.mmio_readq = gen6_read64;
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			dev_priv->uncore.funcs.mmio_writeb = hsw_write8;
			dev_priv->uncore.funcs.mmio_writew = hsw_write16;
			dev_priv->uncore.funcs.mmio_writel = hsw_write32;
			dev_priv->uncore.funcs.mmio_writeq = hsw_write64;
		} else {
			dev_priv->uncore.funcs.mmio_writeb = gen6_write8;
			dev_priv->uncore.funcs.mmio_writew = gen6_write16;
			dev_priv->uncore.funcs.mmio_writel = gen6_write32;
			dev_priv->uncore.funcs.mmio_writeq = gen6_write64;
		}
		dev_priv->uncore.funcs.mmio_readb = gen6_read8;
		dev_priv->uncore.funcs.mmio_readw = gen6_read16;
		dev_priv->uncore.funcs.mmio_readl = gen6_read32;
		dev_priv->uncore.funcs.mmio_readq = gen6_read64;
		break;
	case 5:
		dev_priv->uncore.funcs.mmio_writeb = gen5_write8;
		dev_priv->uncore.funcs.mmio_writew = gen5_write16;
		dev_priv->uncore.funcs.mmio_writel = gen5_write32;
		dev_priv->uncore.funcs.mmio_writeq = gen5_write64;
		dev_priv->uncore.funcs.mmio_readb = gen5_read8;
		dev_priv->uncore.funcs.mmio_readw = gen5_read16;
		dev_priv->uncore.funcs.mmio_readl = gen5_read32;
		dev_priv->uncore.funcs.mmio_readq = gen5_read64;
		break;
	case 4:
	case 3:
	case 2:
		dev_priv->uncore.funcs.mmio_writeb = gen4_write8;
		dev_priv->uncore.funcs.mmio_writew = gen4_write16;
		dev_priv->uncore.funcs.mmio_writel = gen4_write32;
		dev_priv->uncore.funcs.mmio_writeq = gen4_write64;
		dev_priv->uncore.funcs.mmio_readb = gen4_read8;
		dev_priv->uncore.funcs.mmio_readw = gen4_read16;
		dev_priv->uncore.funcs.mmio_readl = gen4_read32;
		dev_priv->uncore.funcs.mmio_readq = gen4_read64;
		break;
	}
}

void intel_uncore_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	flush_delayed_work(&dev_priv->uncore.force_wake_work);

	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
}

static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	uint32_t gen_bitmask; /* support gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
};

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	int i;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (entry->offset == reg->offset &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	switch (entry->size) {
	case 8:
		reg->val = I915_READ64(reg->offset);
		break;
	case 4:
		reg->val = I915_READ(reg->offset);
		break;
	case 2:
		reg->val = I915_READ16(reg->offset);
		break;
	case 1:
		reg->val = I915_READ8(reg->offset);
		break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}
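
/*
 * Userspace usage sketch (editor's addition, not part of the original file):
 * the uapi side of this ioctl is struct drm_i915_reg_read from
 * <drm/i915_drm.h>, driven via DRM_IOCTL_I915_REG_READ.  With the whitelist
 * above, only the 64-bit render ring timestamp is readable, roughly:
 *
 *	struct drm_i915_reg_read rr = {
 *		.offset = 0x2358,	(RING_TIMESTAMP(RENDER_RING_BASE); exact value assumed here)
 *	};
 *	if (drmIoctl(fd, DRM_IOCTL_I915_REG_READ, &rr) == 0)
 *		printf("GPU timestamp: %llu\n", (unsigned long long)rr.val);
 *
 * An offset that does not match a whitelist entry for the running
 * generation is rejected with -EINVAL.
 */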

static int i965_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int i965_do_reset(struct drm_device *dev)
{
	int ret;

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0). Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);

	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	pci_write_config_byte(dev->pdev, I965_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 gdrst;
	int ret;

	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	gdrst &= ~GRDOM_MASK;
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	gdrst &= ~GRDOM_MASK;
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	unsigned long irqflags;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev);

	/* If reset with a user forcewake, try to restore, otherwise turn it off */
	if (dev_priv->uncore.forcewake_count)
		dev_priv->uncore.funcs.force_wake_get(dev_priv);
	else
		dev_priv->uncore.funcs.force_wake_put(dev_priv);

	/* Restore fifo count */
	dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
	return ret;
}

int intel_gpu_reset(struct drm_device *dev)
{
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6: return gen6_do_reset(dev);
	case 5: return ironlake_do_reset(dev);
	case 4: return i965_do_reset(dev);
	default: return -ENODEV;
	}
}

void intel_uncore_clear_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* XXX needs spinlock around caller's grouping */
	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}

void intel_uncore_check_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}