/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "i915_drv.h"
#include "intel_drv.h"

#define FORCEWAKE_ACK_TIMEOUT_MS 2
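
/*
 * The __raw_* accessors below talk to the MMIO BAR directly, with no
 * forcewake handling, locking or tracing. They are meant for internal use
 * by paths in this file that manage forcewake and uncore.lock themselves.
 */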
#define __raw_i915_read8(dev_priv__, reg__) readb((dev_priv__)->regs + (reg__))
#define __raw_i915_write8(dev_priv__, reg__, val__) writeb(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read16(dev_priv__, reg__) readw((dev_priv__)->regs + (reg__))
#define __raw_i915_write16(dev_priv__, reg__, val__) writew(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read32(dev_priv__, reg__) readl((dev_priv__)->regs + (reg__))
#define __raw_i915_write32(dev_priv__, reg__, val__) writel(val__, (dev_priv__)->regs + (reg__))

#define __raw_i915_read64(dev_priv__, reg__) readq((dev_priv__)->regs + (reg__))
#define __raw_i915_write64(dev_priv__, reg__, val__) writeq(val__, (dev_priv__)->regs + (reg__))

#define __raw_posting_read(dev_priv__, reg__) (void)__raw_i915_read32(dev_priv__, reg__)

static void __gen6_gt_wait_for_thread_c0(struct drm_i915_private *dev_priv)
{
	u32 gt_thread_status_mask;

	if (IS_HASWELL(dev_priv->dev))
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK_HSW;
	else
		gt_thread_status_mask = GEN6_GT_THREAD_STATUS_CORE_MASK;

	/* w/a for a sporadic read returning 0 by waiting for the GT
	 * thread to wake up.
	 */
	if (wait_for_atomic_us((__raw_i915_read32(dev_priv, GEN6_GT_THREAD_STATUS_REG) & gt_thread_status_mask) == 0, 500))
		DRM_ERROR("GT thread status wait timed out\n");
}

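/*
 * Forcewake handshake: the kernel asks the GT power well to stay up by
 * setting a request bit in a FORCEWAKE register, then polls the matching
 * ACK register until the hardware confirms; releasing works the same way
 * in reverse. The *_reset helpers simply clear any stale request.
 */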
static void __gen6_gt_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
}

static void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE, 1);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK) & 1),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:snb */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

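/*
 * The multi-threaded (MT) forcewake variant uses masked-bit writes
 * (_MASKED_BIT_ENABLE/_MASKED_BIT_DISABLE) so that each requester (e.g.
 * the kernel's FORCEWAKE_KERNEL bit) can toggle its own request bit
 * without clobbering the bits owned by other agents.
 */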
static void __gen6_gt_force_wake_mt_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT, _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
}

static void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv)
{
	u32 forcewake_ack;

	if (IS_HASWELL(dev_priv->dev) || IS_GEN8(dev_priv->dev))
		forcewake_ack = FORCEWAKE_ACK_HSW;
	else
		forcewake_ack = FORCEWAKE_MT_ACK;

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);

	if (wait_for_atomic((__raw_i915_read32(dev_priv, forcewake_ack) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake to ack request.\n");

	/* WaRsForcewakeWaitTC0:ivb,hsw */
	if (INTEL_INFO(dev_priv->dev)->gen < 8)
		__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;

	gtfifodbg = __raw_i915_read32(dev_priv, GTFIFODBG);
	if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
	     "MMIO read or write has been dropped %x\n", gtfifodbg))
		__raw_i915_write32(dev_priv, GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
}

static void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE, 0);
	/* something from same cacheline, but !FORCEWAKE */
	__raw_posting_read(dev_priv, ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}

static void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_MT,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* something from same cacheline, but !FORCEWAKE_MT */
	__raw_posting_read(dev_priv, ECOBUS);
	gen6_gt_check_fifodbg(dev_priv);
}

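/*
 * GT FIFO accounting: writes issued while the GT may be powered down are
 * buffered in a small hardware FIFO. A software estimate of the free
 * entries is kept in uncore.fifo_count; only once it drops below the
 * reserved threshold do we re-read GT_FIFO_FREE_ENTRIES, spinning briefly
 * for space if needed.
 */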
static int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (dev_priv->uncore.fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->uncore.fifo_count = fifo;
	}
	dev_priv->uncore.fifo_count--;

	return ret;
}

static void vlv_force_wake_reset(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_DISABLE(0xffff));
	/* something from same cacheline, but !FORCEWAKE_VLV */
	__raw_posting_read(dev_priv, FORCEWAKE_ACK_VLV);
}

static void vlv_force_wake_get(struct drm_i915_private *dev_priv)
{
	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL) == 0,
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for forcewake old ack to clear.\n");

	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));
	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
			   _MASKED_BIT_ENABLE(FORCEWAKE_KERNEL));

	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_VLV) & FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for GT to ack forcewake request.\n");

	if (wait_for_atomic((__raw_i915_read32(dev_priv, FORCEWAKE_ACK_MEDIA_VLV) &
			     FORCEWAKE_KERNEL),
			    FORCEWAKE_ACK_TIMEOUT_MS))
		DRM_ERROR("Timed out waiting for media to ack forcewake request.\n");

	/* WaRsForcewakeWaitTC0:vlv */
	__gen6_gt_wait_for_thread_c0(dev_priv);
}

static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
{
	__raw_i915_write32(dev_priv, FORCEWAKE_VLV,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	__raw_i915_write32(dev_priv, FORCEWAKE_MEDIA_VLV,
			   _MASKED_BIT_DISABLE(FORCEWAKE_KERNEL));
	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}

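/*
 * Releasing forcewake is deferred: gen6_gt_force_wake_put() keeps the last
 * reference alive and schedules this work item instead, so back-to-back
 * get/put sequences do not bounce the power well.
 */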
static void gen6_force_wake_work(struct work_struct *work)
{
	struct drm_i915_private *dev_priv =
		container_of(work, typeof(*dev_priv), uncore.force_wake_work.work);
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (--dev_priv->uncore.forcewake_count == 0)
		dev_priv->uncore.funcs.force_wake_put(dev_priv);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

void intel_uncore_early_sanitize(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);

	if (IS_HASWELL(dev) &&
	    (__raw_i915_read32(dev_priv, HSW_EDRAM_PRESENT) == 1)) {
		/* The docs do not explain exactly how the calculation can be
		 * made. It is somewhat guessable, but for now, it's always
		 * 128MB.
		 * NB: We can't write IDICR yet because we do not have gt funcs
		 * set up */
		dev_priv->ellc_size = 128;
		DRM_INFO("Found %zuMB of eLLC\n", dev_priv->ellc_size);
	}
}

static void intel_uncore_forcewake_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_VALLEYVIEW(dev)) {
		vlv_force_wake_reset(dev_priv);
	} else if (INTEL_INFO(dev)->gen >= 6) {
		__gen6_gt_force_wake_reset(dev_priv);
		if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
			__gen6_gt_force_wake_mt_reset(dev_priv);
	}
}

void intel_uncore_sanitize(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 reg_val;

	intel_uncore_forcewake_reset(dev);

	/* BIOS often leaves RC6 enabled, but disable it for hw init */
	intel_disable_gt_powersave(dev);

	/* Turn off the power gate; this is required especially on BIOS-less
	 * systems. */
	if (IS_VALLEYVIEW(dev)) {
		mutex_lock(&dev_priv->rps.hw_lock);
		reg_val = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_STATUS);

		if (reg_val & (RENDER_PWRGT | MEDIA_PWRGT | DISP2D_PWRGT))
			vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, 0x0);

		mutex_unlock(&dev_priv->rps.hw_lock);
	}
}

/*
 * Generally this is called implicitly by the register read function. However,
 * if some sequence requires the GT to not power down then this function should
 * be called at the beginning of the sequence followed by a call to
 * gen6_gt_force_wake_put() at the end of the sequence.
 */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_get)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (dev_priv->uncore.forcewake_count++ == 0)
		dev_priv->uncore.funcs.force_wake_get(dev_priv);
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

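/*
 * Illustrative usage sketch (not taken verbatim from any caller in this
 * file; the local variables are hypothetical, only the get/put pairing is
 * what the interface prescribes):
 *
 *	gen6_gt_force_wake_get(dev_priv);
 *	head = I915_READ(RING_HEAD(RENDER_RING_BASE));
 *	tail = I915_READ(RING_TAIL(RENDER_RING_BASE));
 *	gen6_gt_force_wake_put(dev_priv);
 *
 * The power well is guaranteed to stay up between the two calls.
 */
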
/*
 * see gen6_gt_force_wake_get()
 */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
	unsigned long irqflags;

	if (!dev_priv->uncore.funcs.force_wake_put)
		return;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
	if (--dev_priv->uncore.forcewake_count == 0) {
		dev_priv->uncore.forcewake_count++;
		mod_delayed_work(dev_priv->wq,
				 &dev_priv->uncore.force_wake_work,
				 1);
	}
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}

/* We give fast paths for the really cool registers */
#define NEEDS_FORCE_WAKE(dev_priv, reg) \
	 ((reg) < 0x40000 && (reg) != FORCEWAKE)

static void
ilk_dummy_write(struct drm_i915_private *dev_priv)
{
	/* WaIssueDummyWriteToWakeupFromRC6:ilk Issue a dummy write to wake up
	 * the chip from rc6 before touching it for real. MI_MODE is masked,
	 * hence harmless to write 0 into. */
	__raw_i915_write32(dev_priv, MI_MODE, 0);
}

static void
hsw_unclaimed_reg_clear(struct drm_i915_private *dev_priv, u32 reg)
{
	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_ERROR("Unknown unclaimed register before writing to %x\n",
			  reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

static void
hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)
{
	if (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM) {
		DRM_ERROR("Unclaimed write to %x\n", reg);
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}

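/*
 * The macros below stamp out per-generation read handlers for each access
 * width (8/16/32/64 bits). On gen6+, a read of a register below 0x40000
 * transparently grabs forcewake (unless a reference is already held) and
 * drops it again once the read completes.
 */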
#define REG_READ_HEADER(x) \
	unsigned long irqflags; \
	u##x val = 0; \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define REG_READ_FOOTER \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
	trace_i915_reg_rw(false, reg, val, sizeof(val), trace); \
	return val

#define __gen4_read(x) \
static u##x \
gen4_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}

#define __gen5_read(x) \
static u##x \
gen5_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	ilk_dummy_write(dev_priv); \
	val = __raw_i915_read##x(dev_priv, reg); \
	REG_READ_FOOTER; \
}

#define __gen6_read(x) \
static u##x \
gen6_read##x(struct drm_i915_private *dev_priv, off_t reg, bool trace) { \
	REG_READ_HEADER(x); \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_get(dev_priv); \
		val = __raw_i915_read##x(dev_priv, reg); \
		if (dev_priv->uncore.forcewake_count == 0) \
			dev_priv->uncore.funcs.force_wake_put(dev_priv); \
	} else { \
		val = __raw_i915_read##x(dev_priv, reg); \
	} \
	REG_READ_FOOTER; \
}

__gen6_read(8)
__gen6_read(16)
__gen6_read(32)
__gen6_read(64)
__gen5_read(8)
__gen5_read(16)
__gen5_read(32)
__gen5_read(64)
__gen4_read(8)
__gen4_read(16)
__gen4_read(32)
__gen4_read(64)

#undef __gen6_read
#undef __gen5_read
#undef __gen4_read
#undef REG_READ_FOOTER
#undef REG_READ_HEADER

#define REG_WRITE_HEADER \
	unsigned long irqflags; \
	trace_i915_reg_rw(true, reg, val, sizeof(val), trace); \
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags)

#define __gen4_write(x) \
static void \
gen4_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	__raw_i915_write##x(dev_priv, reg, val); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
}

#define __gen5_write(x) \
static void \
gen5_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	REG_WRITE_HEADER; \
	ilk_dummy_write(dev_priv); \
	__raw_i915_write##x(dev_priv, reg, val); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
}

#define __gen6_write(x) \
static void \
gen6_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
}

#define __hsw_write(x) \
static void \
hsw_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	u32 __fifo_ret = 0; \
	REG_WRITE_HEADER; \
	if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
		__fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
	} \
	hsw_unclaimed_reg_clear(dev_priv, reg); \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (unlikely(__fifo_ret)) { \
		gen6_gt_check_fifodbg(dev_priv); \
	} \
	hsw_unclaimed_reg_check(dev_priv, reg); \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
}

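/*
 * On gen8 a small set of registers is "shadowed": writes to them are
 * latched by hardware even while the GT is powered down, so they can skip
 * the forcewake get/put that gen8_write##x() performs for everything else.
 */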
static const u32 gen8_shadowed_regs[] = {
	FORCEWAKE_MT,
	GEN6_RPNSWREQ,
	GEN6_RC_VIDEO_FREQ,
	RING_TAIL(RENDER_RING_BASE),
	RING_TAIL(GEN6_BSD_RING_BASE),
	RING_TAIL(VEBOX_RING_BASE),
	RING_TAIL(BLT_RING_BASE),
	/* TODO: Other registers are not yet used */
};

static bool is_gen8_shadowed(struct drm_i915_private *dev_priv, u32 reg)
{
	int i;
	for (i = 0; i < ARRAY_SIZE(gen8_shadowed_regs); i++)
		if (reg == gen8_shadowed_regs[i])
			return true;

	return false;
}

#define __gen8_write(x) \
static void \
gen8_write##x(struct drm_i915_private *dev_priv, off_t reg, u##x val, bool trace) { \
	bool __needs_put = !is_gen8_shadowed(dev_priv, reg); \
	REG_WRITE_HEADER; \
	if (__needs_put) { \
		dev_priv->uncore.funcs.force_wake_get(dev_priv); \
	} \
	__raw_i915_write##x(dev_priv, reg, val); \
	if (__needs_put) { \
		dev_priv->uncore.funcs.force_wake_put(dev_priv); \
	} \
	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); \
}

__gen8_write(8)
__gen8_write(16)
__gen8_write(32)
__gen8_write(64)
__hsw_write(8)
__hsw_write(16)
__hsw_write(32)
__hsw_write(64)
__gen6_write(8)
__gen6_write(16)
__gen6_write(32)
__gen6_write(64)
__gen5_write(8)
__gen5_write(16)
__gen5_write(32)
__gen5_write(64)
__gen4_write(8)
__gen4_write(16)
__gen4_write(32)
__gen4_write(64)

#undef __gen8_write
#undef __hsw_write
#undef __gen6_write
#undef __gen5_write
#undef __gen4_write
#undef REG_WRITE_HEADER

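/*
 * intel_uncore_init() picks the forcewake implementation and the MMIO
 * read/write vtable for the running platform; from then on all register
 * accesses go through dev_priv->uncore.funcs.
 */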
void intel_uncore_init(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	INIT_DELAYED_WORK(&dev_priv->uncore.force_wake_work,
			  gen6_force_wake_work);

	if (IS_VALLEYVIEW(dev)) {
		dev_priv->uncore.funcs.force_wake_get = vlv_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put = vlv_force_wake_put;
	} else if (IS_HASWELL(dev) || IS_GEN8(dev)) {
		dev_priv->uncore.funcs.force_wake_get = __gen6_gt_force_wake_mt_get;
		dev_priv->uncore.funcs.force_wake_put = __gen6_gt_force_wake_mt_put;
	} else if (IS_IVYBRIDGE(dev)) {
		u32 ecobus;

		/* IVB configs may use multi-threaded forcewake */

		/* A small trick here - if the bios hasn't configured
		 * MT forcewake, and if the device is in RC6, then
		 * force_wake_mt_get will not wake the device and the
		 * ECOBUS read will return zero. Which will be
		 * (correctly) interpreted by the test below as MT
		 * forcewake being disabled.
		 */
		mutex_lock(&dev->struct_mutex);
		__gen6_gt_force_wake_mt_get(dev_priv);
		ecobus = __raw_i915_read32(dev_priv, ECOBUS);
		__gen6_gt_force_wake_mt_put(dev_priv);
		mutex_unlock(&dev->struct_mutex);

		if (ecobus & FORCEWAKE_MT_ENABLE) {
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_mt_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_mt_put;
		} else {
			DRM_INFO("No MT forcewake available on Ivybridge, this can result in issues\n");
			DRM_INFO("when using vblank-synced partial screen updates.\n");
			dev_priv->uncore.funcs.force_wake_get =
				__gen6_gt_force_wake_get;
			dev_priv->uncore.funcs.force_wake_put =
				__gen6_gt_force_wake_put;
		}
	} else if (IS_GEN6(dev)) {
		dev_priv->uncore.funcs.force_wake_get =
			__gen6_gt_force_wake_get;
		dev_priv->uncore.funcs.force_wake_put =
			__gen6_gt_force_wake_put;
	}

	switch (INTEL_INFO(dev)->gen) {
	default:
		dev_priv->uncore.funcs.mmio_writeb = gen8_write8;
		dev_priv->uncore.funcs.mmio_writew = gen8_write16;
		dev_priv->uncore.funcs.mmio_writel = gen8_write32;
		dev_priv->uncore.funcs.mmio_writeq = gen8_write64;
		dev_priv->uncore.funcs.mmio_readb = gen6_read8;
		dev_priv->uncore.funcs.mmio_readw = gen6_read16;
		dev_priv->uncore.funcs.mmio_readl = gen6_read32;
		dev_priv->uncore.funcs.mmio_readq = gen6_read64;
		break;
	case 7:
	case 6:
		if (IS_HASWELL(dev)) {
			dev_priv->uncore.funcs.mmio_writeb = hsw_write8;
			dev_priv->uncore.funcs.mmio_writew = hsw_write16;
			dev_priv->uncore.funcs.mmio_writel = hsw_write32;
			dev_priv->uncore.funcs.mmio_writeq = hsw_write64;
		} else {
			dev_priv->uncore.funcs.mmio_writeb = gen6_write8;
			dev_priv->uncore.funcs.mmio_writew = gen6_write16;
			dev_priv->uncore.funcs.mmio_writel = gen6_write32;
			dev_priv->uncore.funcs.mmio_writeq = gen6_write64;
		}
		dev_priv->uncore.funcs.mmio_readb = gen6_read8;
		dev_priv->uncore.funcs.mmio_readw = gen6_read16;
		dev_priv->uncore.funcs.mmio_readl = gen6_read32;
		dev_priv->uncore.funcs.mmio_readq = gen6_read64;
		break;
	case 5:
		dev_priv->uncore.funcs.mmio_writeb = gen5_write8;
		dev_priv->uncore.funcs.mmio_writew = gen5_write16;
		dev_priv->uncore.funcs.mmio_writel = gen5_write32;
		dev_priv->uncore.funcs.mmio_writeq = gen5_write64;
		dev_priv->uncore.funcs.mmio_readb = gen5_read8;
		dev_priv->uncore.funcs.mmio_readw = gen5_read16;
		dev_priv->uncore.funcs.mmio_readl = gen5_read32;
		dev_priv->uncore.funcs.mmio_readq = gen5_read64;
		break;
	case 4:
	case 3:
	case 2:
		dev_priv->uncore.funcs.mmio_writeb = gen4_write8;
		dev_priv->uncore.funcs.mmio_writew = gen4_write16;
		dev_priv->uncore.funcs.mmio_writel = gen4_write32;
		dev_priv->uncore.funcs.mmio_writeq = gen4_write64;
		dev_priv->uncore.funcs.mmio_readb = gen4_read8;
		dev_priv->uncore.funcs.mmio_readw = gen4_read16;
		dev_priv->uncore.funcs.mmio_readl = gen4_read32;
		dev_priv->uncore.funcs.mmio_readq = gen4_read64;
		break;
	}
}

void intel_uncore_fini(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	flush_delayed_work(&dev_priv->uncore.force_wake_work);

	/* Paranoia: make sure we have disabled everything before we exit. */
	intel_uncore_sanitize(dev);
}

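/*
 * Whitelist for the register read ioctl. gen_bitmask encodes the supported
 * generations as (1 << gen): 0xF0 covers gens 4 through 7. For example, a
 * hypothetical entry valid only on gens 6 and 7 would use 0xC0.
 */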
static const struct register_whitelist {
	uint64_t offset;
	uint32_t size;
	uint32_t gen_bitmask; /* supported gens, 0x10 for 4, 0x30 for 4 and 5, etc. */
} whitelist[] = {
	{ RING_TIMESTAMP(RENDER_RING_BASE), 8, 0xF0 },
};

int i915_reg_read_ioctl(struct drm_device *dev,
			void *data, struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_reg_read *reg = data;
	struct register_whitelist const *entry = whitelist;
	int i;

	for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
		if (entry->offset == reg->offset &&
		    (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
			break;
	}

	if (i == ARRAY_SIZE(whitelist))
		return -EINVAL;

	switch (entry->size) {
	case 8:
		reg->val = I915_READ64(reg->offset);
		break;
	case 4:
		reg->val = I915_READ(reg->offset);
		break;
	case 2:
		reg->val = I915_READ16(reg->offset);
		break;
	case 1:
		reg->val = I915_READ8(reg->offset);
		break;
	default:
		WARN_ON(1);
		return -EINVAL;
	}

	return 0;
}

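/*
 * GPU reset back-ends: on gen4 the GDRST register lives in PCI config
 * space, while Ironlake and gen6+ expose it through MMIO. Each helper
 * asserts the desired reset domains and then polls for completion.
 */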
static int i965_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	return (gdrst & GRDOM_RESET_ENABLE) == 0;
}

static int i965_do_reset(struct drm_device *dev)
{
	int ret;

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0). Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	pci_write_config_byte(dev->pdev, I965_GDRST,
			      GRDOM_MEDIA | GRDOM_RESET_ENABLE);

	ret = wait_for(i965_reset_complete(dev), 500);
	if (ret)
		return ret;

	pci_write_config_byte(dev->pdev, I965_GDRST, 0);

	return 0;
}

static int ironlake_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 gdrst;
	int ret;

	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	gdrst &= ~GRDOM_MASK;
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_RENDER | GRDOM_RESET_ENABLE);
	ret = wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
	if (ret)
		return ret;

	/* We can't reset render&media without also resetting display ... */
	gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	gdrst &= ~GRDOM_MASK;
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR,
		   gdrst | GRDOM_MEDIA | GRDOM_RESET_ENABLE);
	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}

static int gen6_do_reset(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;
	unsigned long irqflags;

	/* Hold uncore.lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	__raw_i915_write32(dev_priv, GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((__raw_i915_read32(dev_priv, GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	intel_uncore_forcewake_reset(dev);

	/* If reset with a user forcewake, try to restore, otherwise turn it off */
	if (dev_priv->uncore.forcewake_count)
		dev_priv->uncore.funcs.force_wake_get(dev_priv);
	else
		dev_priv->uncore.funcs.force_wake_put(dev_priv);

	/* Restore fifo count */
	dev_priv->uncore.fifo_count = __raw_i915_read32(dev_priv, GT_FIFO_FREE_ENTRIES);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
	return ret;
}

int intel_gpu_reset(struct drm_device *dev)
{
	switch (INTEL_INFO(dev)->gen) {
	case 7:
	case 6: return gen6_do_reset(dev);
	case 5: return ironlake_do_reset(dev);
	case 4: return i965_do_reset(dev);
	default: return -ENODEV;
	}
}

void intel_uncore_clear_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	/* XXX needs spinlock around caller's grouping */
	if (HAS_FPGA_DBG_UNCLAIMED(dev))
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
}

void intel_uncore_check_errors(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (HAS_FPGA_DBG_UNCLAIMED(dev) &&
	    (__raw_i915_read32(dev_priv, FPGA_DBG) & FPGA_DBG_RM_NOCLAIM)) {
		DRM_ERROR("Unclaimed register before interrupt\n");
		__raw_i915_write32(dev_priv, FPGA_DBG, FPGA_DBG_RM_NOCLAIM);
	}
}