intel_ringbuffer.c

  1. /*
  2. * Copyright © 2008-2010 Intel Corporation
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice (including the next
  12. * paragraph) shall be included in all copies or substantial portions of the
  13. * Software.
  14. *
  15. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  18. * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19. * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20. * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21. * IN THE SOFTWARE.
  22. *
  23. * Authors:
  24. * Eric Anholt <eric@anholt.net>
  25. * Zou Nan hai <nanhai.zou@intel.com>
  26. * Xiang Haihao <haihao.xiang@intel.com>
  27. *
  28. */
  29. #include <drm/drmP.h>
  30. #include "i915_drv.h"
  31. #include <drm/i915_drm.h>
  32. #include "i915_trace.h"
  33. #include "intel_drv.h"
  34. /*
  35. * 965+ support PIPE_CONTROL commands, which provide finer grained control
  36. * over cache flushing.
  37. */
  38. struct pipe_control {
  39. struct drm_i915_gem_object *obj;
  40. volatile u32 *cpu_page;
  41. u32 gtt_offset;
  42. };
  43. static inline int ring_space(struct intel_ring_buffer *ring)
  44. {
  45. int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
  46. if (space < 0)
  47. space += ring->size;
  48. return space;
  49. }
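/*
 * Editor's note, an illustrative example (not part of the original
 * source): with a 16 KiB ring, head = 0x0100 and tail = 0x3F00, the raw
 * difference 0x0100 - (0x3F00 + I915_RING_FREE_SPACE) is negative, so
 * ring->size is added once and the result is 0x0200 minus the reserved
 * bytes, i.e. the free space between tail and head modulo the ring size.
 */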
  50. static int
  51. gen2_render_ring_flush(struct intel_ring_buffer *ring,
  52. u32 invalidate_domains,
  53. u32 flush_domains)
  54. {
  55. u32 cmd;
  56. int ret;
  57. cmd = MI_FLUSH;
  58. if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
  59. cmd |= MI_NO_WRITE_FLUSH;
  60. if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
  61. cmd |= MI_READ_FLUSH;
  62. ret = intel_ring_begin(ring, 2);
  63. if (ret)
  64. return ret;
  65. intel_ring_emit(ring, cmd);
  66. intel_ring_emit(ring, MI_NOOP);
  67. intel_ring_advance(ring);
  68. return 0;
  69. }
  70. static int
  71. gen4_render_ring_flush(struct intel_ring_buffer *ring,
  72. u32 invalidate_domains,
  73. u32 flush_domains)
  74. {
  75. struct drm_device *dev = ring->dev;
  76. u32 cmd;
  77. int ret;
  78. /*
  79. * read/write caches:
  80. *
  81. * I915_GEM_DOMAIN_RENDER is always invalidated, but is
  82. * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
  83. * also flushed at 2d versus 3d pipeline switches.
  84. *
  85. * read-only caches:
  86. *
  87. * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
  88. * MI_READ_FLUSH is set, and is always flushed on 965.
  89. *
  90. * I915_GEM_DOMAIN_COMMAND may not exist?
  91. *
  92. * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
  93. * invalidated when MI_EXE_FLUSH is set.
  94. *
  95. * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
  96. * invalidated with every MI_FLUSH.
  97. *
  98. * TLBs:
  99. *
  100. * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
  101. * and I915_GEM_DOMAIN_CPU are invalidated at PTE write, and
  102. * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
  103. * are flushed at any MI_FLUSH.
  104. */
  105. cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
  106. if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
  107. cmd &= ~MI_NO_WRITE_FLUSH;
  108. if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
  109. cmd |= MI_EXE_FLUSH;
  110. if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
  111. (IS_G4X(dev) || IS_GEN5(dev)))
  112. cmd |= MI_INVALIDATE_ISP;
  113. ret = intel_ring_begin(ring, 2);
  114. if (ret)
  115. return ret;
  116. intel_ring_emit(ring, cmd);
  117. intel_ring_emit(ring, MI_NOOP);
  118. intel_ring_advance(ring);
  119. return 0;
  120. }
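/*
 * Editor's note, a worked example (not part of the original source): a
 * call with flush_domains = I915_GEM_DOMAIN_RENDER and
 * invalidate_domains = I915_GEM_DOMAIN_INSTRUCTION clears
 * MI_NO_WRITE_FLUSH (so render writes are flushed) and sets MI_EXE_FLUSH,
 * hence the two dwords emitted are (MI_FLUSH | MI_EXE_FLUSH) and MI_NOOP.
 */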
  121. /**
  122. * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
  123. * implementing two workarounds on gen6. From section 1.4.7.1
  124. * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
  125. *
  126. * [DevSNB-C+{W/A}] Before any depth stall flush (including those
  127. * produced by non-pipelined state commands), software needs to first
  128. * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
  129. * 0.
  130. *
  131. * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
  132. * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
  133. *
  134. * And the workaround for these two requires this workaround first:
  135. *
  136. * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
  137. * BEFORE the pipe-control with a post-sync op and no write-cache
  138. * flushes.
  139. *
  140. * And this last workaround is tricky because of the requirements on
  141. * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
  142. * volume 2 part 1:
  143. *
  144. * "1 of the following must also be set:
  145. * - Render Target Cache Flush Enable ([12] of DW1)
  146. * - Depth Cache Flush Enable ([0] of DW1)
  147. * - Stall at Pixel Scoreboard ([1] of DW1)
  148. * - Depth Stall ([13] of DW1)
  149. * - Post-Sync Operation ([13] of DW1)
  150. * - Notify Enable ([8] of DW1)"
  151. *
  152. * The cache flushes require the workaround flush that triggered this
  153. * one, so we can't use it. Depth stall would trigger the same.
  154. * Post-sync nonzero is what triggered this second workaround, so we
  155. * can't use that one either. Notify enable is IRQs, which aren't
  156. * really our business. That leaves only stall at scoreboard.
  157. */
  158. static int
  159. intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
  160. {
  161. struct pipe_control *pc = ring->private;
  162. u32 scratch_addr = pc->gtt_offset + 128;
  163. int ret;
  164. ret = intel_ring_begin(ring, 6);
  165. if (ret)
  166. return ret;
  167. intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
  168. intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
  169. PIPE_CONTROL_STALL_AT_SCOREBOARD);
  170. intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
  171. intel_ring_emit(ring, 0); /* low dword */
  172. intel_ring_emit(ring, 0); /* high dword */
  173. intel_ring_emit(ring, MI_NOOP);
  174. intel_ring_advance(ring);
  175. ret = intel_ring_begin(ring, 6);
  176. if (ret)
  177. return ret;
  178. intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
  179. intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
  180. intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
  181. intel_ring_emit(ring, 0);
  182. intel_ring_emit(ring, 0);
  183. intel_ring_emit(ring, MI_NOOP);
  184. intel_ring_advance(ring);
  185. return 0;
  186. }
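/*
 * Editor's note, a sketch of the resulting stream (not part of the
 * original source): the function emits two GFX_OP_PIPE_CONTROL(5)
 * packets, each padded with an MI_NOOP to keep the emit length even.
 * The first carries only CS_STALL | STALL_AT_SCOREBOARD, the second
 * nothing but a qword post-sync write to the scratch page, which is
 * exactly the ordering the SNB workarounds quoted above demand.
 */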
  187. static int
  188. gen6_render_ring_flush(struct intel_ring_buffer *ring,
  189. u32 invalidate_domains, u32 flush_domains)
  190. {
  191. u32 flags = 0;
  192. struct pipe_control *pc = ring->private;
  193. u32 scratch_addr = pc->gtt_offset + 128;
  194. int ret;
  195. /* Force SNB workarounds for PIPE_CONTROL flushes */
  196. ret = intel_emit_post_sync_nonzero_flush(ring);
  197. if (ret)
  198. return ret;
  199. /* Just flush everything. Experiments have shown that reducing the
  200. * number of bits based on the write domains has little performance
  201. * impact.
  202. */
  203. if (flush_domains) {
  204. flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
  205. flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
  206. /*
  207. * Ensure that any following seqno writes only happen
  208. * when the render cache is indeed flushed.
  209. */
  210. flags |= PIPE_CONTROL_CS_STALL;
  211. }
  212. if (invalidate_domains) {
  213. flags |= PIPE_CONTROL_TLB_INVALIDATE;
  214. flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
  215. flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
  216. flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
  217. flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
  218. flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
  219. /*
  220. * TLB invalidate requires a post-sync write.
  221. */
  222. flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
  223. }
  224. ret = intel_ring_begin(ring, 4);
  225. if (ret)
  226. return ret;
  227. intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
  228. intel_ring_emit(ring, flags);
  229. intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
  230. intel_ring_emit(ring, 0);
  231. intel_ring_advance(ring);
  232. return 0;
  233. }
  234. static int
  235. gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
  236. {
  237. int ret;
  238. ret = intel_ring_begin(ring, 4);
  239. if (ret)
  240. return ret;
  241. intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
  242. intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
  243. PIPE_CONTROL_STALL_AT_SCOREBOARD);
  244. intel_ring_emit(ring, 0);
  245. intel_ring_emit(ring, 0);
  246. intel_ring_advance(ring);
  247. return 0;
  248. }
  249. static int
  250. gen7_render_ring_flush(struct intel_ring_buffer *ring,
  251. u32 invalidate_domains, u32 flush_domains)
  252. {
  253. u32 flags = 0;
  254. struct pipe_control *pc = ring->private;
  255. u32 scratch_addr = pc->gtt_offset + 128;
  256. int ret;
  257. /*
  258. * Ensure that any following seqno writes only happen when the render
  259. * cache is indeed flushed.
  260. *
  261. * Workaround: 4th PIPE_CONTROL command (except the ones with only
  262. * read-cache invalidate bits set) must have the CS_STALL bit set. We
  263. * don't try to be clever and just set it unconditionally.
  264. */
  265. flags |= PIPE_CONTROL_CS_STALL;
  266. /* Just flush everything. Experiments have shown that reducing the
  267. * number of bits based on the write domains has little performance
  268. * impact.
  269. */
  270. if (flush_domains) {
  271. flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
  272. flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
  273. }
  274. if (invalidate_domains) {
  275. flags |= PIPE_CONTROL_TLB_INVALIDATE;
  276. flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
  277. flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
  278. flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
  279. flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
  280. flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
  281. /*
  282. * TLB invalidate requires a post-sync write.
  283. */
  284. flags |= PIPE_CONTROL_QW_WRITE;
  285. /* Workaround: we must issue a pipe_control with CS-stall bit
  286. * set before a pipe_control command that has the state cache
  287. * invalidate bit set. */
  288. gen7_render_ring_cs_stall_wa(ring);
  289. }
  290. ret = intel_ring_begin(ring, 4);
  291. if (ret)
  292. return ret;
  293. intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
  294. intel_ring_emit(ring, flags);
  295. intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
  296. intel_ring_emit(ring, 0);
  297. intel_ring_advance(ring);
  298. return 0;
  299. }
  300. static void ring_write_tail(struct intel_ring_buffer *ring,
  301. u32 value)
  302. {
  303. drm_i915_private_t *dev_priv = ring->dev->dev_private;
  304. I915_WRITE_TAIL(ring, value);
  305. }
  306. u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
  307. {
  308. drm_i915_private_t *dev_priv = ring->dev->dev_private;
  309. u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
  310. RING_ACTHD(ring->mmio_base) : ACTHD;
  311. return I915_READ(acthd_reg);
  312. }
  313. static int init_ring_common(struct intel_ring_buffer *ring)
  314. {
  315. struct drm_device *dev = ring->dev;
  316. drm_i915_private_t *dev_priv = dev->dev_private;
  317. struct drm_i915_gem_object *obj = ring->obj;
  318. int ret = 0;
  319. u32 head;
  320. if (HAS_FORCE_WAKE(dev))
  321. gen6_gt_force_wake_get(dev_priv);
  322. /* Stop the ring if it's running. */
  323. I915_WRITE_CTL(ring, 0);
  324. I915_WRITE_HEAD(ring, 0);
  325. ring->write_tail(ring, 0);
  326. head = I915_READ_HEAD(ring) & HEAD_ADDR;
  327. /* G45 ring initialization fails to reset head to zero */
  328. if (head != 0) {
  329. DRM_DEBUG_KMS("%s head not reset to zero "
  330. "ctl %08x head %08x tail %08x start %08x\n",
  331. ring->name,
  332. I915_READ_CTL(ring),
  333. I915_READ_HEAD(ring),
  334. I915_READ_TAIL(ring),
  335. I915_READ_START(ring));
  336. I915_WRITE_HEAD(ring, 0);
  337. if (I915_READ_HEAD(ring) & HEAD_ADDR) {
  338. DRM_ERROR("failed to set %s head to zero "
  339. "ctl %08x head %08x tail %08x start %08x\n",
  340. ring->name,
  341. I915_READ_CTL(ring),
  342. I915_READ_HEAD(ring),
  343. I915_READ_TAIL(ring),
  344. I915_READ_START(ring));
  345. }
  346. }
  347. /* Initialize the ring. This must happen _after_ we've cleared the ring
  348. * registers with the above sequence (the readback of the HEAD registers
  349. * also enforces ordering), otherwise the hw might lose the new ring
  350. * register values. */
  351. I915_WRITE_START(ring, obj->gtt_offset);
  352. I915_WRITE_CTL(ring,
  353. ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
  354. | RING_VALID);
  355. /* If the head is still not zero, the ring is dead */
  356. if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
  357. I915_READ_START(ring) == obj->gtt_offset &&
  358. (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
  359. DRM_ERROR("%s initialization failed "
  360. "ctl %08x head %08x tail %08x start %08x\n",
  361. ring->name,
  362. I915_READ_CTL(ring),
  363. I915_READ_HEAD(ring),
  364. I915_READ_TAIL(ring),
  365. I915_READ_START(ring));
  366. ret = -EIO;
  367. goto out;
  368. }
  369. if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
  370. i915_kernel_lost_context(ring->dev);
  371. else {
  372. ring->head = I915_READ_HEAD(ring);
  373. ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
  374. ring->space = ring_space(ring);
  375. ring->last_retired_head = -1;
  376. }
  377. out:
  378. if (HAS_FORCE_WAKE(dev))
  379. gen6_gt_force_wake_put(dev_priv);
  380. return ret;
  381. }
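/*
 * Editor's note, an illustrative example (not part of the original
 * source, and assuming the RING_CTL length field counts pages minus
 * one): for the 32-page ring allocated by intel_init_ring_buffer()
 * below (ring->size = 32 * PAGE_SIZE), the write of
 * (ring->size - PAGE_SIZE) & RING_NR_PAGES programs a 32-page ring
 * length and RING_VALID enables the ring.
 */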
  382. static int
  383. init_pipe_control(struct intel_ring_buffer *ring)
  384. {
  385. struct pipe_control *pc;
  386. struct drm_i915_gem_object *obj;
  387. int ret;
  388. if (ring->private)
  389. return 0;
  390. pc = kmalloc(sizeof(*pc), GFP_KERNEL);
  391. if (!pc)
  392. return -ENOMEM;
  393. obj = i915_gem_alloc_object(ring->dev, 4096);
  394. if (obj == NULL) {
  395. DRM_ERROR("Failed to allocate seqno page\n");
  396. ret = -ENOMEM;
  397. goto err;
  398. }
  399. i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
  400. ret = i915_gem_object_pin(obj, 4096, true, false);
  401. if (ret)
  402. goto err_unref;
  403. pc->gtt_offset = obj->gtt_offset;
  404. pc->cpu_page = kmap(sg_page(obj->pages->sgl));
  405. if (pc->cpu_page == NULL)
  406. goto err_unpin;
  407. pc->obj = obj;
  408. ring->private = pc;
  409. return 0;
  410. err_unpin:
  411. i915_gem_object_unpin(obj);
  412. err_unref:
  413. drm_gem_object_unreference(&obj->base);
  414. err:
  415. kfree(pc);
  416. return ret;
  417. }
  418. static void
  419. cleanup_pipe_control(struct intel_ring_buffer *ring)
  420. {
  421. struct pipe_control *pc = ring->private;
  422. struct drm_i915_gem_object *obj;
  423. if (!ring->private)
  424. return;
  425. obj = pc->obj;
  426. kunmap(sg_page(obj->pages->sgl));
  427. i915_gem_object_unpin(obj);
  428. drm_gem_object_unreference(&obj->base);
  429. kfree(pc);
  430. ring->private = NULL;
  431. }
  432. static int init_render_ring(struct intel_ring_buffer *ring)
  433. {
  434. struct drm_device *dev = ring->dev;
  435. struct drm_i915_private *dev_priv = dev->dev_private;
  436. int ret = init_ring_common(ring);
  437. if (INTEL_INFO(dev)->gen > 3) {
  438. I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
  439. if (IS_GEN7(dev))
  440. I915_WRITE(GFX_MODE_GEN7,
  441. _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
  442. _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
  443. }
  444. if (INTEL_INFO(dev)->gen >= 5) {
  445. ret = init_pipe_control(ring);
  446. if (ret)
  447. return ret;
  448. }
  449. if (IS_GEN6(dev)) {
  450. /* From the Sandybridge PRM, volume 1 part 3, page 24:
  451. * "If this bit is set, STCunit will have LRA as replacement
  452. * policy. [...] This bit must be reset. LRA replacement
  453. * policy is not supported."
  454. */
  455. I915_WRITE(CACHE_MODE_0,
  456. _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
  457. /* This is not explicitly set for GEN6, so read the register.
  458. * see intel_ring_mi_set_context() for why we care.
  459. * TODO: consider explicitly setting the bit for GEN5
  460. */
  461. ring->itlb_before_ctx_switch =
  462. !!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
  463. }
  464. if (INTEL_INFO(dev)->gen >= 6)
  465. I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
  466. if (HAS_L3_GPU_CACHE(dev))
  467. I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
  468. return ret;
  469. }
  470. static void render_ring_cleanup(struct intel_ring_buffer *ring)
  471. {
  472. struct drm_device *dev = ring->dev;
  473. if (!ring->private)
  474. return;
  475. if (HAS_BROKEN_CS_TLB(dev))
  476. drm_gem_object_unreference(to_gem_object(ring->private));
  477. cleanup_pipe_control(ring);
  478. }
  479. static void
  480. update_mboxes(struct intel_ring_buffer *ring,
  481. u32 mmio_offset)
  482. {
  483. intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
  484. intel_ring_emit(ring, mmio_offset);
  485. intel_ring_emit(ring, ring->outstanding_lazy_request);
  486. }
  487. /**
  488. * gen6_add_request - Update the semaphore mailbox registers
  489. *
  490. * @ring - ring that is adding a request
  491. * (the seqno written is taken from ring->outstanding_lazy_request)
  492. *
  493. * Update the mailbox registers in the *other* rings with the current seqno.
  494. * This acts like a signal in the canonical semaphore.
  495. */
  496. static int
  497. gen6_add_request(struct intel_ring_buffer *ring)
  498. {
  499. u32 mbox1_reg;
  500. u32 mbox2_reg;
  501. int ret;
  502. ret = intel_ring_begin(ring, 10);
  503. if (ret)
  504. return ret;
  505. mbox1_reg = ring->signal_mbox[0];
  506. mbox2_reg = ring->signal_mbox[1];
  507. update_mboxes(ring, mbox1_reg);
  508. update_mboxes(ring, mbox2_reg);
  509. intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
  510. intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
  511. intel_ring_emit(ring, ring->outstanding_lazy_request);
  512. intel_ring_emit(ring, MI_USER_INTERRUPT);
  513. intel_ring_advance(ring);
  514. return 0;
  515. }
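/*
 * Editor's note, an illustrative breakdown (not part of the original
 * source): the 10 dwords reserved above are consumed as two
 * { MI_LOAD_REGISTER_IMM(1), <mailbox register>, <seqno> } triplets from
 * update_mboxes() (6 dwords), followed by MI_STORE_DWORD_INDEX, the
 * hardware status page index, the seqno and MI_USER_INTERRUPT (4 dwords),
 * so the other rings can see the new seqno before the interrupt is raised.
 */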
  516. static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
  517. u32 seqno)
  518. {
  519. struct drm_i915_private *dev_priv = dev->dev_private;
  520. return dev_priv->last_seqno < seqno;
  521. }
  522. /**
  523. * gen6_ring_sync - sync the waiter to the signaller on seqno
  524. *
  525. * @waiter - ring that is waiting
  526. * @signaller - ring which has, or will signal
  527. * @seqno - seqno which the waiter will block on
  528. */
  529. static int
  530. gen6_ring_sync(struct intel_ring_buffer *waiter,
  531. struct intel_ring_buffer *signaller,
  532. u32 seqno)
  533. {
  534. int ret;
  535. u32 dw1 = MI_SEMAPHORE_MBOX |
  536. MI_SEMAPHORE_COMPARE |
  537. MI_SEMAPHORE_REGISTER;
  538. /* Throughout all of the GEM code, seqno passed implies our current
  539. * seqno is >= the last seqno executed. However for hardware the
  540. * comparison is strictly greater than.
  541. */
  542. seqno -= 1;
  543. WARN_ON(signaller->semaphore_register[waiter->id] ==
  544. MI_SEMAPHORE_SYNC_INVALID);
  545. ret = intel_ring_begin(waiter, 4);
  546. if (ret)
  547. return ret;
  548. /* If seqno wrap happened, omit the wait with no-ops */
  549. if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
  550. intel_ring_emit(waiter,
  551. dw1 |
  552. signaller->semaphore_register[waiter->id]);
  553. intel_ring_emit(waiter, seqno);
  554. intel_ring_emit(waiter, 0);
  555. intel_ring_emit(waiter, MI_NOOP);
  556. } else {
  557. intel_ring_emit(waiter, MI_NOOP);
  558. intel_ring_emit(waiter, MI_NOOP);
  559. intel_ring_emit(waiter, MI_NOOP);
  560. intel_ring_emit(waiter, MI_NOOP);
  561. }
  562. intel_ring_advance(waiter);
  563. return 0;
  564. }
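/*
 * Editor's note, an illustrative example (not part of the original
 * source): to wait until the signaller has completed seqno 100, the
 * waiter programs the semaphore to compare against 99; the hardware
 * test is strictly greater-than, so "mailbox > 99" is equivalent to
 * the driver's ">= 100" convention.
 */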
  565. #define PIPE_CONTROL_FLUSH(ring__, addr__) \
  566. do { \
  567. intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
  568. PIPE_CONTROL_DEPTH_STALL); \
  569. intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
  570. intel_ring_emit(ring__, 0); \
  571. intel_ring_emit(ring__, 0); \
  572. } while (0)
  573. static int
  574. pc_render_add_request(struct intel_ring_buffer *ring)
  575. {
  576. struct pipe_control *pc = ring->private;
  577. u32 scratch_addr = pc->gtt_offset + 128;
  578. int ret;
  579. /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
  580. * incoherent with writes to memory, i.e. completely fubar,
  581. * so we need to use PIPE_NOTIFY instead.
  582. *
  583. * However, we also need to workaround the qword write
  584. * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
  585. * memory before requesting an interrupt.
  586. */
  587. ret = intel_ring_begin(ring, 32);
  588. if (ret)
  589. return ret;
  590. intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
  591. PIPE_CONTROL_WRITE_FLUSH |
  592. PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
  593. intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
  594. intel_ring_emit(ring, ring->outstanding_lazy_request);
  595. intel_ring_emit(ring, 0);
  596. PIPE_CONTROL_FLUSH(ring, scratch_addr);
  597. scratch_addr += 128; /* write to separate cachelines */
  598. PIPE_CONTROL_FLUSH(ring, scratch_addr);
  599. scratch_addr += 128;
  600. PIPE_CONTROL_FLUSH(ring, scratch_addr);
  601. scratch_addr += 128;
  602. PIPE_CONTROL_FLUSH(ring, scratch_addr);
  603. scratch_addr += 128;
  604. PIPE_CONTROL_FLUSH(ring, scratch_addr);
  605. scratch_addr += 128;
  606. PIPE_CONTROL_FLUSH(ring, scratch_addr);
  607. intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
  608. PIPE_CONTROL_WRITE_FLUSH |
  609. PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
  610. PIPE_CONTROL_NOTIFY);
  611. intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
  612. intel_ring_emit(ring, ring->outstanding_lazy_request);
  613. intel_ring_emit(ring, 0);
  614. intel_ring_advance(ring);
  615. return 0;
  616. }
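/*
 * Editor's note, an illustrative example (not part of the original
 * source): the six PIPE_CONTROL_FLUSH invocations write to
 * pc->gtt_offset + 128, +256, ..., +768, one qword write per 128-byte
 * step, so each scratch write lands in a different cacheline as the
 * comment above requires.
 */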
  617. static u32
  618. gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
  619. {
  620. /* Workaround to force correct ordering between irq and seqno writes on
  621. * ivb (and maybe also on snb) by reading from a CS register (like
  622. * ACTHD) before reading the status page. */
  623. if (!lazy_coherency)
  624. intel_ring_get_active_head(ring);
  625. return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
  626. }
  627. static u32
  628. ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
  629. {
  630. return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
  631. }
  632. static void
  633. ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
  634. {
  635. intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
  636. }
  637. static u32
  638. pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
  639. {
  640. struct pipe_control *pc = ring->private;
  641. return pc->cpu_page[0];
  642. }
  643. static void
  644. pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
  645. {
  646. struct pipe_control *pc = ring->private;
  647. pc->cpu_page[0] = seqno;
  648. }
  649. static bool
  650. gen5_ring_get_irq(struct intel_ring_buffer *ring)
  651. {
  652. struct drm_device *dev = ring->dev;
  653. drm_i915_private_t *dev_priv = dev->dev_private;
  654. unsigned long flags;
  655. if (!dev->irq_enabled)
  656. return false;
  657. spin_lock_irqsave(&dev_priv->irq_lock, flags);
  658. if (ring->irq_refcount++ == 0) {
  659. dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
  660. I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
  661. POSTING_READ(GTIMR);
  662. }
  663. spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
  664. return true;
  665. }
  666. static void
  667. gen5_ring_put_irq(struct intel_ring_buffer *ring)
  668. {
  669. struct drm_device *dev = ring->dev;
  670. drm_i915_private_t *dev_priv = dev->dev_private;
  671. unsigned long flags;
  672. spin_lock_irqsave(&dev_priv->irq_lock, flags);
  673. if (--ring->irq_refcount == 0) {
  674. dev_priv->gt_irq_mask |= ring->irq_enable_mask;
  675. I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
  676. POSTING_READ(GTIMR);
  677. }
  678. spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
  679. }
  680. static bool
  681. i9xx_ring_get_irq(struct intel_ring_buffer *ring)
  682. {
  683. struct drm_device *dev = ring->dev;
  684. drm_i915_private_t *dev_priv = dev->dev_private;
  685. unsigned long flags;
  686. if (!dev->irq_enabled)
  687. return false;
  688. spin_lock_irqsave(&dev_priv->irq_lock, flags);
  689. if (ring->irq_refcount++ == 0) {
  690. dev_priv->irq_mask &= ~ring->irq_enable_mask;
  691. I915_WRITE(IMR, dev_priv->irq_mask);
  692. POSTING_READ(IMR);
  693. }
  694. spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
  695. return true;
  696. }
  697. static void
  698. i9xx_ring_put_irq(struct intel_ring_buffer *ring)
  699. {
  700. struct drm_device *dev = ring->dev;
  701. drm_i915_private_t *dev_priv = dev->dev_private;
  702. unsigned long flags;
  703. spin_lock_irqsave(&dev_priv->irq_lock, flags);
  704. if (--ring->irq_refcount == 0) {
  705. dev_priv->irq_mask |= ring->irq_enable_mask;
  706. I915_WRITE(IMR, dev_priv->irq_mask);
  707. POSTING_READ(IMR);
  708. }
  709. spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
  710. }
  711. static bool
  712. i8xx_ring_get_irq(struct intel_ring_buffer *ring)
  713. {
  714. struct drm_device *dev = ring->dev;
  715. drm_i915_private_t *dev_priv = dev->dev_private;
  716. unsigned long flags;
  717. if (!dev->irq_enabled)
  718. return false;
  719. spin_lock_irqsave(&dev_priv->irq_lock, flags);
  720. if (ring->irq_refcount++ == 0) {
  721. dev_priv->irq_mask &= ~ring->irq_enable_mask;
  722. I915_WRITE16(IMR, dev_priv->irq_mask);
  723. POSTING_READ16(IMR);
  724. }
  725. spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
  726. return true;
  727. }
  728. static void
  729. i8xx_ring_put_irq(struct intel_ring_buffer *ring)
  730. {
  731. struct drm_device *dev = ring->dev;
  732. drm_i915_private_t *dev_priv = dev->dev_private;
  733. unsigned long flags;
  734. spin_lock_irqsave(&dev_priv->irq_lock, flags);
  735. if (--ring->irq_refcount == 0) {
  736. dev_priv->irq_mask |= ring->irq_enable_mask;
  737. I915_WRITE16(IMR, dev_priv->irq_mask);
  738. POSTING_READ16(IMR);
  739. }
  740. spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
  741. }
  742. void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
  743. {
  744. struct drm_device *dev = ring->dev;
  745. drm_i915_private_t *dev_priv = ring->dev->dev_private;
  746. u32 mmio = 0;
  747. /* The ring status page addresses are no longer next to the rest of
  748. * the ring registers as of gen7.
  749. */
  750. if (IS_GEN7(dev)) {
  751. switch (ring->id) {
  752. case RCS:
  753. mmio = RENDER_HWS_PGA_GEN7;
  754. break;
  755. case BCS:
  756. mmio = BLT_HWS_PGA_GEN7;
  757. break;
  758. case VCS:
  759. mmio = BSD_HWS_PGA_GEN7;
  760. break;
  761. }
  762. } else if (IS_GEN6(ring->dev)) {
  763. mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
  764. } else {
  765. mmio = RING_HWS_PGA(ring->mmio_base);
  766. }
  767. I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
  768. POSTING_READ(mmio);
  769. }
  770. static int
  771. bsd_ring_flush(struct intel_ring_buffer *ring,
  772. u32 invalidate_domains,
  773. u32 flush_domains)
  774. {
  775. int ret;
  776. ret = intel_ring_begin(ring, 2);
  777. if (ret)
  778. return ret;
  779. intel_ring_emit(ring, MI_FLUSH);
  780. intel_ring_emit(ring, MI_NOOP);
  781. intel_ring_advance(ring);
  782. return 0;
  783. }
  784. static int
  785. i9xx_add_request(struct intel_ring_buffer *ring)
  786. {
  787. int ret;
  788. ret = intel_ring_begin(ring, 4);
  789. if (ret)
  790. return ret;
  791. intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
  792. intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
  793. intel_ring_emit(ring, ring->outstanding_lazy_request);
  794. intel_ring_emit(ring, MI_USER_INTERRUPT);
  795. intel_ring_advance(ring);
  796. return 0;
  797. }
  798. static bool
  799. gen6_ring_get_irq(struct intel_ring_buffer *ring)
  800. {
  801. struct drm_device *dev = ring->dev;
  802. drm_i915_private_t *dev_priv = dev->dev_private;
  803. unsigned long flags;
  804. if (!dev->irq_enabled)
  805. return false;
  806. /* It looks like we need to prevent the gt from suspending while waiting
  807. * for a notify irq, otherwise irqs seem to get lost on at least the
  808. * blt/bsd rings on ivb. */
  809. gen6_gt_force_wake_get(dev_priv);
  810. spin_lock_irqsave(&dev_priv->irq_lock, flags);
  811. if (ring->irq_refcount++ == 0) {
  812. if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
  813. I915_WRITE_IMR(ring, ~(ring->irq_enable_mask |
  814. GEN6_RENDER_L3_PARITY_ERROR));
  815. else
  816. I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
  817. dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
  818. I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
  819. POSTING_READ(GTIMR);
  820. }
  821. spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
  822. return true;
  823. }
  824. static void
  825. gen6_ring_put_irq(struct intel_ring_buffer *ring)
  826. {
  827. struct drm_device *dev = ring->dev;
  828. drm_i915_private_t *dev_priv = dev->dev_private;
  829. unsigned long flags;
  830. spin_lock_irqsave(&dev_priv->irq_lock, flags);
  831. if (--ring->irq_refcount == 0) {
  832. if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
  833. I915_WRITE_IMR(ring, ~GEN6_RENDER_L3_PARITY_ERROR);
  834. else
  835. I915_WRITE_IMR(ring, ~0);
  836. dev_priv->gt_irq_mask |= ring->irq_enable_mask;
  837. I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
  838. POSTING_READ(GTIMR);
  839. }
  840. spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
  841. gen6_gt_force_wake_put(dev_priv);
  842. }
  843. static int
  844. i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
  845. u32 offset, u32 length,
  846. unsigned flags)
  847. {
  848. int ret;
  849. ret = intel_ring_begin(ring, 2);
  850. if (ret)
  851. return ret;
  852. intel_ring_emit(ring,
  853. MI_BATCH_BUFFER_START |
  854. MI_BATCH_GTT |
  855. (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
  856. intel_ring_emit(ring, offset);
  857. intel_ring_advance(ring);
  858. return 0;
  859. }
  860. /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
  861. #define I830_BATCH_LIMIT (256*1024)
  862. static int
  863. i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
  864. u32 offset, u32 len,
  865. unsigned flags)
  866. {
  867. int ret;
  868. if (flags & I915_DISPATCH_PINNED) {
  869. ret = intel_ring_begin(ring, 4);
  870. if (ret)
  871. return ret;
  872. intel_ring_emit(ring, MI_BATCH_BUFFER);
  873. intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
  874. intel_ring_emit(ring, offset + len - 8);
  875. intel_ring_emit(ring, MI_NOOP);
  876. intel_ring_advance(ring);
  877. } else {
  878. struct drm_i915_gem_object *obj = ring->private;
  879. u32 cs_offset = obj->gtt_offset;
  880. if (len > I830_BATCH_LIMIT)
  881. return -ENOSPC;
  882. ret = intel_ring_begin(ring, 9+3);
  883. if (ret)
  884. return ret;
  885. /* Blit the batch (which now has all relocs applied) to the stable batch
  886. * scratch bo area (so that the CS never stumbles over its tlb
  887. * invalidation bug) ... */
  888. intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
  889. XY_SRC_COPY_BLT_WRITE_ALPHA |
  890. XY_SRC_COPY_BLT_WRITE_RGB);
  891. intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
  892. intel_ring_emit(ring, 0);
  893. intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
  894. intel_ring_emit(ring, cs_offset);
  895. intel_ring_emit(ring, 0);
  896. intel_ring_emit(ring, 4096);
  897. intel_ring_emit(ring, offset);
  898. intel_ring_emit(ring, MI_FLUSH);
  899. /* ... and execute it. */
  900. intel_ring_emit(ring, MI_BATCH_BUFFER);
  901. intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
  902. intel_ring_emit(ring, cs_offset + len - 8);
  903. intel_ring_advance(ring);
  904. }
  905. return 0;
  906. }
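/*
 * Editor's note, an illustrative example (not part of the original
 * source, and assuming the usual height/width packing of
 * XY_SRC_COPY_BLT_CMD): a 64 KiB batch is copied as
 * DIV_ROUND_UP(65536, 4096) = 16 rows of 1024 pixels at 32 bpp
 * (4096 bytes per row, pitch 4096) into the stable scratch bo, and the
 * MI_BATCH_BUFFER that follows then executes from cs_offset instead of
 * the original, TLB-bug-prone location.
 */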
  907. static int
  908. i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
  909. u32 offset, u32 len,
  910. unsigned flags)
  911. {
  912. int ret;
  913. ret = intel_ring_begin(ring, 2);
  914. if (ret)
  915. return ret;
  916. intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
  917. intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
  918. intel_ring_advance(ring);
  919. return 0;
  920. }
  921. static void cleanup_status_page(struct intel_ring_buffer *ring)
  922. {
  923. struct drm_i915_gem_object *obj;
  924. obj = ring->status_page.obj;
  925. if (obj == NULL)
  926. return;
  927. kunmap(sg_page(obj->pages->sgl));
  928. i915_gem_object_unpin(obj);
  929. drm_gem_object_unreference(&obj->base);
  930. ring->status_page.obj = NULL;
  931. }
  932. static int init_status_page(struct intel_ring_buffer *ring)
  933. {
  934. struct drm_device *dev = ring->dev;
  935. struct drm_i915_gem_object *obj;
  936. int ret;
  937. obj = i915_gem_alloc_object(dev, 4096);
  938. if (obj == NULL) {
  939. DRM_ERROR("Failed to allocate status page\n");
  940. ret = -ENOMEM;
  941. goto err;
  942. }
  943. i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
  944. ret = i915_gem_object_pin(obj, 4096, true, false);
  945. if (ret != 0) {
  946. goto err_unref;
  947. }
  948. ring->status_page.gfx_addr = obj->gtt_offset;
  949. ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
  950. if (ring->status_page.page_addr == NULL) {
  951. ret = -ENOMEM;
  952. goto err_unpin;
  953. }
  954. ring->status_page.obj = obj;
  955. memset(ring->status_page.page_addr, 0, PAGE_SIZE);
  956. intel_ring_setup_status_page(ring);
  957. DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
  958. ring->name, ring->status_page.gfx_addr);
  959. return 0;
  960. err_unpin:
  961. i915_gem_object_unpin(obj);
  962. err_unref:
  963. drm_gem_object_unreference(&obj->base);
  964. err:
  965. return ret;
  966. }
  967. static int init_phys_hws_pga(struct intel_ring_buffer *ring)
  968. {
  969. struct drm_i915_private *dev_priv = ring->dev->dev_private;
  970. u32 addr;
  971. if (!dev_priv->status_page_dmah) {
  972. dev_priv->status_page_dmah =
  973. drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
  974. if (!dev_priv->status_page_dmah)
  975. return -ENOMEM;
  976. }
  977. addr = dev_priv->status_page_dmah->busaddr;
  978. if (INTEL_INFO(ring->dev)->gen >= 4)
  979. addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
  980. I915_WRITE(HWS_PGA, addr);
  981. ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
  982. memset(ring->status_page.page_addr, 0, PAGE_SIZE);
  983. return 0;
  984. }
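/*
 * Editor's note, an illustrative example (not part of the original
 * source): on gen4+ with the status page at bus address 0x123456000,
 * bits 35:32 (0x1) are folded into bits 7:4 of the register value by
 * the shift-and-mask above, so HWS_PGA is written as 0x23456010.
 */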
  985. static int intel_init_ring_buffer(struct drm_device *dev,
  986. struct intel_ring_buffer *ring)
  987. {
  988. struct drm_i915_gem_object *obj;
  989. struct drm_i915_private *dev_priv = dev->dev_private;
  990. int ret;
  991. ring->dev = dev;
  992. INIT_LIST_HEAD(&ring->active_list);
  993. INIT_LIST_HEAD(&ring->request_list);
  994. ring->size = 32 * PAGE_SIZE;
  995. memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));
  996. init_waitqueue_head(&ring->irq_queue);
  997. if (I915_NEED_GFX_HWS(dev)) {
  998. ret = init_status_page(ring);
  999. if (ret)
  1000. return ret;
  1001. } else {
  1002. BUG_ON(ring->id != RCS);
  1003. ret = init_phys_hws_pga(ring);
  1004. if (ret)
  1005. return ret;
  1006. }
  1007. obj = NULL;
  1008. if (!HAS_LLC(dev))
  1009. obj = i915_gem_object_create_stolen(dev, ring->size);
  1010. if (obj == NULL)
  1011. obj = i915_gem_alloc_object(dev, ring->size);
  1012. if (obj == NULL) {
  1013. DRM_ERROR("Failed to allocate ringbuffer\n");
  1014. ret = -ENOMEM;
  1015. goto err_hws;
  1016. }
  1017. ring->obj = obj;
  1018. ret = i915_gem_object_pin(obj, PAGE_SIZE, true, false);
  1019. if (ret)
  1020. goto err_unref;
  1021. ret = i915_gem_object_set_to_gtt_domain(obj, true);
  1022. if (ret)
  1023. goto err_unpin;
  1024. ring->virtual_start =
  1025. ioremap_wc(dev_priv->gtt.mappable_base + obj->gtt_offset,
  1026. ring->size);
  1027. if (ring->virtual_start == NULL) {
  1028. DRM_ERROR("Failed to map ringbuffer.\n");
  1029. ret = -EINVAL;
  1030. goto err_unpin;
  1031. }
  1032. ret = ring->init(ring);
  1033. if (ret)
  1034. goto err_unmap;
  1035. /* Workaround an erratum on the i830 which causes a hang if
  1036. * the TAIL pointer points to within the last 2 cachelines
  1037. * of the buffer.
  1038. */
  1039. ring->effective_size = ring->size;
  1040. if (IS_I830(ring->dev) || IS_845G(ring->dev))
  1041. ring->effective_size -= 128;
  1042. intel_ring_init_seqno(ring, dev_priv->last_seqno);
  1043. return 0;
  1044. err_unmap:
  1045. iounmap(ring->virtual_start);
  1046. err_unpin:
  1047. i915_gem_object_unpin(obj);
  1048. err_unref:
  1049. drm_gem_object_unreference(&obj->base);
  1050. ring->obj = NULL;
  1051. err_hws:
  1052. cleanup_status_page(ring);
  1053. return ret;
  1054. }
  1055. void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
  1056. {
  1057. struct drm_i915_private *dev_priv;
  1058. int ret;
  1059. if (ring->obj == NULL)
  1060. return;
  1061. /* Disable the ring buffer. The ring must be idle at this point */
  1062. dev_priv = ring->dev->dev_private;
  1063. ret = intel_ring_idle(ring);
  1064. if (ret)
  1065. DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
  1066. ring->name, ret);
  1067. I915_WRITE_CTL(ring, 0);
  1068. iounmap(ring->virtual_start);
  1069. i915_gem_object_unpin(ring->obj);
  1070. drm_gem_object_unreference(&ring->obj->base);
  1071. ring->obj = NULL;
  1072. if (ring->cleanup)
  1073. ring->cleanup(ring);
  1074. cleanup_status_page(ring);
  1075. }
  1076. static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
  1077. {
  1078. int ret;
  1079. ret = i915_wait_seqno(ring, seqno);
  1080. if (!ret)
  1081. i915_gem_retire_requests_ring(ring);
  1082. return ret;
  1083. }
  1084. static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
  1085. {
  1086. struct drm_i915_gem_request *request;
  1087. u32 seqno = 0;
  1088. int ret;
  1089. i915_gem_retire_requests_ring(ring);
  1090. if (ring->last_retired_head != -1) {
  1091. ring->head = ring->last_retired_head;
  1092. ring->last_retired_head = -1;
  1093. ring->space = ring_space(ring);
  1094. if (ring->space >= n)
  1095. return 0;
  1096. }
  1097. list_for_each_entry(request, &ring->request_list, list) {
  1098. int space;
  1099. if (request->tail == -1)
  1100. continue;
  1101. space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
  1102. if (space < 0)
  1103. space += ring->size;
  1104. if (space >= n) {
  1105. seqno = request->seqno;
  1106. break;
  1107. }
  1108. /* Consume this request in case we need more space than
  1109. * is available and so need to prevent a race between
  1110. * updating last_retired_head and direct reads of
  1111. * I915_RING_HEAD. It also provides a nice sanity check.
  1112. */
  1113. request->tail = -1;
  1114. }
  1115. if (seqno == 0)
  1116. return -ENOSPC;
  1117. ret = intel_ring_wait_seqno(ring, seqno);
  1118. if (ret)
  1119. return ret;
  1120. if (WARN_ON(ring->last_retired_head == -1))
  1121. return -ENOSPC;
  1122. ring->head = ring->last_retired_head;
  1123. ring->last_retired_head = -1;
  1124. ring->space = ring_space(ring);
  1125. if (WARN_ON(ring->space < n))
  1126. return -ENOSPC;
  1127. return 0;
  1128. }
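/*
 * Editor's note, an illustrative example (not part of the original
 * source): with a 16 KiB ring, ring->tail = 0x3000 and a queued request
 * whose request->tail is 0x0800, the candidate space is
 * 0x0800 - (0x3000 + I915_RING_FREE_SPACE) + 0x4000, so waiting for that
 * request's seqno frees a little under 0x1800 bytes for new commands.
 */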
  1129. static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
  1130. {
  1131. struct drm_device *dev = ring->dev;
  1132. struct drm_i915_private *dev_priv = dev->dev_private;
  1133. unsigned long end;
  1134. int ret;
  1135. ret = intel_ring_wait_request(ring, n);
  1136. if (ret != -ENOSPC)
  1137. return ret;
  1138. trace_i915_ring_wait_begin(ring);
  1139. /* With GEM the hangcheck timer should kick us out of the loop;
  1140. * leaving it early runs the risk of corrupting GEM state (due
  1141. * to running on almost untested codepaths). But on resume
  1142. * timers don't work yet, so prevent a complete hang in that
  1143. * case by choosing an insanely large timeout. */
  1144. end = jiffies + 60 * HZ;
  1145. do {
  1146. ring->head = I915_READ_HEAD(ring);
  1147. ring->space = ring_space(ring);
  1148. if (ring->space >= n) {
  1149. trace_i915_ring_wait_end(ring);
  1150. return 0;
  1151. }
  1152. if (dev->primary->master) {
  1153. struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
  1154. if (master_priv->sarea_priv)
  1155. master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
  1156. }
  1157. msleep(1);
  1158. ret = i915_gem_check_wedge(&dev_priv->gpu_error,
  1159. dev_priv->mm.interruptible);
  1160. if (ret)
  1161. return ret;
  1162. } while (!time_after(jiffies, end));
  1163. trace_i915_ring_wait_end(ring);
  1164. return -EBUSY;
  1165. }
  1166. static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
  1167. {
  1168. uint32_t __iomem *virt;
  1169. int rem = ring->size - ring->tail;
  1170. if (ring->space < rem) {
  1171. int ret = ring_wait_for_space(ring, rem);
  1172. if (ret)
  1173. return ret;
  1174. }
  1175. virt = ring->virtual_start + ring->tail;
  1176. rem /= 4;
  1177. while (rem--)
  1178. iowrite32(MI_NOOP, virt++);
  1179. ring->tail = 0;
  1180. ring->space = ring_space(ring);
  1181. return 0;
  1182. }
  1183. int intel_ring_idle(struct intel_ring_buffer *ring)
  1184. {
  1185. u32 seqno;
  1186. int ret;
  1187. /* We need to add any requests required to flush the objects and ring */
  1188. if (ring->outstanding_lazy_request) {
  1189. ret = i915_add_request(ring, NULL, NULL);
  1190. if (ret)
  1191. return ret;
  1192. }
  1193. /* Wait upon the last request to be completed */
  1194. if (list_empty(&ring->request_list))
  1195. return 0;
  1196. seqno = list_entry(ring->request_list.prev,
  1197. struct drm_i915_gem_request,
  1198. list)->seqno;
  1199. return i915_wait_seqno(ring, seqno);
  1200. }
  1201. static int
  1202. intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
  1203. {
  1204. if (ring->outstanding_lazy_request)
  1205. return 0;
  1206. return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_request);
  1207. }
  1208. static int __intel_ring_begin(struct intel_ring_buffer *ring,
  1209. int bytes)
  1210. {
  1211. int ret;
  1212. if (unlikely(ring->tail + bytes > ring->effective_size)) {
  1213. ret = intel_wrap_ring_buffer(ring);
  1214. if (unlikely(ret))
  1215. return ret;
  1216. }
  1217. if (unlikely(ring->space < bytes)) {
  1218. ret = ring_wait_for_space(ring, bytes);
  1219. if (unlikely(ret))
  1220. return ret;
  1221. }
  1222. ring->space -= bytes;
  1223. return 0;
  1224. }
  1225. int intel_ring_begin(struct intel_ring_buffer *ring,
  1226. int num_dwords)
  1227. {
  1228. drm_i915_private_t *dev_priv = ring->dev->dev_private;
  1229. int ret;
  1230. ret = i915_gem_check_wedge(&dev_priv->gpu_error,
  1231. dev_priv->mm.interruptible);
  1232. if (ret)
  1233. return ret;
  1234. /* Preallocate the olr before touching the ring */
  1235. ret = intel_ring_alloc_seqno(ring);
  1236. if (ret)
  1237. return ret;
  1238. return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t));
  1239. }
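/*
 * Editor's note, a usage sketch (not part of the original source), the
 * pattern every emitter in this file follows:
 *
 *	ret = intel_ring_begin(ring, 2);
 *	if (ret)
 *		return ret;
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_emit(ring, MI_NOOP);
 *	intel_ring_advance(ring);
 *
 * The dword count passed to intel_ring_begin() must match the number of
 * intel_ring_emit() calls made before intel_ring_advance().
 */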
  1240. void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
  1241. {
  1242. struct drm_i915_private *dev_priv = ring->dev->dev_private;
  1243. BUG_ON(ring->outstanding_lazy_request);
  1244. if (INTEL_INFO(ring->dev)->gen >= 6) {
  1245. I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
  1246. I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
  1247. }
  1248. ring->set_seqno(ring, seqno);
  1249. }
  1250. void intel_ring_advance(struct intel_ring_buffer *ring)
  1251. {
  1252. struct drm_i915_private *dev_priv = ring->dev->dev_private;
  1253. ring->tail &= ring->size - 1;
  1254. if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
  1255. return;
  1256. ring->write_tail(ring, ring->tail);
  1257. }
  1258. static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
  1259. u32 value)
  1260. {
  1261. drm_i915_private_t *dev_priv = ring->dev->dev_private;
  1262. /* Every tail move must follow the sequence below */
  1263. /* Disable notification that the ring is IDLE. The GT
  1264. * will then assume that it is busy and bring it out of rc6.
  1265. */
  1266. I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
  1267. _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
  1268. /* Clear the context id. Here be magic! */
  1269. I915_WRITE64(GEN6_BSD_RNCID, 0x0);
  1270. /* Wait for the ring not to be idle, i.e. for it to wake up. */
  1271. if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
  1272. GEN6_BSD_SLEEP_INDICATOR) == 0,
  1273. 50))
  1274. DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
  1275. /* Now that the ring is fully powered up, update the tail */
  1276. I915_WRITE_TAIL(ring, value);
  1277. POSTING_READ(RING_TAIL(ring->mmio_base));
  1278. /* Let the ring send IDLE messages to the GT again,
  1279. * and so let it sleep to conserve power when idle.
  1280. */
  1281. I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
  1282. _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
  1283. }
  1284. static int gen6_ring_flush(struct intel_ring_buffer *ring,
  1285. u32 invalidate, u32 flush)
  1286. {
  1287. uint32_t cmd;
  1288. int ret;
  1289. ret = intel_ring_begin(ring, 4);
  1290. if (ret)
  1291. return ret;
  1292. cmd = MI_FLUSH_DW;
  1293. /*
  1294. * Bspec vol 1c.5 - video engine command streamer:
  1295. * "If ENABLED, all TLBs will be invalidated once the flush
  1296. * operation is complete. This bit is only valid when the
  1297. * Post-Sync Operation field is a value of 1h or 3h."
  1298. */
  1299. if (invalidate & I915_GEM_GPU_DOMAINS)
  1300. cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
  1301. MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
  1302. intel_ring_emit(ring, cmd);
  1303. intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
  1304. intel_ring_emit(ring, 0);
  1305. intel_ring_emit(ring, MI_NOOP);
  1306. intel_ring_advance(ring);
  1307. return 0;
  1308. }
  1309. static int
  1310. hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
  1311. u32 offset, u32 len,
  1312. unsigned flags)
  1313. {
  1314. int ret;
  1315. ret = intel_ring_begin(ring, 2);
  1316. if (ret)
  1317. return ret;
  1318. intel_ring_emit(ring,
  1319. MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
  1320. (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
  1321. /* bit0-7 is the length on GEN6+ */
  1322. intel_ring_emit(ring, offset);
  1323. intel_ring_advance(ring);
  1324. return 0;
  1325. }
  1326. static int
  1327. gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
  1328. u32 offset, u32 len,
  1329. unsigned flags)
  1330. {
  1331. int ret;
  1332. ret = intel_ring_begin(ring, 2);
  1333. if (ret)
  1334. return ret;
  1335. intel_ring_emit(ring,
  1336. MI_BATCH_BUFFER_START |
  1337. (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
  1338. /* bit0-7 is the length on GEN6+ */
  1339. intel_ring_emit(ring, offset);
  1340. intel_ring_advance(ring);
  1341. return 0;
  1342. }
  1343. /* Blitter support (SandyBridge+) */
  1344. static int blt_ring_flush(struct intel_ring_buffer *ring,
  1345. u32 invalidate, u32 flush)
  1346. {
  1347. uint32_t cmd;
  1348. int ret;
  1349. ret = intel_ring_begin(ring, 4);
  1350. if (ret)
  1351. return ret;
  1352. cmd = MI_FLUSH_DW;
  1353. /*
  1354. * Bspec vol 1c.3 - blitter engine command streamer:
  1355. * "If ENABLED, all TLBs will be invalidated once the flush
  1356. * operation is complete. This bit is only valid when the
  1357. * Post-Sync Operation field is a value of 1h or 3h."
  1358. */
  1359. if (invalidate & I915_GEM_DOMAIN_RENDER)
  1360. cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
  1361. MI_FLUSH_DW_OP_STOREDW;
  1362. intel_ring_emit(ring, cmd);
  1363. intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
  1364. intel_ring_emit(ring, 0);
  1365. intel_ring_emit(ring, MI_NOOP);
  1366. intel_ring_advance(ring);
  1367. return 0;
  1368. }
int intel_init_render_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;

	if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
		ring->flush = gen7_render_ring_flush;
		if (INTEL_INFO(dev)->gen == 6)
			ring->flush = gen6_render_ring_flush;
		ring->irq_get = gen6_ring_get_irq;
		ring->irq_put = gen6_ring_put_irq;
		ring->irq_enable_mask = GT_USER_INTERRUPT;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		ring->sync_to = gen6_ring_sync;
		ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
		ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
		ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB;
		ring->signal_mbox[0] = GEN6_VRSYNC;
		ring->signal_mbox[1] = GEN6_BRSYNC;
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->flush = gen4_render_ring_flush;
		ring->get_seqno = pc_render_get_seqno;
		ring->set_seqno = pc_render_set_seqno;
		ring->irq_get = gen5_ring_get_irq;
		ring->irq_put = gen5_ring_put_irq;
		ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
	} else {
		ring->add_request = i9xx_add_request;
		if (INTEL_INFO(dev)->gen < 4)
			ring->flush = gen2_render_ring_flush;
		else
			ring->flush = gen4_render_ring_flush;
		ring->get_seqno = ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (IS_GEN2(dev)) {
			ring->irq_get = i8xx_ring_get_irq;
			ring->irq_put = i8xx_ring_put_irq;
		} else {
			ring->irq_get = i9xx_ring_get_irq;
			ring->irq_put = i9xx_ring_put_irq;
		}
		ring->irq_enable_mask = I915_USER_INTERRUPT;
	}
	ring->write_tail = ring_write_tail;
	if (IS_HASWELL(dev))
		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
	else if (INTEL_INFO(dev)->gen >= 6)
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	else if (INTEL_INFO(dev)->gen >= 4)
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	else if (IS_I830(dev) || IS_845G(dev))
		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
	else
		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
	ring->init = init_render_ring;
	ring->cleanup = render_ring_cleanup;

	/* Workaround batchbuffer to combat CS tlb bug. */
	if (HAS_BROKEN_CS_TLB(dev)) {
		struct drm_i915_gem_object *obj;
		int ret;

		obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
		if (obj == NULL) {
			DRM_ERROR("Failed to allocate batch bo\n");
			return -ENOMEM;
		}

		ret = i915_gem_object_pin(obj, 0, true, false);
		if (ret != 0) {
			drm_gem_object_unreference(&obj->base);
			DRM_ERROR("Failed to pin batch bo\n");
			return ret;
		}

		ring->private = obj;
	}

	return intel_init_ring_buffer(dev, ring);
}

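/*
 * Legacy (DRI1/UMS) render ring setup: the ring memory is supplied by the
 * caller via 'start'/'size' instead of being allocated by the driver, so
 * only the pre-gen6 paths are wired up and the common
 * intel_init_ring_buffer() helper is bypassed.
 */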
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	int ret;

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;

	if (INTEL_INFO(dev)->gen >= 6) {
		/* non-kms not supported on gen6+ */
		return -ENODEV;
	}

	/* Note: gem is not supported on gen5/ilk without kms (the corresponding
	 * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
	 * the special gen5 functions. */
	ring->add_request = i9xx_add_request;
	if (INTEL_INFO(dev)->gen < 4)
		ring->flush = gen2_render_ring_flush;
	else
		ring->flush = gen4_render_ring_flush;
	ring->get_seqno = ring_get_seqno;
	ring->set_seqno = ring_set_seqno;
	if (IS_GEN2(dev)) {
		ring->irq_get = i8xx_ring_get_irq;
		ring->irq_put = i8xx_ring_put_irq;
	} else {
		ring->irq_get = i9xx_ring_get_irq;
		ring->irq_put = i9xx_ring_put_irq;
	}
	ring->irq_enable_mask = I915_USER_INTERRUPT;
	ring->write_tail = ring_write_tail;
	if (INTEL_INFO(dev)->gen >= 4)
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	else if (IS_I830(dev) || IS_845G(dev))
		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
	else
		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
	ring->init = init_render_ring;
	ring->cleanup = render_ring_cleanup;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);

	ring->size = size;
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev) || IS_845G(ring->dev))
		ring->effective_size -= 128;

	ring->virtual_start = ioremap_wc(start, size);
	if (ring->virtual_start == NULL) {
		DRM_ERROR("can not ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	if (!I915_NEED_GFX_HWS(dev)) {
		ret = init_phys_hws_pga(ring);
		if (ret)
			return ret;
	}

	return 0;
}

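/*
 * BSD (video) ring setup: gen6 needs the gen6_bsd_ring_write_tail
 * workaround for tail updates, while pre-gen6 parts use the legacy
 * BSD_RING_BASE mmio block with the simpler bsd_ring_flush() and
 * i9xx_add_request() paths.
 */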
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[VCS];

	ring->name = "bsd ring";
	ring->id = VCS;

	ring->write_tail = ring_write_tail;
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		ring->mmio_base = GEN6_BSD_RING_BASE;
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN6(dev))
			ring->write_tail = gen6_bsd_ring_write_tail;
		ring->flush = gen6_ring_flush;
		ring->add_request = gen6_add_request;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
		ring->irq_get = gen6_ring_get_irq;
		ring->irq_put = gen6_ring_put_irq;
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
		ring->sync_to = gen6_ring_sync;
		ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR;
		ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID;
		ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB;
		ring->signal_mbox[0] = GEN6_RVSYNC;
		ring->signal_mbox[1] = GEN6_BVSYNC;
	} else {
		ring->mmio_base = BSD_RING_BASE;
		ring->flush = bsd_ring_flush;
		ring->add_request = i9xx_add_request;
		ring->get_seqno = ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (IS_GEN5(dev)) {
			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
			ring->irq_get = gen5_ring_get_irq;
			ring->irq_put = gen5_ring_put_irq;
		} else {
			ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
			ring->irq_get = i9xx_ring_get_irq;
			ring->irq_put = i9xx_ring_put_irq;
		}
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	}
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

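/* The blitter ring only exists on gen6+, so no per-generation branching here. */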
int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];

	ring->name = "blitter ring";
	ring->id = BCS;

	ring->mmio_base = BLT_RING_BASE;
	ring->write_tail = ring_write_tail;
	ring->flush = blt_ring_flush;
	ring->add_request = gen6_add_request;
	ring->get_seqno = gen6_ring_get_seqno;
	ring->set_seqno = ring_set_seqno;
	ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
	ring->irq_get = gen6_ring_get_irq;
	ring->irq_put = gen6_ring_put_irq;
	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	ring->sync_to = gen6_ring_sync;
	ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR;
	ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV;
	ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID;
	ring->signal_mbox[0] = GEN6_RBSYNC;
	ring->signal_mbox[1] = GEN6_VBSYNC;
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

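/*
 * Cache maintenance helpers built on top of ring->flush().
 * intel_ring_flush_all_caches() writes back dirty GPU caches (for example
 * before the CPU reads results back) and is a no-op unless a previous
 * submission set gpu_caches_dirty. intel_ring_invalidate_all_caches()
 * invalidates the GPU caches/TLBs before a new batch runs, flushing as
 * well if gpu_caches_dirty was set.
 *
 * Illustrative usage (a sketch of the intended protocol, not copied from
 * the execbuffer path):
 *
 *	ret = intel_ring_invalidate_all_caches(ring);	- before dispatching a batch
 *	... dispatch the batch ...
 *	ring->gpu_caches_dirty = true;			- the batch may have dirtied caches
 *	ret = intel_ring_flush_all_caches(ring);	- before CPU readback
 */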
int
intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
{
	int ret;

	if (!ring->gpu_caches_dirty)
		return 0;

	ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);

	ring->gpu_caches_dirty = false;
	return 0;
}

int
intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
{
	uint32_t flush_domains;
	int ret;

	flush_domains = 0;
	if (ring->gpu_caches_dirty)
		flush_domains = I915_GEM_GPU_DOMAINS;

	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
	if (ret)
		return ret;

	trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);

	ring->gpu_caches_dirty = false;
	return 0;
}