intel_ringbuffer.c

/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include <drm/drmP.h>
#include "i915_drv.h"
#include <drm/i915_drm.h>
#include "i915_trace.h"
#include "intel_drv.h"
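
/*
 * Free space left in the ring, computed from the cached head/tail.
 * I915_RING_FREE_SPACE is held back so that the tail can never quite
 * catch up with the head, which would be indistinguishable from an
 * empty (head == tail) ring.
 */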
static inline int ring_space(struct intel_ring_buffer *ring)
{
        int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
        if (space < 0)
                space += ring->size;
        return space;
}
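
/*
 * Publish the new tail to the hardware so it starts executing the
 * commands queued so far. If this ring has been stopped for debugging
 * (gpu_error.stop_rings), the tail write is skipped and the commands
 * stay parked in the buffer.
 */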
void __intel_ring_advance(struct intel_ring_buffer *ring)
{
        struct drm_i915_private *dev_priv = ring->dev->dev_private;

        ring->tail &= ring->size - 1;
        if (dev_priv->gpu_error.stop_rings & intel_ring_flag(ring))
                return;
        ring->write_tail(ring, ring->tail);
}

static int
gen2_render_ring_flush(struct intel_ring_buffer *ring,
                       u32 invalidate_domains,
                       u32 flush_domains)
{
        u32 cmd;
        int ret;

        cmd = MI_FLUSH;
        if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
                cmd |= MI_NO_WRITE_FLUSH;

        if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
                cmd |= MI_READ_FLUSH;

        ret = intel_ring_begin(ring, 2);
        if (ret)
                return ret;

        intel_ring_emit(ring, cmd);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);

        return 0;
}

static int
gen4_render_ring_flush(struct intel_ring_buffer *ring,
                       u32 invalidate_domains,
                       u32 flush_domains)
{
        struct drm_device *dev = ring->dev;
        u32 cmd;
        int ret;

        /*
         * read/write caches:
         *
         * I915_GEM_DOMAIN_RENDER is always invalidated, but is
         * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
         * also flushed at 2d versus 3d pipeline switches.
         *
         * read-only caches:
         *
         * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
         * MI_READ_FLUSH is set, and is always flushed on 965.
         *
         * I915_GEM_DOMAIN_COMMAND may not exist?
         *
         * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
         * invalidated when MI_EXE_FLUSH is set.
         *
         * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
         * invalidated with every MI_FLUSH.
         *
         * TLBs:
         *
         * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
         * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
         * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
         * are flushed at any MI_FLUSH.
         */

        cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
        if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
                cmd &= ~MI_NO_WRITE_FLUSH;
        if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
                cmd |= MI_EXE_FLUSH;

        if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
            (IS_G4X(dev) || IS_GEN5(dev)))
                cmd |= MI_INVALIDATE_ISP;

        ret = intel_ring_begin(ring, 2);
        if (ret)
                return ret;

        intel_ring_emit(ring, cmd);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);

        return 0;
}

/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6. From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it. Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either. Notify enable is IRQs, which aren't
 * really our business. That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
{
        u32 scratch_addr = ring->scratch.gtt_offset + 128;
        int ret;

        ret = intel_ring_begin(ring, 6);
        if (ret)
                return ret;

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
        intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
                        PIPE_CONTROL_STALL_AT_SCOREBOARD);
        intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
        intel_ring_emit(ring, 0); /* low dword */
        intel_ring_emit(ring, 0); /* high dword */
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);

        ret = intel_ring_begin(ring, 6);
        if (ret)
                return ret;

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
        intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
        intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);

        return 0;
}

static int
gen6_render_ring_flush(struct intel_ring_buffer *ring,
                       u32 invalidate_domains, u32 flush_domains)
{
        u32 flags = 0;
        u32 scratch_addr = ring->scratch.gtt_offset + 128;
        int ret;

        /* Force SNB workarounds for PIPE_CONTROL flushes */
        ret = intel_emit_post_sync_nonzero_flush(ring);
        if (ret)
                return ret;

        /* Just flush everything. Experiments have shown that reducing the
         * number of bits based on the write domains has little performance
         * impact.
         */
        if (flush_domains) {
                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
                /*
                 * Ensure that any following seqno writes only happen
                 * when the render cache is indeed flushed.
                 */
                flags |= PIPE_CONTROL_CS_STALL;
        }
        if (invalidate_domains) {
                flags |= PIPE_CONTROL_TLB_INVALIDATE;
                flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
                /*
                 * TLB invalidate requires a post-sync write.
                 */
                flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
        }

        ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
        intel_ring_emit(ring, flags);
        intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
        intel_ring_emit(ring, 0);
        intel_ring_advance(ring);

        return 0;
}

static int
gen7_render_ring_cs_stall_wa(struct intel_ring_buffer *ring)
{
        int ret;

        ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
        intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
                        PIPE_CONTROL_STALL_AT_SCOREBOARD);
        intel_ring_emit(ring, 0);
        intel_ring_emit(ring, 0);
        intel_ring_advance(ring);

        return 0;
}

static int gen7_ring_fbc_flush(struct intel_ring_buffer *ring, u32 value)
{
        int ret;

        if (!ring->fbc_dirty)
                return 0;

        ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;
        intel_ring_emit(ring, MI_NOOP);
        /* WaFbcNukeOn3DBlt:ivb/hsw */
        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
        intel_ring_emit(ring, MSG_FBC_REND_STATE);
        intel_ring_emit(ring, value);
        intel_ring_advance(ring);

        ring->fbc_dirty = false;
        return 0;
}

static int
gen7_render_ring_flush(struct intel_ring_buffer *ring,
                       u32 invalidate_domains, u32 flush_domains)
{
        u32 flags = 0;
        u32 scratch_addr = ring->scratch.gtt_offset + 128;
        int ret;

        /*
         * Ensure that any following seqno writes only happen when the render
         * cache is indeed flushed.
         *
         * Workaround: 4th PIPE_CONTROL command (except the ones with only
         * read-cache invalidate bits set) must have the CS_STALL bit set. We
         * don't try to be clever and just set it unconditionally.
         */
        flags |= PIPE_CONTROL_CS_STALL;

        /* Just flush everything. Experiments have shown that reducing the
         * number of bits based on the write domains has little performance
         * impact.
         */
        if (flush_domains) {
                flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
                flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
        }
        if (invalidate_domains) {
                flags |= PIPE_CONTROL_TLB_INVALIDATE;
                flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
                flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
                /*
                 * TLB invalidate requires a post-sync write.
                 */
                flags |= PIPE_CONTROL_QW_WRITE;
                flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;

                /* Workaround: we must issue a pipe_control with CS-stall bit
                 * set before a pipe_control command that has the state cache
                 * invalidate bit set. */
                gen7_render_ring_cs_stall_wa(ring);
        }

        ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
        intel_ring_emit(ring, flags);
        intel_ring_emit(ring, scratch_addr);
        intel_ring_emit(ring, 0);
        intel_ring_advance(ring);

        if (flush_domains)
                return gen7_ring_fbc_flush(ring, FBC_REND_NUKE);

        return 0;
}

static void ring_write_tail(struct intel_ring_buffer *ring,
                            u32 value)
{
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        I915_WRITE_TAIL(ring, value);
}

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
                        RING_ACTHD(ring->mmio_base) : ACTHD;

        return I915_READ(acthd_reg);
}

static void ring_setup_phys_status_page(struct intel_ring_buffer *ring)
{
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        u32 addr;

        addr = dev_priv->status_page_dmah->busaddr;
        if (INTEL_INFO(ring->dev)->gen >= 4)
                addr |= (dev_priv->status_page_dmah->busaddr >> 28) & 0xf0;
        I915_WRITE(HWS_PGA, addr);
}
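
/*
 * Common hardware bring-up for all engines: set up the status page,
 * stop the ring, force head and tail to zero (G45 is known not to
 * reset head on its own, see below), program the start address and
 * control register, and verify that the ring comes back as valid.
 */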
static int init_ring_common(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj = ring->obj;
        int ret = 0;
        u32 head;

        if (HAS_FORCE_WAKE(dev))
                gen6_gt_force_wake_get(dev_priv);

        if (I915_NEED_GFX_HWS(dev))
                intel_ring_setup_status_page(ring);
        else
                ring_setup_phys_status_page(ring);

        /* Stop the ring if it's running. */
        I915_WRITE_CTL(ring, 0);
        I915_WRITE_HEAD(ring, 0);
        ring->write_tail(ring, 0);

        head = I915_READ_HEAD(ring) & HEAD_ADDR;

        /* G45 ring initialization fails to reset head to zero */
        if (head != 0) {
                DRM_DEBUG_KMS("%s head not reset to zero "
                              "ctl %08x head %08x tail %08x start %08x\n",
                              ring->name,
                              I915_READ_CTL(ring),
                              I915_READ_HEAD(ring),
                              I915_READ_TAIL(ring),
                              I915_READ_START(ring));

                I915_WRITE_HEAD(ring, 0);

                if (I915_READ_HEAD(ring) & HEAD_ADDR) {
                        DRM_ERROR("failed to set %s head to zero "
                                  "ctl %08x head %08x tail %08x start %08x\n",
                                  ring->name,
                                  I915_READ_CTL(ring),
                                  I915_READ_HEAD(ring),
                                  I915_READ_TAIL(ring),
                                  I915_READ_START(ring));
                }
        }

        /* Initialize the ring. This must happen _after_ we've cleared the ring
         * registers with the above sequence (the readback of the HEAD registers
         * also enforces ordering), otherwise the hw might lose the new ring
         * register values. */
        I915_WRITE_START(ring, i915_gem_obj_ggtt_offset(obj));
        I915_WRITE_CTL(ring,
                       ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
                       | RING_VALID);

        /* If the head is still not zero, the ring is dead */
        if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
                     I915_READ_START(ring) == i915_gem_obj_ggtt_offset(obj) &&
                     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
                DRM_ERROR("%s initialization failed "
                          "ctl %08x head %08x tail %08x start %08x\n",
                          ring->name,
                          I915_READ_CTL(ring),
                          I915_READ_HEAD(ring),
                          I915_READ_TAIL(ring),
                          I915_READ_START(ring));
                ret = -EIO;
                goto out;
        }

        if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
                i915_kernel_lost_context(ring->dev);
        else {
                ring->head = I915_READ_HEAD(ring);
                ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
                ring->space = ring_space(ring);
                ring->last_retired_head = -1;
        }

        memset(&ring->hangcheck, 0, sizeof(ring->hangcheck));

out:
        if (HAS_FORCE_WAKE(dev))
                gen6_gt_force_wake_put(dev_priv);

        return ret;
}
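
/*
 * Allocate and pin a 4KiB scratch page in the GGTT for PIPE_CONTROL
 * post-sync writes; on Ironlake the current seqno is also read back
 * from this page (see pc_render_get_seqno() below).
 */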
static int
init_pipe_control(struct intel_ring_buffer *ring)
{
        int ret;

        if (ring->scratch.obj)
                return 0;

        ring->scratch.obj = i915_gem_alloc_object(ring->dev, 4096);
        if (ring->scratch.obj == NULL) {
                DRM_ERROR("Failed to allocate seqno page\n");
                ret = -ENOMEM;
                goto err;
        }

        i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);

        ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false);
        if (ret)
                goto err_unref;

        ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(ring->scratch.obj);
        ring->scratch.cpu_page = kmap(sg_page(ring->scratch.obj->pages->sgl));
        if (ring->scratch.cpu_page == NULL) {
                ret = -ENOMEM;
                goto err_unpin;
        }

        DRM_DEBUG_DRIVER("%s pipe control offset: 0x%08x\n",
                         ring->name, ring->scratch.gtt_offset);
        return 0;

err_unpin:
        i915_gem_object_unpin(ring->scratch.obj);
err_unref:
        drm_gem_object_unreference(&ring->scratch.obj->base);
err:
        return ret;
}

static int init_render_ring(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret = init_ring_common(ring);

        if (INTEL_INFO(dev)->gen > 3)
                I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));

        /* We need to disable the AsyncFlip performance optimisations in order
         * to use MI_WAIT_FOR_EVENT within the CS. It should already be
         * programmed to '1' on all products.
         *
         * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
         */
        if (INTEL_INFO(dev)->gen >= 6)
                I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));

        /* Required for the hardware to program scanline values for waiting */
        if (INTEL_INFO(dev)->gen == 6)
                I915_WRITE(GFX_MODE,
                           _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_ALWAYS));

        if (IS_GEN7(dev))
                I915_WRITE(GFX_MODE_GEN7,
                           _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
                           _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));

        if (INTEL_INFO(dev)->gen >= 5) {
                ret = init_pipe_control(ring);
                if (ret)
                        return ret;
        }

        if (IS_GEN6(dev)) {
                /* From the Sandybridge PRM, volume 1 part 3, page 24:
                 * "If this bit is set, STCunit will have LRA as replacement
                 *  policy. [...] This bit must be reset. LRA replacement
                 *  policy is not supported."
                 */
                I915_WRITE(CACHE_MODE_0,
                           _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));

                /* This is not explicitly set for GEN6, so read the register.
                 * see intel_ring_mi_set_context() for why we care.
                 * TODO: consider explicitly setting the bit for GEN5
                 */
                ring->itlb_before_ctx_switch =
                        !!(I915_READ(GFX_MODE) & GFX_TLB_INVALIDATE_ALWAYS);
        }

        if (INTEL_INFO(dev)->gen >= 6)
                I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

        if (HAS_L3_GPU_CACHE(dev))
                I915_WRITE_IMR(ring, ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);

        return ret;
}

static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;

        if (ring->scratch.obj == NULL)
                return;

        if (INTEL_INFO(dev)->gen >= 5) {
                kunmap(sg_page(ring->scratch.obj->pages->sgl));
                i915_gem_object_unpin(ring->scratch.obj);
        }

        drm_gem_object_unreference(&ring->scratch.obj->base);
        ring->scratch.obj = NULL;
}

static void
update_mboxes(struct intel_ring_buffer *ring,
              u32 mmio_offset)
{
/* NB: In order to be able to do semaphore MBOX updates for varying number
 * of rings, it's easiest if we round up each individual update to a
 * multiple of 2 (since ring updates must always be a multiple of 2)
 * even though the actual update only requires 3 dwords.
 */
#define MBOX_UPDATE_DWORDS 4
        intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
        intel_ring_emit(ring, mmio_offset);
        intel_ring_emit(ring, ring->outstanding_lazy_seqno);
        intel_ring_emit(ring, MI_NOOP);
}

/**
 * gen6_add_request - Update the semaphore mailbox registers
 *
 * @ring - ring that is adding a request
 * @seqno - return seqno stuck into the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static int
gen6_add_request(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_ring_buffer *useless;
        int i, ret;

        ret = intel_ring_begin(ring, ((I915_NUM_RINGS-1) *
                                      MBOX_UPDATE_DWORDS) +
                                      4);
        if (ret)
                return ret;
#undef MBOX_UPDATE_DWORDS

        for_each_ring(useless, dev_priv, i) {
                u32 mbox_reg = ring->signal_mbox[i];
                if (mbox_reg != GEN6_NOSYNC)
                        update_mboxes(ring, mbox_reg);
        }

        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        intel_ring_emit(ring, ring->outstanding_lazy_seqno);
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        __intel_ring_advance(ring);

        return 0;
}

static inline bool i915_gem_has_seqno_wrapped(struct drm_device *dev,
                                              u32 seqno)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        return dev_priv->last_seqno < seqno;
}

/**
 * gen6_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter - ring that is waiting
 * @signaller - ring which has, or will signal
 * @seqno - seqno which the waiter will block on
 */
static int
gen6_ring_sync(struct intel_ring_buffer *waiter,
               struct intel_ring_buffer *signaller,
               u32 seqno)
{
        int ret;
        u32 dw1 = MI_SEMAPHORE_MBOX |
                  MI_SEMAPHORE_COMPARE |
                  MI_SEMAPHORE_REGISTER;

        /* Throughout all of the GEM code, seqno passed implies our current
         * seqno is >= the last seqno executed. However for hardware the
         * comparison is strictly greater than.
         */
        seqno -= 1;

        WARN_ON(signaller->semaphore_register[waiter->id] ==
                MI_SEMAPHORE_SYNC_INVALID);

        ret = intel_ring_begin(waiter, 4);
        if (ret)
                return ret;

        /* If seqno wrap happened, omit the wait with no-ops */
        if (likely(!i915_gem_has_seqno_wrapped(waiter->dev, seqno))) {
                intel_ring_emit(waiter,
                                dw1 |
                                signaller->semaphore_register[waiter->id]);
                intel_ring_emit(waiter, seqno);
                intel_ring_emit(waiter, 0);
                intel_ring_emit(waiter, MI_NOOP);
        } else {
                intel_ring_emit(waiter, MI_NOOP);
                intel_ring_emit(waiter, MI_NOOP);
                intel_ring_emit(waiter, MI_NOOP);
                intel_ring_emit(waiter, MI_NOOP);
        }
        intel_ring_advance(waiter);

        return 0;
}

#define PIPE_CONTROL_FLUSH(ring__, addr__) \
do { \
        intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE | \
                        PIPE_CONTROL_DEPTH_STALL); \
        intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
        intel_ring_emit(ring__, 0); \
        intel_ring_emit(ring__, 0); \
} while (0)

static int
pc_render_add_request(struct intel_ring_buffer *ring)
{
        u32 scratch_addr = ring->scratch.gtt_offset + 128;
        int ret;

        /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
         * incoherent with writes to memory, i.e. completely fubar,
         * so we need to use PIPE_NOTIFY instead.
         *
         * However, we also need to work around the qword write
         * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
         * memory before requesting an interrupt.
         */
        ret = intel_ring_begin(ring, 32);
        if (ret)
                return ret;

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
                        PIPE_CONTROL_WRITE_FLUSH |
                        PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
        intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
        intel_ring_emit(ring, ring->outstanding_lazy_seqno);
        intel_ring_emit(ring, 0);
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128; /* write to separate cachelines */
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128;
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128;
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128;
        PIPE_CONTROL_FLUSH(ring, scratch_addr);
        scratch_addr += 128;
        PIPE_CONTROL_FLUSH(ring, scratch_addr);

        intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
                        PIPE_CONTROL_WRITE_FLUSH |
                        PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
                        PIPE_CONTROL_NOTIFY);
        intel_ring_emit(ring, ring->scratch.gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
        intel_ring_emit(ring, ring->outstanding_lazy_seqno);
        intel_ring_emit(ring, 0);
        __intel_ring_advance(ring);

        return 0;
}

static u32
gen6_ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
        /* Workaround to force correct ordering between irq and seqno writes on
         * ivb (and maybe also on snb) by reading from a CS register (like
         * ACTHD) before reading the status page. */
        if (!lazy_coherency)
                intel_ring_get_active_head(ring);
        return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
ring_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
        return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void
ring_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
        intel_write_status_page(ring, I915_GEM_HWS_INDEX, seqno);
}

static u32
pc_render_get_seqno(struct intel_ring_buffer *ring, bool lazy_coherency)
{
        return ring->scratch.cpu_page[0];
}

static void
pc_render_set_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
        ring->scratch.cpu_page[0] = seqno;
}
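
/*
 * Ring interrupts are reference counted under dev_priv->irq_lock: the
 * first irq_get unmasks the ring's interrupt and the final irq_put
 * masks it again. The variants below differ only in which mask
 * register each generation uses (the ilk GT helpers on gen5, the
 * 32-bit IMR on i9xx, and the 16-bit IMR on i8xx).
 */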
static bool
gen5_ring_get_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long flags;

        if (!dev->irq_enabled)
                return false;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (ring->irq_refcount++ == 0)
                ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        return true;
}

static void
gen5_ring_put_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long flags;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (--ring->irq_refcount == 0)
                ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
i9xx_ring_get_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long flags;

        if (!dev->irq_enabled)
                return false;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (ring->irq_refcount++ == 0) {
                dev_priv->irq_mask &= ~ring->irq_enable_mask;
                I915_WRITE(IMR, dev_priv->irq_mask);
                POSTING_READ(IMR);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        return true;
}

static void
i9xx_ring_put_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long flags;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (--ring->irq_refcount == 0) {
                dev_priv->irq_mask |= ring->irq_enable_mask;
                I915_WRITE(IMR, dev_priv->irq_mask);
                POSTING_READ(IMR);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
i8xx_ring_get_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long flags;

        if (!dev->irq_enabled)
                return false;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (ring->irq_refcount++ == 0) {
                dev_priv->irq_mask &= ~ring->irq_enable_mask;
                I915_WRITE16(IMR, dev_priv->irq_mask);
                POSTING_READ16(IMR);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        return true;
}

static void
i8xx_ring_put_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long flags;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (--ring->irq_refcount == 0) {
                dev_priv->irq_mask |= ring->irq_enable_mask;
                I915_WRITE16(IMR, dev_priv->irq_mask);
                POSTING_READ16(IMR);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        u32 mmio = 0;

        /* The ring status page addresses are no longer next to the rest of
         * the ring registers as of gen7.
         */
        if (IS_GEN7(dev)) {
                switch (ring->id) {
                case RCS:
                        mmio = RENDER_HWS_PGA_GEN7;
                        break;
                case BCS:
                        mmio = BLT_HWS_PGA_GEN7;
                        break;
                case VCS:
                        mmio = BSD_HWS_PGA_GEN7;
                        break;
                case VECS:
                        mmio = VEBOX_HWS_PGA_GEN7;
                        break;
                }
        } else if (IS_GEN6(ring->dev)) {
                mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
        } else {
                mmio = RING_HWS_PGA(ring->mmio_base);
        }

        I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
        POSTING_READ(mmio);

        /* Flush the TLB for this page */
        if (INTEL_INFO(dev)->gen >= 6) {
                u32 reg = RING_INSTPM(ring->mmio_base);
                I915_WRITE(reg,
                           _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
                                              INSTPM_SYNC_FLUSH));
                if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
                             1000))
                        DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
                                  ring->name);
        }
}

static int
bsd_ring_flush(struct intel_ring_buffer *ring,
               u32 invalidate_domains,
               u32 flush_domains)
{
        int ret;

        ret = intel_ring_begin(ring, 2);
        if (ret)
                return ret;

        intel_ring_emit(ring, MI_FLUSH);
        intel_ring_emit(ring, MI_NOOP);
        intel_ring_advance(ring);
        return 0;
}

static int
i9xx_add_request(struct intel_ring_buffer *ring)
{
        int ret;

        ret = intel_ring_begin(ring, 4);
        if (ret)
                return ret;

        intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        intel_ring_emit(ring, ring->outstanding_lazy_seqno);
        intel_ring_emit(ring, MI_USER_INTERRUPT);
        __intel_ring_advance(ring);

        return 0;
}

static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long flags;

        if (!dev->irq_enabled)
                return false;

        /* It looks like we need to prevent the gt from suspending while waiting
         * for a notify irq, otherwise irqs seem to get lost on at least the
         * blt/bsd rings on ivb. */
        gen6_gt_force_wake_get(dev_priv);

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (ring->irq_refcount++ == 0) {
                if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
                        I915_WRITE_IMR(ring,
                                       ~(ring->irq_enable_mask |
                                         GT_RENDER_L3_PARITY_ERROR_INTERRUPT));
                else
                        I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
                ilk_enable_gt_irq(dev_priv, ring->irq_enable_mask);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        return true;
}

static void
gen6_ring_put_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long flags;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (--ring->irq_refcount == 0) {
                if (HAS_L3_GPU_CACHE(dev) && ring->id == RCS)
                        I915_WRITE_IMR(ring,
                                       ~GT_RENDER_L3_PARITY_ERROR_INTERRUPT);
                else
                        I915_WRITE_IMR(ring, ~0);
                ilk_disable_gt_irq(dev_priv, ring->irq_enable_mask);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        gen6_gt_force_wake_put(dev_priv);
}

static bool
hsw_vebox_get_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;

        if (!dev->irq_enabled)
                return false;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (ring->irq_refcount++ == 0) {
                I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
                snb_enable_pm_irq(dev_priv, ring->irq_enable_mask);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

        return true;
}

static void
hsw_vebox_put_irq(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long flags;

        if (!dev->irq_enabled)
                return;

        spin_lock_irqsave(&dev_priv->irq_lock, flags);
        if (--ring->irq_refcount == 0) {
                I915_WRITE_IMR(ring, ~0);
                snb_disable_pm_irq(dev_priv, ring->irq_enable_mask);
        }
        spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static int
i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
                         u32 offset, u32 length,
                         unsigned flags)
{
        int ret;

        ret = intel_ring_begin(ring, 2);
        if (ret)
                return ret;

        intel_ring_emit(ring,
                        MI_BATCH_BUFFER_START |
                        MI_BATCH_GTT |
                        (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
        intel_ring_emit(ring, offset);
        intel_ring_advance(ring);

        return 0;
}

/* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
#define I830_BATCH_LIMIT (256*1024)
static int
i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
                         u32 offset, u32 len,
                         unsigned flags)
{
        int ret;

        if (flags & I915_DISPATCH_PINNED) {
                ret = intel_ring_begin(ring, 4);
                if (ret)
                        return ret;

                intel_ring_emit(ring, MI_BATCH_BUFFER);
                intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
                intel_ring_emit(ring, offset + len - 8);
                intel_ring_emit(ring, MI_NOOP);
                intel_ring_advance(ring);
        } else {
                u32 cs_offset = ring->scratch.gtt_offset;

                if (len > I830_BATCH_LIMIT)
                        return -ENOSPC;

                ret = intel_ring_begin(ring, 9+3);
                if (ret)
                        return ret;
                /* Blit the batch (which now has all relocs applied) to the
                 * stable batch scratch bo area (so that the CS never stumbles
                 * over its tlb invalidation bug) ... */
                intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
                                XY_SRC_COPY_BLT_WRITE_ALPHA |
                                XY_SRC_COPY_BLT_WRITE_RGB);
                intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
                intel_ring_emit(ring, 0);
                intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
                intel_ring_emit(ring, cs_offset);
                intel_ring_emit(ring, 0);
                intel_ring_emit(ring, 4096);
                intel_ring_emit(ring, offset);
                intel_ring_emit(ring, MI_FLUSH);

                /* ... and execute it. */
                intel_ring_emit(ring, MI_BATCH_BUFFER);
                intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
                intel_ring_emit(ring, cs_offset + len - 8);
                intel_ring_advance(ring);
        }

        return 0;
}

static int
i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
                         u32 offset, u32 len,
                         unsigned flags)
{
        int ret;

        ret = intel_ring_begin(ring, 2);
        if (ret)
                return ret;

        intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
        intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
        intel_ring_advance(ring);

        return 0;
}

static void cleanup_status_page(struct intel_ring_buffer *ring)
{
        struct drm_i915_gem_object *obj;

        obj = ring->status_page.obj;
        if (obj == NULL)
                return;

        kunmap(sg_page(obj->pages->sgl));
        i915_gem_object_unpin(obj);
        drm_gem_object_unreference(&obj->base);
        ring->status_page.obj = NULL;
}
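
/*
 * Allocate, pin and kmap the 4KiB hardware status page that the GPU
 * writes seqnos (and other coherent data) into.
 */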
static int init_status_page(struct intel_ring_buffer *ring)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_gem_object *obj;
        int ret;

        obj = i915_gem_alloc_object(dev, 4096);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate status page\n");
                ret = -ENOMEM;
                goto err;
        }

        i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

        ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
        if (ret != 0)
                goto err_unref;

        ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
        ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
        if (ring->status_page.page_addr == NULL) {
                ret = -ENOMEM;
                goto err_unpin;
        }
        ring->status_page.obj = obj;
        memset(ring->status_page.page_addr, 0, PAGE_SIZE);

        DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
                         ring->name, ring->status_page.gfx_addr);

        return 0;

err_unpin:
        i915_gem_object_unpin(obj);
err_unref:
        drm_gem_object_unreference(&obj->base);
err:
        return ret;
}

static int init_phys_status_page(struct intel_ring_buffer *ring)
{
        struct drm_i915_private *dev_priv = ring->dev->dev_private;

        if (!dev_priv->status_page_dmah) {
                dev_priv->status_page_dmah =
                        drm_pci_alloc(ring->dev, PAGE_SIZE, PAGE_SIZE);
                if (!dev_priv->status_page_dmah)
                        return -ENOMEM;
        }

        ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
        memset(ring->status_page.page_addr, 0, PAGE_SIZE);

        return 0;
}
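
/*
 * Generation-independent ring setup: status page, backing object
 * (preferring stolen memory on machines without an LLC), GGTT pinning,
 * and a write-combined CPU mapping of the ring contents.
 */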
static int intel_init_ring_buffer(struct drm_device *dev,
                                  struct intel_ring_buffer *ring)
{
        struct drm_i915_gem_object *obj;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        ring->dev = dev;
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
        ring->size = 32 * PAGE_SIZE;
        memset(ring->sync_seqno, 0, sizeof(ring->sync_seqno));

        init_waitqueue_head(&ring->irq_queue);

        if (I915_NEED_GFX_HWS(dev)) {
                ret = init_status_page(ring);
                if (ret)
                        return ret;
        } else {
                BUG_ON(ring->id != RCS);
                ret = init_phys_status_page(ring);
                if (ret)
                        return ret;
        }

        obj = NULL;
        if (!HAS_LLC(dev))
                obj = i915_gem_object_create_stolen(dev, ring->size);
        if (obj == NULL)
                obj = i915_gem_alloc_object(dev, ring->size);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate ringbuffer\n");
                ret = -ENOMEM;
                goto err_hws;
        }

        ring->obj = obj;

        ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, true, false);
        if (ret)
                goto err_unref;

        ret = i915_gem_object_set_to_gtt_domain(obj, true);
        if (ret)
                goto err_unpin;

        ring->virtual_start =
                ioremap_wc(dev_priv->gtt.mappable_base + i915_gem_obj_ggtt_offset(obj),
                           ring->size);
        if (ring->virtual_start == NULL) {
                DRM_ERROR("Failed to map ringbuffer.\n");
                ret = -EINVAL;
                goto err_unpin;
        }

        ret = ring->init(ring);
        if (ret)
                goto err_unmap;

        /* Workaround an erratum on the i830 which causes a hang if
         * the TAIL pointer points to within the last 2 cachelines
         * of the buffer.
         */
        ring->effective_size = ring->size;
        if (IS_I830(ring->dev) || IS_845G(ring->dev))
                ring->effective_size -= 128;

        return 0;

err_unmap:
        iounmap(ring->virtual_start);
err_unpin:
        i915_gem_object_unpin(obj);
err_unref:
        drm_gem_object_unreference(&obj->base);
        ring->obj = NULL;
err_hws:
        cleanup_status_page(ring);
        return ret;
}

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
        struct drm_i915_private *dev_priv;
        int ret;

        if (ring->obj == NULL)
                return;

        /* Disable the ring buffer. The ring must be idle at this point */
        dev_priv = ring->dev->dev_private;
        ret = intel_ring_idle(ring);
        if (ret)
                DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
                          ring->name, ret);

        I915_WRITE_CTL(ring, 0);

        iounmap(ring->virtual_start);

        i915_gem_object_unpin(ring->obj);
        drm_gem_object_unreference(&ring->obj->base);
        ring->obj = NULL;

        if (ring->cleanup)
                ring->cleanup(ring);

        cleanup_status_page(ring);
}

static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
        int ret;

        ret = i915_wait_seqno(ring, seqno);
        if (!ret)
                i915_gem_retire_requests_ring(ring);

        return ret;
}
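
/*
 * Try to free up ring space by retiring completed requests: walk the
 * request list for the first request whose retirement would release at
 * least @n bytes, and wait for its seqno.
 */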
static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
{
        struct drm_i915_gem_request *request;
        u32 seqno = 0;
        int ret;

        i915_gem_retire_requests_ring(ring);

        if (ring->last_retired_head != -1) {
                ring->head = ring->last_retired_head;
                ring->last_retired_head = -1;
                ring->space = ring_space(ring);
                if (ring->space >= n)
                        return 0;
        }

        list_for_each_entry(request, &ring->request_list, list) {
                int space;

                if (request->tail == -1)
                        continue;

                space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
                if (space < 0)
                        space += ring->size;
                if (space >= n) {
                        seqno = request->seqno;
                        break;
                }

                /* Consume this request in case we need more space than
                 * is available and so need to prevent a race between
                 * updating last_retired_head and direct reads of
                 * I915_RING_HEAD. It also provides a nice sanity check.
                 */
                request->tail = -1;
        }

        if (seqno == 0)
                return -ENOSPC;

        ret = intel_ring_wait_seqno(ring, seqno);
        if (ret)
                return ret;

        if (WARN_ON(ring->last_retired_head == -1))
                return -ENOSPC;

        ring->head = ring->last_retired_head;
        ring->last_retired_head = -1;
        ring->space = ring_space(ring);
        if (WARN_ON(ring->space < n))
                return -ENOSPC;

        return 0;
}

static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
{
        struct drm_device *dev = ring->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long end;
        int ret;

        ret = intel_ring_wait_request(ring, n);
        if (ret != -ENOSPC)
                return ret;

        /* force the tail write in case we have been skipping them */
        __intel_ring_advance(ring);

        trace_i915_ring_wait_begin(ring);
        /* With GEM the hangcheck timer should kick us out of the loop,
         * leaving it early runs the risk of corrupting GEM state (due
         * to running on almost untested codepaths). But on resume
         * timers don't work yet, so prevent a complete hang in that
         * case by choosing an insanely large timeout. */
        end = jiffies + 60 * HZ;
        do {
                ring->head = I915_READ_HEAD(ring);
                ring->space = ring_space(ring);
                if (ring->space >= n) {
                        trace_i915_ring_wait_end(ring);
                        return 0;
                }

                if (dev->primary->master) {
                        struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
                        if (master_priv->sarea_priv)
                                master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
                }

                msleep(1);

                ret = i915_gem_check_wedge(&dev_priv->gpu_error,
                                           dev_priv->mm.interruptible);
                if (ret)
                        return ret;
        } while (!time_after(jiffies, end));
        trace_i915_ring_wait_end(ring);
        return -EBUSY;
}

static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
        uint32_t __iomem *virt;
        int rem = ring->size - ring->tail;

        if (ring->space < rem) {
                int ret = ring_wait_for_space(ring, rem);
                if (ret)
                        return ret;
        }

        virt = ring->virtual_start + ring->tail;
        rem /= 4;
        while (rem--)
                iowrite32(MI_NOOP, virt++);

        ring->tail = 0;
        ring->space = ring_space(ring);

        return 0;
}
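
/*
 * Drain the ring: flush the outstanding lazy request, if any, and then
 * wait for the seqno of the last request on the list.
 */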
int intel_ring_idle(struct intel_ring_buffer *ring)
{
        u32 seqno;
        int ret;

        /* We need to add any requests required to flush the objects and ring */
        if (ring->outstanding_lazy_seqno) {
                ret = i915_add_request(ring, NULL);
                if (ret)
                        return ret;
        }

        /* Wait upon the last request to be completed */
        if (list_empty(&ring->request_list))
                return 0;

        seqno = list_entry(ring->request_list.prev,
                           struct drm_i915_gem_request,
                           list)->seqno;

        return i915_wait_seqno(ring, seqno);
}

static int
intel_ring_alloc_seqno(struct intel_ring_buffer *ring)
{
        if (ring->outstanding_lazy_seqno)
                return 0;

        if (ring->preallocated_lazy_request == NULL) {
                struct drm_i915_gem_request *request;

                request = kmalloc(sizeof(*request), GFP_KERNEL);
                if (request == NULL)
                        return -ENOMEM;

                ring->preallocated_lazy_request = request;
        }

        return i915_gem_get_seqno(ring->dev, &ring->outstanding_lazy_seqno);
}
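
/*
 * Reserve @bytes of ring space, wrapping back to the start (padding
 * the remainder of the buffer with MI_NOOPs) when the allocation would
 * run past the effective size, and waiting for space to be retired if
 * the ring is full.
 */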
static int __intel_ring_begin(struct intel_ring_buffer *ring,
                              int bytes)
{
        int ret;

        if (unlikely(ring->tail + bytes > ring->effective_size)) {
                ret = intel_wrap_ring_buffer(ring);
                if (unlikely(ret))
                        return ret;
        }

        if (unlikely(ring->space < bytes)) {
                ret = ring_wait_for_space(ring, bytes);
                if (unlikely(ret))
                        return ret;
        }

        ring->space -= bytes;
        return 0;
}

int intel_ring_begin(struct intel_ring_buffer *ring,
                     int num_dwords)
{
        drm_i915_private_t *dev_priv = ring->dev->dev_private;
        int ret;

        ret = i915_gem_check_wedge(&dev_priv->gpu_error,
                                   dev_priv->mm.interruptible);
        if (ret)
                return ret;

        /* Preallocate the olr before touching the ring */
        ret = intel_ring_alloc_seqno(ring);
        if (ret)
                return ret;

        return __intel_ring_begin(ring, num_dwords * sizeof(uint32_t));
}
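
/*
 * Force the ring's bookkeeping to a specific seqno, e.g. after reset or
 * seqno wrap. The gen6+ semaphore sync registers are cleared so that a
 * stale mailbox value cannot satisfy a later semaphore wait.
 */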
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
        struct drm_i915_private *dev_priv = ring->dev->dev_private;

        BUG_ON(ring->outstanding_lazy_seqno);

        if (INTEL_INFO(ring->dev)->gen >= 6) {
                I915_WRITE(RING_SYNC_0(ring->mmio_base), 0);
                I915_WRITE(RING_SYNC_1(ring->mmio_base), 0);
                if (HAS_VEBOX(ring->dev))
                        I915_WRITE(RING_SYNC_2(ring->mmio_base), 0);
        }

        ring->set_seqno(ring, seqno);
        ring->hangcheck.seqno = seqno;
}

static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
                                     u32 value)
{
        drm_i915_private_t *dev_priv = ring->dev->dev_private;

        /* Every tail move must follow the sequence below */

        /* Disable notification that the ring is IDLE. The GT
         * will then assume that it is busy and bring it out of rc6.
         */
        I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
                   _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));

        /* Clear the context id. Here be magic! */
        I915_WRITE64(GEN6_BSD_RNCID, 0x0);

        /* Wait for the ring not to be idle, i.e. for it to wake up. */
        if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
                      GEN6_BSD_SLEEP_INDICATOR) == 0,
                     50))
                DRM_ERROR("timed out waiting for the BSD ring to wake up\n");

        /* Now that the ring is fully powered up, update the tail */
        I915_WRITE_TAIL(ring, value);
        POSTING_READ(RING_TAIL(ring->mmio_base));

        /* Let the ring send IDLE messages to the GT again,
         * and so let it sleep to conserve power when idle.
         */
        I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
                   _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
}
static int gen6_bsd_ring_flush(struct intel_ring_buffer *ring,
			       u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	/*
	 * Bspec vol 1c.5 - video engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (invalidate & I915_GEM_GPU_DOMAINS)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD |
			MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
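	/* MI_FLUSH_DW layout: command, post-sync address (the GGTT scratch
	 * page in the hardware status page), post-sync data, and an MI_NOOP
	 * to pad to an even number of dwords.
	 */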
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}
static int
hsw_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			     u32 offset, u32 len,
			     unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START | MI_BATCH_PPGTT_HSW |
			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_HSW));
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			      u32 offset, u32 len,
			      unsigned flags)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			(flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965));
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}
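/*
 * The two dispatch variants above differ only in the MI_BATCH_BUFFER_START
 * flags: Haswell selects the PPGTT address space explicitly and has its
 * own encoding of the non-secure bit, while gen6/gen7 reuse the i965
 * non-secure bit.
 */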
/* Blitter support (SandyBridge+) */

static int gen6_ring_flush(struct intel_ring_buffer *ring,
			   u32 invalidate, u32 flush)
{
	struct drm_device *dev = ring->dev;
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	/*
	 * Bspec vol 1c.3 - blitter engine command streamer:
	 * "If ENABLED, all TLBs will be invalidated once the flush
	 * operation is complete. This bit is only valid when the
	 * Post-Sync Operation field is a value of 1h or 3h."
	 */
	if (invalidate & I915_GEM_DOMAIN_RENDER)
		cmd |= MI_INVALIDATE_TLB | MI_FLUSH_DW_STORE_INDEX |
			MI_FLUSH_DW_OP_STOREDW;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
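	/* On gen7 a flush on the blitter is also chased with an FBC
	 * render-cache clean request.
	 */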
	if (IS_GEN7(dev) && flush)
		return gen7_ring_fbc_flush(ring, FBC_REND_CACHE_CLEAN);

	return 0;
}
int intel_init_render_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;

	if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
		ring->flush = gen7_render_ring_flush;
		if (INTEL_INFO(dev)->gen == 6)
			ring->flush = gen6_render_ring_flush;
		ring->irq_get = gen6_ring_get_irq;
		ring->irq_put = gen6_ring_put_irq;
		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		ring->sync_to = gen6_ring_sync;
		ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_INVALID;
		ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_RV;
		ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_RB;
		ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_RVE;
		ring->signal_mbox[RCS] = GEN6_NOSYNC;
		ring->signal_mbox[VCS] = GEN6_VRSYNC;
		ring->signal_mbox[BCS] = GEN6_BRSYNC;
		ring->signal_mbox[VECS] = GEN6_VERSYNC;
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->flush = gen4_render_ring_flush;
		ring->get_seqno = pc_render_get_seqno;
		ring->set_seqno = pc_render_set_seqno;
		ring->irq_get = gen5_ring_get_irq;
		ring->irq_put = gen5_ring_put_irq;
		ring->irq_enable_mask = GT_RENDER_USER_INTERRUPT |
					GT_RENDER_PIPECTL_NOTIFY_INTERRUPT;
	} else {
		ring->add_request = i9xx_add_request;
		if (INTEL_INFO(dev)->gen < 4)
			ring->flush = gen2_render_ring_flush;
		else
			ring->flush = gen4_render_ring_flush;
		ring->get_seqno = ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (IS_GEN2(dev)) {
			ring->irq_get = i8xx_ring_get_irq;
			ring->irq_put = i8xx_ring_put_irq;
		} else {
			ring->irq_get = i9xx_ring_get_irq;
			ring->irq_put = i9xx_ring_put_irq;
		}
		ring->irq_enable_mask = I915_USER_INTERRUPT;
	}
	ring->write_tail = ring_write_tail;
	if (IS_HASWELL(dev))
		ring->dispatch_execbuffer = hsw_ring_dispatch_execbuffer;
	else if (INTEL_INFO(dev)->gen >= 6)
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	else if (INTEL_INFO(dev)->gen >= 4)
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	else if (IS_I830(dev) || IS_845G(dev))
		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
	else
		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
	ring->init = init_render_ring;
	ring->cleanup = render_ring_cleanup;

	/* Workaround batchbuffer to combat CS tlb bug. */
	if (HAS_BROKEN_CS_TLB(dev)) {
		struct drm_i915_gem_object *obj;
		int ret;

		obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
		if (obj == NULL) {
			DRM_ERROR("Failed to allocate batch bo\n");
			return -ENOMEM;
		}

		ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
		if (ret != 0) {
			drm_gem_object_unreference(&obj->base);
			DRM_ERROR("Failed to pin batch bo\n");
			return ret;
		}

		ring->scratch.obj = obj;
		ring->scratch.gtt_offset = i915_gem_obj_ggtt_offset(obj);
	}

	return intel_init_ring_buffer(dev, ring);
}
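/*
 * Legacy (non-KMS/DRI1) setup for the render ring: the ring memory is
 * handed in by userspace rather than allocated through GEM, so the common
 * intel_init_ring_buffer() path is not used.
 */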
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
	int ret;

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;

	if (INTEL_INFO(dev)->gen >= 6) {
		/* non-kms not supported on gen6+ */
		return -ENODEV;
	}

	/* Note: gem is not supported on gen5/ilk without kms (the corresponding
	 * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
	 * the special gen5 functions. */
	ring->add_request = i9xx_add_request;
	if (INTEL_INFO(dev)->gen < 4)
		ring->flush = gen2_render_ring_flush;
	else
		ring->flush = gen4_render_ring_flush;
	ring->get_seqno = ring_get_seqno;
	ring->set_seqno = ring_set_seqno;
	if (IS_GEN2(dev)) {
		ring->irq_get = i8xx_ring_get_irq;
		ring->irq_put = i8xx_ring_put_irq;
	} else {
		ring->irq_get = i9xx_ring_get_irq;
		ring->irq_put = i9xx_ring_put_irq;
	}
	ring->irq_enable_mask = I915_USER_INTERRUPT;
	ring->write_tail = ring_write_tail;
	if (INTEL_INFO(dev)->gen >= 4)
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	else if (IS_I830(dev) || IS_845G(dev))
		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
	else
		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
	ring->init = init_render_ring;
	ring->cleanup = render_ring_cleanup;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);

	ring->size = size;
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev) || IS_845G(ring->dev))
		ring->effective_size -= 128;

	ring->virtual_start = ioremap_wc(start, size);
	if (ring->virtual_start == NULL) {
		DRM_ERROR("cannot ioremap virtual address for ring buffer\n");
		return -ENOMEM;
	}

	if (!I915_NEED_GFX_HWS(dev)) {
		ret = init_phys_status_page(ring);
		if (ret)
			return ret;
	}

	return 0;
}
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[VCS];

	ring->name = "bsd ring";
	ring->id = VCS;

	ring->write_tail = ring_write_tail;
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		ring->mmio_base = GEN6_BSD_RING_BASE;
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN6(dev))
			ring->write_tail = gen6_bsd_ring_write_tail;
		ring->flush = gen6_bsd_ring_flush;
		ring->add_request = gen6_add_request;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
		ring->irq_get = gen6_ring_get_irq;
		ring->irq_put = gen6_ring_put_irq;
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
		ring->sync_to = gen6_ring_sync;
		ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VR;
		ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_INVALID;
		ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VB;
		ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_VVE;
		ring->signal_mbox[RCS] = GEN6_RVSYNC;
		ring->signal_mbox[VCS] = GEN6_NOSYNC;
		ring->signal_mbox[BCS] = GEN6_BVSYNC;
		ring->signal_mbox[VECS] = GEN6_VEVSYNC;
	} else {
		ring->mmio_base = BSD_RING_BASE;
		ring->flush = bsd_ring_flush;
		ring->add_request = i9xx_add_request;
		ring->get_seqno = ring_get_seqno;
		ring->set_seqno = ring_set_seqno;
		if (IS_GEN5(dev)) {
			ring->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
			ring->irq_get = gen5_ring_get_irq;
			ring->irq_put = gen5_ring_put_irq;
		} else {
			ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
			ring->irq_get = i9xx_ring_get_irq;
			ring->irq_put = i9xx_ring_put_irq;
		}
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	}
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}
int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];

	ring->name = "blitter ring";
	ring->id = BCS;

	ring->mmio_base = BLT_RING_BASE;
	ring->write_tail = ring_write_tail;
	ring->flush = gen6_ring_flush;
	ring->add_request = gen6_add_request;
	ring->get_seqno = gen6_ring_get_seqno;
	ring->set_seqno = ring_set_seqno;
	ring->irq_enable_mask = GT_BLT_USER_INTERRUPT;
	ring->irq_get = gen6_ring_get_irq;
	ring->irq_put = gen6_ring_put_irq;
	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	ring->sync_to = gen6_ring_sync;
	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_BR;
	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_BV;
	ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_INVALID;
	ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_BVE;
	ring->signal_mbox[RCS] = GEN6_RBSYNC;
	ring->signal_mbox[VCS] = GEN6_VBSYNC;
	ring->signal_mbox[BCS] = GEN6_NOSYNC;
	ring->signal_mbox[VECS] = GEN6_VEBSYNC;
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}
int intel_init_vebox_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[VECS];

	ring->name = "video enhancement ring";
	ring->id = VECS;

	ring->mmio_base = VEBOX_RING_BASE;
	ring->write_tail = ring_write_tail;
	ring->flush = gen6_ring_flush;
	ring->add_request = gen6_add_request;
	ring->get_seqno = gen6_ring_get_seqno;
	ring->set_seqno = ring_set_seqno;
	ring->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
	ring->irq_get = hsw_vebox_get_irq;
	ring->irq_put = hsw_vebox_put_irq;
	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	ring->sync_to = gen6_ring_sync;
	ring->semaphore_register[RCS] = MI_SEMAPHORE_SYNC_VER;
	ring->semaphore_register[VCS] = MI_SEMAPHORE_SYNC_VEV;
	ring->semaphore_register[BCS] = MI_SEMAPHORE_SYNC_VEB;
	ring->semaphore_register[VECS] = MI_SEMAPHORE_SYNC_INVALID;
	ring->signal_mbox[RCS] = GEN6_RVESYNC;
	ring->signal_mbox[VCS] = GEN6_VVESYNC;
	ring->signal_mbox[BCS] = GEN6_BVESYNC;
	ring->signal_mbox[VECS] = GEN6_NOSYNC;
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}
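#if 0
/*
 * Hypothetical bring-up sketch (not part of this file): a driver-load
 * path would initialize whichever rings the hardware provides, gated on
 * the HAS_* feature macros; the real call sites live elsewhere in the
 * driver.
 */
static int example_init_rings(struct drm_device *dev)
{
	int ret;

	ret = intel_init_render_ring_buffer(dev);
	if (ret)
		return ret;

	if (HAS_BSD(dev)) {
		ret = intel_init_bsd_ring_buffer(dev);
		if (ret)
			return ret;
	}

	if (HAS_BLT(dev)) {
		ret = intel_init_blt_ring_buffer(dev);
		if (ret)
			return ret;
	}

	if (HAS_VEBOX(dev)) {
		ret = intel_init_vebox_ring_buffer(dev);
		if (ret)
			return ret;
	}

	return 0;
}
#endif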
int
intel_ring_flush_all_caches(struct intel_ring_buffer *ring)
{
	int ret;

	if (!ring->gpu_caches_dirty)
		return 0;

	ret = ring->flush(ring, 0, I915_GEM_GPU_DOMAINS);
	if (ret)
		return ret;

	trace_i915_gem_ring_flush(ring, 0, I915_GEM_GPU_DOMAINS);

	ring->gpu_caches_dirty = false;
	return 0;
}

int
intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring)
{
	uint32_t flush_domains;
	int ret;

	flush_domains = 0;
	if (ring->gpu_caches_dirty)
		flush_domains = I915_GEM_GPU_DOMAINS;

	ret = ring->flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);
	if (ret)
		return ret;

	trace_i915_gem_ring_flush(ring, I915_GEM_GPU_DOMAINS, flush_domains);

	ring->gpu_caches_dirty = false;
	return 0;
}
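#if 0
/*
 * Hypothetical usage sketch (not part of the driver): a submission path
 * invalidates potentially stale GPU caches before running a batch, marks
 * the caches dirty once the batch may have written through them, and
 * relies on intel_ring_flush_all_caches() to clean them afterwards.
 */
static int example_submit_batch(struct intel_ring_buffer *ring,
				u32 batch_offset, u32 batch_len)
{
	int ret;

	ret = intel_ring_invalidate_all_caches(ring);
	if (ret)
		return ret;

	ret = ring->dispatch_execbuffer(ring, batch_offset, batch_len, 0);
	if (ret)
		return ret;

	/* The batch may have dirtied GPU caches; note it for later. */
	ring->gpu_caches_dirty = true;

	return intel_ring_flush_all_caches(ring);
}
#endif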