intel_ringbuffer.c
/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Haihao <haihao.xiang@intel.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"
#include "intel_drv.h"

/*
 * 965+ support PIPE_CONTROL commands, which provide finer grained control
 * over cache flushing.
 */
struct pipe_control {
	struct drm_i915_gem_object *obj;
	volatile u32 *cpu_page;
	u32 gtt_offset;
};
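
/* Free space between the tail and the head: the 8-byte gap keeps the tail
 * from ever catching up to the head, since tail == head would read back as
 * an empty ring.
 */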
static inline int ring_space(struct intel_ring_buffer *ring)
{
	int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
	if (space < 0)
		space += ring->size;
	return space;
}
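
/* Gen2 has only the single-dword MI_FLUSH; pick the read/write flush bits
 * from the invalidate/flush domains handed down by GEM.
 */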
static int
gen2_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	u32 cmd;
	int ret;

	cmd = MI_FLUSH;
	if (((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER) == 0)
		cmd |= MI_NO_WRITE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
		cmd |= MI_READ_FLUSH;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}
static int
gen4_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains,
		       u32 flush_domains)
{
	struct drm_device *dev = ring->dev;
	u32 cmd;
	int ret;

	/*
	 * read/write caches:
	 *
	 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
	 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
	 * also flushed at 2d versus 3d pipeline switches.
	 *
	 * read-only caches:
	 *
	 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
	 * MI_READ_FLUSH is set, and is always flushed on 965.
	 *
	 * I915_GEM_DOMAIN_COMMAND may not exist?
	 *
	 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
	 * invalidated when MI_EXE_FLUSH is set.
	 *
	 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
	 * invalidated with every MI_FLUSH.
	 *
	 * TLBs:
	 *
	 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
	 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
	 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
	 * are flushed at any MI_FLUSH.
	 */

	cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
	if ((invalidate_domains|flush_domains) & I915_GEM_DOMAIN_RENDER)
		cmd &= ~MI_NO_WRITE_FLUSH;
	if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
		cmd |= MI_EXE_FLUSH;

	if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
	    (IS_G4X(dev) || IS_GEN5(dev)))
		cmd |= MI_INVALIDATE_ISP;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}
/**
 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
 * implementing two workarounds on gen6. From section 1.4.7.1
 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
 *
 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
 * produced by non-pipelined state commands), software needs to first
 * send a PIPE_CONTROL with no bits set except Post-Sync Operation !=
 * 0.
 *
 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
 *
 * And the workaround for these two requires this workaround first:
 *
 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
 * BEFORE the pipe-control with a post-sync op and no write-cache
 * flushes.
 *
 * And this last workaround is tricky because of the requirements on
 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
 * volume 2 part 1:
 *
 *     "1 of the following must also be set:
 *      - Render Target Cache Flush Enable ([12] of DW1)
 *      - Depth Cache Flush Enable ([0] of DW1)
 *      - Stall at Pixel Scoreboard ([1] of DW1)
 *      - Depth Stall ([13] of DW1)
 *      - Post-Sync Operation ([13] of DW1)
 *      - Notify Enable ([8] of DW1)"
 *
 * The cache flushes require the workaround flush that triggered this
 * one, so we can't use it. Depth stall would trigger the same.
 * Post-sync nonzero is what triggered this second workaround, so we
 * can't use that one either. Notify enable is IRQs, which aren't
 * really our business. That leaves only stall at scoreboard.
 */
static int
intel_emit_post_sync_nonzero_flush(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
			PIPE_CONTROL_STALL_AT_SCOREBOARD);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0); /* low dword */
	intel_ring_emit(ring, 0); /* high dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT); /* address */
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}
static int
gen6_render_ring_flush(struct intel_ring_buffer *ring,
		       u32 invalidate_domains, u32 flush_domains)
{
	u32 flags = 0;
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	/* Force SNB workarounds for PIPE_CONTROL flushes */
	ret = intel_emit_post_sync_nonzero_flush(ring);
	if (ret)
		return ret;

	/* Just flush everything. Experiments have shown that reducing the
	 * number of bits based on the write domains has little performance
	 * impact.
	 */
	flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
	flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
	flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
	flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;

	ret = intel_ring_begin(ring, 6);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
	intel_ring_emit(ring, flags);
	intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, 0); /* lower dword */
	intel_ring_emit(ring, 0); /* upper dword */
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);

	return 0;
}
static void ring_write_tail(struct intel_ring_buffer *ring,
			    u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	I915_WRITE_TAIL(ring, value);
}

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
			RING_ACTHD(ring->mmio_base) : ACTHD;

	return I915_READ(acthd_reg);
}

static int init_ring_common(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj = ring->obj;
	int ret = 0;
	u32 head;

	if (HAS_FORCE_WAKE(dev))
		gen6_gt_force_wake_get(dev_priv);

	/* Stop the ring if it's running. */
	I915_WRITE_CTL(ring, 0);
	I915_WRITE_HEAD(ring, 0);
	ring->write_tail(ring, 0);

	/* Initialize the ring. */
	I915_WRITE_START(ring, obj->gtt_offset);
	head = I915_READ_HEAD(ring) & HEAD_ADDR;

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_DEBUG_KMS("%s head not reset to zero "
			      "ctl %08x head %08x tail %08x start %08x\n",
			      ring->name,
			      I915_READ_CTL(ring),
			      I915_READ_HEAD(ring),
			      I915_READ_TAIL(ring),
			      I915_READ_START(ring));

		I915_WRITE_HEAD(ring, 0);

		if (I915_READ_HEAD(ring) & HEAD_ADDR) {
			DRM_ERROR("failed to set %s head to zero "
				  "ctl %08x head %08x tail %08x start %08x\n",
				  ring->name,
				  I915_READ_CTL(ring),
				  I915_READ_HEAD(ring),
				  I915_READ_TAIL(ring),
				  I915_READ_START(ring));
		}
	}

	I915_WRITE_CTL(ring,
			((ring->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_VALID);

	/* If the head is still not zero, the ring is dead */
	if (wait_for((I915_READ_CTL(ring) & RING_VALID) != 0 &&
		     I915_READ_START(ring) == obj->gtt_offset &&
		     (I915_READ_HEAD(ring) & HEAD_ADDR) == 0, 50)) {
		DRM_ERROR("%s initialization failed "
			  "ctl %08x head %08x tail %08x start %08x\n",
			  ring->name,
			  I915_READ_CTL(ring),
			  I915_READ_HEAD(ring),
			  I915_READ_TAIL(ring),
			  I915_READ_START(ring));
		ret = -EIO;
		goto out;
	}

	if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
		i915_kernel_lost_context(ring->dev);
	else {
		ring->head = I915_READ_HEAD(ring);
		ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
		ring->space = ring_space(ring);
		ring->last_retired_head = -1;
	}

out:
	if (HAS_FORCE_WAKE(dev))
		gen6_gt_force_wake_put(dev_priv);

	return ret;
}

static int
init_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc;
	struct drm_i915_gem_object *obj;
	int ret;

	if (ring->private)
		return 0;

	pc = kmalloc(sizeof(*pc), GFP_KERNEL);
	if (!pc)
		return -ENOMEM;

	obj = i915_gem_alloc_object(ring->dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate seqno page\n");
		ret = -ENOMEM;
		goto err;
	}

	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret)
		goto err_unref;

	pc->gtt_offset = obj->gtt_offset;
	pc->cpu_page = kmap(obj->pages[0]);
	if (pc->cpu_page == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	pc->obj = obj;
	ring->private = pc;
	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	kfree(pc);
	return ret;
}

static void
cleanup_pipe_control(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	struct drm_i915_gem_object *obj;

	if (!ring->private)
		return;

	obj = pc->obj;
	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);

	kfree(pc);
	ring->private = NULL;
}

static int init_render_ring(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret = init_ring_common(ring);

	if (INTEL_INFO(dev)->gen > 3) {
		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
		if (IS_GEN7(dev))
			I915_WRITE(GFX_MODE_GEN7,
				   _MASKED_BIT_DISABLE(GFX_TLB_INVALIDATE_ALWAYS) |
				   _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
	}

	if (INTEL_INFO(dev)->gen >= 5) {
		ret = init_pipe_control(ring);
		if (ret)
			return ret;
	}

	if (IS_GEN6(dev)) {
		/* From the Sandybridge PRM, volume 1 part 3, page 24:
		 * "If this bit is set, STCunit will have LRA as replacement
		 *  policy. [...] This bit must be reset. LRA replacement
		 *  policy is not supported."
		 */
		I915_WRITE(CACHE_MODE_0,
			   _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
	}

	if (INTEL_INFO(dev)->gen >= 6)
		I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));

	return ret;
}

static void render_ring_cleanup(struct intel_ring_buffer *ring)
{
	if (!ring->private)
		return;

	cleanup_pipe_control(ring);
}
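
/* Write the given seqno into another ring's semaphore mailbox register, so
 * that the other ring can later wait on it via MI_SEMAPHORE_MBOX |
 * MI_SEMAPHORE_COMPARE (see gen6_ring_sync() below).
 */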
static void
update_mboxes(struct intel_ring_buffer *ring,
	      u32 seqno,
	      u32 mmio_offset)
{
	intel_ring_emit(ring, MI_SEMAPHORE_MBOX |
			      MI_SEMAPHORE_GLOBAL_GTT |
			      MI_SEMAPHORE_REGISTER |
			      MI_SEMAPHORE_UPDATE);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, mmio_offset);
}

/**
 * gen6_add_request - Update the semaphore mailbox registers
 *
 * @ring - ring that is adding a request
 * @seqno - return seqno stuck into the ring
 *
 * Update the mailbox registers in the *other* rings with the current seqno.
 * This acts like a signal in the canonical semaphore.
 */
static int
gen6_add_request(struct intel_ring_buffer *ring,
		 u32 *seqno)
{
	u32 mbox1_reg;
	u32 mbox2_reg;
	int ret;

	ret = intel_ring_begin(ring, 10);
	if (ret)
		return ret;

	mbox1_reg = ring->signal_mbox[0];
	mbox2_reg = ring->signal_mbox[1];

	*seqno = i915_gem_next_request_seqno(ring);

	update_mboxes(ring, *seqno, mbox1_reg);
	update_mboxes(ring, *seqno, mbox2_reg);
	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, *seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	return 0;
}

/**
 * gen6_ring_sync - sync the waiter to the signaller on seqno
 *
 * @waiter - ring that is waiting
 * @signaller - ring which has, or will signal
 * @seqno - seqno which the waiter will block on
 */
static int
gen6_ring_sync(struct intel_ring_buffer *waiter,
	       struct intel_ring_buffer *signaller,
	       u32 seqno)
{
	int ret;
	u32 dw1 = MI_SEMAPHORE_MBOX |
		  MI_SEMAPHORE_COMPARE |
		  MI_SEMAPHORE_REGISTER;

	/* Throughout all of the GEM code, seqno passed implies our current
	 * seqno is >= the last seqno executed. However for hardware the
	 * comparison is strictly greater than.
	 */
	seqno -= 1;

	WARN_ON(signaller->semaphore_register[waiter->id] ==
		MI_SEMAPHORE_SYNC_INVALID);

	ret = intel_ring_begin(waiter, 4);
	if (ret)
		return ret;

	intel_ring_emit(waiter,
			dw1 | signaller->semaphore_register[waiter->id]);
	intel_ring_emit(waiter, seqno);
	intel_ring_emit(waiter, 0);
	intel_ring_emit(waiter, MI_NOOP);
	intel_ring_advance(waiter);

	return 0;
}
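
/* Emit a four-dword PIPE_CONTROL that stalls on depth and posts a qword
 * write to the given scratch address; pc_render_add_request() uses this
 * repeatedly to flush the PIPE_NOTIFY write buffers out to memory.
 */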
#define PIPE_CONTROL_FLUSH(ring__, addr__)					\
do {										\
	intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |	\
		 PIPE_CONTROL_DEPTH_STALL);					\
	intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT);		\
	intel_ring_emit(ring__, 0);						\
	intel_ring_emit(ring__, 0);						\
} while (0)

static int
pc_render_add_request(struct intel_ring_buffer *ring,
		      u32 *result)
{
	u32 seqno = i915_gem_next_request_seqno(ring);
	struct pipe_control *pc = ring->private;
	u32 scratch_addr = pc->gtt_offset + 128;
	int ret;

	/* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
	 * incoherent with writes to memory, i.e. completely fubar,
	 * so we need to use PIPE_NOTIFY instead.
	 *
	 * However, we also need to workaround the qword write
	 * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
	 * memory before requesting an interrupt.
	 */
	ret = intel_ring_begin(ring, 32);
	if (ret)
		return ret;

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128; /* write to separate cachelines */
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);
	scratch_addr += 128;
	PIPE_CONTROL_FLUSH(ring, scratch_addr);

	intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE |
			PIPE_CONTROL_WRITE_FLUSH |
			PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
			PIPE_CONTROL_NOTIFY);
	intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static u32
gen6_ring_get_seqno(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;

	/* Workaround to force correct ordering between irq and seqno writes on
	 * ivb (and maybe also on snb) by reading from a CS register (like
	 * ACTHD) before reading the status page. */
	if (IS_GEN6(dev) || IS_GEN7(dev))
		intel_ring_get_active_head(ring);
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
ring_get_seqno(struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static u32
pc_render_get_seqno(struct intel_ring_buffer *ring)
{
	struct pipe_control *pc = ring->private;
	return pc->cpu_page[0];
}

static bool
gen5_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
		POSTING_READ(GTIMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
gen5_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
		POSTING_READ(GTIMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
i9xx_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
i9xx_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		dev_priv->irq_mask |= ring->irq_enable_mask;
		I915_WRITE(IMR, dev_priv->irq_mask);
		POSTING_READ(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

static bool
i8xx_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		dev_priv->irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE16(IMR, dev_priv->irq_mask);
		POSTING_READ16(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
i8xx_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		dev_priv->irq_mask |= ring->irq_enable_mask;
		I915_WRITE16(IMR, dev_priv->irq_mask);
		POSTING_READ16(IMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);
}

void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = ring->dev->dev_private;
	u32 mmio = 0;

	/* The ring status page addresses are no longer next to the rest of
	 * the ring registers as of gen7.
	 */
	if (IS_GEN7(dev)) {
		switch (ring->id) {
		case RCS:
			mmio = RENDER_HWS_PGA_GEN7;
			break;
		case BCS:
			mmio = BLT_HWS_PGA_GEN7;
			break;
		case VCS:
			mmio = BSD_HWS_PGA_GEN7;
			break;
		}
	} else if (IS_GEN6(ring->dev)) {
		mmio = RING_HWS_PGA_GEN6(ring->mmio_base);
	} else {
		mmio = RING_HWS_PGA(ring->mmio_base);
	}

	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
	POSTING_READ(mmio);
}

static int
bsd_ring_flush(struct intel_ring_buffer *ring,
	       u32 invalidate_domains,
	       u32 flush_domains)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_FLUSH);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
i9xx_add_request(struct intel_ring_buffer *ring,
		 u32 *result)
{
	u32 seqno;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	seqno = i915_gem_next_request_seqno(ring);

	intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(ring, seqno);
	intel_ring_emit(ring, MI_USER_INTERRUPT);
	intel_ring_advance(ring);

	*result = seqno;
	return 0;
}

static bool
gen6_ring_get_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	if (!dev->irq_enabled)
		return false;

	/* It looks like we need to prevent the gt from suspending while waiting
	 * for a notify irq, otherwise irqs seem to get lost on at least the
	 * blt/bsd rings on ivb. */
	gen6_gt_force_wake_get(dev_priv);

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (ring->irq_refcount++ == 0) {
		I915_WRITE_IMR(ring, ~ring->irq_enable_mask);
		dev_priv->gt_irq_mask &= ~ring->irq_enable_mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
		POSTING_READ(GTIMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	return true;
}

static void
gen6_ring_put_irq(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	drm_i915_private_t *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->irq_lock, flags);
	if (--ring->irq_refcount == 0) {
		I915_WRITE_IMR(ring, ~0);
		dev_priv->gt_irq_mask |= ring->irq_enable_mask;
		I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
		POSTING_READ(GTIMR);
	}
	spin_unlock_irqrestore(&dev_priv->irq_lock, flags);

	gen6_gt_force_wake_put(dev_priv);
}

static int
i965_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring,
			MI_BATCH_BUFFER_START |
			MI_BATCH_GTT |
			MI_BATCH_NON_SECURE_I965);
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}
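
/* i830 uses the older MI_BATCH_BUFFER command, which takes an explicit
 * start and end address for the batch rather than just a start pointer.
 */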
static int
i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
			 u32 offset, u32 len)
{
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER);
	intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
	intel_ring_emit(ring, offset + len - 8);
	intel_ring_emit(ring, 0);
	intel_ring_advance(ring);

	return 0;
}

static int
i915_dispatch_execbuffer(struct intel_ring_buffer *ring,
			 u32 offset, u32 len)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
	intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
	intel_ring_advance(ring);

	return 0;
}
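
/* The hardware status page is a single page of memory into which the GPU
 * writes seqnos (via MI_STORE_DWORD_INDEX above), so the CPU can poll for
 * request completion without touching MMIO registers.
 */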
static void cleanup_status_page(struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;

	kunmap(obj->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(&obj->base);
	ring->status_page.obj = NULL;
}

static int init_status_page(struct intel_ring_buffer *ring)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_gem_object *obj;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		ret = -ENOMEM;
		goto err;
	}

	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);

	ret = i915_gem_object_pin(obj, 4096, true);
	if (ret != 0)
		goto err_unref;

	ring->status_page.gfx_addr = obj->gtt_offset;
	ring->status_page.page_addr = kmap(obj->pages[0]);
	if (ring->status_page.page_addr == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}
	ring->status_page.obj = obj;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	intel_ring_setup_status_page(ring);
	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			 ring->name, ring->status_page.gfx_addr);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
err:
	return ret;
}

static int intel_init_ring_buffer(struct drm_device *dev,
				  struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj;
	int ret;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);
	ring->size = 32 * PAGE_SIZE;

	init_waitqueue_head(&ring->irq_queue);

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(ring);
		if (ret)
			return ret;
	}

	obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto err_hws;
	}

	ring->obj = obj;

	ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
	if (ret)
		goto err_unref;

	ret = i915_gem_object_set_to_gtt_domain(obj, true);
	if (ret)
		goto err_unpin;

	ring->virtual_start = ioremap_wc(dev->agp->base + obj->gtt_offset,
					 ring->size);
	if (ring->virtual_start == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		ret = -EINVAL;
		goto err_unpin;
	}

	ret = ring->init(ring);
	if (ret)
		goto err_unmap;

	/* Workaround an erratum on the i830 which causes a hang if
	 * the TAIL pointer points to within the last 2 cachelines
	 * of the buffer.
	 */
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev) || IS_845G(ring->dev))
		ring->effective_size -= 128;

	return 0;

err_unmap:
	iounmap(ring->virtual_start);
err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(&obj->base);
	ring->obj = NULL;
err_hws:
	cleanup_status_page(ring);
	return ret;
}

void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv;
	int ret;

	if (ring->obj == NULL)
		return;

	/* Disable the ring buffer. The ring must be idle at this point */
	dev_priv = ring->dev->dev_private;
	ret = intel_wait_ring_idle(ring);
	if (ret)
		DRM_ERROR("failed to quiesce %s whilst cleaning up: %d\n",
			  ring->name, ret);

	I915_WRITE_CTL(ring, 0);

	iounmap(ring->virtual_start);

	i915_gem_object_unpin(ring->obj);
	drm_gem_object_unreference(&ring->obj->base);
	ring->obj = NULL;

	if (ring->cleanup)
		ring->cleanup(ring);

	cleanup_status_page(ring);
}
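
/* Pad the tail-to-end remainder of the ring with MI_NOOPs and wrap the
 * tail back to zero, first waiting for enough space to drain for the
 * padding itself.
 */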
static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
	uint32_t __iomem *virt;
	int rem = ring->size - ring->tail;

	if (ring->space < rem) {
		int ret = intel_wait_ring_buffer(ring, rem);
		if (ret)
			return ret;
	}

	virt = ring->virtual_start + ring->tail;
	rem /= 4;
	while (rem--)
		iowrite32(MI_NOOP, virt++);

	ring->tail = 0;
	ring->space = ring_space(ring);

	return 0;
}

static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	bool was_interruptible;
	int ret;

	/* XXX As we have not yet audited all the paths to check that
	 * they are ready for ERESTARTSYS from intel_ring_begin, do not
	 * allow us to be interruptible by a signal.
	 */
	was_interruptible = dev_priv->mm.interruptible;
	dev_priv->mm.interruptible = false;

	ret = i915_wait_request(ring, seqno);

	dev_priv->mm.interruptible = was_interruptible;
	if (!ret)
		i915_gem_retire_requests_ring(ring);

	return ret;
}
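
/* Reclaim ring space by retiring outstanding requests: find the first
 * request whose completion would free at least @n bytes behind the tail,
 * wait for it, and pull the new head from last_retired_head.
 */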
static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
{
	struct drm_i915_gem_request *request;
	u32 seqno = 0;
	int ret;

	i915_gem_retire_requests_ring(ring);

	if (ring->last_retired_head != -1) {
		ring->head = ring->last_retired_head;
		ring->last_retired_head = -1;
		ring->space = ring_space(ring);
		if (ring->space >= n)
			return 0;
	}

	list_for_each_entry(request, &ring->request_list, list) {
		int space;

		if (request->tail == -1)
			continue;

		space = request->tail - (ring->tail + 8);
		if (space < 0)
			space += ring->size;
		if (space >= n) {
			seqno = request->seqno;
			break;
		}

		/* Consume this request in case we need more space than
		 * is available and so need to prevent a race between
		 * updating last_retired_head and direct reads of
		 * I915_RING_HEAD. It also provides a nice sanity check.
		 */
		request->tail = -1;
	}

	if (seqno == 0)
		return -ENOSPC;

	ret = intel_ring_wait_seqno(ring, seqno);
	if (ret)
		return ret;

	if (WARN_ON(ring->last_retired_head == -1))
		return -ENOSPC;

	ring->head = ring->last_retired_head;
	ring->last_retired_head = -1;
	ring->space = ring_space(ring);
	if (WARN_ON(ring->space < n))
		return -ENOSPC;

	return 0;
}

int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
	struct drm_device *dev = ring->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	unsigned long end;
	int ret;

	ret = intel_ring_wait_request(ring, n);
	if (ret != -ENOSPC)
		return ret;

	trace_i915_ring_wait_begin(ring);
	/* With GEM the hangcheck timer should kick us out of the loop,
	 * leaving it early runs the risk of corrupting GEM state (due
	 * to running on almost untested codepaths). But on resume
	 * timers don't work yet, so prevent a complete hang in that
	 * case by choosing an insanely large timeout. */
	end = jiffies + 60 * HZ;
	do {
		ring->head = I915_READ_HEAD(ring);
		ring->space = ring_space(ring);
		if (ring->space >= n) {
			trace_i915_ring_wait_end(ring);
			return 0;
		}

		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		msleep(1);
		if (atomic_read(&dev_priv->mm.wedged))
			return -EAGAIN;
	} while (!time_after(jiffies, end));
	trace_i915_ring_wait_end(ring);
	return -EBUSY;
}
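
/* Reserve space for @num_dwords dwords in the ring, wrapping and/or waiting
 * as needed; every emit sequence must be bracketed by a successful
 * intel_ring_begin() and a closing intel_ring_advance().
 */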
int intel_ring_begin(struct intel_ring_buffer *ring,
		     int num_dwords)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;
	int n = 4*num_dwords;
	int ret;

	if (unlikely(atomic_read(&dev_priv->mm.wedged)))
		return -EIO;

	if (unlikely(ring->tail + n > ring->effective_size)) {
		ret = intel_wrap_ring_buffer(ring);
		if (unlikely(ret))
			return ret;
	}

	if (unlikely(ring->space < n)) {
		ret = intel_wait_ring_buffer(ring, n);
		if (unlikely(ret))
			return ret;
	}

	ring->space -= n;
	return 0;
}
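
/* Commit the emitted dwords by writing the new tail to the hardware,
 * unless this ring has been stopped for debugging via stop_rings.
 */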
void intel_ring_advance(struct intel_ring_buffer *ring)
{
	struct drm_i915_private *dev_priv = ring->dev->dev_private;

	ring->tail &= ring->size - 1;
	if (dev_priv->stop_rings & intel_ring_flag(ring))
		return;
	ring->write_tail(ring, ring->tail);
}

static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
				     u32 value)
{
	drm_i915_private_t *dev_priv = ring->dev->dev_private;

	/* Every tail move must follow the sequence below */
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
	I915_WRITE(GEN6_BSD_RNCID, 0x0);

	if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
		      GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
		     50))
		DRM_ERROR("timed out waiting for IDLE Indicator\n");

	I915_WRITE_TAIL(ring, value);
	I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
		   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}
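
/* The bsd and blt rings flush with MI_FLUSH_DW rather than MI_FLUSH; the
 * TLB invalidate bits are added only when GEM asks for an invalidate.
 */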
static int gen6_ring_flush(struct intel_ring_buffer *ring,
			   u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_GPU_DOMAINS)
		cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

static int
gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
			      u32 offset, u32 len)
{
	int ret;

	ret = intel_ring_begin(ring, 2);
	if (ret)
		return ret;

	intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
	/* bit0-7 is the length on GEN6+ */
	intel_ring_emit(ring, offset);
	intel_ring_advance(ring);

	return 0;
}

/* Blitter support (SandyBridge+) */

static int blt_ring_flush(struct intel_ring_buffer *ring,
			  u32 invalidate, u32 flush)
{
	uint32_t cmd;
	int ret;

	ret = intel_ring_begin(ring, 4);
	if (ret)
		return ret;

	cmd = MI_FLUSH_DW;
	if (invalidate & I915_GEM_DOMAIN_RENDER)
		cmd |= MI_INVALIDATE_TLB;
	intel_ring_emit(ring, cmd);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, 0);
	intel_ring_emit(ring, MI_NOOP);
	intel_ring_advance(ring);
	return 0;
}

int intel_init_render_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;

	if (INTEL_INFO(dev)->gen >= 6) {
		ring->add_request = gen6_add_request;
		ring->flush = gen6_render_ring_flush;
		ring->irq_get = gen6_ring_get_irq;
		ring->irq_put = gen6_ring_put_irq;
		ring->irq_enable_mask = GT_USER_INTERRUPT;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->sync_to = gen6_ring_sync;
		ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_INVALID;
		ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_RV;
		ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_RB;
		ring->signal_mbox[0] = GEN6_VRSYNC;
		ring->signal_mbox[1] = GEN6_BRSYNC;
	} else if (IS_GEN5(dev)) {
		ring->add_request = pc_render_add_request;
		ring->flush = gen4_render_ring_flush;
		ring->get_seqno = pc_render_get_seqno;
		ring->irq_get = gen5_ring_get_irq;
		ring->irq_put = gen5_ring_put_irq;
		ring->irq_enable_mask = GT_USER_INTERRUPT | GT_PIPE_NOTIFY;
	} else {
		ring->add_request = i9xx_add_request;
		if (INTEL_INFO(dev)->gen < 4)
			ring->flush = gen2_render_ring_flush;
		else
			ring->flush = gen4_render_ring_flush;
		ring->get_seqno = ring_get_seqno;
		if (IS_GEN2(dev)) {
			ring->irq_get = i8xx_ring_get_irq;
			ring->irq_put = i8xx_ring_put_irq;
		} else {
			ring->irq_get = i9xx_ring_get_irq;
			ring->irq_put = i9xx_ring_put_irq;
		}
		ring->irq_enable_mask = I915_USER_INTERRUPT;
	}
	ring->write_tail = ring_write_tail;
	if (INTEL_INFO(dev)->gen >= 6)
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	else if (INTEL_INFO(dev)->gen >= 4)
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	else if (IS_I830(dev) || IS_845G(dev))
		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
	else
		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
	ring->init = init_render_ring;
	ring->cleanup = render_ring_cleanup;

	if (!I915_NEED_GFX_HWS(dev)) {
		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
		memset(ring->status_page.page_addr, 0, PAGE_SIZE);
	}

	return intel_init_ring_buffer(dev, ring);
}

int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[RCS];

	ring->name = "render ring";
	ring->id = RCS;
	ring->mmio_base = RENDER_RING_BASE;

	if (INTEL_INFO(dev)->gen >= 6) {
		/* non-kms not supported on gen6+ */
		return -ENODEV;
	}

	/* Note: gem is not supported on gen5/ilk without kms (the corresponding
	 * gem_init ioctl returns with -ENODEV). Hence we do not need to set up
	 * the special gen5 functions. */
	ring->add_request = i9xx_add_request;
	if (INTEL_INFO(dev)->gen < 4)
		ring->flush = gen2_render_ring_flush;
	else
		ring->flush = gen4_render_ring_flush;
	ring->get_seqno = ring_get_seqno;
	if (IS_GEN2(dev)) {
		ring->irq_get = i8xx_ring_get_irq;
		ring->irq_put = i8xx_ring_put_irq;
	} else {
		ring->irq_get = i9xx_ring_get_irq;
		ring->irq_put = i9xx_ring_put_irq;
	}
	ring->irq_enable_mask = I915_USER_INTERRUPT;
	ring->write_tail = ring_write_tail;
	if (INTEL_INFO(dev)->gen >= 4)
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	else if (IS_I830(dev) || IS_845G(dev))
		ring->dispatch_execbuffer = i830_dispatch_execbuffer;
	else
		ring->dispatch_execbuffer = i915_dispatch_execbuffer;
	ring->init = init_render_ring;
	ring->cleanup = render_ring_cleanup;

	if (!I915_NEED_GFX_HWS(dev))
		ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;

	ring->dev = dev;
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	INIT_LIST_HEAD(&ring->gpu_write_list);

	ring->size = size;
	ring->effective_size = ring->size;
	if (IS_I830(ring->dev))
		ring->effective_size -= 128;

	ring->virtual_start = ioremap_wc(start, size);
	if (ring->virtual_start == NULL) {
		DRM_ERROR("cannot ioremap virtual address for"
			  " ring buffer\n");
		return -ENOMEM;
	}

	return 0;
}

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[VCS];

	ring->name = "bsd ring";
	ring->id = VCS;

	ring->write_tail = ring_write_tail;
	if (IS_GEN6(dev) || IS_GEN7(dev)) {
		ring->mmio_base = GEN6_BSD_RING_BASE;
		/* gen6 bsd needs a special wa for tail updates */
		if (IS_GEN6(dev))
			ring->write_tail = gen6_bsd_ring_write_tail;
		ring->flush = gen6_ring_flush;
		ring->add_request = gen6_add_request;
		ring->get_seqno = gen6_ring_get_seqno;
		ring->irq_enable_mask = GEN6_BSD_USER_INTERRUPT;
		ring->irq_get = gen6_ring_get_irq;
		ring->irq_put = gen6_ring_put_irq;
		ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
		ring->sync_to = gen6_ring_sync;
		ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_VR;
		ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_INVALID;
		ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_VB;
		ring->signal_mbox[0] = GEN6_RVSYNC;
		ring->signal_mbox[1] = GEN6_BVSYNC;
	} else {
		ring->mmio_base = BSD_RING_BASE;
		ring->flush = bsd_ring_flush;
		ring->add_request = i9xx_add_request;
		ring->get_seqno = ring_get_seqno;
		if (IS_GEN5(dev)) {
			ring->irq_enable_mask = GT_BSD_USER_INTERRUPT;
			ring->irq_get = gen5_ring_get_irq;
			ring->irq_put = gen5_ring_put_irq;
		} else {
			ring->irq_enable_mask = I915_BSD_USER_INTERRUPT;
			ring->irq_get = i9xx_ring_get_irq;
			ring->irq_put = i9xx_ring_put_irq;
		}
		ring->dispatch_execbuffer = i965_dispatch_execbuffer;
	}
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}

int intel_init_blt_ring_buffer(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct intel_ring_buffer *ring = &dev_priv->ring[BCS];

	ring->name = "blitter ring";
	ring->id = BCS;

	ring->mmio_base = BLT_RING_BASE;
	ring->write_tail = ring_write_tail;
	ring->flush = blt_ring_flush;
	ring->add_request = gen6_add_request;
	ring->get_seqno = gen6_ring_get_seqno;
	ring->irq_enable_mask = GEN6_BLITTER_USER_INTERRUPT;
	ring->irq_get = gen6_ring_get_irq;
	ring->irq_put = gen6_ring_put_irq;
	ring->dispatch_execbuffer = gen6_ring_dispatch_execbuffer;
	ring->sync_to = gen6_ring_sync;
	ring->semaphore_register[0] = MI_SEMAPHORE_SYNC_BR;
	ring->semaphore_register[1] = MI_SEMAPHORE_SYNC_BV;
	ring->semaphore_register[2] = MI_SEMAPHORE_SYNC_INVALID;
	ring->signal_mbox[0] = GEN6_RBSYNC;
	ring->signal_mbox[1] = GEN6_VBSYNC;
	ring->init = init_ring_common;

	return intel_init_ring_buffer(dev, ring);
}