intel_ringbuffer.c

/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"
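
/*
 * Hand out the next sequence number. Zero is reserved as the "no seqno"
 * value, so when the 32-bit counter wraps (0xffffffff is followed by 0)
 * it is immediately bumped to 1.
 */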
static u32 i915_gem_get_seqno(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno;

	seqno = dev_priv->next_seqno;

	/* reserve 0 for non-seqno */
	if (++dev_priv->next_seqno == 0)
		dev_priv->next_seqno = 1;

	return seqno;
}
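
/*
 * Translate the GEM invalidate/flush domain masks into a single MI_FLUSH
 * for the render ring. The comment in the body records how each domain
 * maps onto the flush bits per generation.
 */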
static void
render_ring_flush(struct drm_device *dev,
		struct intel_ring_buffer *ring,
		u32 invalidate_domains,
		u32 flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 cmd;

#if WATCH_EXEC
	DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
		 invalidate_domains, flush_domains);
#endif

	trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
				     invalidate_domains, flush_domains);

	if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
		/*
		 * read/write caches:
		 *
		 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
		 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
		 * also flushed at 2d versus 3d pipeline switches.
		 *
		 * read-only caches:
		 *
		 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
		 * MI_READ_FLUSH is set, and is always flushed on 965.
		 *
		 * I915_GEM_DOMAIN_COMMAND may not exist?
		 *
		 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
		 * invalidated when MI_EXE_FLUSH is set.
		 *
		 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
		 * invalidated with every MI_FLUSH.
		 *
		 * TLBs:
		 *
		 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
		 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
		 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
		 * are flushed at any MI_FLUSH.
		 */

		cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
		if ((invalidate_domains | flush_domains) &
		    I915_GEM_DOMAIN_RENDER)
			cmd &= ~MI_NO_WRITE_FLUSH;
		if (!IS_I965G(dev)) {
			/*
			 * On the 965, the sampler cache always gets flushed
			 * and this bit is reserved.
			 */
			if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
				cmd |= MI_READ_FLUSH;
		}
		if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
			cmd |= MI_EXE_FLUSH;

#if WATCH_EXEC
		DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
		intel_ring_begin(dev, ring, 2);
		intel_ring_emit(dev, ring, cmd);
		intel_ring_emit(dev, ring, MI_NOOP);
		intel_ring_advance(dev, ring);
	}
}
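
/*
 * Per-ring register accessors. The HEAD_ADDR/TAIL_ADDR masks strip the
 * reads down to the byte offset within the ring, discarding the other
 * bits that share the PRB0_HEAD/PRB0_TAIL registers.
 */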
static unsigned int render_ring_get_head(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	return I915_READ(PRB0_HEAD) & HEAD_ADDR;
}

static unsigned int render_ring_get_tail(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	return I915_READ(PRB0_TAIL) & TAIL_ADDR;
}

static unsigned int render_ring_get_active_head(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 acthd_reg = IS_I965G(dev) ? ACTHD_I965 : ACTHD;

	return I915_READ(acthd_reg);
}

static void render_ring_advance_ring(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	I915_WRITE(PRB0_TAIL, ring->tail);
}
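
/*
 * Bring a ring up from scratch: stop it, program start/head/tail, enable
 * it, and verify that head really did reset (G45 is known not to, see
 * below). Free space is head - (tail + 8), wrapped by adding ring->size
 * when negative: with a 128 KB ring, head == 0x100 and tail == 0x200
 * gives 0x100 - 0x208 + 0x20000 = 0x1fef8 bytes free.
 */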
static int init_ring_common(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	u32 head;
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj_priv;
	obj_priv = to_intel_bo(ring->gem_object);

	/* Stop the ring if it's running. */
	I915_WRITE(ring->regs.ctl, 0);
	I915_WRITE(ring->regs.head, 0);
	I915_WRITE(ring->regs.tail, 0);

	/* Initialize the ring. */
	I915_WRITE(ring->regs.start, obj_priv->gtt_offset);
	head = ring->get_head(dev, ring);

	/* G45 ring initialization fails to reset head to zero */
	if (head != 0) {
		DRM_ERROR("%s head not reset to zero "
				"ctl %08x head %08x tail %08x start %08x\n",
				ring->name,
				I915_READ(ring->regs.ctl),
				I915_READ(ring->regs.head),
				I915_READ(ring->regs.tail),
				I915_READ(ring->regs.start));

		I915_WRITE(ring->regs.head, 0);

		DRM_ERROR("%s head forced to zero "
				"ctl %08x head %08x tail %08x start %08x\n",
				ring->name,
				I915_READ(ring->regs.ctl),
				I915_READ(ring->regs.head),
				I915_READ(ring->regs.tail),
				I915_READ(ring->regs.start));
	}

	I915_WRITE(ring->regs.ctl,
			((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
			| RING_NO_REPORT | RING_VALID);

	head = I915_READ(ring->regs.head) & HEAD_ADDR;
	/* If the head is still not zero, the ring is dead */
	if (head != 0) {
		DRM_ERROR("%s initialization failed "
				"ctl %08x head %08x tail %08x start %08x\n",
				ring->name,
				I915_READ(ring->regs.ctl),
				I915_READ(ring->regs.head),
				I915_READ(ring->regs.tail),
				I915_READ(ring->regs.start));
		return -EIO;
	}

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kernel_lost_context(dev);
	else {
		ring->head = ring->get_head(dev, ring);
		ring->tail = ring->get_tail(dev, ring);
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
	}
	return 0;
}
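
/*
 * MI_MODE is one of the masked registers on these chips: the upper 16
 * bits of the value written select which of the lower 16 bits take
 * effect, so (bit << 16 | bit) sets a bit while leaving the rest of the
 * register untouched.
 */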
static int init_render_ring(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = init_ring_common(dev, ring);

	if (IS_I9XX(dev) && !IS_GEN3(dev)) {
		I915_WRITE(MI_MODE,
				(VS_TIMER_DISPATCH) << 16 | VS_TIMER_DISPATCH);
	}
	return ret;
}
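
/*
 * Emits a four-dword PIPE_CONTROL that stalls at the depth stage and
 * posts a qword write to the given scratch address in the global GTT.
 * render_ring_add_request() below uses a chain of these, each to a
 * separate cacheline (128 bytes apart), to push outstanding writes out
 * to memory.
 */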
#define PIPE_CONTROL_FLUSH(addr) \
do { \
	OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
		 PIPE_CONTROL_DEPTH_STALL | 2); \
	OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \
	OUT_RING(0); \
	OUT_RING(0); \
} while (0)

/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static u32
render_ring_add_request(struct drm_device *dev,
		struct intel_ring_buffer *ring,
		struct drm_file *file_priv,
		u32 flush_domains)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	u32 seqno;

	seqno = i915_gem_get_seqno(dev);

	if (IS_GEN6(dev)) {
		BEGIN_LP_RING(6);
		OUT_RING(GFX_OP_PIPE_CONTROL | 3);
		OUT_RING(PIPE_CONTROL_QW_WRITE |
			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
			 PIPE_CONTROL_NOTIFY);
		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
		OUT_RING(seqno);
		OUT_RING(0);
		OUT_RING(0);
		ADVANCE_LP_RING();
	} else if (HAS_PIPE_CONTROL(dev)) {
		u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;

		/*
		 * Workaround qword write incoherence by flushing the
		 * PIPE_NOTIFY buffers out to memory before requesting
		 * an interrupt.
		 */
		BEGIN_LP_RING(32);
		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
		OUT_RING(seqno);
		OUT_RING(0);
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128; /* write to separate cachelines */
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(scratch_addr);
		scratch_addr += 128;
		PIPE_CONTROL_FLUSH(scratch_addr);
		OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
			 PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
			 PIPE_CONTROL_NOTIFY);
		OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
		OUT_RING(seqno);
		OUT_RING(0);
		ADVANCE_LP_RING();
	} else {
		BEGIN_LP_RING(4);
		OUT_RING(MI_STORE_DWORD_INDEX);
		OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
		OUT_RING(seqno);
		OUT_RING(MI_USER_INTERRUPT);
		ADVANCE_LP_RING();
	}
	return seqno;
}

static u32
render_ring_get_gem_seqno(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	if (HAS_PIPE_CONTROL(dev))
		return ((volatile u32 *)(dev_priv->seqno_page))[0];
	else
		return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
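
/*
 * User interrupts are reference counted under user_irq_lock: the first
 * get enables the interrupt source, the last put disables it again, and
 * nested get/put pairs in between only touch the refcount. Ironlake's
 * GT interrupts sit behind the PCH split, hence the two paths.
 */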
static void
render_ring_get_user_irq(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
		else
			i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

static void
render_ring_put_user_irq(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
	BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
	if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
		if (HAS_PCH_SPLIT(dev))
			ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
		else
			i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
	}
	spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

static void render_setup_status_page(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	if (IS_GEN6(dev)) {
		I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr);
		I915_READ(HWS_PGA_GEN6); /* posting read */
	} else {
		I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
		I915_READ(HWS_PGA); /* posting read */
	}
}
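
/*
 * Unlike the render ring, the BSD ring ignores the domain masks and
 * always emits a bare MI_FLUSH; the trailing MI_NOOP pads the command
 * out to an even number of dwords.
 */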
void
bsd_ring_flush(struct drm_device *dev,
		struct intel_ring_buffer *ring,
		u32 invalidate_domains,
		u32 flush_domains)
{
	intel_ring_begin(dev, ring, 2);
	intel_ring_emit(dev, ring, MI_FLUSH);
	intel_ring_emit(dev, ring, MI_NOOP);
	intel_ring_advance(dev, ring);
}

static inline unsigned int bsd_ring_get_head(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	return I915_READ(BSD_RING_HEAD) & HEAD_ADDR;
}

static inline unsigned int bsd_ring_get_tail(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	return I915_READ(BSD_RING_TAIL) & TAIL_ADDR;
}

static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	return I915_READ(BSD_RING_ACTHD);
}

static inline void bsd_ring_advance_ring(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	I915_WRITE(BSD_RING_TAIL, ring->tail);
}

static int init_bsd_ring(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	return init_ring_common(dev, ring);
}

static u32
bsd_ring_add_request(struct drm_device *dev,
		struct intel_ring_buffer *ring,
		struct drm_file *file_priv,
		u32 flush_domains)
{
	u32 seqno;

	seqno = i915_gem_get_seqno(dev);

	intel_ring_begin(dev, ring, 4);
	intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
	intel_ring_emit(dev, ring,
			I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
	intel_ring_emit(dev, ring, seqno);
	intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
	intel_ring_advance(dev, ring);

	DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);

	return seqno;
}

static void bsd_setup_status_page(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
	I915_READ(BSD_HWS_PGA);
}

static void
bsd_ring_get_user_irq(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	/* do nothing */
}

static void
bsd_ring_put_user_irq(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	/* do nothing */
}

static u32
bsd_ring_get_gem_seqno(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static int
bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
		struct intel_ring_buffer *ring,
		struct drm_i915_gem_execbuffer2 *exec,
		struct drm_clip_rect *cliprects,
		uint64_t exec_offset)
{
	uint32_t exec_start;

	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;

	intel_ring_begin(dev, ring, 2);
	intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
			(2 << 6) | MI_BATCH_NON_SECURE_I965);
	intel_ring_emit(dev, ring, exec_start);
	intel_ring_advance(dev, ring);

	return 0;
}
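
/*
 * The render dispatch re-emits the batch once per cliprect (or once if
 * there are none): each pass emits the box via i915_emit_box() and then
 * the MI_BATCH_BUFFER variant the chip understands.
 */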
static int
render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
		struct intel_ring_buffer *ring,
		struct drm_i915_gem_execbuffer2 *exec,
		struct drm_clip_rect *cliprects,
		uint64_t exec_offset)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int nbox = exec->num_cliprects;
	int i = 0, count;
	uint32_t exec_start, exec_len;

	exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
	exec_len = (uint32_t) exec->batch_len;

	trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);

	count = nbox ? nbox : 1;

	for (i = 0; i < count; i++) {
		if (i < nbox) {
			int ret = i915_emit_box(dev, cliprects, i,
					exec->DR1, exec->DR4);
			if (ret)
				return ret;
		}

		if (IS_I830(dev) || IS_845G(dev)) {
			intel_ring_begin(dev, ring, 4);
			intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
			intel_ring_emit(dev, ring,
					exec_start | MI_BATCH_NON_SECURE);
			intel_ring_emit(dev, ring, exec_start + exec_len - 4);
			intel_ring_emit(dev, ring, 0);
		} else {
			intel_ring_begin(dev, ring, 4);
			if (IS_I965G(dev)) {
				intel_ring_emit(dev, ring,
						MI_BATCH_BUFFER_START | (2 << 6)
						| MI_BATCH_NON_SECURE_I965);
				intel_ring_emit(dev, ring, exec_start);
			} else {
				intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
						| (2 << 6));
				intel_ring_emit(dev, ring, exec_start |
						MI_BATCH_NON_SECURE);
			}
		}
		intel_ring_advance(dev, ring);
	}

	if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
		intel_ring_begin(dev, ring, 2);
		intel_ring_emit(dev, ring, MI_FLUSH |
				MI_NO_WRITE_FLUSH |
				MI_INVALIDATE_ISP);
		intel_ring_emit(dev, ring, MI_NOOP);
		intel_ring_advance(dev, ring);
	}

	/* XXX breadcrumb */
	return 0;
}
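
/*
 * The hardware status page is a single page of cached GEM memory that
 * the GPU writes breadcrumbs into (MI_STORE_DWORD_INDEX above) and the
 * CPU reads back through the kmap'ed page_addr.
 */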
static void cleanup_status_page(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;

	obj = ring->status_page.obj;
	if (obj == NULL)
		return;
	obj_priv = to_intel_bo(obj);

	kunmap(obj_priv->pages[0]);
	i915_gem_object_unpin(obj);
	drm_gem_object_unreference(obj);
	ring->status_page.obj = NULL;

	memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}

static int init_status_page(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	struct drm_gem_object *obj;
	struct drm_i915_gem_object *obj_priv;
	int ret;

	obj = i915_gem_alloc_object(dev, 4096);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate status page\n");
		ret = -ENOMEM;
		goto err;
	}
	obj_priv = to_intel_bo(obj);
	obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

	ret = i915_gem_object_pin(obj, 4096);
	if (ret != 0)
		goto err_unref;

	ring->status_page.gfx_addr = obj_priv->gtt_offset;
	ring->status_page.page_addr = kmap(obj_priv->pages[0]);
	if (ring->status_page.page_addr == NULL) {
		/* kmap failure must not be reported as success */
		ret = -ENOMEM;
		memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
		goto err_unpin;
	}
	ring->status_page.obj = obj;
	memset(ring->status_page.page_addr, 0, PAGE_SIZE);

	ring->setup_status_page(dev, ring);
	DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
			ring->name, ring->status_page.gfx_addr);

	return 0;

err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(obj);
err:
	return ret;
}
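
/*
 * One-stop ring setup: the status page (if the chip wants one in GFX
 * memory), then the ring object itself, pinned and mapped
 * write-combined, then the ring's own init hook. The error labels
 * unwind the steps in reverse order.
 */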
int intel_init_ring_buffer(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	struct drm_i915_gem_object *obj_priv;
	struct drm_gem_object *obj;
	int ret;

	ring->dev = dev;

	if (I915_NEED_GFX_HWS(dev)) {
		ret = init_status_page(dev, ring);
		if (ret)
			return ret;
	}

	obj = i915_gem_alloc_object(dev, ring->size);
	if (obj == NULL) {
		DRM_ERROR("Failed to allocate ringbuffer\n");
		ret = -ENOMEM;
		goto err_hws;
	}

	ring->gem_object = obj;

	ret = i915_gem_object_pin(obj, ring->alignment);
	if (ret)
		goto err_unref;

	obj_priv = to_intel_bo(obj);
	ring->map.size = ring->size;
	ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
	ring->map.type = 0;
	ring->map.flags = 0;
	ring->map.mtrr = 0;

	drm_core_ioremap_wc(&ring->map, dev);
	if (ring->map.handle == NULL) {
		DRM_ERROR("Failed to map ringbuffer.\n");
		ret = -EINVAL;
		goto err_unpin;
	}

	ring->virtual_start = ring->map.handle;
	ret = ring->init(dev, ring);
	if (ret)
		goto err_unmap;

	if (!drm_core_check_feature(dev, DRIVER_MODESET))
		i915_kernel_lost_context(dev);
	else {
		ring->head = ring->get_head(dev, ring);
		ring->tail = ring->get_tail(dev, ring);
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
	}
	INIT_LIST_HEAD(&ring->active_list);
	INIT_LIST_HEAD(&ring->request_list);
	return ret;

err_unmap:
	drm_core_ioremapfree(&ring->map, dev);
err_unpin:
	i915_gem_object_unpin(obj);
err_unref:
	drm_gem_object_unreference(obj);
	ring->gem_object = NULL;
err_hws:
	cleanup_status_page(dev, ring);
	return ret;
}

void intel_cleanup_ring_buffer(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	if (ring->gem_object == NULL)
		return;

	drm_core_ioremapfree(&ring->map, dev);

	i915_gem_object_unpin(ring->gem_object);
	drm_gem_object_unreference(ring->gem_object);
	ring->gem_object = NULL;
	cleanup_status_page(dev, ring);
}
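
/*
 * When a request would run off the end of the buffer, the remainder is
 * filled with MI_NOOPs and the tail wraps to 0. rem is a byte count,
 * and each loop iteration writes two dwords (8 bytes), hence rem /= 8.
 */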
int intel_wrap_ring_buffer(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	unsigned int *virt;
	int rem;

	rem = ring->size - ring->tail;

	if (ring->space < rem) {
		int ret = intel_wait_ring_buffer(dev, ring, rem);
		if (ret)
			return ret;
	}

	virt = (unsigned int *)(ring->virtual_start + ring->tail);
	rem /= 8;
	while (rem--) {
		*virt++ = MI_NOOP;
		*virt++ = MI_NOOP;
	}

	ring->tail = 0;
	ring->space = ring->head - 8;

	return 0;
}

int intel_wait_ring_buffer(struct drm_device *dev,
		struct intel_ring_buffer *ring, int n)
{
	unsigned long end;

	trace_i915_ring_wait_begin(dev);
	end = jiffies + 3 * HZ;
	do {
		ring->head = ring->get_head(dev, ring);
		ring->space = ring->head - (ring->tail + 8);
		if (ring->space < 0)
			ring->space += ring->size;
		if (ring->space >= n) {
			trace_i915_ring_wait_end(dev);
			return 0;
		}

		if (dev->primary->master) {
			struct drm_i915_master_private *master_priv =
				dev->primary->master->driver_priv;
			if (master_priv->sarea_priv)
				master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
		}

		yield();
	} while (!time_after(jiffies, end));
	trace_i915_ring_wait_end(dev);
	return -EBUSY;
}
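
/*
 * Typical emit sequence, as used throughout this file:
 *
 *	intel_ring_begin(dev, ring, 2);
 *	intel_ring_emit(dev, ring, MI_FLUSH);
 *	intel_ring_emit(dev, ring, MI_NOOP);
 *	intel_ring_advance(dev, ring);
 *
 * begin() reserves space in dwords, wrapping and/or waiting if the ring
 * is too full; emit() writes one dword at the tail; advance() publishes
 * the new tail to the hardware.
 */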
void intel_ring_begin(struct drm_device *dev,
		struct intel_ring_buffer *ring, int num_dwords)
{
	int n = 4 * num_dwords;

	if (unlikely(ring->tail + n > ring->size))
		intel_wrap_ring_buffer(dev, ring);
	if (unlikely(ring->space < n))
		intel_wait_ring_buffer(dev, ring, n);

	ring->space -= n;
}

void intel_ring_advance(struct drm_device *dev,
		struct intel_ring_buffer *ring)
{
	ring->tail &= ring->size - 1;
	ring->advance_ring(dev, ring);
}

void intel_fill_struct(struct drm_device *dev,
		struct intel_ring_buffer *ring,
		void *data,
		unsigned int len)
{
	unsigned int *virt = ring->virtual_start + ring->tail;
	BUG_ON((len & ~(4 - 1)) != 0);
	intel_ring_begin(dev, ring, len / 4);
	memcpy(virt, data, len);
	ring->tail += len;
	ring->tail &= ring->size - 1;
	ring->space -= len;
	intel_ring_advance(dev, ring);
}
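
/* The two rings this file knows about, wired up to the vfuncs above. */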
struct intel_ring_buffer render_ring = {
	.name = "render ring",
	.regs = {
		.ctl = PRB0_CTL,
		.head = PRB0_HEAD,
		.tail = PRB0_TAIL,
		.start = PRB0_START
	},
	.ring_flag = I915_EXEC_RENDER,
	.size = 32 * PAGE_SIZE,
	.alignment = PAGE_SIZE,
	.virtual_start = NULL,
	.dev = NULL,
	.gem_object = NULL,
	.head = 0,
	.tail = 0,
	.space = 0,
	.user_irq_refcount = 0,
	.irq_gem_seqno = 0,
	.waiting_gem_seqno = 0,
	.setup_status_page = render_setup_status_page,
	.init = init_render_ring,
	.get_head = render_ring_get_head,
	.get_tail = render_ring_get_tail,
	.get_active_head = render_ring_get_active_head,
	.advance_ring = render_ring_advance_ring,
	.flush = render_ring_flush,
	.add_request = render_ring_add_request,
	.get_gem_seqno = render_ring_get_gem_seqno,
	.user_irq_get = render_ring_get_user_irq,
	.user_irq_put = render_ring_put_user_irq,
	.dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
	.status_page = {NULL, 0, NULL},
	.map = {0,}
};

/* ring buffer for bit-stream decoder */

struct intel_ring_buffer bsd_ring = {
	.name = "bsd ring",
	.regs = {
		.ctl = BSD_RING_CTL,
		.head = BSD_RING_HEAD,
		.tail = BSD_RING_TAIL,
		.start = BSD_RING_START
	},
	.ring_flag = I915_EXEC_BSD,
	.size = 32 * PAGE_SIZE,
	.alignment = PAGE_SIZE,
	.virtual_start = NULL,
	.dev = NULL,
	.gem_object = NULL,
	.head = 0,
	.tail = 0,
	.space = 0,
	.user_irq_refcount = 0,
	.irq_gem_seqno = 0,
	.waiting_gem_seqno = 0,
	.setup_status_page = bsd_setup_status_page,
	.init = init_bsd_ring,
	.get_head = bsd_ring_get_head,
	.get_tail = bsd_ring_get_tail,
	.get_active_head = bsd_ring_get_active_head,
	.advance_ring = bsd_ring_advance_ring,
	.flush = bsd_ring_flush,
	.add_request = bsd_ring_add_request,
	.get_gem_seqno = bsd_ring_get_gem_seqno,
	.user_irq_get = bsd_ring_get_user_irq,
	.user_irq_put = bsd_ring_put_user_irq,
	.dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
	.status_page = {NULL, 0, NULL},
	.map = {0,}
};