intel_ringbuffer.c

/*
 * Copyright © 2008-2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *    Zou Nan hai <nanhai.zou@intel.com>
 *    Xiang Hai hao <haihao.xiang@intel.com>
 *
 */

#include "drmP.h"
#include "drm.h"
#include "i915_drv.h"
#include "i915_drm.h"
#include "i915_trace.h"
#include "intel_drv.h"

static u32 i915_gem_get_seqno(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 seqno;

        seqno = dev_priv->next_seqno;

        /* reserve 0 for non-seqno */
        if (++dev_priv->next_seqno == 0)
                dev_priv->next_seqno = 1;

        return seqno;
}

static void
render_ring_flush(struct drm_device *dev,
                  struct intel_ring_buffer *ring,
                  u32 invalidate_domains,
                  u32 flush_domains)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 cmd;

#if WATCH_EXEC
        DRM_INFO("%s: invalidate %08x flush %08x\n", __func__,
                 invalidate_domains, flush_domains);
#endif

        trace_i915_gem_request_flush(dev, dev_priv->next_seqno,
                                     invalidate_domains, flush_domains);

        if ((invalidate_domains | flush_domains) & I915_GEM_GPU_DOMAINS) {
                /*
                 * read/write caches:
                 *
                 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
                 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
                 * also flushed at 2d versus 3d pipeline switches.
                 *
                 * read-only caches:
                 *
                 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
                 * MI_READ_FLUSH is set, and is always flushed on 965.
                 *
                 * I915_GEM_DOMAIN_COMMAND may not exist?
                 *
                 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
                 * invalidated when MI_EXE_FLUSH is set.
                 *
                 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
                 * invalidated with every MI_FLUSH.
                 *
                 * TLBs:
                 *
                 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
                 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
                 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
                 * are flushed at any MI_FLUSH.
                 */

                cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;
                if ((invalidate_domains | flush_domains) &
                    I915_GEM_DOMAIN_RENDER)
                        cmd &= ~MI_NO_WRITE_FLUSH;
                if (INTEL_INFO(dev)->gen < 4) {
                        /*
                         * On the 965, the sampler cache always gets flushed
                         * and this bit is reserved.
                         */
                        if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER)
                                cmd |= MI_READ_FLUSH;
                }
                if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
                        cmd |= MI_EXE_FLUSH;

#if WATCH_EXEC
                DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
                intel_ring_begin(dev, ring, 2);
                intel_ring_emit(dev, ring, cmd);
                intel_ring_emit(dev, ring, MI_NOOP);
                intel_ring_advance(dev, ring);
        }
}

static void ring_set_tail(struct drm_device *dev,
                          struct intel_ring_buffer *ring,
                          u32 value)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        /* The only caller, intel_ring_advance(), passes ring->tail as value. */
        I915_WRITE_TAIL(ring, ring->tail);
}

static unsigned int render_ring_get_active_head(struct drm_device *dev,
                                                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ? ACTHD_I965 : ACTHD;

        return I915_READ(acthd_reg);
}
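
/*
 * Bring a ring (render or BSD) out of reset: stop it, program its start
 * address from the backing GEM object, verify that the head register really
 * did reset to zero (G45 is known not to), then enable the ring via the
 * ring control register.
 */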

static int init_ring_common(struct drm_device *dev,
                            struct intel_ring_buffer *ring)
{
        u32 head;
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv;
        obj_priv = to_intel_bo(ring->gem_object);

        /* Stop the ring if it's running. */
        I915_WRITE_CTL(ring, 0);
        I915_WRITE_HEAD(ring, 0);
        ring->set_tail(dev, ring, 0);

        /* Initialize the ring. */
        I915_WRITE_START(ring, obj_priv->gtt_offset);
        head = I915_READ_HEAD(ring) & HEAD_ADDR;

        /* G45 ring initialization fails to reset head to zero */
        if (head != 0) {
                DRM_ERROR("%s head not reset to zero "
                          "ctl %08x head %08x tail %08x start %08x\n",
                          ring->name,
                          I915_READ_CTL(ring),
                          I915_READ_HEAD(ring),
                          I915_READ_TAIL(ring),
                          I915_READ_START(ring));

                I915_WRITE_HEAD(ring, 0);

                DRM_ERROR("%s head forced to zero "
                          "ctl %08x head %08x tail %08x start %08x\n",
                          ring->name,
                          I915_READ_CTL(ring),
                          I915_READ_HEAD(ring),
                          I915_READ_TAIL(ring),
                          I915_READ_START(ring));
        }

        I915_WRITE_CTL(ring,
                       ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
                       | RING_NO_REPORT | RING_VALID);

        head = I915_READ_HEAD(ring) & HEAD_ADDR;
        /* If the head is still not zero, the ring is dead */
        if (head != 0) {
                DRM_ERROR("%s initialization failed "
                          "ctl %08x head %08x tail %08x start %08x\n",
                          ring->name,
                          I915_READ_CTL(ring),
                          I915_READ_HEAD(ring),
                          I915_READ_TAIL(ring),
                          I915_READ_START(ring));
                return -EIO;
        }

        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                i915_kernel_lost_context(dev);
        else {
                ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
                ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
                ring->space = ring->head - (ring->tail + 8);
                if (ring->space < 0)
                        ring->space += ring->size;
        }
        return 0;
}

static int init_render_ring(struct drm_device *dev,
                            struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int ret = init_ring_common(dev, ring);
        int mode;

        if (INTEL_INFO(dev)->gen > 3) {
                mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
                if (IS_GEN6(dev))
                        mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
                I915_WRITE(MI_MODE, mode);
        }
        return ret;
}
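
/*
 * Emit a single PIPE_CONTROL qword write to "addr" (a global-GTT offset into
 * the seqno/scratch page).  render_ring_add_request() below chains several of
 * these to separate cachelines as part of the qword write incoherence
 * workaround described there.
 */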

#define PIPE_CONTROL_FLUSH(addr)                                        \
do {                                                                    \
        OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |         \
                 PIPE_CONTROL_DEPTH_STALL | 2);                         \
        OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT);                       \
        OUT_RING(0);                                                    \
        OUT_RING(0);                                                    \
} while (0)

/**
 * Creates a new sequence number, emitting a write of it to the status page
 * plus an interrupt, which will trigger i915_user_interrupt_handler.
 *
 * Must be called with struct_lock held.
 *
 * Returned sequence numbers are nonzero on success.
 */
static u32
render_ring_add_request(struct drm_device *dev,
                        struct intel_ring_buffer *ring,
                        struct drm_file *file_priv,
                        u32 flush_domains)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        u32 seqno;

        seqno = i915_gem_get_seqno(dev);

        if (IS_GEN6(dev)) {
                BEGIN_LP_RING(6);
                OUT_RING(GFX_OP_PIPE_CONTROL | 3);
                OUT_RING(PIPE_CONTROL_QW_WRITE |
                         PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
                         PIPE_CONTROL_NOTIFY);
                OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
                OUT_RING(seqno);
                OUT_RING(0);
                OUT_RING(0);
                ADVANCE_LP_RING();
        } else if (HAS_PIPE_CONTROL(dev)) {
                u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;

                /*
                 * Workaround qword write incoherence by flushing the
                 * PIPE_NOTIFY buffers out to memory before requesting
                 * an interrupt.
                 */
                BEGIN_LP_RING(32);
                OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
                         PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
                OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
                OUT_RING(seqno);
                OUT_RING(0);
                PIPE_CONTROL_FLUSH(scratch_addr);
                scratch_addr += 128; /* write to separate cachelines */
                PIPE_CONTROL_FLUSH(scratch_addr);
                scratch_addr += 128;
                PIPE_CONTROL_FLUSH(scratch_addr);
                scratch_addr += 128;
                PIPE_CONTROL_FLUSH(scratch_addr);
                scratch_addr += 128;
                PIPE_CONTROL_FLUSH(scratch_addr);
                scratch_addr += 128;
                PIPE_CONTROL_FLUSH(scratch_addr);
                OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
                         PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
                         PIPE_CONTROL_NOTIFY);
                OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
                OUT_RING(seqno);
                OUT_RING(0);
                ADVANCE_LP_RING();
        } else {
                BEGIN_LP_RING(4);
                OUT_RING(MI_STORE_DWORD_INDEX);
                OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
                OUT_RING(seqno);
                OUT_RING(MI_USER_INTERRUPT);
                ADVANCE_LP_RING();
        }
        return seqno;
}
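
/*
 * The most recently completed seqno lives in the PIPE_CONTROL scratch page
 * when add_request above uses PIPE_CONTROL, and in the hardware status page
 * otherwise.
 */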
static u32
render_ring_get_gem_seqno(struct drm_device *dev,
                          struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;

        if (HAS_PIPE_CONTROL(dev))
                return ((volatile u32 *)(dev_priv->seqno_page))[0];
        else
                return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static void
render_ring_get_user_irq(struct drm_device *dev,
                         struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
        if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
                if (HAS_PCH_SPLIT(dev))
                        ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
                else
                        i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
        }
        spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

static void
render_ring_put_user_irq(struct drm_device *dev,
                         struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
        BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
        if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
                if (HAS_PCH_SPLIT(dev))
                        ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
                else
                        i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
        }
        spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}

static void render_setup_status_page(struct drm_device *dev,
                                     struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (IS_GEN6(dev)) {
                I915_WRITE(HWS_PGA_GEN6, ring->status_page.gfx_addr);
                I915_READ(HWS_PGA_GEN6); /* posting read */
        } else {
                I915_WRITE(HWS_PGA, ring->status_page.gfx_addr);
                I915_READ(HWS_PGA); /* posting read */
        }
}

void
bsd_ring_flush(struct drm_device *dev,
               struct intel_ring_buffer *ring,
               u32 invalidate_domains,
               u32 flush_domains)
{
        intel_ring_begin(dev, ring, 2);
        intel_ring_emit(dev, ring, MI_FLUSH);
        intel_ring_emit(dev, ring, MI_NOOP);
        intel_ring_advance(dev, ring);
}

static inline unsigned int bsd_ring_get_active_head(struct drm_device *dev,
                                                    struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        return I915_READ(BSD_RING_ACTHD);
}

static int init_bsd_ring(struct drm_device *dev,
                         struct intel_ring_buffer *ring)
{
        return init_ring_common(dev, ring);
}

static u32
bsd_ring_add_request(struct drm_device *dev,
                     struct intel_ring_buffer *ring,
                     struct drm_file *file_priv,
                     u32 flush_domains)
{
        u32 seqno;

        seqno = i915_gem_get_seqno(dev);

        intel_ring_begin(dev, ring, 4);
        intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
        intel_ring_emit(dev, ring,
                        I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
        intel_ring_emit(dev, ring, seqno);
        intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
        intel_ring_advance(dev, ring);

        DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);

        return seqno;
}

static void bsd_setup_status_page(struct drm_device *dev,
                                  struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        I915_WRITE(BSD_HWS_PGA, ring->status_page.gfx_addr);
        I915_READ(BSD_HWS_PGA);
}

static void
bsd_ring_get_user_irq(struct drm_device *dev,
                      struct intel_ring_buffer *ring)
{
        /* do nothing */
}

static void
bsd_ring_put_user_irq(struct drm_device *dev,
                      struct intel_ring_buffer *ring)
{
        /* do nothing */
}

static u32
bsd_ring_get_gem_seqno(struct drm_device *dev,
                       struct intel_ring_buffer *ring)
{
        return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}

static int
bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
                                 struct intel_ring_buffer *ring,
                                 struct drm_i915_gem_execbuffer2 *exec,
                                 struct drm_clip_rect *cliprects,
                                 uint64_t exec_offset)
{
        uint32_t exec_start;

        exec_start = (uint32_t) exec_offset + exec->batch_start_offset;

        intel_ring_begin(dev, ring, 2);
        intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
                        (2 << 6) | MI_BATCH_NON_SECURE_I965);
        intel_ring_emit(dev, ring, exec_start);
        intel_ring_advance(dev, ring);

        return 0;
}
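
/*
 * Dispatch a batch buffer on the render ring, once per cliprect (or once if
 * there are none).  i830/845 need the inline MI_BATCH_BUFFER form with an
 * explicit end address; everything else uses MI_BATCH_BUFFER_START.  G4x and
 * Ironlake get a trailing MI_FLUSH with ISP invalidation.
 */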

static int
render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
                                    struct intel_ring_buffer *ring,
                                    struct drm_i915_gem_execbuffer2 *exec,
                                    struct drm_clip_rect *cliprects,
                                    uint64_t exec_offset)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        int nbox = exec->num_cliprects;
        int i = 0, count;
        uint32_t exec_start, exec_len;

        exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
        exec_len = (uint32_t) exec->batch_len;

        trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);

        count = nbox ? nbox : 1;

        for (i = 0; i < count; i++) {
                if (i < nbox) {
                        int ret = i915_emit_box(dev, cliprects, i,
                                                exec->DR1, exec->DR4);
                        if (ret)
                                return ret;
                }

                if (IS_I830(dev) || IS_845G(dev)) {
                        intel_ring_begin(dev, ring, 4);
                        intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
                        intel_ring_emit(dev, ring,
                                        exec_start | MI_BATCH_NON_SECURE);
                        intel_ring_emit(dev, ring, exec_start + exec_len - 4);
                        intel_ring_emit(dev, ring, 0);
                } else {
                        intel_ring_begin(dev, ring, 4);
                        if (INTEL_INFO(dev)->gen >= 4) {
                                intel_ring_emit(dev, ring,
                                                MI_BATCH_BUFFER_START | (2 << 6)
                                                | MI_BATCH_NON_SECURE_I965);
                                intel_ring_emit(dev, ring, exec_start);
                        } else {
                                intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
                                                | (2 << 6));
                                intel_ring_emit(dev, ring, exec_start |
                                                MI_BATCH_NON_SECURE);
                        }
                }
                intel_ring_advance(dev, ring);
        }

        if (IS_G4X(dev) || IS_IRONLAKE(dev)) {
                intel_ring_begin(dev, ring, 2);
                intel_ring_emit(dev, ring, MI_FLUSH |
                                MI_NO_WRITE_FLUSH |
                                MI_INVALIDATE_ISP);
                intel_ring_emit(dev, ring, MI_NOOP);
                intel_ring_advance(dev, ring);
        }

        /* XXX breadcrumb */
        return 0;
}

static void cleanup_status_page(struct drm_device *dev,
                                struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;

        obj = ring->status_page.obj;
        if (obj == NULL)
                return;
        obj_priv = to_intel_bo(obj);

        kunmap(obj_priv->pages[0]);
        i915_gem_object_unpin(obj);
        drm_gem_object_unreference(obj);
        ring->status_page.obj = NULL;

        memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}
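
/*
 * Allocate and pin a one-page GEM object to hold the ring's hardware status
 * page, kmap it so the CPU can read seqnos out of it, and point the hardware
 * at it through the ring's setup_status_page hook.
 */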

static int init_status_page(struct drm_device *dev,
                            struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        struct drm_gem_object *obj;
        struct drm_i915_gem_object *obj_priv;
        int ret;

        obj = i915_gem_alloc_object(dev, 4096);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate status page\n");
                ret = -ENOMEM;
                goto err;
        }
        obj_priv = to_intel_bo(obj);
        obj_priv->agp_type = AGP_USER_CACHED_MEMORY;

        ret = i915_gem_object_pin(obj, 4096);
        if (ret != 0)
                goto err_unref;

        ring->status_page.gfx_addr = obj_priv->gtt_offset;
        ring->status_page.page_addr = kmap(obj_priv->pages[0]);
        if (ring->status_page.page_addr == NULL) {
                memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
                ret = -ENOMEM;
                goto err_unpin;
        }
        ring->status_page.obj = obj;
        memset(ring->status_page.page_addr, 0, PAGE_SIZE);

        ring->setup_status_page(dev, ring);
        DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
                         ring->name, ring->status_page.gfx_addr);

        return 0;

err_unpin:
        i915_gem_object_unpin(obj);
err_unref:
        drm_gem_object_unreference(obj);
err:
        return ret;
}
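
/*
 * Common ring setup: create the status page (when the chipset needs one in
 * graphics memory), allocate and pin the ring object itself, map it
 * write-combined through the GTT and hand off to the ring's init hook.
 * Errors unwind in reverse order via the labels at the bottom.
 */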

int intel_init_ring_buffer(struct drm_device *dev,
                           struct intel_ring_buffer *ring)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_object *obj_priv;
        struct drm_gem_object *obj;
        int ret;

        ring->dev = dev;

        if (I915_NEED_GFX_HWS(dev)) {
                ret = init_status_page(dev, ring);
                if (ret)
                        return ret;
        }

        obj = i915_gem_alloc_object(dev, ring->size);
        if (obj == NULL) {
                DRM_ERROR("Failed to allocate ringbuffer\n");
                ret = -ENOMEM;
                goto err_hws;
        }

        ring->gem_object = obj;

        ret = i915_gem_object_pin(obj, PAGE_SIZE);
        if (ret)
                goto err_unref;

        obj_priv = to_intel_bo(obj);
        ring->map.size = ring->size;
        ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
        ring->map.type = 0;
        ring->map.flags = 0;
        ring->map.mtrr = 0;

        drm_core_ioremap_wc(&ring->map, dev);
        if (ring->map.handle == NULL) {
                DRM_ERROR("Failed to map ringbuffer.\n");
                ret = -EINVAL;
                goto err_unpin;
        }

        ring->virtual_start = ring->map.handle;
        ret = ring->init(dev, ring);
        if (ret)
                goto err_unmap;

        if (!drm_core_check_feature(dev, DRIVER_MODESET))
                i915_kernel_lost_context(dev);
        else {
                ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
                ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
                ring->space = ring->head - (ring->tail + 8);
                if (ring->space < 0)
                        ring->space += ring->size;
        }
        INIT_LIST_HEAD(&ring->active_list);
        INIT_LIST_HEAD(&ring->request_list);
        return ret;

err_unmap:
        drm_core_ioremapfree(&ring->map, dev);
err_unpin:
        i915_gem_object_unpin(obj);
err_unref:
        drm_gem_object_unreference(obj);
        ring->gem_object = NULL;
err_hws:
        cleanup_status_page(dev, ring);
        return ret;
}

void intel_cleanup_ring_buffer(struct drm_device *dev,
                               struct intel_ring_buffer *ring)
{
        if (ring->gem_object == NULL)
                return;

        drm_core_ioremapfree(&ring->map, dev);

        i915_gem_object_unpin(ring->gem_object);
        drm_gem_object_unreference(ring->gem_object);
        ring->gem_object = NULL;
        cleanup_status_page(dev, ring);
}
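
/*
 * Wrap around to the start of the ring: wait until the remainder of the
 * buffer is unused, pad it with MI_NOOPs (two dwords at a time) and reset
 * the software tail to zero.
 */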

int intel_wrap_ring_buffer(struct drm_device *dev,
                           struct intel_ring_buffer *ring)
{
        unsigned int *virt;
        int rem;

        rem = ring->size - ring->tail;

        if (ring->space < rem) {
                int ret = intel_wait_ring_buffer(dev, ring, rem);
                if (ret)
                        return ret;
        }

        virt = (unsigned int *)(ring->virtual_start + ring->tail);
        rem /= 8;
        while (rem--) {
                *virt++ = MI_NOOP;
                *virt++ = MI_NOOP;
        }

        ring->tail = 0;
        ring->space = ring->head - 8;

        return 0;
}
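
/*
 * Busy-wait (with yield()) for up to three seconds until the ring has at
 * least n bytes free.  Free space is head - (tail + 8), modulo the ring
 * size; the extra eight bytes keep the tail from catching up with the head.
 */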

int intel_wait_ring_buffer(struct drm_device *dev,
                           struct intel_ring_buffer *ring, int n)
{
        unsigned long end;
        drm_i915_private_t *dev_priv = dev->dev_private;

        trace_i915_ring_wait_begin(dev);
        end = jiffies + 3 * HZ;
        do {
                ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
                ring->space = ring->head - (ring->tail + 8);
                if (ring->space < 0)
                        ring->space += ring->size;
                if (ring->space >= n) {
                        trace_i915_ring_wait_end(dev);
                        return 0;
                }

                if (dev->primary->master) {
                        struct drm_i915_master_private *master_priv =
                                dev->primary->master->driver_priv;
                        if (master_priv->sarea_priv)
                                master_priv->sarea_priv->perf_boxes |=
                                        I915_BOX_WAIT;
                }

                yield();
        } while (!time_after(jiffies, end));
        trace_i915_ring_wait_end(dev);
        return -EBUSY;
}
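
/*
 * Reserve space for num_dwords commands, wrapping and waiting as needed.
 * Commands are then written with intel_ring_emit() and made visible to the
 * hardware with intel_ring_advance(), e.g. (as bsd_ring_flush() does above):
 *
 *      intel_ring_begin(dev, ring, 2);
 *      intel_ring_emit(dev, ring, MI_FLUSH);
 *      intel_ring_emit(dev, ring, MI_NOOP);
 *      intel_ring_advance(dev, ring);
 */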

void intel_ring_begin(struct drm_device *dev,
                      struct intel_ring_buffer *ring, int num_dwords)
{
        int n = 4 * num_dwords;

        if (unlikely(ring->tail + n > ring->size))
                intel_wrap_ring_buffer(dev, ring);
        if (unlikely(ring->space < n))
                intel_wait_ring_buffer(dev, ring, n);

        ring->space -= n;
}

void intel_ring_advance(struct drm_device *dev,
                        struct intel_ring_buffer *ring)
{
        ring->tail &= ring->size - 1;
        ring->set_tail(dev, ring, ring->tail);
}

void intel_fill_struct(struct drm_device *dev,
                       struct intel_ring_buffer *ring,
                       void *data,
                       unsigned int len)
{
        unsigned int *virt = ring->virtual_start + ring->tail;

        /* data is copied into the ring a dword at a time, so len must be a
         * multiple of 4. */
        BUG_ON((len & (4 - 1)) != 0);
        intel_ring_begin(dev, ring, len / 4);
        memcpy(virt, data, len);
        ring->tail += len;
        ring->tail &= ring->size - 1;
        ring->space -= len;
        intel_ring_advance(dev, ring);
}
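
/*
 * Per-ring templates.  intel_init_render_ring_buffer() and
 * intel_init_bsd_ring_buffer() below copy these into dev_priv before the
 * rings are initialized.
 */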

static const struct intel_ring_buffer render_ring = {
        .name                   = "render ring",
        .id                     = RING_RENDER,
        .mmio_base              = RENDER_RING_BASE,
        .size                   = 32 * PAGE_SIZE,
        .setup_status_page      = render_setup_status_page,
        .init                   = init_render_ring,
        .set_tail               = ring_set_tail,
        .get_active_head        = render_ring_get_active_head,
        .flush                  = render_ring_flush,
        .add_request            = render_ring_add_request,
        .get_gem_seqno          = render_ring_get_gem_seqno,
        .user_irq_get           = render_ring_get_user_irq,
        .user_irq_put           = render_ring_put_user_irq,
        .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
};

/* ring buffer for bit-stream decoder */

static const struct intel_ring_buffer bsd_ring = {
        .name                   = "bsd ring",
        .id                     = RING_BSD,
        .mmio_base              = BSD_RING_BASE,
        .size                   = 32 * PAGE_SIZE,
        .setup_status_page      = bsd_setup_status_page,
        .init                   = init_bsd_ring,
        .set_tail               = ring_set_tail,
        .get_active_head        = bsd_ring_get_active_head,
        .flush                  = bsd_ring_flush,
        .add_request            = bsd_ring_add_request,
        .get_gem_seqno          = bsd_ring_get_gem_seqno,
        .user_irq_get           = bsd_ring_get_user_irq,
        .user_irq_put           = bsd_ring_put_user_irq,
        .dispatch_gem_execbuffer = bsd_ring_dispatch_gem_execbuffer,
};

static void gen6_bsd_setup_status_page(struct drm_device *dev,
                                       struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        I915_WRITE(GEN6_BSD_HWS_PGA, ring->status_page.gfx_addr);
        I915_READ(GEN6_BSD_HWS_PGA);
}

static inline void gen6_bsd_ring_set_tail(struct drm_device *dev,
                                          struct intel_ring_buffer *ring,
                                          u32 value)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        /* Every tail move must follow the sequence below */
        I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
                   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
                   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_DISABLE);
        I915_WRITE(GEN6_BSD_RNCID, 0x0);

        if (wait_for((I915_READ(GEN6_BSD_SLEEP_PSMI_CONTROL) &
                      GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR) == 0,
                     50))
                DRM_ERROR("timed out waiting for IDLE Indicator\n");

        I915_WRITE_TAIL(ring, value);
        I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
                   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_MODIFY_MASK |
                   GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}

static inline unsigned int gen6_bsd_ring_get_active_head(struct drm_device *dev,
                                                         struct intel_ring_buffer *ring)
{
        drm_i915_private_t *dev_priv = dev->dev_private;
        return I915_READ(GEN6_BSD_RING_ACTHD);
}

static void gen6_bsd_ring_flush(struct drm_device *dev,
                                struct intel_ring_buffer *ring,
                                u32 invalidate_domains,
                                u32 flush_domains)
{
        intel_ring_begin(dev, ring, 4);
        intel_ring_emit(dev, ring, MI_FLUSH_DW);
        intel_ring_emit(dev, ring, 0);
        intel_ring_emit(dev, ring, 0);
        intel_ring_emit(dev, ring, 0);
        intel_ring_advance(dev, ring);
}

static int
gen6_bsd_ring_dispatch_gem_execbuffer(struct drm_device *dev,
                                      struct intel_ring_buffer *ring,
                                      struct drm_i915_gem_execbuffer2 *exec,
                                      struct drm_clip_rect *cliprects,
                                      uint64_t exec_offset)
{
        uint32_t exec_start;

        exec_start = (uint32_t) exec_offset + exec->batch_start_offset;

        intel_ring_begin(dev, ring, 2);
        /* bit0-7 is the length on GEN6+ */
        intel_ring_emit(dev, ring,
                        MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
        intel_ring_emit(dev, ring, exec_start);
        intel_ring_advance(dev, ring);

        return 0;
}

/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
        .name                   = "gen6 bsd ring",
        .id                     = RING_BSD,
        .mmio_base              = GEN6_BSD_RING_BASE,
        .size                   = 32 * PAGE_SIZE,
        .setup_status_page      = gen6_bsd_setup_status_page,
        .init                   = init_bsd_ring,
        .set_tail               = gen6_bsd_ring_set_tail,
        .get_active_head        = gen6_bsd_ring_get_active_head,
        .flush                  = gen6_bsd_ring_flush,
        .add_request            = bsd_ring_add_request,
        .get_gem_seqno          = bsd_ring_get_gem_seqno,
        .user_irq_get           = bsd_ring_get_user_irq,
        .user_irq_put           = bsd_ring_put_user_irq,
        .dispatch_gem_execbuffer = gen6_bsd_ring_dispatch_gem_execbuffer,
};

int intel_init_render_ring_buffer(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        dev_priv->render_ring = render_ring;

        if (!I915_NEED_GFX_HWS(dev)) {
                dev_priv->render_ring.status_page.page_addr
                        = dev_priv->status_page_dmah->vaddr;
                memset(dev_priv->render_ring.status_page.page_addr,
                       0, PAGE_SIZE);
        }

        return intel_init_ring_buffer(dev, &dev_priv->render_ring);
}

int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
        drm_i915_private_t *dev_priv = dev->dev_private;

        if (IS_GEN6(dev))
                dev_priv->bsd_ring = gen6_bsd_ring;
        else
                dev_priv->bsd_ring = bsd_ring;

        return intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
}