intel_ringbuffer.h

#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_

struct intel_hw_status_page {
        u32 *page_addr;
        unsigned int gfx_addr;
        struct drm_i915_gem_object *obj;
};

#define I915_READ_TAIL(ring) I915_READ(RING_TAIL((ring)->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL((ring)->mmio_base), val)

#define I915_READ_START(ring) I915_READ(RING_START((ring)->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START((ring)->mmio_base), val)

#define I915_READ_HEAD(ring) I915_READ(RING_HEAD((ring)->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD((ring)->mmio_base), val)

#define I915_READ_CTL(ring) I915_READ(RING_CTL((ring)->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL((ring)->mmio_base), val)

#define I915_READ_IMR(ring) I915_READ(RING_IMR((ring)->mmio_base))
#define I915_WRITE_IMR(ring, val) I915_WRITE(RING_IMR((ring)->mmio_base), val)

#define I915_READ_NOPID(ring) I915_READ(RING_NOPID((ring)->mmio_base))
#define I915_READ_SYNC_0(ring) I915_READ(RING_SYNC_0((ring)->mmio_base))
#define I915_READ_SYNC_1(ring) I915_READ(RING_SYNC_1((ring)->mmio_base))
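
/*
 * A minimal usage sketch for the accessors above: each ring's registers
 * live at a fixed offset from its mmio_base, and the underlying
 * I915_READ/I915_WRITE macros expect the usual dev_priv to be in scope,
 * as they do elsewhere in the driver:
 *
 *      struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
 *      u32 tail = I915_READ_TAIL(ring);
 *      I915_WRITE_TAIL(ring, tail);
 */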
struct intel_ring_buffer {
        const char *name;
        enum intel_ring_id {
                RCS = 0x0,
                VCS,
                BCS,
        } id;
#define I915_NUM_RINGS 3
        u32 mmio_base;
        void __iomem *virtual_start;
        struct drm_device *dev;
        struct drm_i915_gem_object *obj;

        u32 head;
        u32 tail;
        int space;
        int size;
        int effective_size;
        struct intel_hw_status_page status_page;

        /** We track the position of the requests in the ring buffer, and
         * when each is retired we update last_retired_head as the GPU
         * must have finished processing the request and so we know we
         * can advance the ringbuffer up to that position.
         *
         * last_retired_head is set to -1 after the value is consumed so
         * we can detect new retirements.
         */
        u32 last_retired_head;
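
        /*
         * Sketch of how a waiter consumes last_retired_head when
         * reclaiming ring space; ring_space() here stands in for the
         * driver's internal space accounting and is an assumption of
         * this example:
         *
         *      if (ring->last_retired_head != (u32)-1) {
         *              ring->head = ring->last_retired_head;
         *              ring->last_retired_head = -1;
         *              ring->space = ring_space(ring);
         *      }
         */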
        u32 irq_refcount;       /* protected by dev_priv->irq_lock */
        u32 irq_enable_mask;    /* bitmask to enable ring interrupt */
        u32 trace_irq_seqno;
        u32 sync_seqno[I915_NUM_RINGS-1];
        bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
        void (*irq_put)(struct intel_ring_buffer *ring);

        int (*init)(struct intel_ring_buffer *ring);

        void (*write_tail)(struct intel_ring_buffer *ring,
                           u32 value);
        int __must_check (*flush)(struct intel_ring_buffer *ring,
                                  u32 invalidate_domains,
                                  u32 flush_domains);
        int (*add_request)(struct intel_ring_buffer *ring);
        /* Some chipsets are not quite as coherent as advertised and need
         * an expensive kick to force a true read of the up-to-date seqno.
         * However, the up-to-date seqno is not always required and the last
         * seen value is good enough. Note that the seqno will always be
         * monotonic, even if not coherent.
         */
        u32 (*get_seqno)(struct intel_ring_buffer *ring,
                         bool lazy_coherency);
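
        /*
         * Sketch of the two coherency modes: passing lazy_coherency=true
         * accepts the last seen value, while false forces a true read,
         * which on the chipsets described above may involve an expensive
         * kick first:
         *
         *      u32 approx = ring->get_seqno(ring, true);
         *      u32 exact  = ring->get_seqno(ring, false);
         */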
        void (*set_seqno)(struct intel_ring_buffer *ring,
                          u32 seqno);
        int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
                                   u32 offset, u32 length,
                                   unsigned flags);
#define I915_DISPATCH_SECURE 0x1
        void (*cleanup)(struct intel_ring_buffer *ring);
        int (*sync_to)(struct intel_ring_buffer *ring,
                       struct intel_ring_buffer *to,
                       u32 seqno);

        u32 semaphore_register[3];      /* our mbox written by others */
        u32 signal_mbox[2];             /* mboxes this ring signals to */

        /**
         * List of objects currently involved in rendering from the
         * ringbuffer.
         *
         * Includes buffers having the contents of their GPU caches
         * flushed, not necessarily primitives. last_rendering_seqno
         * represents when the rendering involved will be completed.
         *
         * A reference is held on the buffer while on this list.
         */
        struct list_head active_list;

        /**
         * List of breadcrumbs associated with GPU requests currently
         * outstanding.
         */
        struct list_head request_list;

        /**
         * Do we have some not-yet-emitted requests outstanding?
         */
        u32 outstanding_lazy_request;
        bool gpu_caches_dirty;

        wait_queue_head_t irq_queue;

        /**
         * Do an explicit TLB flush before MI_SET_CONTEXT
         */
        bool itlb_before_ctx_switch;
        struct i915_hw_context *default_context;
        struct drm_i915_gem_object *last_context_obj;

        void *private;
};
static inline bool
intel_ring_initialized(struct intel_ring_buffer *ring)
{
        return ring->obj != NULL;
}

static inline unsigned
intel_ring_flag(struct intel_ring_buffer *ring)
{
        return 1 << ring->id;
}

static inline u32
intel_ring_sync_index(struct intel_ring_buffer *ring,
                      struct intel_ring_buffer *other)
{
        int idx;

        /*
         * cs -> 0 = vcs, 1 = bcs
         * vcs -> 0 = bcs, 1 = cs
         * bcs -> 0 = cs, 1 = vcs
         */

        idx = (other - ring) - 1;
        if (idx < 0)
                idx += I915_NUM_RINGS;

        return idx;
}
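
/*
 * Worked example of the index math above, assuming the rings are stored
 * contiguously in dev_priv->ring[] so that pointer subtraction mirrors
 * the id difference: for ring = RCS (0) and other = BCS (2),
 * idx = (2 - 0) - 1 = 1, matching the "cs -> 1 = bcs" row of the table.
 */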
static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
                       int reg)
{
        /* Ensure that the compiler doesn't optimize away the load. */
        barrier();
        return ring->status_page.page_addr[reg];
}

static inline void
intel_write_status_page(struct intel_ring_buffer *ring,
                        int reg, u32 value)
{
        ring->status_page.page_addr[reg] = value;
}
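
/*
 * Sketch: the seqno breadcrumb written by add_request() lands at
 * I915_GEM_HWS_INDEX (defined below), so a straightforward get_seqno()
 * implementation can reduce to a status-page read:
 *
 *      return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
 */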
/**
 * Reads a dword out of the status page, which is written to from the command
 * queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
 * MI_STORE_DATA_IMM.
 *
 * The following dwords have a reserved meaning:
 * 0x00: ISR copy, updated when an ISR bit not set in the HWSTAM changes.
 * 0x04: ring 0 head pointer
 * 0x05: ring 1 head pointer (915-class)
 * 0x06: ring 2 head pointer (915-class)
 * 0x10-0x1b: Context status DWords (GM45)
 * 0x1f: Last written status offset. (GM45)
 *
 * The area from dword 0x20 to 0x3ff is available for driver usage.
 */
#define I915_GEM_HWS_INDEX              0x20
#define I915_GEM_HWS_SCRATCH_INDEX      0x30
#define I915_GEM_HWS_SCRATCH_ADDR (I915_GEM_HWS_SCRATCH_INDEX << MI_STORE_DWORD_INDEX_SHIFT)
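
/*
 * Note on the shift above: MI_STORE_DWORD_INDEX takes a byte offset into
 * the status page, so the dword index must be converted to bytes.
 * Assuming MI_STORE_DWORD_INDEX_SHIFT is 2, as defined in i915_reg.h,
 * I915_GEM_HWS_SCRATCH_ADDR = 0x30 << 2 = 0xc0.
 */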
void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);

int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
static inline void intel_ring_emit(struct intel_ring_buffer *ring,
                                   u32 data)
{
        iowrite32(data, ring->virtual_start + ring->tail);
        ring->tail += 4;
}
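
/*
 * Typical emission pattern (a sketch; MI_NOOP comes from i915_reg.h):
 * reserve space for n dwords, write them, then advance the hardware tail.
 * intel_ring_begin() can fail, e.g. on a wedged GPU, so check its result:
 *
 *      ret = intel_ring_begin(ring, 2);
 *      if (ret)
 *              return ret;
 *      intel_ring_emit(ring, MI_NOOP);
 *      intel_ring_emit(ring, MI_NOOP);
 *      intel_ring_advance(ring);
 */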
void intel_ring_advance(struct intel_ring_buffer *ring);
int __must_check intel_ring_idle(struct intel_ring_buffer *ring);
void intel_ring_init_seqno(struct intel_ring_buffer *ring, u32 seqno);
int intel_ring_flush_all_caches(struct intel_ring_buffer *ring);
int intel_ring_invalidate_all_caches(struct intel_ring_buffer *ring);

int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);

u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
void intel_ring_setup_status_page(struct intel_ring_buffer *ring);

static inline u32 intel_ring_get_tail(struct intel_ring_buffer *ring)
{
        return ring->tail;
}

static inline u32 intel_ring_get_seqno(struct intel_ring_buffer *ring)
{
        BUG_ON(ring->outstanding_lazy_request == 0);
        return ring->outstanding_lazy_request;
}

static inline void i915_trace_irq_get(struct intel_ring_buffer *ring, u32 seqno)
{
        if (ring->trace_irq_seqno == 0 && ring->irq_get(ring))
                ring->trace_irq_seqno = seqno;
}

/* DRI warts */
int intel_render_ring_init_dri(struct drm_device *dev, u64 start, u32 size);

#endif /* _INTEL_RINGBUFFER_H_ */