nouveau_dma.c

/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"

void
nouveau_dma_pre_init(struct nouveau_channel *chan)
{
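	/* dma.max is the push buffer size in dwords (">> 2" converts bytes
	 * to dwords), with the last two dwords held back - presumably
	 * headroom for ring control commands, though that is an assumption.
	 * dma.put is the offset last handed to the GPU, dma.cur is where
	 * the CPU writes next, and dma.free is the space left in between.
	 */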
	chan->dma.max  = (chan->pushbuf_bo->bo.mem.size >> 2) - 2;
	chan->dma.put  = 0;
	chan->dma.cur  = chan->dma.put;
	chan->dma.free = chan->dma.max - chan->dma.cur;
}

int
nouveau_dma_init(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *m2mf = NULL;
	struct nouveau_gpuobj *nvsw = NULL;
	int ret, i;

	/* Create NV_MEMORY_TO_MEMORY_FORMAT for buffer moves */
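	/* (0x0039 is the pre-NV50 M2MF object class and 0x5039 the NV50
	 * one - class numbers as used elsewhere in nouveau.)
	 */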
	ret = nouveau_gpuobj_gr_new(chan, dev_priv->card_type < NV_50 ?
				    0x0039 : 0x5039, &m2mf);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_ref_add(dev, chan, NvM2MF, m2mf, NULL);
	if (ret)
		return ret;

	/* Create an NV_SW object for various sync purposes */
	ret = nouveau_gpuobj_sw_new(chan, NV_SW, &nvsw);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_ref_add(dev, chan, NvSw, nvsw, NULL);
	if (ret)
		return ret;

	/* NV_MEMORY_TO_MEMORY_FORMAT requires a notifier object */
	ret = nouveau_notifier_alloc(chan, NvNotify0, 32, &chan->m2mf_ntfy);
	if (ret)
		return ret;

	/* Map push buffer */
	ret = nouveau_bo_map(chan->pushbuf_bo);
	if (ret)
		return ret;

	/* Map M2MF notifier object - fbcon needs it when modesetting is enabled. */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		ret = nouveau_bo_map(chan->notifier_bo);
		if (ret)
			return ret;
	}

	/* Insert NOPS for NOUVEAU_DMA_SKIPS */
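	/* These no-ops reserve a small region at the very start of the
	 * buffer; the wrap logic in nouveau_dma_wait() parks PUT here and
	 * discards GET values inside it, which (as the comment there puts
	 * it) spares the code some fun corner cases.
	 */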
	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
	if (ret)
		return ret;

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING(chan, 0);

	/* Initialise NV_MEMORY_TO_MEMORY_FORMAT */
	ret = RING_SPACE(chan, 4);
	if (ret)
		return ret;
	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NAME, 1);
	OUT_RING(chan, NvM2MF);
	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_NOTIFY, 1);
	OUT_RING(chan, NvNotify0);

	/* Initialise NV_SW */
	ret = RING_SPACE(chan, 2);
	if (ret)
		return ret;
	BEGIN_RING(chan, NvSubSw, 0, 1);
	OUT_RING(chan, NvSw);

	/* Sit back and pray the channel works.. */
	FIRE_RING(chan);

	return 0;
}
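
/*
 * Typical submission pattern, mirroring the init sequence above.  This
 * is an illustrative sketch only; "SOME_METHOD" and "value" are
 * placeholders, not real object methods:
 *
 *	ret = RING_SPACE(chan, 2);
 *	if (ret)
 *		return ret;
 *	BEGIN_RING(chan, NvSubM2MF, SOME_METHOD, 1);
 *	OUT_RING(chan, value);
 *	FIRE_RING(chan);
 *
 * RING_SPACE() falls through to nouveau_dma_wait() below when the ring
 * is short on room, and FIRE_RING() publishes the new PUT pointer to
 * the GPU (both live in nouveau_dma.h).
 */

/* Bulk variant of OUT_RING(): copies nr_dwords dwords from data into
 * the mapped push buffer at dma.cur.  The caller is expected to have
 * reserved the space with RING_SPACE() first - an assumption based on
 * the ring accounting, since no check is made here.
 */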
void
OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&chan->pushbuf_bo->kmap, &is_iomem);

	mem = &mem[chan->dma.cur];
	if (is_iomem)
		memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4);
	else
		memcpy(mem, data, nr_dwords * 4);
	chan->dma.cur += nr_dwords;
}
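
/* Translate the channel's USER_GET register (a byte address in the same
 * space as pushbuf_base) into a dword offset within the push buffer.
 * Returns false when GET lies outside the buffer entirely, e.g. while
 * PFIFO is executing a called buffer; see the comment in
 * nouveau_dma_wait() below.
 */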
static inline bool
READ_GET(struct nouveau_channel *chan, uint32_t *get)
{
	uint32_t val;

	val = nvchan_rd32(chan, chan->user_get);
	if (val < chan->pushbuf_base ||
	    val > chan->pushbuf_base + (chan->dma.max << 2)) {
		/* meaningless to dma_wait() except to know whether the
		 * GPU has stalled or not
		 */
		*get = val;
		return false;
	}

	*get = (val - chan->pushbuf_base) >> 2;
	return true;
}
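
/* Wait for at least "size" dwords of free space in the ring.  Two
 * cases, worked through with assumed numbers (dma.max = 1000 dwords):
 *
 *   GET <= dma.cur, e.g. GET = 300, cur = 900: the GPU is fetching
 *   behind us, so the buffer tail (1000 - 900 = 100 dwords) is free;
 *   if that isn't enough, emit a jump and wrap PUT to the SKIPS area.
 *
 *   GET > dma.cur, e.g. GET = 300, cur = 100: the GPU is fetching
 *   ahead, so 300 - 100 - 1 = 199 dwords may be written before
 *   reaching its fetch position.
 */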
int
nouveau_dma_wait(struct nouveau_channel *chan, int size)
{
	uint32_t get, prev_get = 0, cnt = 0;
	bool get_valid;

	while (chan->dma.free < size) {
		/* reset counter as long as GET is still advancing, this is
		 * to avoid misdetecting a GPU lockup if the GPU happens to
		 * just be processing an operation that takes a long time
		 */
		get_valid = READ_GET(chan, &get);
		if (get != prev_get) {
			prev_get = get;
			cnt = 0;
		}

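		/* every 256 polls (cnt & 0xff) back off for a microsecond;
		 * the 100000-poll ceiling turns a GET pointer that never
		 * moves into -EBUSY instead of an endless spin.
		 */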
		if ((++cnt & 0xff) == 0) {
			DRM_UDELAY(1);
			if (cnt > 100000)
				return -EBUSY;
		}

		/* loop until we have a usable GET pointer.  the value
		 * we read from the GPU may be outside the main ring if
		 * PFIFO is processing a buffer called from the main ring,
		 * discard these values until something sensible is seen.
		 *
		 * the other case we discard GET is while the GPU is fetching
		 * from the SKIPS area, so the code below doesn't have to deal
		 * with some fun corner cases.
		 */
		if (!get_valid || get < NOUVEAU_DMA_SKIPS)
			continue;

		if (get <= chan->dma.cur) {
			/* engine is fetching behind us, or is completely
			 * idle (GET == PUT) so we have free space up until
			 * the end of the push buffer
			 *
			 * we can only hit that path once per call due to
			 * looping back to the beginning of the push buffer,
			 * we'll hit the fetching-ahead-of-us path from that
			 * point on.
			 *
			 * the *one* exception to that rule is if we read
			 * GET==PUT, in which case the below conditional will
			 * always succeed and break us out of the wait loop.
			 */
			chan->dma.free = chan->dma.max - chan->dma.cur;
			if (chan->dma.free >= size)
				break;

			/* not enough space left at the end of the push buffer,
			 * instruct the GPU to jump back to the start right
			 * after processing the currently pending commands.
			 */
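			/* (0x20000000 is taken to be the PFIFO DMA "jump"
			 * command, with the target offset in the low bits -
			 * inferred from the comment above, not from hardware
			 * documentation.)
			 */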
			OUT_RING(chan, chan->pushbuf_base | 0x20000000);
			WRITE_PUT(NOUVEAU_DMA_SKIPS);

			/* we're now submitting commands at the start of
			 * the push buffer.
			 */
			chan->dma.cur  =
			chan->dma.put  = NOUVEAU_DMA_SKIPS;
		}

		/* engine fetching ahead of us, we have space up until the
		 * current GET pointer.  the "- 1" is to ensure there's
		 * space left to emit a jump back to the beginning of the
		 * push buffer if we require it.  we can never get GET == PUT
		 * here, so this is safe.
		 */
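		/* e.g. with assumed values GET == 800 and dma.cur == 500:
		 * free = 800 - 500 - 1 = 299 dwords.
		 */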
		chan->dma.free = get - chan->dma.cur - 1;
	}

	return 0;
}