nv50_fifo.c

/*
 * Copyright (C) 2012 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "drmP.h"
#include "drm.h"

#include "nouveau_drv.h"
#include "nouveau_fifo.h"
#include "nouveau_ramht.h"
#include "nouveau_vm.h"

struct nv50_fifo_priv {
        struct nouveau_fifo_priv base;
        struct nouveau_gpuobj *playlist[2];
        int cur_playlist;
};

struct nv50_fifo_chan {
        struct nouveau_fifo_chan base;
};
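
/* The playlist is double-buffered: each update gathers the IDs of all
 * channels whose context slot at 0x002600 has bit 31 set into the
 * currently inactive buffer, then points PFIFO at that buffer and its
 * new length.
 */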
void
nv50_fifo_playlist_update(struct drm_device *dev)
{
        struct nv50_fifo_priv *priv = nv_engine(dev, NVOBJ_ENGINE_FIFO);
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *cur;
        int i, p;

        cur = priv->playlist[priv->cur_playlist];
        priv->cur_playlist = !priv->cur_playlist;

        for (i = 0, p = 0; i < priv->base.channels; i++) {
                if (nv_rd32(dev, 0x002600 + (i * 4)) & 0x80000000)
                        nv_wo32(cur, p++ * 4, i);
        }

        dev_priv->engine.instmem.flush(dev);

        nv_wr32(dev, 0x0032f4, cur->vinst >> 12);
        nv_wr32(dev, 0x0032ec, p);
        nv_wr32(dev, 0x002500, 0x00000101);
}
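
/* Channel creation maps the per-channel USER area and fills in the
 * channel's fixed context in instance memory (pushbuffer object,
 * indirect buffer base/size, RAMHT location) before enabling the
 * channel's context slot and rebuilding the playlist.
 */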
static int
nv50_fifo_context_new(struct nouveau_channel *chan, int engine)
{
        struct nv50_fifo_priv *priv = nv_engine(chan->dev, engine);
        struct nv50_fifo_chan *fctx;
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        u64 ib_offset = chan->pushbuf_base + chan->dma.ib_base * 4;
        u64 instance = chan->ramin->vinst >> 12;
        unsigned long flags;
        int ret = 0, i;

        fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
        if (!fctx)
                return -ENOMEM;
        atomic_inc(&chan->vm->engref[engine]);

        chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
                             NV50_USER(chan->id), PAGE_SIZE);
        if (!chan->user) {
                ret = -ENOMEM;
                goto error;
        }

        for (i = 0; i < 0x100; i += 4)
                nv_wo32(chan->ramin, i, 0x00000000);

        nv_wo32(chan->ramin, 0x3c, 0x403f6078);
        nv_wo32(chan->ramin, 0x40, 0x00000000);
        nv_wo32(chan->ramin, 0x44, 0x01003fff);
        nv_wo32(chan->ramin, 0x48, chan->pushbuf->cinst >> 4);
        nv_wo32(chan->ramin, 0x50, lower_32_bits(ib_offset));
        nv_wo32(chan->ramin, 0x54, upper_32_bits(ib_offset) |
                                   drm_order(chan->dma.ib_max + 1) << 16);
        nv_wo32(chan->ramin, 0x60, 0x7fffffff);
        nv_wo32(chan->ramin, 0x78, 0x00000000);
        nv_wo32(chan->ramin, 0x7c, 0x30000001);
        nv_wo32(chan->ramin, 0x80, ((chan->ramht->bits - 9) << 27) |
                                   (4 << 24) /* SEARCH_FULL */ |
                                   (chan->ramht->gpuobj->cinst >> 4));

        dev_priv->engine.instmem.flush(dev);

        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
        nv_wr32(dev, 0x002600 + (chan->id * 4), 0x80000000 | instance);
        nv50_fifo_playlist_update(dev);
        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

error:
        if (ret)
                priv->base.base.context_del(chan, engine);
        return ret;
}

static bool
nv50_fifo_kickoff(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        bool done = true;
        u32 me;

        /* HW bug workaround:
         *
         * PFIFO will hang forever if the connected engines don't report
         * that they've processed the context switch request.
         *
         * In order for the kickoff to work, we need to ensure all the
         * connected engines are in a state where they can answer.
         *
         * Newer chipsets don't seem to suffer from this issue, and there's
         * also an "ignore these engines" bitmask reg we can use if we hit
         * the issue there..
         */

        /* PME: make sure engine is enabled */
        me = nv_mask(dev, 0x00b860, 0x00000001, 0x00000001);

        /* do the kickoff... */
        nv_wr32(dev, 0x0032fc, chan->ramin->vinst >> 12);
        if (!nv_wait_ne(dev, 0x0032fc, 0xffffffff, 0xffffffff)) {
                NV_INFO(dev, "PFIFO: channel %d unload timeout\n", chan->id);
                done = false;
        }

        /* restore any engine states we changed, and exit */
        nv_wr32(dev, 0x00b860, me);
        return done;
}

static void
nv50_fifo_context_del(struct nouveau_channel *chan, int engine)
{
        struct nv50_fifo_chan *fctx = chan->engctx[engine];
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        unsigned long flags;

        /* remove channel from playlist, will context switch if active */
        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
        nv_mask(dev, 0x002600 + (chan->id * 4), 0x80000000, 0x00000000);
        nv50_fifo_playlist_update(dev);

        /* tell any engines on this channel to unload their contexts */
        nv50_fifo_kickoff(chan);

        nv_wr32(dev, 0x002600 + (chan->id * 4), 0x00000000);
        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

        /* clean up */
        if (chan->user) {
                iounmap(chan->user);
                chan->user = NULL;
        }

        atomic_dec(&chan->vm->engref[engine]);
        chan->engctx[engine] = NULL;
        kfree(fctx);
}
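
/* Engine init: reset PFIFO by toggling its bit in the master enable
 * register, ack and re-enable its interrupts, restore the instance
 * pointer of every channel that still has a FIFO context, then rebuild
 * the playlist before turning the engine back on.
 */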
static int
nv50_fifo_init(struct drm_device *dev, int engine)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        u32 instance;
        int i;

        nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
        nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
        nv_wr32(dev, 0x00250c, 0x6f3cfc34);
        nv_wr32(dev, 0x002044, 0x01003fff);

        nv_wr32(dev, 0x002100, 0xffffffff);
        nv_wr32(dev, 0x002140, 0xffffffff);

        for (i = 0; i < 128; i++) {
                struct nouveau_channel *chan = dev_priv->channels.ptr[i];
                if (chan && chan->engctx[engine])
                        instance = 0x80000000 | chan->ramin->vinst >> 12;
                else
                        instance = 0x00000000;
                nv_wr32(dev, 0x002600 + (i * 4), instance);
        }

        nv50_fifo_playlist_update(dev);

        nv_wr32(dev, 0x003200, 1);
        nv_wr32(dev, 0x003250, 1);
        nv_wr32(dev, 0x002500, 1);
        return 0;
}

static int
nv50_fifo_fini(struct drm_device *dev, int engine, bool suspend)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv50_fifo_priv *priv = nv_engine(dev, engine);
        int i;

        /* set playlist length to zero, fifo will unload context */
        nv_wr32(dev, 0x0032ec, 0);

        /* tell all connected engines to unload their contexts */
        for (i = 0; i < priv->base.channels; i++) {
                struct nouveau_channel *chan = dev_priv->channels.ptr[i];
                if (chan && !nv50_fifo_kickoff(chan))
                        return -EBUSY;
        }

        nv_wr32(dev, 0x002140, 0);
        return 0;
}

void
nv50_fifo_tlb_flush(struct drm_device *dev, int engine)
{
        nv50_vm_flush_engine(dev, 5);
}

void
nv50_fifo_destroy(struct drm_device *dev, int engine)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv50_fifo_priv *priv = nv_engine(dev, engine);

        nouveau_irq_unregister(dev, 8);

        nouveau_gpuobj_ref(NULL, &priv->playlist[0]);
        nouveau_gpuobj_ref(NULL, &priv->playlist[1]);

        dev_priv->eng[engine] = NULL;
        kfree(priv);
}
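
/* Constructor: hook up the engine methods, allocate the two playlist
 * buffers (one 32-bit entry per channel, 0x1000-aligned) and register
 * the PFIFO interrupt handler shared with nv04.
 */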
int
nv50_fifo_create(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv50_fifo_priv *priv;
        int ret;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->base.base.destroy = nv50_fifo_destroy;
        priv->base.base.init = nv50_fifo_init;
        priv->base.base.fini = nv50_fifo_fini;
        priv->base.base.context_new = nv50_fifo_context_new;
        priv->base.base.context_del = nv50_fifo_context_del;
        priv->base.base.tlb_flush = nv50_fifo_tlb_flush;
        priv->base.channels = 127;
        dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;

        ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
                                 NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[0]);
        if (ret)
                goto error;

        ret = nouveau_gpuobj_new(dev, NULL, priv->base.channels * 4, 0x1000,
                                 NVOBJ_FLAG_ZERO_ALLOC, &priv->playlist[1]);
        if (ret)
                goto error;

        nouveau_irq_register(dev, 8, nv04_fifo_isr);
error:
        if (ret)
                priv->base.base.destroy(dev, NVOBJ_ENGINE_FIFO);
        return ret;
}