nv04_fifo.c

/*
 * Copyright (C) 2012 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_fifo.h"
#include "nouveau_util.h"
#include "nouveau_ramht.h"
#include "nouveau_software.h"
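
/* Describes how each per-channel PFIFO register is packed into a channel's
 * RAMFC image: a field 'bits' wide lives at bit 'regs' of register 'regp',
 * and at bit 'ctxs' of the 32-bit RAMFC word at byte offset 'ctxp'.  The
 * context save/restore loops below walk this table in both directions.
 */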
static struct ramfc_desc {
        unsigned bits:6;
        unsigned ctxs:5;
        unsigned ctxp:8;
        unsigned regs:5;
        unsigned regp;
} nv04_ramfc[] = {
        { 32,  0, 0x00,  0, NV04_PFIFO_CACHE1_DMA_PUT },
        { 32,  0, 0x04,  0, NV04_PFIFO_CACHE1_DMA_GET },
        { 16,  0, 0x08,  0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
        { 16, 16, 0x08,  0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
        { 32,  0, 0x0c,  0, NV04_PFIFO_CACHE1_DMA_STATE },
        { 32,  0, 0x10,  0, NV04_PFIFO_CACHE1_DMA_FETCH },
        { 32,  0, 0x14,  0, NV04_PFIFO_CACHE1_ENGINE },
        { 32,  0, 0x18,  0, NV04_PFIFO_CACHE1_PULL1 },
        {}
};

struct nv04_fifo_priv {
        struct nouveau_fifo_priv base;
        struct ramfc_desc *ramfc_desc;
};

struct nv04_fifo_chan {
        struct nouveau_fifo_chan base;
        struct nouveau_gpuobj *ramfc;
};

bool
nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
{
        int pull = nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 1, enable);

        if (!enable) {
                /* In some cases the PFIFO puller may be left in an
                 * inconsistent state if you try to stop it when it's
                 * busy translating handles. Sometimes you get a
                 * PFIFO_CACHE_ERROR, sometimes it just fails silently
                 * sending incorrect instance offsets to PGRAPH after
                 * it's started up again. To avoid the latter we
                 * invalidate the most recently calculated instance.
                 */
                if (!nv_wait(dev, NV04_PFIFO_CACHE1_PULL0,
                             NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0))
                        NV_ERROR(dev, "Timeout idling the PFIFO puller.\n");

                if (nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0) &
                    NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
                        nv_wr32(dev, NV03_PFIFO_INTR_0,
                                NV_PFIFO_INTR_CACHE_ERROR);

                nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
        }

        return pull & 1;
}

static int
nv04_fifo_context_new(struct nouveau_channel *chan, int engine)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv04_fifo_priv *priv = nv_engine(dev, engine);
        struct nv04_fifo_chan *fctx;
        unsigned long flags;
        int ret;

        fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
        if (!fctx)
                return -ENOMEM;

        /* map channel control registers */
        chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
                             NV03_USER(chan->id), PAGE_SIZE);
        if (!chan->user) {
                ret = -ENOMEM;
                goto error;
        }

        /* initialise default fifo context */
        ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
                                      chan->id * 32, ~0, 32,
                                      NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
        if (ret)
                goto error;
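
        /* seed the RAMFC image (layout per nv04_ramfc[] above): both DMA
         * put/get pointers start at the base of the push buffer, and
         * DMA_INSTANCE points at the push buffer's instance object */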
        nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
        nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
        nv_wo32(fctx->ramfc, 0x08, chan->pushbuf->pinst >> 4);
        nv_wo32(fctx->ramfc, 0x0c, 0x00000000);
        nv_wo32(fctx->ramfc, 0x10, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
                                   NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
#ifdef __BIG_ENDIAN
                                   NV_PFIFO_CACHE1_BIG_ENDIAN |
#endif
                                   NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
        nv_wo32(fctx->ramfc, 0x14, 0x00000000);
        nv_wo32(fctx->ramfc, 0x18, 0x00000000);
        nv_wo32(fctx->ramfc, 0x1c, 0x00000000);

        /* enable dma mode on the channel */
        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
        nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

error:
        if (ret)
                priv->base.base.context_del(chan, engine);
        return ret;
}

void
nv04_fifo_context_del(struct nouveau_channel *chan, int engine)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv04_fifo_priv *priv = nv_engine(chan->dev, engine);
        struct nv04_fifo_chan *fctx = chan->engctx[engine];
        struct ramfc_desc *c = priv->ramfc_desc;
        unsigned long flags;
        int chid;

        /* prevent fifo context switches */
        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
        nv_wr32(dev, NV03_PFIFO_CACHES, 0);

        /* if this channel is active, replace it with a null context */
        chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & priv->base.channels;
        if (chid == chan->id) {
                nv_mask(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
                nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
                nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
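
                /* zero every per-channel register listed in the RAMFC
                 * descriptor table */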
                do {
                        u32 mask = ((1ULL << c->bits) - 1) << c->regs;
                        nv_mask(dev, c->regp, mask, 0x00000000);
                } while ((++c)->bits);

                nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
                nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
                nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
                nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
                nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
        }

        /* restore normal operation, after disabling dma mode */
        nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0);
        nv_wr32(dev, NV03_PFIFO_CACHES, 1);
        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

        /* clean up */
        nouveau_gpuobj_ref(NULL, &fctx->ramfc);
        nouveau_gpuobj_ref(NULL, &chan->ramfc); /*XXX: nv40 */
        if (chan->user) {
                iounmap(chan->user);
                chan->user = NULL;
        }
}

int
nv04_fifo_init(struct drm_device *dev, int engine)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv04_fifo_priv *priv = nv_engine(dev, engine);
        int i;

        nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
        nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);

        nv_wr32(dev, NV04_PFIFO_DELAY_0, 0x000000ff);
        nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
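
        /* point PFIFO at the RAMHT (object hash table), RAMRO (runout)
         * and RAMFC (channel context) areas in instance memory */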
        nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
                                       ((dev_priv->ramht->bits - 9) << 16) |
                                       (dev_priv->ramht->gpuobj->pinst >> 8));
        nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
        nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8);

        nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);

        nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
        nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);

        nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
        nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
        nv_wr32(dev, NV03_PFIFO_CACHES, 1);

        for (i = 0; i < priv->base.channels; i++) {
                if (dev_priv->channels.ptr[i])
                        nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
        }

        return 0;
}

int
nv04_fifo_fini(struct drm_device *dev, int engine, bool suspend)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv04_fifo_priv *priv = nv_engine(dev, engine);
        struct nouveau_channel *chan;
        int chid;

        /* prevent context switches and halt fifo operation */
        nv_wr32(dev, NV03_PFIFO_CACHES, 0);
        nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
        nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
        nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 0);

        /* store current fifo context in ramfc */
        chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & priv->base.channels;
        chan = dev_priv->channels.ptr[chid];
        if (suspend && chid != priv->base.channels && chan) {
                struct nv04_fifo_chan *fctx = chan->engctx[engine];
                struct nouveau_gpuobj *ctx = fctx->ramfc;
                struct ramfc_desc *c = priv->ramfc_desc;
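
                /* save each live register field into the channel's RAMFC
                 * image, as described by the descriptor table */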
                do {
                        u32 rm = ((1ULL << c->bits) - 1) << c->regs;
                        u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
                        u32 rv = (nv_rd32(dev, c->regp) & rm) >> c->regs;
                        u32 cv = (nv_ro32(ctx, c->ctxp) & ~cm);
                        nv_wo32(ctx, c->ctxp, cv | (rv << c->ctxs));
                } while ((++c)->bits);
        }

        nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0x00000000);
        return 0;
}
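
/* Attempt to handle a method intended for the software engine; returns
 * true if it was consumed here and needn't be reported as an error. */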
static bool
nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
{
        struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_channel *chan = NULL;
        struct nouveau_gpuobj *obj;
        unsigned long flags;
        const int subc = (addr >> 13) & 0x7;
        const int mthd = addr & 0x1ffc;
        bool handled = false;
        u32 engine;

        spin_lock_irqsave(&dev_priv->channels.lock, flags);
        if (likely(chid >= 0 && chid < pfifo->channels))
                chan = dev_priv->channels.ptr[chid];
        if (unlikely(!chan))
                goto out;

        switch (mthd) {
        case 0x0000: /* bind object to subchannel */
                obj = nouveau_ramht_find(chan, data);
                if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW))
                        break;

                engine = 0x0000000f << (subc * 4);

                nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000);
                handled = true;
                break;
        default:
                engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE);
                if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
                        break;

                if (!nouveau_gpuobj_mthd_call(chan, nouveau_software_class(dev),
                                              mthd, data))
                        handled = true;
                break;
        }

out:
        spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
        return handled;
}

static const char *nv_dma_state_err(u32 state)
{
        static const char * const desc[] = {
                "NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
                "INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
        };
        return desc[(state >> 29) & 0x7];
}

void
nv04_fifo_isr(struct drm_device *dev)
{
        struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t status, reassign;
        int cnt = 0;

        reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
        while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
                uint32_t chid, get;

                nv_wr32(dev, NV03_PFIFO_CACHES, 0);

                chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & pfifo->channels;
                get  = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);

                if (status & NV_PFIFO_INTR_CACHE_ERROR) {
                        uint32_t mthd, data;
                        int ptr;

                        /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
                         * wrapping on my G80 chips, but CACHE1 isn't big
                         * enough for this much data.. Tests show that it
                         * wraps around to the start at GET=0x800.. No clue
                         * as to why..
                         */
                        ptr = (get & 0x7ff) >> 2;

                        if (dev_priv->card_type < NV_40) {
                                mthd = nv_rd32(dev,
                                        NV04_PFIFO_CACHE1_METHOD(ptr));
                                data = nv_rd32(dev,
                                        NV04_PFIFO_CACHE1_DATA(ptr));
                        } else {
                                mthd = nv_rd32(dev,
                                        NV40_PFIFO_CACHE1_METHOD(ptr));
                                data = nv_rd32(dev,
                                        NV40_PFIFO_CACHE1_DATA(ptr));
                        }

                        if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) {
                                NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
                                             "Mthd 0x%04x Data 0x%08x\n",
                                        chid, (mthd >> 13) & 7, mthd & 0x1ffc,
                                        data);
                        }
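
                        /* recover: with the pusher and puller halted, ack
                         * the interrupt and step GET past the offending
                         * entry, then restart both */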
                        nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
                        nv_wr32(dev, NV03_PFIFO_INTR_0,
                                NV_PFIFO_INTR_CACHE_ERROR);

                        nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
                                nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
                        nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
                        nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
                                nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
                        nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);

                        nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
                                nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
                        nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);

                        status &= ~NV_PFIFO_INTR_CACHE_ERROR;
                }

                if (status & NV_PFIFO_INTR_DMA_PUSHER) {
                        u32 dma_get = nv_rd32(dev, 0x003244);
                        u32 dma_put = nv_rd32(dev, 0x003240);
                        u32 push = nv_rd32(dev, 0x003220);
                        u32 state = nv_rd32(dev, 0x003228);
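
                        /* report the fault, then skip the bad data by
                         * advancing GET (and, on NV50, the high-order
                         * and IB copies) to match PUT */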
                        if (dev_priv->card_type == NV_50) {
                                u32 ho_get = nv_rd32(dev, 0x003328);
                                u32 ho_put = nv_rd32(dev, 0x003320);
                                u32 ib_get = nv_rd32(dev, 0x003334);
                                u32 ib_put = nv_rd32(dev, 0x003330);

                                if (nouveau_ratelimit())
                                        NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
                                                "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
                                                "State 0x%08x (err: %s) Push 0x%08x\n",
                                                chid, ho_get, dma_get, ho_put,
                                                dma_put, ib_get, ib_put, state,
                                                nv_dma_state_err(state),
                                                push);

                                /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
                                nv_wr32(dev, 0x003364, 0x00000000);
                                if (dma_get != dma_put || ho_get != ho_put) {
                                        nv_wr32(dev, 0x003244, dma_put);
                                        nv_wr32(dev, 0x003328, ho_put);
                                } else
                                if (ib_get != ib_put) {
                                        nv_wr32(dev, 0x003334, ib_put);
                                }
                        } else {
                                NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
                                             "Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
                                        chid, dma_get, dma_put, state,
                                        nv_dma_state_err(state), push);

                                if (dma_get != dma_put)
                                        nv_wr32(dev, 0x003244, dma_put);
                        }

                        nv_wr32(dev, 0x003228, 0x00000000);
                        nv_wr32(dev, 0x003220, 0x00000001);
                        nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
                        status &= ~NV_PFIFO_INTR_DMA_PUSHER;
                }
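
                /* semaphore acquire failure: ack the interrupt and,
                 * presumably, flag the semaphore as failed before
                 * stepping past the offending cache entry */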
                if (status & NV_PFIFO_INTR_SEMAPHORE) {
                        uint32_t sem;

                        status &= ~NV_PFIFO_INTR_SEMAPHORE;
                        nv_wr32(dev, NV03_PFIFO_INTR_0,
                                NV_PFIFO_INTR_SEMAPHORE);

                        sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
                        nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

                        nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
                        nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
                }

                if (dev_priv->card_type == NV_50) {
                        if (status & 0x00000010) {
                                nv50_fb_vm_trap(dev, nouveau_ratelimit());
                                status &= ~0x00000010;
                                nv_wr32(dev, 0x002100, 0x00000010);
                        }
                }

                if (status) {
                        if (nouveau_ratelimit())
                                NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
                                        status, chid);
                        nv_wr32(dev, NV03_PFIFO_INTR_0, status);
                        status = 0;
                }

                nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
        }

        if (status) {
                NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
                nv_wr32(dev, 0x2140, 0);
                nv_wr32(dev, 0x140, 0);
        }

        nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
}

void
nv04_fifo_destroy(struct drm_device *dev, int engine)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv04_fifo_priv *priv = nv_engine(dev, engine);

        nouveau_irq_unregister(dev, 8);

        dev_priv->eng[engine] = NULL;
        kfree(priv);
}

int
nv04_fifo_create(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv04_fifo_priv *priv;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;

        priv->base.base.destroy = nv04_fifo_destroy;
        priv->base.base.init = nv04_fifo_init;
        priv->base.base.fini = nv04_fifo_fini;
        priv->base.base.context_new = nv04_fifo_context_new;
        priv->base.base.context_del = nv04_fifo_context_del;
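        /* ids 0-14 are usable; context_del() parks PFIFO on id 15 as a
         * null channel when tearing down the active context */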
        priv->base.channels = 15;
        priv->ramfc_desc = nv04_ramfc;
        dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;

        nouveau_irq_register(dev, 8, nv04_fifo_isr);
        return 0;
}