/* nv04_fifo.c */
/*
 * Copyright (C) 2012 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_fifo.h"
#include "nouveau_util.h"
#include "nouveau_ramht.h"
#include "nouveau_software.h"
  32. static struct ramfc_desc {
  33. unsigned bits:6;
  34. unsigned ctxs:5;
  35. unsigned ctxp:8;
  36. unsigned regs:5;
  37. unsigned regp;
  38. } nv04_ramfc[] = {
  39. { 32, 0, 0x00, 0, NV04_PFIFO_CACHE1_DMA_PUT },
  40. { 32, 0, 0x04, 0, NV04_PFIFO_CACHE1_DMA_GET },
  41. { 16, 0, 0x08, 0, NV04_PFIFO_CACHE1_DMA_INSTANCE },
  42. { 16, 16, 0x08, 0, NV04_PFIFO_CACHE1_DMA_DCOUNT },
  43. { 32, 0, 0x0c, 0, NV04_PFIFO_CACHE1_DMA_STATE },
  44. { 32, 0, 0x10, 0, NV04_PFIFO_CACHE1_DMA_FETCH },
  45. { 32, 0, 0x14, 0, NV04_PFIFO_CACHE1_ENGINE },
  46. { 32, 0, 0x18, 0, NV04_PFIFO_CACHE1_PULL1 },
  47. {}
  48. };
  49. struct nv04_fifo_priv {
  50. struct nouveau_fifo_priv base;
  51. struct ramfc_desc *ramfc_desc;
  52. };
  53. struct nv04_fifo_chan {
  54. struct nouveau_fifo_chan base;
  55. struct nouveau_gpuobj *ramfc;
  56. };
  57. bool
  58. nv04_fifo_cache_pull(struct drm_device *dev, bool enable)
  59. {
  60. int pull = nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 1, enable);
  61. if (!enable) {
  62. /* In some cases the PFIFO puller may be left in an
  63. * inconsistent state if you try to stop it when it's
  64. * busy translating handles. Sometimes you get a
  65. * PFIFO_CACHE_ERROR, sometimes it just fails silently
  66. * sending incorrect instance offsets to PGRAPH after
  67. * it's started up again. To avoid the latter we
  68. * invalidate the most recently calculated instance.
  69. */
  70. if (!nv_wait(dev, NV04_PFIFO_CACHE1_PULL0,
  71. NV04_PFIFO_CACHE1_PULL0_HASH_BUSY, 0))
  72. NV_ERROR(dev, "Timeout idling the PFIFO puller.\n");
  73. if (nv_rd32(dev, NV04_PFIFO_CACHE1_PULL0) &
  74. NV04_PFIFO_CACHE1_PULL0_HASH_FAILED)
  75. nv_wr32(dev, NV03_PFIFO_INTR_0,
  76. NV_PFIFO_INTR_CACHE_ERROR);
  77. nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
  78. }
  79. return pull & 1;
  80. }
  81. static int
  82. nv04_fifo_context_new(struct nouveau_channel *chan, int engine)
  83. {
  84. struct drm_device *dev = chan->dev;
  85. struct drm_nouveau_private *dev_priv = dev->dev_private;
  86. struct nv04_fifo_priv *priv = nv_engine(dev, engine);
  87. struct nv04_fifo_chan *fctx;
  88. unsigned long flags;
  89. int ret;
  90. fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
  91. if (!fctx)
  92. return -ENOMEM;
  93. /* map channel control registers */
  94. chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
  95. NV03_USER(chan->id), PAGE_SIZE);
  96. if (!chan->user) {
  97. ret = -ENOMEM;
  98. goto error;
  99. }
  100. /* initialise default fifo context */
  101. ret = nouveau_gpuobj_new_fake(dev, dev_priv->ramfc->pinst +
  102. chan->id * 32, ~0, 32,
  103. NVOBJ_FLAG_ZERO_FREE, &fctx->ramfc);
  104. if (ret)
  105. goto error;
  106. nv_wo32(fctx->ramfc, 0x00, chan->pushbuf_base);
  107. nv_wo32(fctx->ramfc, 0x04, chan->pushbuf_base);
  108. nv_wo32(fctx->ramfc, 0x08, chan->pushbuf->pinst >> 4);
  109. nv_wo32(fctx->ramfc, 0x0c, 0x00000000);
  110. nv_wo32(fctx->ramfc, 0x10, NV_PFIFO_CACHE1_DMA_FETCH_TRIG_128_BYTES |
  111. NV_PFIFO_CACHE1_DMA_FETCH_SIZE_128_BYTES |
  112. #ifdef __BIG_ENDIAN
  113. NV_PFIFO_CACHE1_BIG_ENDIAN |
  114. #endif
  115. NV_PFIFO_CACHE1_DMA_FETCH_MAX_REQS_8);
  116. nv_wo32(fctx->ramfc, 0x14, 0x00000000);
  117. nv_wo32(fctx->ramfc, 0x18, 0x00000000);
  118. nv_wo32(fctx->ramfc, 0x1c, 0x00000000);
  119. /* enable dma mode on the channel */
  120. spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
  121. nv_mask(dev, NV04_PFIFO_MODE, (1 << chan->id), (1 << chan->id));
  122. spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
  123. error:
  124. if (ret)
  125. priv->base.base.context_del(chan, engine);
  126. return ret;
  127. }
  128. void
  129. nv04_fifo_context_del(struct nouveau_channel *chan, int engine)
  130. {
  131. struct drm_device *dev = chan->dev;
  132. struct drm_nouveau_private *dev_priv = dev->dev_private;
  133. struct nv04_fifo_priv *priv = nv_engine(chan->dev, engine);
  134. struct nv04_fifo_chan *fctx = chan->engctx[engine];
  135. struct ramfc_desc *c = priv->ramfc_desc;
  136. unsigned long flags;
  137. int chid;
  138. /* prevent fifo context switches */
  139. spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
  140. nv_wr32(dev, NV03_PFIFO_CACHES, 0);
  141. /* if this channel is active, replace it with a null context */
  142. chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & priv->base.channels;
  143. if (chid == chan->id) {
  144. nv_mask(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
  145. nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
  146. nv_mask(dev, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);
  147. do {
  148. u32 mask = ((1ULL << c->bits) - 1) << c->regs;
  149. nv_mask(dev, c->regp, mask, 0x00000000);
  150. } while ((++c)->bits);
  151. nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);
  152. nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, 0);
  153. nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
  154. nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
  155. nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
  156. }
  157. /* restore normal operation, after disabling dma mode */
  158. nv_mask(dev, NV04_PFIFO_MODE, 1 << chan->id, 0);
  159. nv_wr32(dev, NV03_PFIFO_CACHES, 1);
  160. spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
  161. /* clean up */
  162. nouveau_gpuobj_ref(NULL, &fctx->ramfc);
  163. nouveau_gpuobj_ref(NULL, &chan->ramfc); /*XXX: nv40 */
  164. if (chan->user) {
  165. iounmap(chan->user);
  166. chan->user = NULL;
  167. }
  168. }
  169. int
  170. nv04_fifo_init(struct drm_device *dev, int engine)
  171. {
  172. struct drm_nouveau_private *dev_priv = dev->dev_private;
  173. struct nv04_fifo_priv *priv = nv_engine(dev, engine);
  174. int i;
  175. nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, 0);
  176. nv_mask(dev, NV03_PMC_ENABLE, NV_PMC_ENABLE_PFIFO, NV_PMC_ENABLE_PFIFO);
  177. nv_wr32(dev, NV04_PFIFO_DELAY_0, 0x000000ff);
  178. nv_wr32(dev, NV04_PFIFO_DMA_TIMESLICE, 0x0101ffff);
  179. nv_wr32(dev, NV03_PFIFO_RAMHT, (0x03 << 24) /* search 128 */ |
  180. ((dev_priv->ramht->bits - 9) << 16) |
  181. (dev_priv->ramht->gpuobj->pinst >> 8));
  182. nv_wr32(dev, NV03_PFIFO_RAMRO, dev_priv->ramro->pinst >> 8);
  183. nv_wr32(dev, NV03_PFIFO_RAMFC, dev_priv->ramfc->pinst >> 8);
  184. nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, priv->base.channels);
  185. nv_wr32(dev, NV03_PFIFO_INTR_0, 0xffffffff);
  186. nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xffffffff);
  187. nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 1);
  188. nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
  189. nv_wr32(dev, NV03_PFIFO_CACHES, 1);
  190. for (i = 0; i < priv->base.channels; i++) {
  191. if (dev_priv->channels.ptr[i])
  192. nv_mask(dev, NV04_PFIFO_MODE, (1 << i), (1 << i));
  193. }
  194. return 0;
  195. }
  196. int
  197. nv04_fifo_fini(struct drm_device *dev, int engine, bool suspend)
  198. {
  199. struct drm_nouveau_private *dev_priv = dev->dev_private;
  200. struct nv04_fifo_priv *priv = nv_engine(dev, engine);
  201. struct nouveau_channel *chan;
  202. int chid;
  203. /* prevent context switches and halt fifo operation */
  204. nv_wr32(dev, NV03_PFIFO_CACHES, 0);
  205. nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
  206. nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0, 0);
  207. nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 0);
  208. /* store current fifo context in ramfc */
  209. chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & priv->base.channels;
  210. chan = dev_priv->channels.ptr[chid];
  211. if (suspend && chid != priv->base.channels && chan) {
  212. struct nv04_fifo_chan *fctx = chan->engctx[engine];
  213. struct nouveau_gpuobj *ctx = fctx->ramfc;
  214. struct ramfc_desc *c = priv->ramfc_desc;
  215. do {
  216. u32 rm = ((1ULL << c->bits) - 1) << c->regs;
  217. u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
  218. u32 rv = (nv_rd32(dev, c->regp) & rm) >> c->regs;
  219. u32 cv = (nv_ro32(ctx, c->ctxp) & ~cm);
  220. nv_wo32(ctx, c->ctxp, cv | (rv << c->ctxs));
  221. } while ((++c)->bits);
  222. }
  223. nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0x00000000);
  224. return 0;
  225. }
  226. static bool
  227. nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
  228. {
  229. struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
  230. struct drm_nouveau_private *dev_priv = dev->dev_private;
  231. struct nouveau_channel *chan = NULL;
  232. struct nouveau_gpuobj *obj;
  233. unsigned long flags;
  234. const int subc = (addr >> 13) & 0x7;
  235. const int mthd = addr & 0x1ffc;
  236. bool handled = false;
  237. u32 engine;
  238. spin_lock_irqsave(&dev_priv->channels.lock, flags);
  239. if (likely(chid >= 0 && chid < pfifo->channels))
  240. chan = dev_priv->channels.ptr[chid];
  241. if (unlikely(!chan))
  242. goto out;
  243. switch (mthd) {
  244. case 0x0000: /* bind object to subchannel */
  245. obj = nouveau_ramht_find(chan, data);
  246. if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW))
  247. break;
  248. engine = 0x0000000f << (subc * 4);
  249. nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000);
  250. handled = true;
  251. break;
  252. default:
  253. engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE);
  254. if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
  255. break;
  256. if (!nouveau_gpuobj_mthd_call(chan, nouveau_software_class(dev),
  257. mthd, data))
  258. handled = true;
  259. break;
  260. }
  261. out:
  262. spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
  263. return handled;
  264. }
  265. static const char *nv_dma_state_err(u32 state)
  266. {
  267. static const char * const desc[] = {
  268. "NONE", "CALL_SUBR_ACTIVE", "INVALID_MTHD", "RET_SUBR_INACTIVE",
  269. "INVALID_CMD", "IB_EMPTY"/* NV50+ */, "MEM_FAULT", "UNK"
  270. };
  271. return desc[(state >> 29) & 0x7];
  272. }
  273. void
  274. nv04_fifo_isr(struct drm_device *dev)
  275. {
  276. struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
  277. struct drm_nouveau_private *dev_priv = dev->dev_private;
  278. uint32_t status, reassign;
  279. int cnt = 0;
  280. reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
  281. while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
  282. uint32_t chid, get;
  283. nv_wr32(dev, NV03_PFIFO_CACHES, 0);
  284. chid = nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) & pfifo->channels;
  285. get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
  286. if (status & NV_PFIFO_INTR_CACHE_ERROR) {
  287. uint32_t mthd, data;
  288. int ptr;
  289. /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
  290. * wrapping on my G80 chips, but CACHE1 isn't big
  291. * enough for this much data.. Tests show that it
  292. * wraps around to the start at GET=0x800.. No clue
  293. * as to why..
  294. */
  295. ptr = (get & 0x7ff) >> 2;
  296. if (dev_priv->card_type < NV_40) {
  297. mthd = nv_rd32(dev,
  298. NV04_PFIFO_CACHE1_METHOD(ptr));
  299. data = nv_rd32(dev,
  300. NV04_PFIFO_CACHE1_DATA(ptr));
  301. } else {
  302. mthd = nv_rd32(dev,
  303. NV40_PFIFO_CACHE1_METHOD(ptr));
  304. data = nv_rd32(dev,
  305. NV40_PFIFO_CACHE1_DATA(ptr));
  306. }
  307. if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) {
  308. NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
  309. "Mthd 0x%04x Data 0x%08x\n",
  310. chid, (mthd >> 13) & 7, mthd & 0x1ffc,
  311. data);
  312. }
  313. nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
  314. nv_wr32(dev, NV03_PFIFO_INTR_0,
  315. NV_PFIFO_INTR_CACHE_ERROR);
  316. nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
  317. nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
  318. nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
  319. nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
  320. nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
  321. nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
  322. nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
  323. nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
  324. nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
  325. status &= ~NV_PFIFO_INTR_CACHE_ERROR;
  326. }
  327. if (status & NV_PFIFO_INTR_DMA_PUSHER) {
  328. u32 dma_get = nv_rd32(dev, 0x003244);
  329. u32 dma_put = nv_rd32(dev, 0x003240);
  330. u32 push = nv_rd32(dev, 0x003220);
  331. u32 state = nv_rd32(dev, 0x003228);
  332. if (dev_priv->card_type == NV_50) {
  333. u32 ho_get = nv_rd32(dev, 0x003328);
  334. u32 ho_put = nv_rd32(dev, 0x003320);
  335. u32 ib_get = nv_rd32(dev, 0x003334);
  336. u32 ib_put = nv_rd32(dev, 0x003330);
  337. if (nouveau_ratelimit())
  338. NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
  339. "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
  340. "State 0x%08x (err: %s) Push 0x%08x\n",
  341. chid, ho_get, dma_get, ho_put,
  342. dma_put, ib_get, ib_put, state,
  343. nv_dma_state_err(state),
  344. push);
  345. /* METHOD_COUNT, in DMA_STATE on earlier chipsets */
  346. nv_wr32(dev, 0x003364, 0x00000000);
  347. if (dma_get != dma_put || ho_get != ho_put) {
  348. nv_wr32(dev, 0x003244, dma_put);
  349. nv_wr32(dev, 0x003328, ho_put);
  350. } else
  351. if (ib_get != ib_put) {
  352. nv_wr32(dev, 0x003334, ib_put);
  353. }
  354. } else {
  355. NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
  356. "Put 0x%08x State 0x%08x (err: %s) Push 0x%08x\n",
  357. chid, dma_get, dma_put, state,
  358. nv_dma_state_err(state), push);
  359. if (dma_get != dma_put)
  360. nv_wr32(dev, 0x003244, dma_put);
  361. }
  362. nv_wr32(dev, 0x003228, 0x00000000);
  363. nv_wr32(dev, 0x003220, 0x00000001);
  364. nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
  365. status &= ~NV_PFIFO_INTR_DMA_PUSHER;
  366. }
  367. if (status & NV_PFIFO_INTR_SEMAPHORE) {
  368. uint32_t sem;
  369. status &= ~NV_PFIFO_INTR_SEMAPHORE;
  370. nv_wr32(dev, NV03_PFIFO_INTR_0,
  371. NV_PFIFO_INTR_SEMAPHORE);
  372. sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
  373. nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
  374. nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
  375. nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
  376. }
  377. if (dev_priv->card_type == NV_50) {
  378. if (status & 0x00000010) {
  379. nv50_fb_vm_trap(dev, nouveau_ratelimit());
  380. status &= ~0x00000010;
  381. nv_wr32(dev, 0x002100, 0x00000010);
  382. }
  383. }
  384. if (status) {
  385. if (nouveau_ratelimit())
  386. NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
  387. status, chid);
  388. nv_wr32(dev, NV03_PFIFO_INTR_0, status);
  389. status = 0;
  390. }
  391. nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
  392. }
  393. if (status) {
  394. NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
  395. nv_wr32(dev, 0x2140, 0);
  396. nv_wr32(dev, 0x140, 0);
  397. }
  398. nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
  399. }
  400. void
  401. nv04_fifo_destroy(struct drm_device *dev, int engine)
  402. {
  403. struct drm_nouveau_private *dev_priv = dev->dev_private;
  404. struct nv04_fifo_priv *priv = nv_engine(dev, engine);
  405. nouveau_irq_unregister(dev, 8);
  406. dev_priv->eng[engine] = NULL;
  407. kfree(priv);
  408. }
  409. int
  410. nv04_fifo_create(struct drm_device *dev)
  411. {
  412. struct drm_nouveau_private *dev_priv = dev->dev_private;
  413. struct nv04_fifo_priv *priv;
  414. priv = kzalloc(sizeof(*priv), GFP_KERNEL);
  415. if (!priv)
  416. return -ENOMEM;
  417. priv->base.base.destroy = nv04_fifo_destroy;
  418. priv->base.base.init = nv04_fifo_init;
  419. priv->base.base.fini = nv04_fifo_fini;
  420. priv->base.base.context_new = nv04_fifo_context_new;
  421. priv->base.base.context_del = nv04_fifo_context_del;
  422. priv->base.channels = 15;
  423. priv->ramfc_desc = nv04_ramfc;
  424. dev_priv->eng[NVOBJ_ENGINE_FIFO] = &priv->base.base;
  425. nouveau_irq_register(dev, 8, nv04_fifo_isr);
  426. return 0;
  427. }