  1. /*
  2. * Copyright 2010 Red Hat Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. * Authors: Ben Skeggs
  23. */
  24. #include "drmP.h"
  25. #include "nouveau_drv.h"
  26. #include "nouveau_mm.h"
/* Number of per-engine runlists this PFIFO implementation can track. */
#define NVE0_FIFO_ENGINE_NUM 32

static void nve0_fifo_isr(struct drm_device *);
struct nve0_fifo_engine {
	/* Double-buffered playlist (runlist) objects; the inactive buffer is
	 * rebuilt and submitted to hardware on each playlist update. */
	struct nouveau_gpuobj *playlist[2];
	int cur_playlist;	/* index of the buffer to build into next */
};
struct nve0_fifo_priv {
	/* Per-engine playlist state, indexed by engine/runlist id. */
	struct nve0_fifo_engine engine[NVE0_FIFO_ENGINE_NUM];
	struct {
		struct nouveau_gpuobj *mem;	/* backing for per-channel user control pages (512B each) */
		struct nouveau_vma bar;		/* BAR1 mapping of the above for userspace polling */
	} user;
	int spoon_nr;	/* number of PSUBFIFO units detected at init */
};
struct nve0_fifo_chan {
	struct nouveau_gpuobj *ramfc;	/* fake gpuobj overlaying the channel's RAMFC area */
	u32 engine;			/* engine/runlist index this channel runs on */
};
/* Rebuild and submit the hardware playlist (runlist) for @engine.  The
 * playlist enumerates every enabled channel bound to the engine; hardware
 * schedules only channels present in it. */
static void
nve0_fifo_playlist_update(struct drm_device *dev, u32 engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nve0_fifo_priv *priv = pfifo->priv;
	struct nve0_fifo_engine *peng = &priv->engine[engine];
	struct nouveau_gpuobj *cur;
	u32 match = (engine << 16) | 0x00000001;	/* engine id + enable bit */
	int ret, i, p;

	/* Lazily allocate this engine's playlist buffer on first use. */
	cur = peng->playlist[peng->cur_playlist];
	if (unlikely(cur == NULL)) {
		ret = nouveau_gpuobj_new(dev, NULL, 0x8000, 0x1000, 0, &cur);
		if (ret) {
			NV_ERROR(dev, "PFIFO: playlist alloc failed\n");
			return;
		}
		peng->playlist[peng->cur_playlist] = cur;
	}
	/* Flip buffers so the next update builds into the other one. */
	peng->cur_playlist = !peng->cur_playlist;

	/* Collect every channel whose control word carries this engine's id
	 * and the enable bit; each playlist entry is 8 bytes (chid, 0). */
	for (i = 0, p = 0; i < pfifo->channels; i++) {
		u32 ctrl = nv_rd32(dev, 0x800004 + (i * 8)) & 0x001f0001;
		if (ctrl != match)
			continue;
		nv_wo32(cur, p + 0, i);
		nv_wo32(cur, p + 4, 0x00000000);
		p += 8;
	}
	pinstmem->flush(dev);

	/* Submit: playlist base address, then engine id + entry count, then
	 * wait for hardware to finish processing the update. */
	nv_wr32(dev, 0x002270, cur->vinst >> 12);
	nv_wr32(dev, 0x002274, (engine << 20) | (p >> 3));
	if (!nv_wait(dev, 0x002284 + (engine * 4), 0x00100000, 0x00000000))
		NV_ERROR(dev, "PFIFO: playlist %d update timeout\n", engine);
}
  80. int
  81. nve0_fifo_channel_id(struct drm_device *dev)
  82. {
  83. return 4095;
  84. }
/* Allocate per-channel fifo state for @chan: map its user control page,
 * initialise RAMFC, then enable the channel and add it to the playlist.
 * Returns 0 on success or a negative errno; on failure all partially
 * created state is torn down via pfifo->destroy_context(). */
int
nve0_fifo_create_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nve0_fifo_priv *priv = pfifo->priv;
	struct nve0_fifo_chan *fifoch;
	u64 usermem = priv->user.mem->vinst + chan->id * 512;	/* this channel's control page */
	u64 ib_virt = chan->pushbuf_base + chan->dma.ib_base * 4;
	int ret;

	chan->fifo_priv = kzalloc(sizeof(*fifoch), GFP_KERNEL);
	if (!chan->fifo_priv)
		return -ENOMEM;
	fifoch = chan->fifo_priv;
	fifoch->engine = 0; /* PGRAPH */

	/* allocate vram for control regs, map into polling area */
	chan->user = ioremap_wc(pci_resource_start(dev->pdev, 1) +
				priv->user.bar.offset + (chan->id * 512), 512);
	if (!chan->user) {
		ret = -ENOMEM;
		goto error;
	}

	/* ramfc: overlay a fake gpuobj on the channel's instance memory so
	 * the fifo context below can be written with nv_wo32(). */
	ret = nouveau_gpuobj_new_fake(dev, chan->ramin->pinst,
				      chan->ramin->vinst, 0x100,
				      NVOBJ_FLAG_ZERO_ALLOC, &fifoch->ramfc);
	if (ret)
		goto error;

	/* Initial RAMFC contents: user control page address, indirect (IB)
	 * buffer address and log2 size, plus fixed defaults.  NOTE(review):
	 * offsets/values mirror observed hardware state — confirm against
	 * chipset documentation before changing any of them. */
	nv_wo32(fifoch->ramfc, 0x08, lower_32_bits(usermem));
	nv_wo32(fifoch->ramfc, 0x0c, upper_32_bits(usermem));
	nv_wo32(fifoch->ramfc, 0x10, 0x0000face);
	nv_wo32(fifoch->ramfc, 0x30, 0xfffff902);
	nv_wo32(fifoch->ramfc, 0x48, lower_32_bits(ib_virt));
	nv_wo32(fifoch->ramfc, 0x4c, drm_order(chan->dma.ib_max + 1) << 16 |
				     upper_32_bits(ib_virt));
	nv_wo32(fifoch->ramfc, 0x84, 0x20400000);
	nv_wo32(fifoch->ramfc, 0x94, 0x30000001);
	nv_wo32(fifoch->ramfc, 0x9c, 0x00000100);
	nv_wo32(fifoch->ramfc, 0xac, 0x0000001f);
	nv_wo32(fifoch->ramfc, 0xe4, 0x00000000);
	nv_wo32(fifoch->ramfc, 0xe8, chan->id);
	nv_wo32(fifoch->ramfc, 0xf8, 0x10003080); /* 0x002310 */
	nv_wo32(fifoch->ramfc, 0xfc, 0x10000010); /* 0x002350 */
	pinstmem->flush(dev);

	/* Point the channel table entry at the instance block, enable the
	 * channel, and insert it into its engine's playlist. */
	nv_wr32(dev, 0x800000 + (chan->id * 8), 0x80000000 |
						(chan->ramin->vinst >> 12));
	nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
	nve0_fifo_playlist_update(dev, fifoch->engine);
	nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000400, 0x00000400);
	return 0;

error:
	pfifo->destroy_context(chan);
	return ret;
}
/* Tear down @chan's fifo state: kick the channel off the hardware, drop it
 * from the playlist, unmap its control page and free per-channel state.
 * Safe to call on a partially-constructed channel (create_context error
 * path) — bails out early if no fifo state was allocated. */
void
nve0_fifo_destroy_context(struct nouveau_channel *chan)
{
	struct nve0_fifo_chan *fifoch = chan->fifo_priv;
	struct drm_device *dev = chan->dev;

	if (!fifoch)
		return;

	/* Kick: set the kick bit, then wait until PFIFO reflects the channel
	 * id in 0x2634 to confirm the channel is off the hardware. */
	nv_mask(dev, 0x800004 + (chan->id * 8), 0x00000800, 0x00000800);
	nv_wr32(dev, 0x002634, chan->id);
	if (!nv_wait(dev, 0x0002634, 0xffffffff, chan->id))
		NV_WARN(dev, "0x2634 != chid: 0x%08x\n", nv_rd32(dev, 0x2634));

	/* Rebuild the playlist without this channel, then clear its entry. */
	nve0_fifo_playlist_update(dev, fifoch->engine);
	nv_wr32(dev, 0x800000 + (chan->id * 8), 0x00000000);

	if (chan->user) {
		iounmap(chan->user);
		chan->user = NULL;
	}

	nouveau_gpuobj_ref(NULL, &fifoch->ramfc);
	chan->fifo_priv = NULL;
	kfree(fifoch);
}
  162. int
  163. nve0_fifo_load_context(struct nouveau_channel *chan)
  164. {
  165. return 0;
  166. }
/* Idle PFIFO by kicking every enabled channel off the hardware (used on
 * suspend/takedown).  Returns 0 on success, -EBUSY if a channel fails to
 * acknowledge the kick. */
int
nve0_fifo_unload_context(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	int i;

	for (i = 0; i < pfifo->channels; i++) {
		/* Skip channels that aren't enabled. */
		if (!(nv_rd32(dev, 0x800004 + (i * 8)) & 1))
			continue;

		/* Kick and wait for PFIFO to echo the channel id back. */
		nv_mask(dev, 0x800004 + (i * 8), 0x00000800, 0x00000800);
		nv_wr32(dev, 0x002634, i);
		if (!nv_wait(dev, 0x002634, 0xffffffff, i)) {
			NV_INFO(dev, "PFIFO: kick ch %d failed: 0x%08x\n",
				i, nv_rd32(dev, 0x002634));
			return -EBUSY;
		}
	}

	return 0;
}
  186. static void
  187. nve0_fifo_destroy(struct drm_device *dev)
  188. {
  189. struct drm_nouveau_private *dev_priv = dev->dev_private;
  190. struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
  191. struct nve0_fifo_priv *priv;
  192. int i;
  193. priv = pfifo->priv;
  194. if (!priv)
  195. return;
  196. nouveau_vm_put(&priv->user.bar);
  197. nouveau_gpuobj_ref(NULL, &priv->user.mem);
  198. for (i = 0; i < NVE0_FIFO_ENGINE_NUM; i++) {
  199. nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[0]);
  200. nouveau_gpuobj_ref(NULL, &priv->engine[i].playlist[1]);
  201. }
  202. kfree(priv);
  203. }
/* Shut PFIFO down: mask all of its interrupts, then free driver state. */
void
nve0_fifo_takedown(struct drm_device *dev)
{
	nv_wr32(dev, 0x002140, 0x00000000);	/* PFIFO INTR_EN = 0 */
	nve0_fifo_destroy(dev);
}
/* One-time creation of fifo driver state: the per-channel user control
 * pages, their BAR1 mapping, and the interrupt handler registration.
 * Returns 0 or a negative errno; cleans up after itself on failure. */
static int
nve0_fifo_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nve0_fifo_priv *priv;
	int ret;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	pfifo->priv = priv;

	/* Backing store: one 512-byte control page per channel. */
	ret = nouveau_gpuobj_new(dev, NULL, pfifo->channels * 512, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &priv->user.mem);
	if (ret)
		goto error;

	/* Map the control pages through BAR1 so they can be ioremapped and
	 * polled from the CPU (see create_context). */
	ret = nouveau_vm_get(dev_priv->bar1_vm, priv->user.mem->size,
			     12, NV_MEM_ACCESS_RW, &priv->user.bar);
	if (ret)
		goto error;

	nouveau_vm_map(&priv->user.bar, *(struct nouveau_mem **)priv->user.mem->node);

	nouveau_irq_register(dev, 8, nve0_fifo_isr);	/* IRQ source 8 = PFIFO */
	return 0;

error:
	nve0_fifo_destroy(dev);
	return ret;
}
/* Bring PFIFO up (initial load or resume): reset the unit, program the
 * PSUBFIFOs and interrupt masks, then re-enable every channel that still
 * has fifo state.  Returns 0 or a negative errno from state creation. */
int
nve0_fifo_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_channel *chan;
	struct nve0_fifo_chan *fifoch;
	struct nve0_fifo_priv *priv;
	int ret, i;

	/* Create driver state on first init only; it survives suspend. */
	if (!pfifo->priv) {
		ret = nve0_fifo_create(dev);
		if (ret)
			return ret;
	}
	priv = pfifo->priv;

	/* reset PFIFO, enable all available PSUBFIFO areas */
	nv_mask(dev, 0x000200, 0x00000100, 0x00000000);
	nv_mask(dev, 0x000200, 0x00000100, 0x00000100);
	nv_wr32(dev, 0x000204, 0xffffffff);

	/* Reading 0x204 back reveals which PSUBFIFO units actually exist. */
	priv->spoon_nr = hweight32(nv_rd32(dev, 0x000204));
	NV_DEBUG(dev, "PFIFO: %d subfifo(s)\n", priv->spoon_nr);

	/* PSUBFIFO[n]: reset, ack stale interrupts, unmask. */
	for (i = 0; i < priv->spoon_nr; i++) {
		nv_mask(dev, 0x04013c + (i * 0x2000), 0x10000100, 0x00000000);
		nv_wr32(dev, 0x040108 + (i * 0x2000), 0xffffffff); /* INTR */
		nv_wr32(dev, 0x04010c + (i * 0x2000), 0xfffffeff); /* INTR_EN */
	}

	/* Tell hardware where the user control page area lives in BAR1. */
	nv_wr32(dev, 0x002254, 0x10000000 | priv->user.bar.offset >> 12);

	nv_wr32(dev, 0x002a00, 0xffffffff);
	nv_wr32(dev, 0x002100, 0xffffffff);	/* ack any stale PFIFO intr */
	nv_wr32(dev, 0x002140, 0xbfffffff);	/* enable PFIFO interrupts */

	/* restore PFIFO context table */
	for (i = 0; i < pfifo->channels; i++) {
		chan = dev_priv->channels.ptr[i];
		if (!chan || !chan->fifo_priv)
			continue;
		fifoch = chan->fifo_priv;

		/* Re-point, re-enable and re-insert each surviving channel
		 * (same sequence as create_context's activation). */
		nv_wr32(dev, 0x800000 + (i * 8), 0x80000000 |
						 (chan->ramin->vinst >> 12));
		nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
		nve0_fifo_playlist_update(dev, fifoch->engine);
		nv_mask(dev, 0x800004 + (i * 8), 0x00000400, 0x00000400);
	}

	return 0;
}
/* VM fault source units — names not yet known for this chipset, so the
 * table is empty and nouveau_enum_print() will fall back to raw values. */
struct nouveau_enum nve0_fifo_fault_unit[] = {
	{}
};
/* Decode table for the fault-reason field (low 4 bits of the fault status
 * register) reported by nve0_fifo_isr_vm_fault(). */
struct nouveau_enum nve0_fifo_fault_reason[] = {
	{ 0x00, "PT_NOT_PRESENT" },
	{ 0x01, "PT_TOO_SHORT" },
	{ 0x02, "PAGE_NOT_PRESENT" },
	{ 0x03, "VM_LIMIT_EXCEEDED" },
	{ 0x04, "NO_CHANNEL" },
	{ 0x05, "PAGE_SYSTEM_ONLY" },
	{ 0x06, "PAGE_READ_ONLY" },
	{ 0x0a, "COMPRESSED_SYSRAM" },
	{ 0x0c, "INVALID_STORAGE_TYPE" },
	{}
};
/* HUB client ids for VM faults — not yet known for this chipset. */
struct nouveau_enum nve0_fifo_fault_hubclient[] = {
	{}
};
/* GPC client ids for VM faults — not yet known for this chipset. */
struct nouveau_enum nve0_fifo_fault_gpcclient[] = {
	{}
};
/* Known bits of the PSUBFIFO interrupt status register (0x040108+). */
struct nouveau_bitfield nve0_fifo_subfifo_intr[] = {
	{ 0x00200000, "ILLEGAL_MTHD" },
	{ 0x00800000, "EMPTY_SUBC" },
	{}
};
/* Log a decoded description of a VM fault raised by @unit, read from that
 * unit's four fault descriptor registers at 0x2800 + unit*0x10. */
static void
nve0_fifo_isr_vm_fault(struct drm_device *dev, int unit)
{
	u32 inst = nv_rd32(dev, 0x2800 + (unit * 0x10));	/* channel instance */
	u32 valo = nv_rd32(dev, 0x2804 + (unit * 0x10));	/* fault VA low */
	u32 vahi = nv_rd32(dev, 0x2808 + (unit * 0x10));	/* fault VA high */
	u32 stat = nv_rd32(dev, 0x280c + (unit * 0x10));	/* status/flags */
	u32 client = (stat & 0x00001f00) >> 8;

	NV_INFO(dev, "PFIFO: %s fault at 0x%010llx [",
		(stat & 0x00000080) ? "write" : "read", (u64)vahi << 32 | valo);
	nouveau_enum_print(nve0_fifo_fault_reason, stat & 0x0000000f);
	printk("] from ");
	nouveau_enum_print(nve0_fifo_fault_unit, unit);
	/* Bit 6 distinguishes HUB clients from GPC clients. */
	if (stat & 0x00000040) {
		printk("/");
		nouveau_enum_print(nve0_fifo_fault_hubclient, client);
	} else {
		printk("/GPC%d/", (stat & 0x1f000000) >> 24);
		nouveau_enum_print(nve0_fifo_fault_gpcclient, client);
	}
	printk(" on channel 0x%010llx\n", (u64)inst << 12);
}
  329. static void
  330. nve0_fifo_isr_subfifo_intr(struct drm_device *dev, int unit)
  331. {
  332. u32 stat = nv_rd32(dev, 0x040108 + (unit * 0x2000));
  333. u32 addr = nv_rd32(dev, 0x0400c0 + (unit * 0x2000));
  334. u32 data = nv_rd32(dev, 0x0400c4 + (unit * 0x2000));
  335. u32 chid = nv_rd32(dev, 0x040120 + (unit * 0x2000)) & 0x7f;
  336. u32 subc = (addr & 0x00070000);
  337. u32 mthd = (addr & 0x00003ffc);
  338. NV_INFO(dev, "PSUBFIFO %d:", unit);
  339. nouveau_bitfield_print(nve0_fifo_subfifo_intr, stat);
  340. NV_INFO(dev, "PSUBFIFO %d: ch %d subc %d mthd 0x%04x data 0x%08x\n",
  341. unit, chid, subc, mthd, data);
  342. nv_wr32(dev, 0x0400c0 + (unit * 0x2000), 0x80600008);
  343. nv_wr32(dev, 0x040108 + (unit * 0x2000), stat);
  344. }
/* Top-level PFIFO interrupt handler: dispatch each status bit of 0x2100 to
 * its handler, acknowledging as it goes; anything left unhandled gets
 * acked and all PFIFO interrupts are masked to prevent a storm. */
static void
nve0_fifo_isr(struct drm_device *dev)
{
	u32 stat = nv_rd32(dev, 0x002100);

	if (stat & 0x00000100) {
		NV_INFO(dev, "PFIFO: unknown status 0x00000100\n");
		nv_wr32(dev, 0x002100, 0x00000100);	/* ack */
		stat &= ~0x00000100;
	}

	if (stat & 0x10000000) {
		/* VM fault: 0x259c holds one bit per faulting unit. */
		u32 units = nv_rd32(dev, 0x00259c);
		u32 u = units;

		while (u) {
			int i = ffs(u) - 1;
			nve0_fifo_isr_vm_fault(dev, i);
			u &= ~(1 << i);
		}

		nv_wr32(dev, 0x00259c, units);	/* ack handled units */
		stat &= ~0x10000000;
	}

	if (stat & 0x20000000) {
		/* PSUBFIFO interrupt: 0x25a0 holds one bit per unit. */
		u32 units = nv_rd32(dev, 0x0025a0);
		u32 u = units;

		while (u) {
			int i = ffs(u) - 1;
			nve0_fifo_isr_subfifo_intr(dev, i);
			u &= ~(1 << i);
		}

		nv_wr32(dev, 0x0025a0, units);	/* ack handled units */
		stat &= ~0x20000000;
	}

	if (stat & 0x40000000) {
		NV_INFO(dev, "PFIFO: unknown status 0x40000000\n");
		/* NOTE(review): a mask with zero mask/value is a plain
		 * read-modify-write of 0x2a00 — presumably the read itself
		 * acks the condition; confirm against hw docs. */
		nv_mask(dev, 0x002a00, 0x00000000, 0x00000000);
		stat &= ~0x40000000;
	}

	if (stat) {
		NV_INFO(dev, "PFIFO: unhandled status 0x%08x\n", stat);
		nv_wr32(dev, 0x002100, stat);	/* ack everything left */
		nv_wr32(dev, 0x002140, 0);	/* mask: avoid intr storm */
	}
}