nouveau_irq.c

/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "nouveau_ramht.h"
#include <linux/ratelimit.h>

/* needed for hotplug irq */
#include "nouveau_connector.h"
#include "nv50_display.h"

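/*
 * Most of the error paths below can fire once per faulting method, so
 * rate-limit the log output: at most 20 messages per 3-second window.
 * __ratelimit() returns nonzero while printing is still allowed.
 */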
static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);

static int nouveau_ratelimit(void)
{
	return __ratelimit(&nouveau_ratelimit_state);
}

void
nouveau_irq_preinstall(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* Master disable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);

	if (dev_priv->card_type >= NV_50) {
		INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
		INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh);
		INIT_LIST_HEAD(&dev_priv->vbl_waiting);
	}
}

int
nouveau_irq_postinstall(struct drm_device *dev)
{
	/* Master enable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
	return 0;
}

void
nouveau_irq_uninstall(struct drm_device *dev)
{
	/* Master disable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
}

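/*
 * Look up a software object class in the graph engine's class table
 * and, if the method is known, run its driver-side handler.  Returns
 * -ENOENT when no software implementation is registered for the
 * class/method pair.
 */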
static int
nouveau_call_method(struct nouveau_channel *chan, int class, int mthd, int data)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_pgraph_object_method *grm;
	struct nouveau_pgraph_object_class *grc;

	grc = dev_priv->engine.graph.grclass;
	while (grc->id) {
		if (grc->id == class)
			break;
		grc++;
	}

	if (grc->id != class || !grc->methods)
		return -ENOENT;

	grm = grc->methods;
	while (grm->id) {
		if (grm->id == mthd)
			return grm->exec(chan, class, mthd, data);
		grm++;
	}

	return -ENOENT;
}

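/*
 * Attempt to handle a faulting method as a software method.  Method
 * 0x0000 binds an object to a subchannel: if the object found in RAMHT
 * belongs to the SW engine, remember its class and clear the engine
 * bits for that subchannel in CACHE1_ENGINE.  Any other method is
 * forwarded to the software class previously bound to the subchannel,
 * unless the hardware already claims that subchannel.
 */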
static bool
nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data)
{
	struct drm_device *dev = chan->dev;
	const int subc = (addr >> 13) & 0x7;
	const int mthd = addr & 0x1ffc;

	if (mthd == 0x0000) {
		struct nouveau_gpuobj *gpuobj;

		gpuobj = nouveau_ramht_find(chan, data);
		if (!gpuobj)
			return false;

		if (gpuobj->engine != NVOBJ_ENGINE_SW)
			return false;

		chan->sw_subchannel[subc] = gpuobj->class;
		nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev,
			NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4));
		return true;
	}

	/* hw object */
	if (nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE) & (1 << (subc * 4)))
		return false;

	if (nouveau_call_method(chan, chan->sw_subchannel[subc], mthd, data))
		return false;

	return true;
}

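/*
 * PFIFO interrupt handler.  Cache reassignment is disabled while each
 * pending status bit is serviced, and the loop gives up after 100
 * iterations; at that point PFIFO is presumed wedged and the interrupt
 * enables (0x2140 and, apparently, the PMC enable at 0x140) are cleared.
 */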
static void
nouveau_fifo_irq_handler(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	uint32_t status, reassign;
	int cnt = 0;

	reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
	while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
		struct nouveau_channel *chan = NULL;
		uint32_t chid, get;

		nv_wr32(dev, NV03_PFIFO_CACHES, 0);

		chid = engine->fifo.channel_id(dev);
		if (chid >= 0 && chid < engine->fifo.channels)
			chan = dev_priv->fifos[chid];
		get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);

		if (status & NV_PFIFO_INTR_CACHE_ERROR) {
			uint32_t mthd, data;
			int ptr;

			/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
			 * wrapping on my G80 chips, but CACHE1 isn't big
			 * enough for this much data.. Tests show that it
			 * wraps around to the start at GET=0x800.. No clue
			 * as to why..
			 */
			ptr = (get & 0x7ff) >> 2;

			if (dev_priv->card_type < NV_40) {
				mthd = nv_rd32(dev,
					NV04_PFIFO_CACHE1_METHOD(ptr));
				data = nv_rd32(dev,
					NV04_PFIFO_CACHE1_DATA(ptr));
			} else {
				mthd = nv_rd32(dev,
					NV40_PFIFO_CACHE1_METHOD(ptr));
				data = nv_rd32(dev,
					NV40_PFIFO_CACHE1_DATA(ptr));
			}

			if (!chan || !nouveau_fifo_swmthd(chan, mthd, data)) {
				NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
					     "Mthd 0x%04x Data 0x%08x\n",
					chid, (mthd >> 13) & 7, mthd & 0x1ffc,
					data);
			}

			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
			nv_wr32(dev, NV03_PFIFO_INTR_0,
						NV_PFIFO_INTR_CACHE_ERROR);

			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
			nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);

			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
				nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);

			status &= ~NV_PFIFO_INTR_CACHE_ERROR;
		}

		if (status & NV_PFIFO_INTR_DMA_PUSHER) {
			u32 get = nv_rd32(dev, 0x003244);
			u32 put = nv_rd32(dev, 0x003240);
			u32 push = nv_rd32(dev, 0x003220);
			u32 state = nv_rd32(dev, 0x003228);

			if (dev_priv->card_type == NV_50) {
				u32 ho_get = nv_rd32(dev, 0x003328);
				u32 ho_put = nv_rd32(dev, 0x003320);
				u32 ib_get = nv_rd32(dev, 0x003334);
				u32 ib_put = nv_rd32(dev, 0x003330);

				if (nouveau_ratelimit())
					NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
						"Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
						"State 0x%08x Push 0x%08x\n",
						chid, ho_get, get, ho_put, put,
						ib_get, ib_put, state, push);

				/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
				nv_wr32(dev, 0x003364, 0x00000000);
				if (get != put || ho_get != ho_put) {
					nv_wr32(dev, 0x003244, put);
					nv_wr32(dev, 0x003328, ho_put);
				} else
				if (ib_get != ib_put) {
					nv_wr32(dev, 0x003334, ib_put);
				}
			} else {
				NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
					"Put 0x%08x State 0x%08x Push 0x%08x\n",
					chid, get, put, state, push);

				if (get != put)
					nv_wr32(dev, 0x003244, put);
			}

			nv_wr32(dev, 0x003228, 0x00000000);
			nv_wr32(dev, 0x003220, 0x00000001);
			nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
			status &= ~NV_PFIFO_INTR_DMA_PUSHER;
		}

		if (status & NV_PFIFO_INTR_SEMAPHORE) {
			uint32_t sem;

			status &= ~NV_PFIFO_INTR_SEMAPHORE;
			nv_wr32(dev, NV03_PFIFO_INTR_0,
				NV_PFIFO_INTR_SEMAPHORE);

			sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
			nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
		}

		if (dev_priv->card_type == NV_50) {
			if (status & 0x00000010) {
				nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
				status &= ~0x00000010;
				nv_wr32(dev, 0x002100, 0x00000010);
			}
		}

		if (status) {
			if (nouveau_ratelimit())
				NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
					status, chid);
			nv_wr32(dev, NV03_PFIFO_INTR_0, status);
			status = 0;
		}

		nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
	}

	if (status) {
		NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
		nv_wr32(dev, 0x2140, 0);
		nv_wr32(dev, 0x140, 0);
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
}

struct nouveau_bitfield_names {
	uint32_t mask;
	const char *name;
};

static struct nouveau_bitfield_names nstatus_names[] =
{
	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
	{ NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
	{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
	{ NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
};

static struct nouveau_bitfield_names nstatus_names_nv10[] =
{
	{ NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
	{ NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
	{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
	{ NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
};

static struct nouveau_bitfield_names nsource_names[] =
{
	{ NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
	{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
	{ NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
	{ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
	{ NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
	{ NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
	{ NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
	{ NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
	{ NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
};

static void
nouveau_print_bitfield_names_(uint32_t value,
			      const struct nouveau_bitfield_names *namelist,
			      const int namelist_len)
{
	/*
	 * Caller must have already printed the KERN_* log level for us.
	 * Also the caller is responsible for adding the newline.
	 */
	int i;
	for (i = 0; i < namelist_len; ++i) {
		uint32_t mask = namelist[i].mask;
		if (value & mask) {
			printk(" %s", namelist[i].name);
			value &= ~mask;
		}
	}
	if (value)
		printk(" (unknown bits 0x%08x)", value);
}
#define nouveau_print_bitfield_names(val, namelist) \
	nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))

struct nouveau_enum_names {
	uint32_t value;
	const char *name;
};

static void
nouveau_print_enum_names_(uint32_t value,
			  const struct nouveau_enum_names *namelist,
			  const int namelist_len)
{
	/*
	 * Caller must have already printed the KERN_* log level for us.
	 * Also the caller is responsible for adding the newline.
	 */
	int i;
	for (i = 0; i < namelist_len; ++i) {
		if (value == namelist[i].value) {
			printk("%s", namelist[i].name);
			return;
		}
	}
	printk("unknown value 0x%08x", value);
}
#define nouveau_print_enum_names(val, namelist) \
	nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist))

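/*
 * Recover the channel id of the context currently loaded on PGRAPH by
 * matching the instance address in 0x40032c against each channel's
 * grctx (NV40) or RAMIN (NV50) address.  Returns fifo.channels, i.e.
 * an out-of-range id, when nothing matches or the card predates NV40.
 */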
static int
nouveau_graph_chid_from_grctx(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t inst;
	int i;

	if (dev_priv->card_type < NV_40)
		return dev_priv->engine.fifo.channels;
	else
	if (dev_priv->card_type < NV_50) {
		inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4;

		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
			struct nouveau_channel *chan = dev_priv->fifos[i];

			if (!chan || !chan->ramin_grctx)
				continue;

			if (inst == chan->ramin_grctx->pinst)
				break;
		}
	} else {
		inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12;

		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
			struct nouveau_channel *chan = dev_priv->fifos[i];

			if (!chan || !chan->ramin)
				continue;

			if (inst == chan->ramin->vinst)
				break;
		}
	}

	return i;
}

static int
nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	int channel;

	if (dev_priv->card_type < NV_10)
		channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
	else
	if (dev_priv->card_type < NV_40)
		channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
	else
		channel = nouveau_graph_chid_from_grctx(dev);

	if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) {
		NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel);
		return -EINVAL;
	}

	*channel_ret = channel;
	return 0;
}

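/* Snapshot of a single trapped PGRAPH method, filled in from the
 * TRAPPED_ADDR/TRAPPED_DATA registers by nouveau_graph_trap_info(). */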
struct nouveau_pgraph_trap {
	int channel;
	int class;
	int subc, mthd, size;
	uint32_t data, data2;
	uint32_t nsource, nstatus;
};

static void
nouveau_graph_trap_info(struct drm_device *dev,
			struct nouveau_pgraph_trap *trap)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t address;

	trap->nsource = trap->nstatus = 0;
	if (dev_priv->card_type < NV_50) {
		trap->nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
		trap->nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
	}

	if (nouveau_graph_trapped_channel(dev, &trap->channel))
		trap->channel = -1;
	address = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);

	trap->mthd = address & 0x1FFC;
	trap->data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
	if (dev_priv->card_type < NV_10) {
		trap->subc = (address >> 13) & 0x7;
	} else {
		trap->subc = (address >> 16) & 0x7;
		trap->data2 = nv_rd32(dev, NV10_PGRAPH_TRAPPED_DATA_HIGH);
	}

	if (dev_priv->card_type < NV_10)
		trap->class = nv_rd32(dev, 0x400180 + trap->subc * 4) & 0xFF;
	else if (dev_priv->card_type < NV_40)
		trap->class = nv_rd32(dev, 0x400160 + trap->subc * 4) & 0xFFF;
	else if (dev_priv->card_type < NV_50)
		trap->class = nv_rd32(dev, 0x400160 + trap->subc * 4) & 0xFFFF;
	else
		trap->class = nv_rd32(dev, 0x400814);
}

static void
nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
			     struct nouveau_pgraph_trap *trap)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t nsource = trap->nsource, nstatus = trap->nstatus;

	if (dev_priv->card_type < NV_50) {
		NV_INFO(dev, "%s - nSource:", id);
		nouveau_print_bitfield_names(nsource, nsource_names);
		printk(", nStatus:");
		if (dev_priv->card_type < NV_10)
			nouveau_print_bitfield_names(nstatus, nstatus_names);
		else
			nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
		printk("\n");
	}

	NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
		     "Data 0x%08x:0x%08x\n",
		id, trap->channel, trap->subc,
		trap->class, trap->mthd,
		trap->data2, trap->data);
}

static int
nouveau_pgraph_intr_swmthd(struct drm_device *dev,
			   struct nouveau_pgraph_trap *trap)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (trap->channel < 0 ||
	    trap->channel >= dev_priv->engine.fifo.channels ||
	    !dev_priv->fifos[trap->channel])
		return -ENODEV;

	return nouveau_call_method(dev_priv->fifos[trap->channel],
				   trap->class, trap->mthd, trap->data);
}

static inline void
nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
{
	struct nouveau_pgraph_trap trap;
	int unhandled = 0;

	nouveau_graph_trap_info(dev, &trap);

	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
		if (nouveau_pgraph_intr_swmthd(dev, &trap))
			unhandled = 1;
	} else {
		unhandled = 1;
	}

	if (unhandled)
		nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
}

static inline void
nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
{
	struct nouveau_pgraph_trap trap;
	int unhandled = 0;

	nouveau_graph_trap_info(dev, &trap);
	trap.nsource = nsource;

	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
		if (nouveau_pgraph_intr_swmthd(dev, &trap))
			unhandled = 1;
	} else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
		uint32_t v = nv_rd32(dev, 0x402000);
		nv_wr32(dev, 0x402000, v);

		/* dump the error anyway for now: it's useful for
		   Gallium development */
		unhandled = 1;
	} else {
		unhandled = 1;
	}

	if (unhandled && nouveau_ratelimit())
		nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap);
}

static inline void
nouveau_pgraph_intr_context_switch(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	uint32_t chid;

	chid = engine->fifo.channel_id(dev);
	NV_DEBUG(dev, "PGRAPH context switch interrupt channel %x\n", chid);

	switch (dev_priv->card_type) {
	case NV_04:
		nv04_graph_context_switch(dev);
		break;
	case NV_10:
		nv10_graph_context_switch(dev);
		break;
	default:
		NV_ERROR(dev, "Context switch not implemented\n");
		break;
	}
}

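/* PGRAPH interrupt dispatch for pre-NV50 cards; NV50 and later use
 * nv50_pgraph_irq_handler() below. */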
static void
nouveau_pgraph_irq_handler(struct drm_device *dev)
{
	uint32_t status;

	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);

		if (status & NV_PGRAPH_INTR_NOTIFY) {
			nouveau_pgraph_intr_notify(dev, nsource);

			status &= ~NV_PGRAPH_INTR_NOTIFY;
			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
		}

		if (status & NV_PGRAPH_INTR_ERROR) {
			nouveau_pgraph_intr_error(dev, nsource);

			status &= ~NV_PGRAPH_INTR_ERROR;
			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR);
		}

		if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
			nv_wr32(dev, NV03_PGRAPH_INTR,
				NV_PGRAPH_INTR_CONTEXT_SWITCH);

			nouveau_pgraph_intr_context_switch(dev);
		}

		if (status) {
			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
			nv_wr32(dev, NV03_PGRAPH_INTR, status);
		}

		if ((nv_rd32(dev, NV04_PGRAPH_FIFO) & (1 << 0)) == 0)
			nv_wr32(dev, NV04_PGRAPH_FIFO, 1);
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
}

static struct nouveau_enum_names nv50_mp_exec_error_names[] =
{
	{ 3, "STACK_UNDERFLOW" },
	{ 4, "QUADON_ACTIVE" },
	{ 8, "TIMEOUT" },
	{ 0x10, "INVALID_OPCODE" },
	{ 0x40, "BREAKPOINT" },
};

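/*
 * Report and acknowledge MP execution errors within one TP.  Bits
 * 24-27 of 0x1540 appear to be the MP enable mask; the per-MP register
 * blocks sit at 0x408200 (TP stride 0x1000) before NVA0 and at
 * 0x408100 (TP stride 0x800) from NVA0 onwards, 0x80 bytes per MP.
 */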
static void
nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t units = nv_rd32(dev, 0x1540);
	uint32_t addr, mp10, status, pc, oplow, ophigh;
	int i;
	int mps = 0;

	for (i = 0; i < 4; i++) {
		if (!(units & 1 << (i + 24)))
			continue;
		if (dev_priv->chipset < 0xa0)
			addr = 0x408200 + (tpid << 12) + (i << 7);
		else
			addr = 0x408100 + (tpid << 11) + (i << 7);
		mp10 = nv_rd32(dev, addr + 0x10);
		status = nv_rd32(dev, addr + 0x14);
		if (!status)
			continue;
		if (display) {
			nv_rd32(dev, addr + 0x20);
			pc = nv_rd32(dev, addr + 0x24);
			oplow = nv_rd32(dev, addr + 0x70);
			ophigh = nv_rd32(dev, addr + 0x74);
			NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
					"TP %d MP %d: ", tpid, i);
			nouveau_print_enum_names(status,
					nv50_mp_exec_error_names);
			printk(" at %06x warp %d, opcode %08x %08x\n",
					pc & 0xffffff, pc >> 24,
					oplow, ophigh);
		}
		nv_wr32(dev, addr + 0x10, mp10);
		nv_wr32(dev, addr + 0x14, 0);
		mps++;
	}
	if (!mps && display)
		NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
				"No MPs claiming errors?\n", tpid);
}

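/*
 * Walk all TPs enabled in 0x1540 and decode their trap ustatus
 * registers.  The register base differs by chipset generation (TP
 * stride 0x1000 before NVA0, 0x800 after); 'type' selects the
 * decoding: 6 = texture, 7 = MP, 8 = TPDMA.  Writing 0xc0000000
 * acknowledges a unit's ustatus register.
 */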
static void
nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
		uint32_t ustatus_new, int display, const char *name)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int tps = 0;
	uint32_t units = nv_rd32(dev, 0x1540);
	int i, r;
	uint32_t ustatus_addr, ustatus;

	for (i = 0; i < 16; i++) {
		if (!(units & (1 << i)))
			continue;
		if (dev_priv->chipset < 0xa0)
			ustatus_addr = ustatus_old + (i << 12);
		else
			ustatus_addr = ustatus_new + (i << 11);
		ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
		if (!ustatus)
			continue;
		tps++;
		switch (type) {
		case 6: /* texture error... unknown for now */
			nv50_fb_vm_trap(dev, display, name);
			if (display) {
				NV_ERROR(dev, "magic set %d:\n", i);
				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
					NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
						nv_rd32(dev, r));
			}
			break;
		case 7: /* MP error */
			if (ustatus & 0x00010000) {
				nv50_pgraph_mp_trap(dev, i, display);
				ustatus &= ~0x00010000;
			}
			break;
		case 8: /* TPDMA error */
			{
			uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
			uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
			uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
			uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
			uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
			uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
			uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
			nv50_fb_vm_trap(dev, display, name);
			/* 2d engine destination */
			if (ustatus & 0x00000010) {
				if (display) {
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
							i, e14, e10);
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000010;
			}
			/* Render target */
			if (ustatus & 0x00000040) {
				if (display) {
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
							i, e14, e10);
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000040;
			}
			/* CUDA memory: l[], g[] or stack. */
			if (ustatus & 0x00000080) {
				if (display) {
					if (e18 & 0x80000000) {
						/* g[] read fault? */
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
								i, e14, e10 | ((e18 >> 24) & 0x1f));
						e18 &= ~0x1f000000;
					} else if (e18 & 0xc) {
						/* g[] write fault? */
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
								i, e14, e10 | ((e18 >> 7) & 0x1f));
						e18 &= ~0x00000f80;
					} else {
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
								i, e14, e10);
					}
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000080;
			}
			}
			break;
		}
		if (ustatus) {
			if (display)
				NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
		}
		nv_wr32(dev, ustatus_addr, 0xc0000000);
	}

	if (!tps && display)
		NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
}

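/*
 * NV50 PGRAPH TRAP handling.  0x400108 holds one bit per reporting
 * unit (DISPATCH, M2MF, VFETCH, STRMOUT, CCACHE, the unknown 0x20
 * source, TEXTURE, MP, TPDMA); each unit has its own ustatus register,
 * acknowledged by writing 0xc0000000, after which the unit's bit is
 * written back to 0x400108.  All logging is rate-limited via 'display'.
 */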
static void
nv50_pgraph_trap_handler(struct drm_device *dev)
{
	struct nouveau_pgraph_trap trap;
	uint32_t status = nv_rd32(dev, 0x400108);
	uint32_t ustatus;
	int display = nouveau_ratelimit();

	if (!status && display) {
		nouveau_graph_trap_info(dev, &trap);
		nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap);
		NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n");
	}

	/* DISPATCH: Relays commands to other units and handles NOTIFY,
	 * COND, QUERY. If you get a trap from it, the command is still stuck
	 * in DISPATCH and you need to do something about it. */
	if (status & 0x001) {
		ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
		}

		/* Known to be triggered by screwed up NOTIFY and COND... */
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
			nv_wr32(dev, 0x400500, 0);
			if (nv_rd32(dev, 0x400808) & 0x80000000) {
				if (display) {
					if (nouveau_graph_trapped_channel(dev, &trap.channel))
						trap.channel = -1;
					trap.class = nv_rd32(dev, 0x400814);
					trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc;
					trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7;
					trap.data = nv_rd32(dev, 0x40080c);
					trap.data2 = nv_rd32(dev, 0x400810);
					nouveau_graph_dump_trap_info(dev,
							"PGRAPH_TRAP_DISPATCH_FAULT", &trap);
					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808));
					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848));
				}
				nv_wr32(dev, 0x400808, 0);
			} else if (display) {
				NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n");
			}
			nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
			nv_wr32(dev, 0x400848, 0);
			ustatus &= ~0x00000001;
		}
		if (ustatus & 0x00000002) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
			nv_wr32(dev, 0x400500, 0);
			if (nv_rd32(dev, 0x40084c) & 0x80000000) {
				if (display) {
					if (nouveau_graph_trapped_channel(dev, &trap.channel))
						trap.channel = -1;
					trap.class = nv_rd32(dev, 0x400814);
					trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc;
					trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7;
					trap.data = nv_rd32(dev, 0x40085c);
					trap.data2 = 0;
					nouveau_graph_dump_trap_info(dev,
							"PGRAPH_TRAP_DISPATCH_QUERY", &trap);
					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c));
				}
				nv_wr32(dev, 0x40084c, 0);
			} else if (display) {
				NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n");
			}
			ustatus &= ~0x00000002;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x400804, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x001);
		status &= ~0x001;
	}

	/* TRAPs other than dispatch use the "normal" trap regs. */
	if (status && display) {
		nouveau_graph_trap_info(dev, &trap);
		nouveau_graph_dump_trap_info(dev,
				"PGRAPH_TRAP", &trap);
	}

	/* M2MF: Memory to memory copy engine. */
	if (status & 0x002) {
		ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
			ustatus &= ~0x00000001;
		}
		if (ustatus & 0x00000002) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
			ustatus &= ~0x00000002;
		}
		if (ustatus & 0x00000004) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
			ustatus &= ~0x00000004;
		}
		NV_INFO(dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n",
				nv_rd32(dev, 0x406804),
				nv_rd32(dev, 0x406808),
				nv_rd32(dev, 0x40680c),
				nv_rd32(dev, 0x406810));
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus);
		/* No sane way found yet -- just reset the bugger. */
		nv_wr32(dev, 0x400040, 2);
		nv_wr32(dev, 0x400040, 0);
		nv_wr32(dev, 0x406800, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x002);
		status &= ~0x002;
	}

	/* VFETCH: Fetches data from vertex buffers. */
	if (status & 0x004) {
		ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
			NV_INFO(dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n",
					nv_rd32(dev, 0x400c00),
					nv_rd32(dev, 0x400c08),
					nv_rd32(dev, 0x400c0c),
					nv_rd32(dev, 0x400c10));
			ustatus &= ~0x00000001;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x400c04, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x004);
		status &= ~0x004;
	}

	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
	if (status & 0x008) {
		ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
			NV_INFO(dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n",
					nv_rd32(dev, 0x401804),
					nv_rd32(dev, 0x401808),
					nv_rd32(dev, 0x40180c),
					nv_rd32(dev, 0x401810));
			ustatus &= ~0x00000001;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus);
		/* No sane way found yet -- just reset the bugger. */
		nv_wr32(dev, 0x400040, 0x80);
		nv_wr32(dev, 0x400040, 0);
		nv_wr32(dev, 0x401800, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x008);
		status &= ~0x008;
	}

	/* CCACHE: Handles code and c[] caches and fills them. */
	if (status & 0x010) {
		ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
			NV_INFO(dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n",
					nv_rd32(dev, 0x405800),
					nv_rd32(dev, 0x405804),
					nv_rd32(dev, 0x405808),
					nv_rd32(dev, 0x40580c),
					nv_rd32(dev, 0x405810),
					nv_rd32(dev, 0x405814),
					nv_rd32(dev, 0x40581c));
			ustatus &= ~0x00000001;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x405018, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x010);
		status &= ~0x010;
	}

	/* Unknown, not seen yet... 0x402000 is the only trap status reg
	 * remaining, so try to handle it anyway.  Perhaps related to that
	 * unknown DMA slot on tesla? */
	if (status & 0x20) {
		nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
		ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
		if (display)
			NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x402000, 0xc0000000);
		/* no status modification on purpose */
	}

	/* TEXTURE: CUDA texturing units */
	if (status & 0x040) {
		nv50_pgraph_tp_trap(dev, 6, 0x408900, 0x408600, display,
				"PGRAPH_TRAP_TEXTURE");
		nv_wr32(dev, 0x400108, 0x040);
		status &= ~0x040;
	}

	/* MP: CUDA execution engines. */
	if (status & 0x080) {
		nv50_pgraph_tp_trap(dev, 7, 0x408314, 0x40831c, display,
				"PGRAPH_TRAP_MP");
		nv_wr32(dev, 0x400108, 0x080);
		status &= ~0x080;
	}

	/* TPDMA: Handles TP-initiated uncached memory accesses:
	 * l[], g[], stack, 2d surfaces, render targets. */
	if (status & 0x100) {
		nv50_pgraph_tp_trap(dev, 8, 0x408e08, 0x408708, display,
				"PGRAPH_TRAP_TPDMA");
		nv_wr32(dev, 0x400108, 0x100);
		status &= ~0x100;
	}

	if (status) {
		if (display)
			NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n",
				status);
		nv_wr32(dev, 0x400108, status);
	}
}

/* There must be a *lot* of these. Will take some time to gather them up. */
static struct nouveau_enum_names nv50_data_error_names[] =
{
	{ 4, "INVALID_VALUE" },
	{ 5, "INVALID_ENUM" },
	{ 8, "INVALID_OBJECT" },
	{ 0xc, "INVALID_BITFIELD" },
	{ 0x28, "MP_NO_REG_SPACE" },
	{ 0x2b, "MP_BLOCK_SIZE_MISMATCH" },
};

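/*
 * NV50 PGRAPH interrupt handler.  Unlike the pre-NV50 path, the status
 * bits are decoded inline: each handled source is logged (rate-limited),
 * cleared from the local status copy, and acknowledged by writing the
 * bit back to NV03_PGRAPH_INTR.
 */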
static void
nv50_pgraph_irq_handler(struct drm_device *dev)
{
	struct nouveau_pgraph_trap trap;
	int unhandled = 0;
	uint32_t status;

	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		/* NOTIFY: You've set a NOTIFY on a command and it's done. */
		if (status & 0x00000001) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_NOTIFY", &trap);
			status &= ~0x00000001;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
		}

		/* COMPUTE_QUERY: Purpose and exact cause unknown, happens
		 * when you write 0x200 to 0x50c0 method 0x31c. */
		if (status & 0x00000002) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_COMPUTE_QUERY", &trap);
			status &= ~0x00000002;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002);
		}

		/* Unknown, never seen: 0x4 */

		/* ILLEGAL_MTHD: You used a wrong method for this class. */
		if (status & 0x00000010) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_pgraph_intr_swmthd(dev, &trap))
				unhandled = 1;
			if (unhandled && nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_ILLEGAL_MTHD", &trap);
			status &= ~0x00000010;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
		}

		/* ILLEGAL_CLASS: You used a wrong class. */
		if (status & 0x00000020) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_ILLEGAL_CLASS", &trap);
			status &= ~0x00000020;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020);
		}

		/* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. */
		if (status & 0x00000040) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_DOUBLE_NOTIFY", &trap);
			status &= ~0x00000040;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040);
		}

		/* CONTEXT_SWITCH: PGRAPH needs us to load a new context */
		if (status & 0x00001000) {
			nv_wr32(dev, 0x400500, 0x00000000);
			nv_wr32(dev, NV03_PGRAPH_INTR,
				NV_PGRAPH_INTR_CONTEXT_SWITCH);
			nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
				NV40_PGRAPH_INTR_EN) &
				~NV_PGRAPH_INTR_CONTEXT_SWITCH);
			nv_wr32(dev, 0x400500, 0x00010001);

			nv50_graph_context_switch(dev);

			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
		}

		/* BUFFER_NOTIFY: Your m2mf transfer finished */
		if (status & 0x00010000) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_BUFFER_NOTIFY", &trap);
			status &= ~0x00010000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000);
		}

		/* DATA_ERROR: Invalid value for this method, or invalid
		 * state in current PGRAPH context for this operation */
		if (status & 0x00100000) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit()) {
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_DATA_ERROR", &trap);
				NV_INFO(dev, "PGRAPH_DATA_ERROR - ");
				nouveau_print_enum_names(nv_rd32(dev, 0x400110),
						nv50_data_error_names);
				printk("\n");
			}
			status &= ~0x00100000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
		}

		/* TRAP: Something bad happened in the middle of command
		 * execution.  Has a billion types, subtypes, and even
		 * subsubtypes. */
		if (status & 0x00200000) {
			nv50_pgraph_trap_handler(dev);
			status &= ~0x00200000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
		}

		/* Unknown, never seen: 0x00400000 */

		/* SINGLE_STEP: Happens on every method if you turned on
		 * single stepping in 40008c */
		if (status & 0x01000000) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_SINGLE_STEP", &trap);
			status &= ~0x01000000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000);
		}

		/* 0x02000000 happens when you pause a ctxprog...
		 * but the only way this can happen that I know is by
		 * poking the relevant MMIO register, and we don't
		 * do that. */

		if (status) {
			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
				status);
			nv_wr32(dev, NV03_PGRAPH_INTR, status);
		}

		{
			const int isb = (1 << 16) | (1 << 0);

			if ((nv_rd32(dev, 0x400500) & isb) != isb)
				nv_wr32(dev, 0x400500,
					nv_rd32(dev, 0x400500) | isb);
		}
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
	if (nv_rd32(dev, 0x400824) & (1 << 31))
		nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
}

static void
nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
{
	if (crtc & 1)
		nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);

	if (crtc & 2)
		nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
}

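/*
 * Top-level interrupt handler.  Reads the PMC interrupt status and fans
 * out to the PFIFO, PGRAPH, CRTC and NV50 display handlers, all under
 * the context switch lock.  Returns IRQ_NONE when the GPU has nothing
 * pending so shared-line interrupts are not swallowed.
 */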
irqreturn_t
nouveau_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t status;
	unsigned long flags;

	status = nv_rd32(dev, NV03_PMC_INTR_0);
	if (!status)
		return IRQ_NONE;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);

	if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
		nouveau_fifo_irq_handler(dev);
		status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
	}

	if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
		if (dev_priv->card_type >= NV_50)
			nv50_pgraph_irq_handler(dev);
		else
			nouveau_pgraph_irq_handler(dev);
		status &= ~NV_PMC_INTR_0_PGRAPH_PENDING;
	}

	if (status & NV_PMC_INTR_0_CRTCn_PENDING) {
		nouveau_crtc_irq_handler(dev, (status >> 24) & 3);
		status &= ~NV_PMC_INTR_0_CRTCn_PENDING;
	}

	if (status & (NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
		      NV_PMC_INTR_0_NV50_I2C_PENDING)) {
		nv50_display_irq_handler(dev);
		status &= ~(NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
			    NV_PMC_INTR_0_NV50_I2C_PENDING);
	}

	if (status)
		NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);

	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	return IRQ_HANDLED;
}