/* nouveau_irq.c */
/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */
  31. #include "drmP.h"
  32. #include "drm.h"
  33. #include "nouveau_drm.h"
  34. #include "nouveau_drv.h"
  35. #include "nouveau_reg.h"
  36. #include <linux/ratelimit.h>
  37. /* needed for hotplug irq */
  38. #include "nouveau_connector.h"
  39. #include "nv50_display.h"
  40. void
  41. nouveau_irq_preinstall(struct drm_device *dev)
  42. {
  43. struct drm_nouveau_private *dev_priv = dev->dev_private;
  44. /* Master disable */
  45. nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
  46. if (dev_priv->card_type == NV_50) {
  47. INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
  48. INIT_LIST_HEAD(&dev_priv->vbl_waiting);
  49. }
  50. }
/* Called after the IRQ handler is installed: re-enable interrupt
 * delivery at the master (PMC) level.  Always succeeds. */
int
nouveau_irq_postinstall(struct drm_device *dev)
{
	/* Master enable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
	return 0;
}
/* Called when the IRQ handler is removed: mask all interrupt delivery
 * at the master (PMC) level. */
void
nouveau_irq_uninstall(struct drm_device *dev)
{
	/* Master disable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
}
  64. static int
  65. nouveau_call_method(struct nouveau_channel *chan, int class, int mthd, int data)
  66. {
  67. struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
  68. struct nouveau_pgraph_object_method *grm;
  69. struct nouveau_pgraph_object_class *grc;
  70. grc = dev_priv->engine.graph.grclass;
  71. while (grc->id) {
  72. if (grc->id == class)
  73. break;
  74. grc++;
  75. }
  76. if (grc->id != class || !grc->methods)
  77. return -ENOENT;
  78. grm = grc->methods;
  79. while (grm->id) {
  80. if (grm->id == mthd)
  81. return grm->exec(chan, class, mthd, data);
  82. grm++;
  83. }
  84. return -ENOENT;
  85. }
/* Try to consume a FIFO method in software.
 *
 * 'addr' is the raw trapped address: bits 13-15 select the subchannel,
 * bits 2-12 the method.  Returns true if the method was handled in
 * software, false if the caller must treat it as a real cache error.
 */
static bool
nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data)
{
	struct drm_device *dev = chan->dev;
	const int subc = (addr >> 13) & 0x7;
	const int mthd = addr & 0x1ffc;

	/* Method 0 binds an object to the subchannel; we only accept it
	 * here when it names a software (NVOBJ_ENGINE_SW) object. */
	if (mthd == 0x0000) {
		struct nouveau_gpuobj_ref *ref = NULL;

		if (nouveau_gpuobj_ref_find(chan, data, &ref))
			return false;

		if (ref->gpuobj->engine != NVOBJ_ENGINE_SW)
			return false;

		/* Record the bound class and clear this subchannel's
		 * engine bits in CACHE1_ENGINE (0 = software). */
		chan->sw_subchannel[subc] = ref->gpuobj->class;
		nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev,
			NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4));
		return true;
	}

	/* hw object */
	if (nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE) & (1 << (subc*4)))
		return false;

	if (nouveau_call_method(chan, chan->sw_subchannel[subc], mthd, data))
		return false;

	return true;
}
  110. static void
  111. nouveau_fifo_irq_handler(struct drm_device *dev)
  112. {
  113. struct drm_nouveau_private *dev_priv = dev->dev_private;
  114. struct nouveau_engine *engine = &dev_priv->engine;
  115. uint32_t status, reassign;
  116. int cnt = 0;
  117. reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
  118. while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
  119. struct nouveau_channel *chan = NULL;
  120. uint32_t chid, get;
  121. nv_wr32(dev, NV03_PFIFO_CACHES, 0);
  122. chid = engine->fifo.channel_id(dev);
  123. if (chid >= 0 && chid < engine->fifo.channels)
  124. chan = dev_priv->fifos[chid];
  125. get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
  126. if (status & NV_PFIFO_INTR_CACHE_ERROR) {
  127. uint32_t mthd, data;
  128. int ptr;
  129. /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
  130. * wrapping on my G80 chips, but CACHE1 isn't big
  131. * enough for this much data.. Tests show that it
  132. * wraps around to the start at GET=0x800.. No clue
  133. * as to why..
  134. */
  135. ptr = (get & 0x7ff) >> 2;
  136. if (dev_priv->card_type < NV_40) {
  137. mthd = nv_rd32(dev,
  138. NV04_PFIFO_CACHE1_METHOD(ptr));
  139. data = nv_rd32(dev,
  140. NV04_PFIFO_CACHE1_DATA(ptr));
  141. } else {
  142. mthd = nv_rd32(dev,
  143. NV40_PFIFO_CACHE1_METHOD(ptr));
  144. data = nv_rd32(dev,
  145. NV40_PFIFO_CACHE1_DATA(ptr));
  146. }
  147. if (!chan || !nouveau_fifo_swmthd(chan, mthd, data)) {
  148. NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
  149. "Mthd 0x%04x Data 0x%08x\n",
  150. chid, (mthd >> 13) & 7, mthd & 0x1ffc,
  151. data);
  152. }
  153. nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
  154. nv_wr32(dev, NV03_PFIFO_INTR_0,
  155. NV_PFIFO_INTR_CACHE_ERROR);
  156. nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
  157. nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
  158. nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
  159. nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
  160. nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
  161. nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
  162. nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
  163. nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
  164. nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
  165. status &= ~NV_PFIFO_INTR_CACHE_ERROR;
  166. }
  167. if (status & NV_PFIFO_INTR_DMA_PUSHER) {
  168. NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d\n", chid);
  169. status &= ~NV_PFIFO_INTR_DMA_PUSHER;
  170. nv_wr32(dev, NV03_PFIFO_INTR_0,
  171. NV_PFIFO_INTR_DMA_PUSHER);
  172. nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000);
  173. if (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT) != get)
  174. nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET,
  175. get + 4);
  176. }
  177. if (status & NV_PFIFO_INTR_SEMAPHORE) {
  178. uint32_t sem;
  179. status &= ~NV_PFIFO_INTR_SEMAPHORE;
  180. nv_wr32(dev, NV03_PFIFO_INTR_0,
  181. NV_PFIFO_INTR_SEMAPHORE);
  182. sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
  183. nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
  184. nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
  185. nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
  186. }
  187. if (status) {
  188. NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
  189. status, chid);
  190. nv_wr32(dev, NV03_PFIFO_INTR_0, status);
  191. status = 0;
  192. }
  193. nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
  194. }
  195. if (status) {
  196. NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
  197. nv_wr32(dev, 0x2140, 0);
  198. nv_wr32(dev, 0x140, 0);
  199. }
  200. nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
  201. }
/* Maps a single status bit to a human-readable name for log output. */
struct nouveau_bitfield_names {
	uint32_t mask;		/* bit(s) this entry describes */
	const char *name;	/* printed when (value & mask) is set */
};
/* PGRAPH nStatus bit names for pre-NV10 hardware. */
static struct nouveau_bitfield_names nstatus_names[] =
{
	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
	{ NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
	{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
	{ NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
};
/* PGRAPH nStatus bit names for NV10 and later (bit positions moved). */
static struct nouveau_bitfield_names nstatus_names_nv10[] =
{
	{ NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
	{ NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
	{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
	{ NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
};
/* PGRAPH nSource bit names, used when dumping trap information. */
static struct nouveau_bitfield_names nsource_names[] =
{
	{ NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
	{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
	{ NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
	{ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
	{ NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
	{ NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
	{ NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
	{ NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
	{ NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
};
  242. static void
  243. nouveau_print_bitfield_names_(uint32_t value,
  244. const struct nouveau_bitfield_names *namelist,
  245. const int namelist_len)
  246. {
  247. /*
  248. * Caller must have already printed the KERN_* log level for us.
  249. * Also the caller is responsible for adding the newline.
  250. */
  251. int i;
  252. for (i = 0; i < namelist_len; ++i) {
  253. uint32_t mask = namelist[i].mask;
  254. if (value & mask) {
  255. printk(" %s", namelist[i].name);
  256. value &= ~mask;
  257. }
  258. }
  259. if (value)
  260. printk(" (unknown bits 0x%08x)", value);
  261. }
  262. #define nouveau_print_bitfield_names(val, namelist) \
  263. nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))
/* Maps an exact register value to a human-readable name. */
struct nouveau_enum_names {
	uint32_t value;		/* value this entry describes */
	const char *name;	/* printed when value matches exactly */
};
  268. static void
  269. nouveau_print_enum_names_(uint32_t value,
  270. const struct nouveau_enum_names *namelist,
  271. const int namelist_len)
  272. {
  273. /*
  274. * Caller must have already printed the KERN_* log level for us.
  275. * Also the caller is responsible for adding the newline.
  276. */
  277. int i;
  278. for (i = 0; i < namelist_len; ++i) {
  279. if (value == namelist[i].value) {
  280. printk("%s", namelist[i].name);
  281. return;
  282. }
  283. }
  284. printk("unknown value 0x%08x", value);
  285. }
  286. #define nouveau_print_enum_names(val, namelist) \
  287. nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist))
  288. static int
  289. nouveau_graph_chid_from_grctx(struct drm_device *dev)
  290. {
  291. struct drm_nouveau_private *dev_priv = dev->dev_private;
  292. uint32_t inst;
  293. int i;
  294. if (dev_priv->card_type < NV_40)
  295. return dev_priv->engine.fifo.channels;
  296. else
  297. if (dev_priv->card_type < NV_50) {
  298. inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4;
  299. for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
  300. struct nouveau_channel *chan = dev_priv->fifos[i];
  301. if (!chan || !chan->ramin_grctx)
  302. continue;
  303. if (inst == chan->ramin_grctx->instance)
  304. break;
  305. }
  306. } else {
  307. inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12;
  308. for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
  309. struct nouveau_channel *chan = dev_priv->fifos[i];
  310. if (!chan || !chan->ramin)
  311. continue;
  312. if (inst == chan->ramin->instance)
  313. break;
  314. }
  315. }
  316. return i;
  317. }
  318. static int
  319. nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
  320. {
  321. struct drm_nouveau_private *dev_priv = dev->dev_private;
  322. struct nouveau_engine *engine = &dev_priv->engine;
  323. int channel;
  324. if (dev_priv->card_type < NV_10)
  325. channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
  326. else
  327. if (dev_priv->card_type < NV_40)
  328. channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
  329. else
  330. channel = nouveau_graph_chid_from_grctx(dev);
  331. if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) {
  332. NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel);
  333. return -EINVAL;
  334. }
  335. *channel_ret = channel;
  336. return 0;
  337. }
/* Snapshot of PGRAPH trap state, filled by nouveau_graph_trap_info(). */
struct nouveau_pgraph_trap {
	int channel;		/* trapping channel id; -1 when unknown */
	int class;		/* object class on the trapping subchannel */
	int subc, mthd, size;
	uint32_t data, data2;	/* data2 only read on NV10+ */
	uint32_t nsource, nstatus;	/* pre-NV50 only; zeroed otherwise */
};
/* Fill *trap with the state of the current PGRAPH trap, reading the
 * TRAPPED_ADDR/DATA registers and the per-generation class register. */
static void
nouveau_graph_trap_info(struct drm_device *dev,
			struct nouveau_pgraph_trap *trap)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t address;

	trap->nsource = trap->nstatus = 0;
	/* nSource/nStatus only exist on pre-NV50 chips. */
	if (dev_priv->card_type < NV_50) {
		trap->nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
		trap->nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
	}

	/* -1 flags "channel unknown" for the dump code. */
	if (nouveau_graph_trapped_channel(dev, &trap->channel))
		trap->channel = -1;
	address = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);

	trap->mthd = address & 0x1FFC;
	trap->data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
	/* The subchannel field moved (and DATA_HIGH appeared) on NV10+. */
	if (dev_priv->card_type < NV_10) {
		trap->subc = (address >> 13) & 0x7;
	} else {
		trap->subc = (address >> 16) & 0x7;
		trap->data2 = nv_rd32(dev, NV10_PGRAPH_TRAPPED_DATA_HIGH);
	}

	/* The width of the class field grew over the generations. */
	if (dev_priv->card_type < NV_10)
		trap->class = nv_rd32(dev, 0x400180 + trap->subc*4) & 0xFF;
	else if (dev_priv->card_type < NV_40)
		trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFF;
	else if (dev_priv->card_type < NV_50)
		trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFFF;
	else
		trap->class = nv_rd32(dev, 0x400814);
}
  376. static void
  377. nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
  378. struct nouveau_pgraph_trap *trap)
  379. {
  380. struct drm_nouveau_private *dev_priv = dev->dev_private;
  381. uint32_t nsource = trap->nsource, nstatus = trap->nstatus;
  382. if (dev_priv->card_type < NV_50) {
  383. NV_INFO(dev, "%s - nSource:", id);
  384. nouveau_print_bitfield_names(nsource, nsource_names);
  385. printk(", nStatus:");
  386. if (dev_priv->card_type < NV_10)
  387. nouveau_print_bitfield_names(nstatus, nstatus_names);
  388. else
  389. nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
  390. printk("\n");
  391. }
  392. NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
  393. "Data 0x%08x:0x%08x\n",
  394. id, trap->channel, trap->subc,
  395. trap->class, trap->mthd,
  396. trap->data2, trap->data);
  397. }
  398. static int
  399. nouveau_pgraph_intr_swmthd(struct drm_device *dev,
  400. struct nouveau_pgraph_trap *trap)
  401. {
  402. struct drm_nouveau_private *dev_priv = dev->dev_private;
  403. if (trap->channel < 0 ||
  404. trap->channel >= dev_priv->engine.fifo.channels ||
  405. !dev_priv->fifos[trap->channel])
  406. return -ENODEV;
  407. return nouveau_call_method(dev_priv->fifos[trap->channel],
  408. trap->class, trap->mthd, trap->data);
  409. }
  410. static inline void
  411. nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
  412. {
  413. struct nouveau_pgraph_trap trap;
  414. int unhandled = 0;
  415. nouveau_graph_trap_info(dev, &trap);
  416. if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
  417. if (nouveau_pgraph_intr_swmthd(dev, &trap))
  418. unhandled = 1;
  419. } else {
  420. unhandled = 1;
  421. }
  422. if (unhandled)
  423. nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
  424. }
/* Rate-limit noisy PGRAPH error dumps: at most 20 bursts per 3 seconds. */
static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);

/* Returns nonzero while still under the rate limit (i.e. OK to print). */
static int nouveau_ratelimit(void)
{
	return __ratelimit(&nouveau_ratelimit_state);
}
/* Handle a PGRAPH ERROR interrupt.  ILLEGAL_MTHD errors may be
 * software methods; DMA_VTX_PROTECTION gets a register poke; anything
 * unhandled is dumped (rate-limited). */
static inline void
nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
{
	struct nouveau_pgraph_trap trap;
	int unhandled = 0;

	nouveau_graph_trap_info(dev, &trap);
	trap.nsource = nsource;

	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
		if (nouveau_pgraph_intr_swmthd(dev, &trap))
			unhandled = 1;
	} else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
		/* Read-then-writeback of 0x402000 — NOTE(review):
		 * presumably acks the VTX protection fault; unverified. */
		uint32_t v = nv_rd32(dev, 0x402000);
		nv_wr32(dev, 0x402000, v);

		/* dump the error anyway for now: it's useful for
		   Gallium development */
		unhandled = 1;
	} else {
		unhandled = 1;
	}

	if (unhandled && nouveau_ratelimit())
		nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap);
}
  452. static inline void
  453. nouveau_pgraph_intr_context_switch(struct drm_device *dev)
  454. {
  455. struct drm_nouveau_private *dev_priv = dev->dev_private;
  456. struct nouveau_engine *engine = &dev_priv->engine;
  457. uint32_t chid;
  458. chid = engine->fifo.channel_id(dev);
  459. NV_DEBUG(dev, "PGRAPH context switch interrupt channel %x\n", chid);
  460. switch (dev_priv->card_type) {
  461. case NV_04:
  462. nv04_graph_context_switch(dev);
  463. break;
  464. case NV_10:
  465. nv10_graph_context_switch(dev);
  466. break;
  467. default:
  468. NV_ERROR(dev, "Context switch not implemented\n");
  469. break;
  470. }
  471. }
/* Top-level PGRAPH interrupt handler: dispatch and ack every pending
 * PGRAPH interrupt source, then clear the PGRAPH bit in PMC.  Each
 * source is acked in NV03_PGRAPH_INTR individually after handling. */
static void
nouveau_pgraph_irq_handler(struct drm_device *dev)
{
	uint32_t status;

	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);

		if (status & NV_PGRAPH_INTR_NOTIFY) {
			nouveau_pgraph_intr_notify(dev, nsource);

			status &= ~NV_PGRAPH_INTR_NOTIFY;
			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
		}

		if (status & NV_PGRAPH_INTR_ERROR) {
			nouveau_pgraph_intr_error(dev, nsource);

			status &= ~NV_PGRAPH_INTR_ERROR;
			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR);
		}

		if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
			nouveau_pgraph_intr_context_switch(dev);

			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
			nv_wr32(dev, NV03_PGRAPH_INTR,
				NV_PGRAPH_INTR_CONTEXT_SWITCH);
		}

		/* Anything left is unexpected; ack it so we don't spin. */
		if (status) {
			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
			nv_wr32(dev, NV03_PGRAPH_INTR, status);
		}

		/* Set NV04_PGRAPH_FIFO bit 0 again if it was cleared. */
		if ((nv_rd32(dev, NV04_PGRAPH_FIFO) & (1 << 0)) == 0)
			nv_wr32(dev, NV04_PGRAPH_FIFO, 1);
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
}
/* Read back and report a VM fault latched by PFB (0x100c90/0x100c94).
 *
 * Bit 31 of 0x100c90 marks a valid fault record; the six data words
 * are fetched by rewriting the record index with the word number in
 * bits 24+.  'display' gates the log output.  Writing the index back
 * with bit 31 set appears to release the record — NOTE(review):
 * confirm against PFB documentation.
 */
static void
nv50_pfb_vm_trap(struct drm_device *dev, int display, const char *name)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t trap[6];
	int i, ch;
	uint32_t idx = nv_rd32(dev, 0x100c90);

	if (idx & 0x80000000) {
		idx &= 0xffffff;
		if (display) {
			for (i = 0; i < 6; i++) {
				nv_wr32(dev, 0x100c90, idx | i << 24);
				trap[i] = nv_rd32(dev, 0x100c94);
			}
			/* Match trap[1] against each channel's RAMIN
			 * instance.  NOTE(review): when nothing matches,
			 * ch ends up == engine.fifo.channels and is
			 * printed as-is. */
			for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
				struct nouveau_channel *chan = dev_priv->fifos[ch];

				if (!chan || !chan->ramin)
					continue;

				if (trap[1] == chan->ramin->instance >> 12)
					break;
			}
			NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x %08x channel %d\n",
					name, (trap[5]&0x100?"read":"write"),
					trap[5]&0xff, trap[4]&0xffff,
					trap[3]&0xffff, trap[0], trap[2], ch);
		}
		nv_wr32(dev, 0x100c90, idx | 0x80000000);
	} else if (display) {
		NV_INFO(dev, "%s - no VM fault?\n", name);
	}
}
/* Known MP execution trap status codes, decoded in nv50_pgraph_mp_trap(). */
static struct nouveau_enum_names nv50_mp_exec_error_names[] =
{
	{ 3, "STACK_UNDERFLOW" },
	{ 4, "QUADON_ACTIVE" },
	{ 8, "TIMEOUT" },
	{ 0x10, "INVALID_OPCODE" },
	{ 0x40, "BREAKPOINT" },
};
/* Report and clear MP execution errors for one TP.
 *
 * Scans the (up to 4) MPs of TP 'tpid' whose enable bits are set in
 * 0x1540; for each MP with a nonzero status register, optionally logs
 * the decoded status plus PC/opcode, then acks by writing the +0x10
 * register back and clearing +0x14.  'display' gates log output only —
 * status is always cleared.
 */
static void
nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t units = nv_rd32(dev, 0x1540);
	uint32_t addr, mp10, status, pc, oplow, ophigh;
	int i;
	int mps = 0;

	for (i = 0; i < 4; i++) {
		if (!(units & 1 << (i+24)))
			continue;

		/* Per-MP register base/stride changed at NVA0. */
		if (dev_priv->chipset < 0xa0)
			addr = 0x408200 + (tpid << 12) + (i << 7);
		else
			addr = 0x408100 + (tpid << 11) + (i << 7);

		mp10 = nv_rd32(dev, addr + 0x10);
		status = nv_rd32(dev, addr + 0x14);
		if (!status)
			continue;

		if (display) {
			/* NOTE(review): the +0x20 read's result is
			 * discarded — presumably a required side-effect
			 * read; confirm. */
			nv_rd32(dev, addr + 0x20);
			pc = nv_rd32(dev, addr + 0x24);
			oplow = nv_rd32(dev, addr + 0x70);
			ophigh= nv_rd32(dev, addr + 0x74);
			NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
					"TP %d MP %d: ", tpid, i);
			nouveau_print_enum_names(status,
					nv50_mp_exec_error_names);
			printk(" at %06x warp %d, opcode %08x %08x\n",
					pc&0xffffff, pc >> 24,
					oplow, ophigh);
		}

		nv_wr32(dev, addr + 0x10, mp10);
		nv_wr32(dev, addr + 0x14, 0);
		mps++;
	}

	if (!mps && display)
		NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
				"No MPs claiming errors?\n", tpid);
}
/* Handle a per-TP trap of the given 'type' (6 = texture, 7 = MP,
 * 8 = TPDMA).
 *
 * Walks the 16 possible TPs enabled in 0x1540, reads each TP's ustatus
 * register ('ustatus_old' base pre-NVA0, 'ustatus_new' after, with
 * different strides) and decodes/logs the trap.  Every TP's ustatus is
 * acked with 0xc0000000 regardless of 'display'.
 */
static void
nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
		uint32_t ustatus_new, int display, const char *name)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int tps = 0;
	uint32_t units = nv_rd32(dev, 0x1540);
	int i, r;
	uint32_t ustatus_addr, ustatus;

	for (i = 0; i < 16; i++) {
		if (!(units & (1 << i)))
			continue;
		/* Per-TP register base/stride changed at NVA0. */
		if (dev_priv->chipset < 0xa0)
			ustatus_addr = ustatus_old + (i << 12);
		else
			ustatus_addr = ustatus_new + (i << 11);
		ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
		if (!ustatus)
			continue;
		tps++;
		switch (type) {
		case 6: /* texture error... unknown for now */
			nv50_pfb_vm_trap(dev, display, name);
			if (display) {
				NV_ERROR(dev, "magic set %d:\n", i);
				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
					NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
						nv_rd32(dev, r));
			}
			break;
		case 7: /* MP error */
			if (ustatus & 0x00010000) {
				nv50_pgraph_mp_trap(dev, i, display);
				ustatus &= ~0x00010000;
			}
			break;
		case 8: /* TPDMA error */
			{
			uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
			uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
			uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
			uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
			uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
			uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
			uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
			nv50_pfb_vm_trap(dev, display, name);
			/* 2d engine destination */
			if (ustatus & 0x00000010) {
				if (display) {
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
							i, e14, e10);
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000010;
			}
			/* Render target */
			if (ustatus & 0x00000040) {
				if (display) {
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
							i, e14, e10);
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000040;
			}
			/* CUDA memory: l[], g[] or stack. */
			if (ustatus & 0x00000080) {
				if (display) {
					if (e18 & 0x80000000) {
						/* g[] read fault? */
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
								i, e14, e10 | ((e18 >> 24) & 0x1f));
						e18 &= ~0x1f000000;
					} else if (e18 & 0xc) {
						/* g[] write fault? */
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
								i, e14, e10 | ((e18 >> 7) & 0x1f));
						e18 &= ~0x00000f80;
					} else {
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
								i, e14, e10);
					}
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000080;
			}
			}
			break;
		}
		if (ustatus) {
			if (display)
				NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
		}
		/* Ack this TP's trap status. */
		nv_wr32(dev, ustatus_addr, 0xc0000000);
	}

	if (!tps && display)
		NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
}
  682. static void
  683. nv50_pgraph_trap_handler(struct drm_device *dev)
  684. {
  685. struct nouveau_pgraph_trap trap;
  686. uint32_t status = nv_rd32(dev, 0x400108);
  687. uint32_t ustatus;
  688. int display = nouveau_ratelimit();
  689. if (!status && display) {
  690. nouveau_graph_trap_info(dev, &trap);
  691. nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap);
  692. NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n");
  693. }
  694. /* DISPATCH: Relays commands to other units and handles NOTIFY,
  695. * COND, QUERY. If you get a trap from it, the command is still stuck
  696. * in DISPATCH and you need to do something about it. */
  697. if (status & 0x001) {
  698. ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
  699. if (!ustatus && display) {
  700. NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
  701. }
  702. /* Known to be triggered by screwed up NOTIFY and COND... */
  703. if (ustatus & 0x00000001) {
  704. nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
  705. nv_wr32(dev, 0x400500, 0);
  706. if (nv_rd32(dev, 0x400808) & 0x80000000) {
  707. if (display) {
  708. if (nouveau_graph_trapped_channel(dev, &trap.channel))
  709. trap.channel = -1;
  710. trap.class = nv_rd32(dev, 0x400814);
  711. trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc;
  712. trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7;
  713. trap.data = nv_rd32(dev, 0x40080c);
  714. trap.data2 = nv_rd32(dev, 0x400810);
  715. nouveau_graph_dump_trap_info(dev,
  716. "PGRAPH_TRAP_DISPATCH_FAULT", &trap);
  717. NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808));
  718. NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848));
  719. }
  720. nv_wr32(dev, 0x400808, 0);
  721. } else if (display) {
  722. NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n");
  723. }
  724. nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
  725. nv_wr32(dev, 0x400848, 0);
  726. ustatus &= ~0x00000001;
  727. }
  728. if (ustatus & 0x00000002) {
  729. nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
  730. nv_wr32(dev, 0x400500, 0);
  731. if (nv_rd32(dev, 0x40084c) & 0x80000000) {
  732. if (display) {
  733. if (nouveau_graph_trapped_channel(dev, &trap.channel))
  734. trap.channel = -1;
  735. trap.class = nv_rd32(dev, 0x400814);
  736. trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc;
  737. trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7;
  738. trap.data = nv_rd32(dev, 0x40085c);
  739. trap.data2 = 0;
  740. nouveau_graph_dump_trap_info(dev,
  741. "PGRAPH_TRAP_DISPATCH_QUERY", &trap);
  742. NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c));
  743. }
  744. nv_wr32(dev, 0x40084c, 0);
  745. } else if (display) {
  746. NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n");
  747. }
  748. ustatus &= ~0x00000002;
  749. }
  750. if (ustatus && display)
  751. NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus);
  752. nv_wr32(dev, 0x400804, 0xc0000000);
  753. nv_wr32(dev, 0x400108, 0x001);
  754. status &= ~0x001;
  755. }
  756. /* TRAPs other than dispatch use the "normal" trap regs. */
  757. if (status && display) {
  758. nouveau_graph_trap_info(dev, &trap);
  759. nouveau_graph_dump_trap_info(dev,
  760. "PGRAPH_TRAP", &trap);
  761. }
  762. /* M2MF: Memory to memory copy engine. */
  763. if (status & 0x002) {
  764. ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
  765. if (!ustatus && display) {
  766. NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n");
  767. }
  768. if (ustatus & 0x00000001) {
  769. nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
  770. ustatus &= ~0x00000001;
  771. }
  772. if (ustatus & 0x00000002) {
  773. nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
  774. ustatus &= ~0x00000002;
  775. }
  776. if (ustatus & 0x00000004) {
  777. nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
  778. ustatus &= ~0x00000004;
  779. }
  780. NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n",
  781. nv_rd32(dev, 0x406804),
  782. nv_rd32(dev, 0x406808),
  783. nv_rd32(dev, 0x40680c),
  784. nv_rd32(dev, 0x406810));
  785. if (ustatus && display)
  786. NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus);
  787. /* No sane way found yet -- just reset the bugger. */
  788. nv_wr32(dev, 0x400040, 2);
  789. nv_wr32(dev, 0x400040, 0);
  790. nv_wr32(dev, 0x406800, 0xc0000000);
  791. nv_wr32(dev, 0x400108, 0x002);
  792. status &= ~0x002;
  793. }
  794. /* VFETCH: Fetches data from vertex buffers. */
  795. if (status & 0x004) {
  796. ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
  797. if (!ustatus && display) {
  798. NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n");
  799. }
  800. if (ustatus & 0x00000001) {
  801. nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
  802. NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n",
  803. nv_rd32(dev, 0x400c00),
  804. nv_rd32(dev, 0x400c08),
  805. nv_rd32(dev, 0x400c0c),
  806. nv_rd32(dev, 0x400c10));
  807. ustatus &= ~0x00000001;
  808. }
  809. if (ustatus && display)
  810. NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus);
  811. nv_wr32(dev, 0x400c04, 0xc0000000);
  812. nv_wr32(dev, 0x400108, 0x004);
  813. status &= ~0x004;
  814. }
  815. /* STRMOUT: DirectX streamout / OpenGL transform feedback. */
  816. if (status & 0x008) {
  817. ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
  818. if (!ustatus && display) {
  819. NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n");
  820. }
  821. if (ustatus & 0x00000001) {
  822. nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
  823. NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n",
  824. nv_rd32(dev, 0x401804),
  825. nv_rd32(dev, 0x401808),
  826. nv_rd32(dev, 0x40180c),
  827. nv_rd32(dev, 0x401810));
  828. ustatus &= ~0x00000001;
  829. }
  830. if (ustatus && display)
  831. NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus);
  832. /* No sane way found yet -- just reset the bugger. */
  833. nv_wr32(dev, 0x400040, 0x80);
  834. nv_wr32(dev, 0x400040, 0);
  835. nv_wr32(dev, 0x401800, 0xc0000000);
  836. nv_wr32(dev, 0x400108, 0x008);
  837. status &= ~0x008;
  838. }
  839. /* CCACHE: Handles code and c[] caches and fills them. */
  840. if (status & 0x010) {
  841. ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
  842. if (!ustatus && display) {
  843. NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n");
  844. }
  845. if (ustatus & 0x00000001) {
  846. nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
  847. NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n",
  848. nv_rd32(dev, 0x405800),
  849. nv_rd32(dev, 0x405804),
  850. nv_rd32(dev, 0x405808),
  851. nv_rd32(dev, 0x40580c),
  852. nv_rd32(dev, 0x405810),
  853. nv_rd32(dev, 0x405814),
  854. nv_rd32(dev, 0x40581c));
  855. ustatus &= ~0x00000001;
  856. }
  857. if (ustatus && display)
  858. NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus);
  859. nv_wr32(dev, 0x405018, 0xc0000000);
  860. nv_wr32(dev, 0x400108, 0x010);
  861. status &= ~0x010;
  862. }
  863. /* Unknown, not seen yet... 0x402000 is the only trap status reg
  864. * remaining, so try to handle it anyway. Perhaps related to that
  865. * unknown DMA slot on tesla? */
  866. if (status & 0x20) {
  867. nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
  868. ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
  869. if (display)
  870. NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus);
  871. nv_wr32(dev, 0x402000, 0xc0000000);
872. /* no status modification on purpose */
  873. }
  874. /* TEXTURE: CUDA texturing units */
  875. if (status & 0x040) {
  876. nv50_pgraph_tp_trap (dev, 6, 0x408900, 0x408600, display,
  877. "PGRAPH_TRAP_TEXTURE");
  878. nv_wr32(dev, 0x400108, 0x040);
  879. status &= ~0x040;
  880. }
  881. /* MP: CUDA execution engines. */
  882. if (status & 0x080) {
  883. nv50_pgraph_tp_trap (dev, 7, 0x408314, 0x40831c, display,
  884. "PGRAPH_TRAP_MP");
  885. nv_wr32(dev, 0x400108, 0x080);
  886. status &= ~0x080;
  887. }
  888. /* TPDMA: Handles TP-initiated uncached memory accesses:
  889. * l[], g[], stack, 2d surfaces, render targets. */
  890. if (status & 0x100) {
  891. nv50_pgraph_tp_trap (dev, 8, 0x408e08, 0x408708, display,
  892. "PGRAPH_TRAP_TPDMA");
  893. nv_wr32(dev, 0x400108, 0x100);
  894. status &= ~0x100;
  895. }
  896. if (status) {
  897. if (display)
  898. NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n",
  899. status);
  900. nv_wr32(dev, 0x400108, status);
  901. }
  902. }
  903. /* There must be a *lot* of these. Will take some time to gather them up. */
  904. static struct nouveau_enum_names nv50_data_error_names[] =
  905. {
  906. { 4, "INVALID_VALUE" },
  907. { 5, "INVALID_ENUM" },
  908. { 8, "INVALID_OBJECT" },
  909. { 0xc, "INVALID_BITFIELD" },
  910. { 0x28, "MP_NO_REG_SPACE" },
  911. { 0x2b, "MP_BLOCK_SIZE_MISMATCH" },
  912. };
  913. static void
  914. nv50_pgraph_irq_handler(struct drm_device *dev)
  915. {
  916. struct nouveau_pgraph_trap trap;
  917. int unhandled = 0;
  918. uint32_t status;
  919. while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
  920. /* NOTIFY: You've set a NOTIFY an a command and it's done. */
  921. if (status & 0x00000001) {
  922. nouveau_graph_trap_info(dev, &trap);
  923. if (nouveau_ratelimit())
  924. nouveau_graph_dump_trap_info(dev,
  925. "PGRAPH_NOTIFY", &trap);
  926. status &= ~0x00000001;
  927. nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
  928. }
  929. /* COMPUTE_QUERY: Purpose and exact cause unknown, happens
  930. * when you write 0x200 to 0x50c0 method 0x31c. */
  931. if (status & 0x00000002) {
  932. nouveau_graph_trap_info(dev, &trap);
  933. if (nouveau_ratelimit())
  934. nouveau_graph_dump_trap_info(dev,
  935. "PGRAPH_COMPUTE_QUERY", &trap);
  936. status &= ~0x00000002;
  937. nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002);
  938. }
  939. /* Unknown, never seen: 0x4 */
  940. /* ILLEGAL_MTHD: You used a wrong method for this class. */
  941. if (status & 0x00000010) {
  942. nouveau_graph_trap_info(dev, &trap);
  943. if (nouveau_pgraph_intr_swmthd(dev, &trap))
  944. unhandled = 1;
  945. if (unhandled && nouveau_ratelimit())
  946. nouveau_graph_dump_trap_info(dev,
  947. "PGRAPH_ILLEGAL_MTHD", &trap);
  948. status &= ~0x00000010;
  949. nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
  950. }
  951. /* ILLEGAL_CLASS: You used a wrong class. */
  952. if (status & 0x00000020) {
  953. nouveau_graph_trap_info(dev, &trap);
  954. if (nouveau_ratelimit())
  955. nouveau_graph_dump_trap_info(dev,
  956. "PGRAPH_ILLEGAL_CLASS", &trap);
  957. status &= ~0x00000020;
  958. nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020);
  959. }
  960. /* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. */
  961. if (status & 0x00000040) {
  962. nouveau_graph_trap_info(dev, &trap);
  963. if (nouveau_ratelimit())
  964. nouveau_graph_dump_trap_info(dev,
  965. "PGRAPH_DOUBLE_NOTIFY", &trap);
  966. status &= ~0x00000040;
  967. nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040);
  968. }
  969. /* CONTEXT_SWITCH: PGRAPH needs us to load a new context */
  970. if (status & 0x00001000) {
  971. nv_wr32(dev, 0x400500, 0x00000000);
  972. nv_wr32(dev, NV03_PGRAPH_INTR,
  973. NV_PGRAPH_INTR_CONTEXT_SWITCH);
  974. nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
  975. NV40_PGRAPH_INTR_EN) &
  976. ~NV_PGRAPH_INTR_CONTEXT_SWITCH);
  977. nv_wr32(dev, 0x400500, 0x00010001);
  978. nv50_graph_context_switch(dev);
  979. status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
  980. }
  981. /* BUFFER_NOTIFY: Your m2mf transfer finished */
  982. if (status & 0x00010000) {
  983. nouveau_graph_trap_info(dev, &trap);
  984. if (nouveau_ratelimit())
  985. nouveau_graph_dump_trap_info(dev,
  986. "PGRAPH_BUFFER_NOTIFY", &trap);
  987. status &= ~0x00010000;
  988. nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000);
  989. }
  990. /* DATA_ERROR: Invalid value for this method, or invalid
  991. * state in current PGRAPH context for this operation */
  992. if (status & 0x00100000) {
  993. nouveau_graph_trap_info(dev, &trap);
  994. if (nouveau_ratelimit()) {
  995. nouveau_graph_dump_trap_info(dev,
  996. "PGRAPH_DATA_ERROR", &trap);
  997. NV_INFO (dev, "PGRAPH_DATA_ERROR - ");
  998. nouveau_print_enum_names(nv_rd32(dev, 0x400110),
  999. nv50_data_error_names);
  1000. printk("\n");
  1001. }
  1002. status &= ~0x00100000;
  1003. nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
  1004. }
  1005. /* TRAP: Something bad happened in the middle of command
  1006. * execution. Has a billion types, subtypes, and even
  1007. * subsubtypes. */
  1008. if (status & 0x00200000) {
  1009. nv50_pgraph_trap_handler(dev);
  1010. status &= ~0x00200000;
  1011. nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
  1012. }
  1013. /* Unknown, never seen: 0x00400000 */
  1014. /* SINGLE_STEP: Happens on every method if you turned on
  1015. * single stepping in 40008c */
  1016. if (status & 0x01000000) {
  1017. nouveau_graph_trap_info(dev, &trap);
  1018. if (nouveau_ratelimit())
  1019. nouveau_graph_dump_trap_info(dev,
  1020. "PGRAPH_SINGLE_STEP", &trap);
  1021. status &= ~0x01000000;
  1022. nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000);
  1023. }
  1024. /* 0x02000000 happens when you pause a ctxprog...
  1025. * but the only way this can happen that I know is by
  1026. * poking the relevant MMIO register, and we don't
  1027. * do that. */
  1028. if (status) {
  1029. NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
  1030. status);
  1031. nv_wr32(dev, NV03_PGRAPH_INTR, status);
  1032. }
  1033. {
  1034. const int isb = (1 << 16) | (1 << 0);
  1035. if ((nv_rd32(dev, 0x400500) & isb) != isb)
  1036. nv_wr32(dev, 0x400500,
  1037. nv_rd32(dev, 0x400500) | isb);
  1038. }
  1039. }
  1040. nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
  1041. if (nv_rd32(dev, 0x400824) & (1 << 31))
  1042. nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
  1043. }
  1044. static void
  1045. nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
  1046. {
  1047. if (crtc & 1)
  1048. nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
  1049. if (crtc & 2)
  1050. nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
  1051. }
  1052. irqreturn_t
  1053. nouveau_irq_handler(DRM_IRQ_ARGS)
  1054. {
  1055. struct drm_device *dev = (struct drm_device *)arg;
  1056. struct drm_nouveau_private *dev_priv = dev->dev_private;
  1057. uint32_t status, fbdev_flags = 0;
  1058. unsigned long flags;
  1059. status = nv_rd32(dev, NV03_PMC_INTR_0);
  1060. if (!status)
  1061. return IRQ_NONE;
  1062. spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
  1063. if (dev_priv->fbdev_info) {
  1064. fbdev_flags = dev_priv->fbdev_info->flags;
  1065. dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
  1066. }
  1067. if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
  1068. nouveau_fifo_irq_handler(dev);
  1069. status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
  1070. }
  1071. if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
  1072. if (dev_priv->card_type >= NV_50)
  1073. nv50_pgraph_irq_handler(dev);
  1074. else
  1075. nouveau_pgraph_irq_handler(dev);
  1076. status &= ~NV_PMC_INTR_0_PGRAPH_PENDING;
  1077. }
  1078. if (status & NV_PMC_INTR_0_CRTCn_PENDING) {
  1079. nouveau_crtc_irq_handler(dev, (status>>24)&3);
  1080. status &= ~NV_PMC_INTR_0_CRTCn_PENDING;
  1081. }
  1082. if (status & (NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
  1083. NV_PMC_INTR_0_NV50_I2C_PENDING)) {
  1084. nv50_display_irq_handler(dev);
  1085. status &= ~(NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
  1086. NV_PMC_INTR_0_NV50_I2C_PENDING);
  1087. }
  1088. if (status)
  1089. NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);
  1090. if (dev_priv->fbdev_info)
  1091. dev_priv->fbdev_info->flags = fbdev_flags;
  1092. spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
  1093. return IRQ_HANDLED;
  1094. }