/* nouveau_irq.c */
/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */
  31. #include "drmP.h"
  32. #include "drm.h"
  33. #include "nouveau_drm.h"
  34. #include "nouveau_drv.h"
  35. #include "nouveau_reg.h"
  36. #include "nouveau_ramht.h"
  37. #include <linux/ratelimit.h>
  38. /* needed for hotplug irq */
  39. #include "nouveau_connector.h"
  40. #include "nv50_display.h"
/* Rate-limit state shared by the IRQ error paths: at most 20 messages
 * per 3-second window, so a wedged channel cannot flood the kernel log. */
static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);

/* Returns nonzero when another rate-limited message may be printed. */
static int nouveau_ratelimit(void)
{
	return __ratelimit(&nouveau_ratelimit_state);
}
/* DRM irq_preinstall hook: mask all interrupts at the PMC before the
 * handler is installed, and set up the NV50+ deferred-work state. */
void
nouveau_irq_preinstall(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* Master disable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);

	if (dev_priv->card_type >= NV_50) {
		/* NV50+ pushes display/hotplug IRQ handling to workqueues */
		INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
		INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh);
		spin_lock_init(&dev_priv->hpd_state.lock);
		INIT_LIST_HEAD(&dev_priv->vbl_waiting);
	}
}
/* DRM irq_postinstall hook: unmask interrupts at the PMC.  Always
 * returns 0. */
int
nouveau_irq_postinstall(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* Master enable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);

	if (dev_priv->msi_enabled)
		/* NOTE(review): magic MSI rearm register; byte write of 0xff
		 * presumably re-enables MSI generation - unverified here. */
		nv_wr08(dev, 0x00088068, 0xff);

	return 0;
}
/* DRM irq_uninstall hook: mask all interrupts at the PMC again. */
void
nouveau_irq_uninstall(struct drm_device *dev)
{
	/* Master disable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
}
  75. static bool
  76. nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
  77. {
  78. struct drm_nouveau_private *dev_priv = dev->dev_private;
  79. struct nouveau_channel *chan = NULL;
  80. struct nouveau_gpuobj *obj;
  81. unsigned long flags;
  82. const int subc = (addr >> 13) & 0x7;
  83. const int mthd = addr & 0x1ffc;
  84. bool handled = false;
  85. u32 engine;
  86. spin_lock_irqsave(&dev_priv->channels.lock, flags);
  87. if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels))
  88. chan = dev_priv->channels.ptr[chid];
  89. if (unlikely(!chan))
  90. goto out;
  91. switch (mthd) {
  92. case 0x0000: /* bind object to subchannel */
  93. obj = nouveau_ramht_find(chan, data);
  94. if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW))
  95. break;
  96. chan->sw_subchannel[subc] = obj->class;
  97. engine = 0x0000000f << (subc * 4);
  98. nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000);
  99. handled = true;
  100. break;
  101. default:
  102. engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE);
  103. if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
  104. break;
  105. if (!nouveau_gpuobj_mthd_call(chan, chan->sw_subchannel[subc],
  106. mthd, data))
  107. handled = true;
  108. break;
  109. }
  110. out:
  111. spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
  112. return handled;
  113. }
/* Service all pending PFIFO interrupts: cache errors (possible software
 * methods), DMA pusher faults, semaphore releases and NV50 BAR faults.
 * Gives up and masks PFIFO after 100 iterations to avoid livelocking. */
static void
nouveau_fifo_irq_handler(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	uint32_t status, reassign;
	int cnt = 0;

	/* remember whether context reassignment was on, so it can be
	 * restored after each serviced interrupt */
	reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
	while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
		uint32_t chid, get;

		nv_wr32(dev, NV03_PFIFO_CACHES, 0);

		chid = engine->fifo.channel_id(dev);
		get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);

		if (status & NV_PFIFO_INTR_CACHE_ERROR) {
			uint32_t mthd, data;
			int ptr;

			/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
			 * wrapping on my G80 chips, but CACHE1 isn't big
			 * enough for this much data.. Tests show that it
			 * wraps around to the start at GET=0x800.. No clue
			 * as to why..
			 */
			ptr = (get & 0x7ff) >> 2;

			if (dev_priv->card_type < NV_40) {
				mthd = nv_rd32(dev,
					NV04_PFIFO_CACHE1_METHOD(ptr));
				data = nv_rd32(dev,
					NV04_PFIFO_CACHE1_DATA(ptr));
			} else {
				mthd = nv_rd32(dev,
					NV40_PFIFO_CACHE1_METHOD(ptr));
				data = nv_rd32(dev,
					NV40_PFIFO_CACHE1_DATA(ptr));
			}

			/* try the software-method path first; only log if
			 * no SW object claimed the method */
			if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) {
				NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
					"Mthd 0x%04x Data 0x%08x\n",
					chid, (mthd >> 13) & 7, mthd & 0x1ffc,
					data);
			}

			/* recovery: stop the pusher, ack the interrupt,
			 * step GET past the bad entry with CACHE1 fetch
			 * disabled, then restart everything */
			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
			nv_wr32(dev, NV03_PFIFO_INTR_0,
				NV_PFIFO_INTR_CACHE_ERROR);

			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
			nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);

			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
				nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);

			status &= ~NV_PFIFO_INTR_CACHE_ERROR;
		}

		if (status & NV_PFIFO_INTR_DMA_PUSHER) {
			u32 dma_get = nv_rd32(dev, 0x003244);
			u32 dma_put = nv_rd32(dev, 0x003240);
			u32 push = nv_rd32(dev, 0x003220);
			u32 state = nv_rd32(dev, 0x003228);

			if (dev_priv->card_type == NV_50) {
				/* NV50 has high-order GET/PUT words and an
				 * indirect buffer (IB) get/put pair */
				u32 ho_get = nv_rd32(dev, 0x003328);
				u32 ho_put = nv_rd32(dev, 0x003320);
				u32 ib_get = nv_rd32(dev, 0x003334);
				u32 ib_put = nv_rd32(dev, 0x003330);

				if (nouveau_ratelimit())
					NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
					     "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
					     "State 0x%08x Push 0x%08x\n",
						chid, ho_get, dma_get, ho_put,
						dma_put, ib_get, ib_put, state,
						push);

				/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
				nv_wr32(dev, 0x003364, 0x00000000);
				/* skip the faulting command by snapping
				 * GET forward to PUT */
				if (dma_get != dma_put || ho_get != ho_put) {
					nv_wr32(dev, 0x003244, dma_put);
					nv_wr32(dev, 0x003328, ho_put);
				} else
				if (ib_get != ib_put) {
					nv_wr32(dev, 0x003334, ib_put);
				}
			} else {
				NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
					     "Put 0x%08x State 0x%08x Push 0x%08x\n",
					chid, dma_get, dma_put, state, push);

				if (dma_get != dma_put)
					nv_wr32(dev, 0x003244, dma_put);
			}

			/* clear DMA state and re-enable the pusher */
			nv_wr32(dev, 0x003228, 0x00000000);
			nv_wr32(dev, 0x003220, 0x00000001);
			nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
			status &= ~NV_PFIFO_INTR_DMA_PUSHER;
		}

		if (status & NV_PFIFO_INTR_SEMAPHORE) {
			uint32_t sem;

			status &= ~NV_PFIFO_INTR_SEMAPHORE;
			nv_wr32(dev, NV03_PFIFO_INTR_0,
				NV_PFIFO_INTR_SEMAPHORE);

			/* ack the semaphore and step past its method */
			sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
			nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
		}

		if (dev_priv->card_type == NV_50) {
			if (status & 0x00000010) {
				nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
				status &= ~0x00000010;
				nv_wr32(dev, 0x002100, 0x00000010);
			}
		}

		if (status) {
			if (nouveau_ratelimit())
				NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
					status, chid);
			nv_wr32(dev, NV03_PFIFO_INTR_0, status);
			status = 0;
		}

		nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
	}

	/* still pending after 100 spins: mask PFIFO interrupts rather
	 * than livelock inside the IRQ handler */
	if (status) {
		NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
		nv_wr32(dev, 0x2140, 0);
		nv_wr32(dev, 0x140, 0);
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
}
/* Maps a single status-register bit mask to a human-readable name. */
struct nouveau_bitfield_names {
	uint32_t mask;
	const char *name;
};
/* PGRAPH NSTATUS bit names, pre-NV10 register layout. */
static struct nouveau_bitfield_names nstatus_names[] =
{
	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
	{ NV04_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
	{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
	{ NV04_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
};

/* PGRAPH NSTATUS bit names, NV10+ register layout. */
static struct nouveau_bitfield_names nstatus_names_nv10[] =
{
	{ NV10_PGRAPH_NSTATUS_STATE_IN_USE, "STATE_IN_USE" },
	{ NV10_PGRAPH_NSTATUS_INVALID_STATE, "INVALID_STATE" },
	{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT, "BAD_ARGUMENT" },
	{ NV10_PGRAPH_NSTATUS_PROTECTION_FAULT, "PROTECTION_FAULT" }
};

/* PGRAPH NSOURCE bit names (error source of a trap). */
static struct nouveau_bitfield_names nsource_names[] =
{
	{ NV03_PGRAPH_NSOURCE_NOTIFICATION, "NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DATA_ERROR, "DATA_ERROR" },
	{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR, "PROTECTION_ERROR" },
	{ NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION, "RANGE_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_COLOR, "LIMIT_COLOR" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_ZETA, "LIMIT_ZETA" },
	{ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD, "ILLEGAL_MTHD" },
	{ NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION, "DMA_R_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION, "DMA_W_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION, "FORMAT_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION, "PATCH_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_STATE_INVALID, "STATE_INVALID" },
	{ NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY, "DOUBLE_NOTIFY" },
	{ NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE, "NOTIFY_IN_USE" },
	{ NV03_PGRAPH_NSOURCE_METHOD_CNT, "METHOD_CNT" },
	{ NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION, "BFR_NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_A, "DMA_WIDTH_A" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_B, "DMA_WIDTH_B" },
};
  279. static void
  280. nouveau_print_bitfield_names_(uint32_t value,
  281. const struct nouveau_bitfield_names *namelist,
  282. const int namelist_len)
  283. {
  284. /*
  285. * Caller must have already printed the KERN_* log level for us.
  286. * Also the caller is responsible for adding the newline.
  287. */
  288. int i;
  289. for (i = 0; i < namelist_len; ++i) {
  290. uint32_t mask = namelist[i].mask;
  291. if (value & mask) {
  292. printk(" %s", namelist[i].name);
  293. value &= ~mask;
  294. }
  295. }
  296. if (value)
  297. printk(" (unknown bits 0x%08x)", value);
  298. }
  299. #define nouveau_print_bitfield_names(val, namelist) \
  300. nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))
/* Maps a full register value to a human-readable name. */
struct nouveau_enum_names {
	uint32_t value;
	const char *name;
};
  305. static void
  306. nouveau_print_enum_names_(uint32_t value,
  307. const struct nouveau_enum_names *namelist,
  308. const int namelist_len)
  309. {
  310. /*
  311. * Caller must have already printed the KERN_* log level for us.
  312. * Also the caller is responsible for adding the newline.
  313. */
  314. int i;
  315. for (i = 0; i < namelist_len; ++i) {
  316. if (value == namelist[i].value) {
  317. printk("%s", namelist[i].name);
  318. return;
  319. }
  320. }
  321. printk("unknown value 0x%08x", value);
  322. }
  323. #define nouveau_print_enum_names(val, namelist) \
  324. nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist))
/* Work out which channel owns the active PGRAPH context by matching the
 * context instance in 0x40032c against each channel's grctx (NV40) or
 * ramin (NV50) address.  Returns the channel id, or
 * engine.fifo.channels when no match is found / the card is pre-NV40 —
 * callers treat that value as "invalid channel". */
static int
nouveau_graph_chid_from_grctx(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan;
	unsigned long flags;
	uint32_t inst;
	int i;

	if (dev_priv->card_type < NV_40)
		return dev_priv->engine.fifo.channels;
	else
	if (dev_priv->card_type < NV_50) {
		/* NV40: register holds the grctx instance >> 4 */
		inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4;

		spin_lock_irqsave(&dev_priv->channels.lock, flags);
		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
			chan = dev_priv->channels.ptr[i];
			if (!chan || !chan->ramin_grctx)
				continue;

			if (inst == chan->ramin_grctx->pinst)
				break;
		}
		spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	} else {
		/* NV50: same register, but shifted by 12 (4KiB units) and
		 * compared against the channel's ramin vram address */
		inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12;

		spin_lock_irqsave(&dev_priv->channels.lock, flags);
		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
			chan = dev_priv->channels.ptr[i];
			if (!chan || !chan->ramin)
				continue;

			if (inst == chan->ramin->vinst)
				break;
		}
		spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	}

	return i;
}
/* Determine which channel caused the current PGRAPH trap.  Pre-NV40
 * chips encode the channel id in TRAPPED_ADDR; NV40+ derive it from the
 * active context.  On success stores the id in *channel_ret and returns
 * 0; returns -EINVAL when the id is out of range or the channel is
 * inactive. */
static int
nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	int channel;

	if (dev_priv->card_type < NV_10)
		channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
	else
	if (dev_priv->card_type < NV_40)
		channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
	else
		channel = nouveau_graph_chid_from_grctx(dev);

	if (channel >= engine->fifo.channels ||
	    !dev_priv->channels.ptr[channel]) {
		NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel);
		return -EINVAL;
	}

	*channel_ret = channel;
	return 0;
}
/* Snapshot of the PGRAPH trap state, filled by nouveau_graph_trap_info()
 * and consumed by the dump/swmthd helpers.  channel is -1 when the
 * owning channel could not be determined. */
struct nouveau_pgraph_trap {
	int channel;
	int class;
	int subc, mthd, size;
	uint32_t data, data2;
	uint32_t nsource, nstatus;
};
/* Fill *trap with the trap state latched by PGRAPH: nsource/nstatus
 * (pre-NV50 only), owning channel, subchannel, method, data word(s) and
 * object class. */
static void
nouveau_graph_trap_info(struct drm_device *dev,
			struct nouveau_pgraph_trap *trap)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t address;

	trap->nsource = trap->nstatus = 0;
	if (dev_priv->card_type < NV_50) {
		trap->nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
		trap->nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
	}

	/* -1 marks "channel unknown" for the dump routines */
	if (nouveau_graph_trapped_channel(dev, &trap->channel))
		trap->channel = -1;
	address = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);

	trap->mthd = address & 0x1FFC;
	trap->data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
	if (dev_priv->card_type < NV_10) {
		/* subchannel field moved between generations */
		trap->subc = (address >> 13) & 0x7;
	} else {
		trap->subc = (address >> 16) & 0x7;
		trap->data2 = nv_rd32(dev, NV10_PGRAPH_TRAPPED_DATA_HIGH);
	}

	/* object-class register base and field width vary per generation */
	if (dev_priv->card_type < NV_10)
		trap->class = nv_rd32(dev, 0x400180 + trap->subc*4) & 0xFF;
	else if (dev_priv->card_type < NV_40)
		trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFF;
	else if (dev_priv->card_type < NV_50)
		trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFFF;
	else
		trap->class = nv_rd32(dev, 0x400814);
}
/* Log a PGRAPH trap prefixed with <id>: decodes nsource/nstatus bits on
 * pre-NV50 chips, then prints channel/subchannel/class/method/data. */
static void
nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
			     struct nouveau_pgraph_trap *trap)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t nsource = trap->nsource, nstatus = trap->nstatus;

	if (dev_priv->card_type < NV_50) {
		NV_INFO(dev, "%s - nSource:", id);
		nouveau_print_bitfield_names(nsource, nsource_names);
		printk(", nStatus:");
		if (dev_priv->card_type < NV_10)
			nouveau_print_bitfield_names(nstatus, nstatus_names);
		else
			nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
		printk("\n");
	}

	NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
		"Data 0x%08x:0x%08x\n",
		id, trap->channel, trap->subc,
		trap->class, trap->mthd,
		trap->data2, trap->data);
}
/* If the trapped method belongs to an active channel, attempt to run it
 * as a software method.  Returns 0 on success, -EINVAL otherwise. */
static int
nouveau_pgraph_intr_swmthd(struct drm_device *dev,
			   struct nouveau_pgraph_trap *trap)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	/* NOTE(review): "> 0" also rejects channel 0 (not just the -1
	 * "unknown" marker) — confirm channel 0 can never own a trapped
	 * SW method before changing this to ">= 0". */
	if (trap->channel > 0 &&
	    trap->channel < dev_priv->engine.fifo.channels &&
	    dev_priv->channels.ptr[trap->channel]) {
		chan = dev_priv->channels.ptr[trap->channel];
		ret = nouveau_gpuobj_mthd_call(chan, trap->class, trap->mthd, trap->data);
	}
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	return ret;
}
  460. static inline void
  461. nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
  462. {
  463. struct nouveau_pgraph_trap trap;
  464. int unhandled = 0;
  465. nouveau_graph_trap_info(dev, &trap);
  466. if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
  467. if (nouveau_pgraph_intr_swmthd(dev, &trap))
  468. unhandled = 1;
  469. } else {
  470. unhandled = 1;
  471. }
  472. if (unhandled)
  473. nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
  474. }
/* Handle a PGRAPH ERROR interrupt.  ILLEGAL_MTHD may really be a
 * software method; DMA_VTX_PROTECTION has its status register read back
 * and rewritten (apparently an ack) but is still dumped.  Anything
 * unhandled is logged, rate-limited. */
static inline void
nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
{
	struct nouveau_pgraph_trap trap;
	int unhandled = 0;

	nouveau_graph_trap_info(dev, &trap);
	trap.nsource = nsource;

	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
		if (nouveau_pgraph_intr_swmthd(dev, &trap))
			unhandled = 1;
	} else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
		uint32_t v = nv_rd32(dev, 0x402000);
		nv_wr32(dev, 0x402000, v);

		/* dump the error anyway for now: it's useful for
		   Gallium development */
		unhandled = 1;
	} else {
		unhandled = 1;
	}

	if (unhandled && nouveau_ratelimit())
		nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap);
}
  497. static inline void
  498. nouveau_pgraph_intr_context_switch(struct drm_device *dev)
  499. {
  500. struct drm_nouveau_private *dev_priv = dev->dev_private;
  501. struct nouveau_engine *engine = &dev_priv->engine;
  502. uint32_t chid;
  503. chid = engine->fifo.channel_id(dev);
  504. NV_DEBUG(dev, "PGRAPH context switch interrupt channel %x\n", chid);
  505. switch (dev_priv->card_type) {
  506. case NV_04:
  507. nv04_graph_context_switch(dev);
  508. break;
  509. case NV_10:
  510. nv10_graph_context_switch(dev);
  511. break;
  512. default:
  513. NV_ERROR(dev, "Context switch not implemented\n");
  514. break;
  515. }
  516. }
/* Service all pending PGRAPH interrupts (NOTIFY, ERROR, context switch),
 * ack anything left over, and make sure PGRAPH FIFO access is enabled
 * again before returning. */
static void
nouveau_pgraph_irq_handler(struct drm_device *dev)
{
	uint32_t status;

	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);

		if (status & NV_PGRAPH_INTR_NOTIFY) {
			nouveau_pgraph_intr_notify(dev, nsource);

			status &= ~NV_PGRAPH_INTR_NOTIFY;
			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
		}

		if (status & NV_PGRAPH_INTR_ERROR) {
			nouveau_pgraph_intr_error(dev, nsource);

			status &= ~NV_PGRAPH_INTR_ERROR;
			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR);
		}

		if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
			/* bit is acked before the switch routine runs */
			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
			nv_wr32(dev, NV03_PGRAPH_INTR,
				NV_PGRAPH_INTR_CONTEXT_SWITCH);

			nouveau_pgraph_intr_context_switch(dev);
		}

		if (status) {
			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
			nv_wr32(dev, NV03_PGRAPH_INTR, status);
		}

		/* re-enable PGRAPH fifo access if it got disabled */
		if ((nv_rd32(dev, NV04_PGRAPH_FIFO) & (1 << 0)) == 0)
			nv_wr32(dev, NV04_PGRAPH_FIFO, 1);
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
}
/* Decoded values of the NV50 per-MP exec error status register. */
static struct nouveau_enum_names nv50_mp_exec_error_names[] =
{
	{ 3, "STACK_UNDERFLOW" },
	{ 4, "QUADON_ACTIVE" },
	{ 8, "TIMEOUT" },
	{ 0x10, "INVALID_OPCODE" },
	{ 0x40, "BREAKPOINT" },
};
/* Report exec errors for every MP of TP <tpid> whose enable bit is set
 * in 0x1540, then clear the per-MP error state.  Logging is gated on
 * <display>. */
static void
nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t units = nv_rd32(dev, 0x1540);
	uint32_t addr, mp10, status, pc, oplow, ophigh;
	int i;
	int mps = 0;

	for (i = 0; i < 4; i++) {
		if (!(units & 1 << (i+24)))
			continue;

		/* per-MP register stride differs pre/post NVA0 */
		if (dev_priv->chipset < 0xa0)
			addr = 0x408200 + (tpid << 12) + (i << 7);
		else
			addr = 0x408100 + (tpid << 11) + (i << 7);

		mp10 = nv_rd32(dev, addr + 0x10);
		status = nv_rd32(dev, addr + 0x14);
		if (!status)
			continue;

		if (display) {
			/* NOTE(review): the +0x20 read is discarded — looks
			 * like a latch/ack read; confirm before removing. */
			nv_rd32(dev, addr + 0x20);
			pc = nv_rd32(dev, addr + 0x24);
			oplow = nv_rd32(dev, addr + 0x70);
			ophigh = nv_rd32(dev, addr + 0x74);
			NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
					"TP %d MP %d: ", tpid, i);
			nouveau_print_enum_names(status,
					nv50_mp_exec_error_names);
			printk(" at %06x warp %d, opcode %08x %08x\n",
					pc&0xffffff, pc >> 24,
					oplow, ophigh);
		}

		/* write back mp10 and zero the status to clear the error */
		nv_wr32(dev, addr + 0x10, mp10);
		nv_wr32(dev, addr + 0x14, 0);
		mps++;
	}

	if (!mps && display)
		NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
				"No MPs claiming errors?\n", tpid);
}
/* Report and clear TP traps of the given <type> for every enabled TP.
 * ustatus_old/ustatus_new are the per-TP status register bases for
 * pre-NVA0 and NVA0+ chips respectively; <display> gates all logging
 * and <name> prefixes the messages.  type 6 = texture, 7 = MP,
 * 8 = TPDMA. */
static void
nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
		uint32_t ustatus_new, int display, const char *name)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int tps = 0;
	uint32_t units = nv_rd32(dev, 0x1540);
	int i, r;
	uint32_t ustatus_addr, ustatus;

	for (i = 0; i < 16; i++) {
		if (!(units & (1 << i)))
			continue;

		/* per-TP register stride differs pre/post NVA0 */
		if (dev_priv->chipset < 0xa0)
			ustatus_addr = ustatus_old + (i << 12);
		else
			ustatus_addr = ustatus_new + (i << 11);

		ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
		if (!ustatus)
			continue;
		tps++;

		switch (type) {
		case 6: /* texture error... unknown for now */
			nv50_fb_vm_trap(dev, display, name);
			if (display) {
				NV_ERROR(dev, "magic set %d:\n", i);
				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
					NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
						nv_rd32(dev, r));
			}
			break;
		case 7: /* MP error */
			if (ustatus & 0x00010000) {
				nv50_pgraph_mp_trap(dev, i, display);
				ustatus &= ~0x00010000;
			}
			break;
		case 8: /* TPDMA error */
			{
			/* fault address/state registers following ustatus */
			uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
			uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
			uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
			uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
			uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
			uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
			uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
			nv50_fb_vm_trap(dev, display, name);
			/* 2d engine destination */
			if (ustatus & 0x00000010) {
				if (display) {
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
							i, e14, e10);
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000010;
			}
			/* Render target */
			if (ustatus & 0x00000040) {
				if (display) {
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
							i, e14, e10);
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000040;
			}
			/* CUDA memory: l[], g[] or stack. */
			if (ustatus & 0x00000080) {
				if (display) {
					if (e18 & 0x80000000) {
						/* g[] read fault? */
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
								i, e14, e10 | ((e18 >> 24) & 0x1f));
						e18 &= ~0x1f000000;
					} else if (e18 & 0xc) {
						/* g[] write fault? */
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
								i, e14, e10 | ((e18 >> 7) & 0x1f));
						e18 &= ~0x00000f80;
					} else {
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
								i, e14, e10);
					}
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000080;
			}
			}
			break;
		}

		if (ustatus) {
			if (display)
				NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
		}
		/* NOTE(review): 0xc0000000 write appears to ack/reset the
		 * ustatus register - semantics unverified here. */
		nv_wr32(dev, ustatus_addr, 0xc0000000);
	}

	if (!tps && display)
		NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
}
/*
 * Decode and acknowledge PGRAPH TRAP interrupts on NV50+.
 *
 * 0x400108 holds one status bit per PGRAPH subunit; each subunit has
 * its own "ustatus" register describing the precise fault.  Every
 * handled bit is cleared both in the local 'status' copy and in the
 * hardware register; whatever remains at the end is reported
 * (rate-limited) and acked wholesale so the interrupt doesn't stick.
 */
static void
nv50_pgraph_trap_handler(struct drm_device *dev)
{
	struct nouveau_pgraph_trap trap;
	uint32_t status = nv_rd32(dev, 0x400108);
	uint32_t ustatus;
	int display = nouveau_ratelimit();	/* gate for log output, avoids flooding dmesg */

	if (!status && display) {
		nouveau_graph_trap_info(dev, &trap);
		nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap);
		NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n");
	}

	/* DISPATCH: Relays commands to other units and handles NOTIFY,
	 * COND, QUERY. If you get a trap from it, the command is still stuck
	 * in DISPATCH and you need to do something about it. */
	if (status & 0x001) {
		ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
		}

		/* Known to be triggered by screwed up NOTIFY and COND... */
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
			nv_wr32(dev, 0x400500, 0);
			/* Bit 31 of 0x400808 apparently flags a command still
			 * stuck in DISPATCH (see the "No stuck command?"
			 * message below) — TODO confirm against hw docs. */
			if (nv_rd32(dev, 0x400808) & 0x80000000) {
				if (display) {
					if (nouveau_graph_trapped_channel(dev, &trap.channel))
						trap.channel = -1;
					trap.class = nv_rd32(dev, 0x400814);
					trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc;
					trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7;
					trap.data = nv_rd32(dev, 0x40080c);
					trap.data2 = nv_rd32(dev, 0x400810);
					nouveau_graph_dump_trap_info(dev,
							"PGRAPH_TRAP_DISPATCH_FAULT", &trap);
					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808));
					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848));
				}
				nv_wr32(dev, 0x400808, 0);
			} else if (display) {
				NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n");
			}
			nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
			nv_wr32(dev, 0x400848, 0);
			ustatus &= ~0x00000001;
		}
		if (ustatus & 0x00000002) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
			nv_wr32(dev, 0x400500, 0);
			if (nv_rd32(dev, 0x40084c) & 0x80000000) {
				if (display) {
					if (nouveau_graph_trapped_channel(dev, &trap.channel))
						trap.channel = -1;
					trap.class = nv_rd32(dev, 0x400814);
					trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc;
					trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7;
					trap.data = nv_rd32(dev, 0x40085c);
					trap.data2 = 0;
					nouveau_graph_dump_trap_info(dev,
							"PGRAPH_TRAP_DISPATCH_QUERY", &trap);
					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c));
				}
				nv_wr32(dev, 0x40084c, 0);
			} else if (display) {
				NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n");
			}
			ustatus &= ~0x00000002;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus);
		/* Ack the subunit's ustatus, then the DISPATCH bit itself. */
		nv_wr32(dev, 0x400804, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x001);
		status &= ~0x001;
	}

	/* TRAPs other than dispatch use the "normal" trap regs. */
	if (status && display) {
		nouveau_graph_trap_info(dev, &trap);
		nouveau_graph_dump_trap_info(dev,
				"PGRAPH_TRAP", &trap);
	}

	/* M2MF: Memory to memory copy engine. */
	if (status & 0x002) {
		ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
			ustatus &= ~0x00000001;
		}
		if (ustatus & 0x00000002) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
			ustatus &= ~0x00000002;
		}
		if (ustatus & 0x00000004) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
			ustatus &= ~0x00000004;
		}
		/* NOTE(review): unlike the other units, this dump is not
		 * gated on 'display', so it is not rate-limited. */
		NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n",
			nv_rd32(dev, 0x406804),
			nv_rd32(dev, 0x406808),
			nv_rd32(dev, 0x40680c),
			nv_rd32(dev, 0x406810));
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus);
		/* No sane way found yet -- just reset the bugger. */
		nv_wr32(dev, 0x400040, 2);
		nv_wr32(dev, 0x400040, 0);
		nv_wr32(dev, 0x406800, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x002);
		status &= ~0x002;
	}

	/* VFETCH: Fetches data from vertex buffers. */
	if (status & 0x004) {
		ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
			NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n",
				nv_rd32(dev, 0x400c00),
				nv_rd32(dev, 0x400c08),
				nv_rd32(dev, 0x400c0c),
				nv_rd32(dev, 0x400c10));
			ustatus &= ~0x00000001;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x400c04, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x004);
		status &= ~0x004;
	}

	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
	if (status & 0x008) {
		ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
			NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n",
				nv_rd32(dev, 0x401804),
				nv_rd32(dev, 0x401808),
				nv_rd32(dev, 0x40180c),
				nv_rd32(dev, 0x401810));
			ustatus &= ~0x00000001;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus);
		/* No sane way found yet -- just reset the bugger. */
		nv_wr32(dev, 0x400040, 0x80);
		nv_wr32(dev, 0x400040, 0);
		nv_wr32(dev, 0x401800, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x008);
		status &= ~0x008;
	}

	/* CCACHE: Handles code and c[] caches and fills them. */
	if (status & 0x010) {
		ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
			NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n",
				nv_rd32(dev, 0x405800),
				nv_rd32(dev, 0x405804),
				nv_rd32(dev, 0x405808),
				nv_rd32(dev, 0x40580c),
				nv_rd32(dev, 0x405810),
				nv_rd32(dev, 0x405814),
				nv_rd32(dev, 0x40581c));
			ustatus &= ~0x00000001;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x405018, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x010);
		status &= ~0x010;
	}

	/* Unknown, not seen yet... 0x402000 is the only trap status reg
	 * remaining, so try to handle it anyway. Perhaps related to that
	 * unknown DMA slot on tesla? */
	if (status & 0x20) {
		nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
		ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
		if (display)
			NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x402000, 0xc0000000);
		/* no status modification on purpose */
	}

	/* TEXTURE: CUDA texturing units */
	if (status & 0x040) {
		nv50_pgraph_tp_trap (dev, 6, 0x408900, 0x408600, display,
				"PGRAPH_TRAP_TEXTURE");
		nv_wr32(dev, 0x400108, 0x040);
		status &= ~0x040;
	}

	/* MP: CUDA execution engines. */
	if (status & 0x080) {
		nv50_pgraph_tp_trap (dev, 7, 0x408314, 0x40831c, display,
				"PGRAPH_TRAP_MP");
		nv_wr32(dev, 0x400108, 0x080);
		status &= ~0x080;
	}

	/* TPDMA: Handles TP-initiated uncached memory accesses:
	 * l[], g[], stack, 2d surfaces, render targets. */
	if (status & 0x100) {
		nv50_pgraph_tp_trap (dev, 8, 0x408e08, 0x408708, display,
				"PGRAPH_TRAP_TPDMA");
		nv_wr32(dev, 0x400108, 0x100);
		status &= ~0x100;
	}

	/* Anything left over is unknown — report and ack it blindly. */
	if (status) {
		if (display)
			NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n",
				status);
		nv_wr32(dev, 0x400108, status);
	}
}
/* There must be a *lot* of these. Will take some time to gather them up.
 *
 * Maps PGRAPH DATA_ERROR codes (read from register 0x400110 in
 * nv50_pgraph_irq_handler) to human-readable names, printed via
 * nouveau_print_enum_names(). */
static struct nouveau_enum_names nv50_data_error_names[] =
{
	{ 4,	"INVALID_VALUE" },
	{ 5,	"INVALID_ENUM" },
	{ 8,	"INVALID_OBJECT" },
	{ 0xc,	"INVALID_BITFIELD" },
	{ 0x28,	"MP_NO_REG_SPACE" },
	{ 0x2b,	"MP_BLOCK_SIZE_MISMATCH" },
};
/*
 * PGRAPH interrupt handler for NV50+ cards.  Loops until
 * NV03_PGRAPH_INTR reads back zero, handling and acking one status
 * bit at a time, then clears the PGRAPH bit in PMC's master interrupt
 * status register.
 */
static void
nv50_pgraph_irq_handler(struct drm_device *dev)
{
	struct nouveau_pgraph_trap trap;
	int unhandled = 0;
	uint32_t status;

	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		/* NOTIFY: You've set a NOTIFY on a command and it's done. */
		if (status & 0x00000001) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_NOTIFY", &trap);
			status &= ~0x00000001;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
		}

		/* COMPUTE_QUERY: Purpose and exact cause unknown, happens
		 * when you write 0x200 to 0x50c0 method 0x31c. */
		if (status & 0x00000002) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_COMPUTE_QUERY", &trap);
			status &= ~0x00000002;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002);
		}

		/* Unknown, never seen: 0x4 */

		/* ILLEGAL_MTHD: You used a wrong method for this class. */
		if (status & 0x00000010) {
			nouveau_graph_trap_info(dev, &trap);
			/* Software methods are tried first; only dump the
			 * trap if that path rejected it. */
			if (nouveau_pgraph_intr_swmthd(dev, &trap))
				unhandled = 1;
			if (unhandled && nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_ILLEGAL_MTHD", &trap);
			status &= ~0x00000010;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
		}

		/* ILLEGAL_CLASS: You used a wrong class. */
		if (status & 0x00000020) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_ILLEGAL_CLASS", &trap);
			status &= ~0x00000020;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020);
		}

		/* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. */
		if (status & 0x00000040) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_DOUBLE_NOTIFY", &trap);
			status &= ~0x00000040;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040);
		}

		/* CONTEXT_SWITCH: PGRAPH needs us to load a new context.
		 * The interrupt is masked while the switch is performed,
		 * and 0x400500 is toggled around it. */
		if (status & 0x00001000) {
			nv_wr32(dev, 0x400500, 0x00000000);
			nv_wr32(dev, NV03_PGRAPH_INTR,
				NV_PGRAPH_INTR_CONTEXT_SWITCH);
			nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
				NV40_PGRAPH_INTR_EN) &
				~NV_PGRAPH_INTR_CONTEXT_SWITCH);
			nv_wr32(dev, 0x400500, 0x00010001);

			nv50_graph_context_switch(dev);

			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
		}

		/* BUFFER_NOTIFY: Your m2mf transfer finished */
		if (status & 0x00010000) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_BUFFER_NOTIFY", &trap);
			status &= ~0x00010000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000);
		}

		/* DATA_ERROR: Invalid value for this method, or invalid
		 * state in current PGRAPH context for this operation */
		if (status & 0x00100000) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit()) {
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_DATA_ERROR", &trap);
				NV_INFO (dev, "PGRAPH_DATA_ERROR - ");
				nouveau_print_enum_names(nv_rd32(dev, 0x400110),
						nv50_data_error_names);
				printk("\n");
			}
			status &= ~0x00100000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
		}

		/* TRAP: Something bad happened in the middle of command
		 * execution.  Has a billion types, subtypes, and even
		 * subsubtypes. */
		if (status & 0x00200000) {
			nv50_pgraph_trap_handler(dev);
			status &= ~0x00200000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
		}

		/* Unknown, never seen: 0x00400000 */

		/* SINGLE_STEP: Happens on every method if you turned on
		 * single stepping in 40008c */
		if (status & 0x01000000) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_SINGLE_STEP", &trap);
			status &= ~0x01000000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000);
		}

		/* 0x02000000 happens when you pause a ctxprog...
		 * but the only way this can happen that I know is by
		 * poking the relevant MMIO register, and we don't
		 * do that. */

		if (status) {
			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
				status);
			nv_wr32(dev, NV03_PGRAPH_INTR, status);
		}

		/* Restore bits 0 and 16 of 0x400500 if any handler above
		 * cleared them (the trap and context-switch paths write
		 * this register). */
		{
			const int isb = (1 << 16) | (1 << 0);

			if ((nv_rd32(dev, 0x400500) & isb) != isb)
				nv_wr32(dev, 0x400500,
					nv_rd32(dev, 0x400500) | isb);
		}
	}

	/* Ack the PGRAPH bit in PMC's master status. */
	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
	/* Clear bit 31 of 0x400824 if it is set — purpose unclear from
	 * here; NOTE(review): confirm against hw documentation. */
	if (nv_rd32(dev, 0x400824) & (1 << 31))
		nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
}
  1058. static void
  1059. nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
  1060. {
  1061. if (crtc & 1)
  1062. nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
  1063. if (crtc & 2)
  1064. nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
  1065. }
/*
 * Primary interrupt service routine.  Reads PMC's master interrupt
 * status (NV03_PMC_INTR_0) and dispatches each pending engine bit to
 * the appropriate handler, all under the context-switch spinlock.
 */
irqreturn_t
nouveau_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t status;
	unsigned long flags;

	status = nv_rd32(dev, NV03_PMC_INTR_0);
	if (!status)
		return IRQ_NONE;	/* not ours (e.g. shared IRQ line) */

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);

	if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
		nouveau_fifo_irq_handler(dev);
		status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
	}

	if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
		/* NV50+ PGRAPH has its own, very different IRQ decoding. */
		if (dev_priv->card_type >= NV_50)
			nv50_pgraph_irq_handler(dev);
		else
			nouveau_pgraph_irq_handler(dev);
		status &= ~NV_PMC_INTR_0_PGRAPH_PENDING;
	}

	/* PCRYPT: dump the engine's state registers, ack it (0x102130),
	 * and report any associated VM fault. */
	if (status & 0x00004000) {
		u32 stat = nv_rd32(dev, 0x102130);
		u32 mthd = nv_rd32(dev, 0x102190);
		u32 data = nv_rd32(dev, 0x102194);
		u32 inst = nv_rd32(dev, 0x102188) & 0x7fffffff;

		NV_INFO(dev, "PCRYPT_INTR: 0x%08x 0x%08x 0x%08x 0x%08x\n",
			stat, mthd, data, inst);
		nv_wr32(dev, 0x102130, stat);
		nv_wr32(dev, 0x10200c, 0x10);

		nv50_fb_vm_trap(dev, nouveau_ratelimit(), "PCRYPT");
		status &= ~0x00004000;
	}

	if (status & NV_PMC_INTR_0_CRTCn_PENDING) {
		/* CRTC pending bits live in bits 24/25 of the status. */
		nouveau_crtc_irq_handler(dev, (status>>24)&3);
		status &= ~NV_PMC_INTR_0_CRTCn_PENDING;
	}

	if (status & (NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
		      NV_PMC_INTR_0_NV50_I2C_PENDING)) {
		nv50_display_irq_handler(dev);
		status &= ~(NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
			    NV_PMC_INTR_0_NV50_I2C_PENDING);
	}

	if (status)
		NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);

	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	/* Presumably re-arms MSI delivery — only done when MSI is in
	 * use; NOTE(review): confirm 0x88068 semantics. */
	if (dev_priv->msi_enabled)
		nv_wr08(dev, 0x00088068, 0xff);

	return IRQ_HANDLED;
}
  1117. int
  1118. nouveau_irq_init(struct drm_device *dev)
  1119. {
  1120. struct drm_nouveau_private *dev_priv = dev->dev_private;
  1121. int ret;
  1122. if (nouveau_msi != 0 && dev_priv->card_type >= NV_50) {
  1123. ret = pci_enable_msi(dev->pdev);
  1124. if (ret == 0) {
  1125. NV_INFO(dev, "enabled MSI\n");
  1126. dev_priv->msi_enabled = true;
  1127. }
  1128. }
  1129. return drm_irq_install(dev);
  1130. }
  1131. void
  1132. nouveau_irq_fini(struct drm_device *dev)
  1133. {
  1134. struct drm_nouveau_private *dev_priv = dev->dev_private;
  1135. drm_irq_uninstall(dev);
  1136. if (dev_priv->msi_enabled)
  1137. pci_disable_msi(dev->pdev);
  1138. }