/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include <linux/ratelimit.h>

/* needed for hotplug irq */
#include "nouveau_connector.h"
#include "nv50_display.h"

void
nouveau_irq_preinstall(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* Master disable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);

	if (dev_priv->card_type == NV_50) {
		INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
		INIT_LIST_HEAD(&dev_priv->vbl_waiting);
	}
}

int
nouveau_irq_postinstall(struct drm_device *dev)
{
	/* Master enable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
	return 0;
}

void
nouveau_irq_uninstall(struct drm_device *dev)
{
	/* Master disable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
}

static int
nouveau_call_method(struct nouveau_channel *chan, int class, int mthd, int data)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_pgraph_object_method *grm;
	struct nouveau_pgraph_object_class *grc;

	grc = dev_priv->engine.graph.grclass;
	while (grc->id) {
		if (grc->id == class)
			break;
		grc++;
	}

	if (grc->id != class || !grc->methods)
		return -ENOENT;

	grm = grc->methods;
	while (grm->id) {
		if (grm->id == mthd)
			return grm->exec(chan, class, mthd, data);
		grm++;
	}

	return -ENOENT;
}
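
/*
 * Sketch of the table layout nouveau_call_method() walks. This is
 * illustrative only: the real tables live in the per-generation graph
 * code, and the class number and handler below are made up.
 *
 *	static struct nouveau_pgraph_object_method nv04_sw_mthds[] = {
 *		{ .id = 0x0150, .exec = nv04_sw_set_ref },  hypothetical
 *		{}	zero id terminates the walk
 *	};
 *
 *	static struct nouveau_pgraph_object_class nv04_grclass[] = {
 *		{ .id = 0x006e, .methods = nv04_sw_mthds },
 *		{}
 *	};
 *
 * Both loops above rely on the zero-id sentinel entry.
 */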

static bool
nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data)
{
	struct drm_device *dev = chan->dev;
	const int subc = (addr >> 13) & 0x7;
	const int mthd = addr & 0x1ffc;
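
	/* Example: addr 0x0000201c decodes to subchannel 1, method 0x001c;
	 * method 0x0000 is the object-bind method handled below.
	 */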
	if (mthd == 0x0000) {
		struct nouveau_gpuobj_ref *ref = NULL;

		if (nouveau_gpuobj_ref_find(chan, data, &ref))
			return false;

		if (ref->gpuobj->engine != NVOBJ_ENGINE_SW)
			return false;

		chan->sw_subchannel[subc] = ref->gpuobj->class;
		nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev,
			NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4));
		return true;
	}

	/* hw object */
	if (nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE) & (1 << (subc*4)))
		return false;

	if (nouveau_call_method(chan, chan->sw_subchannel[subc], mthd, data))
		return false;

	return true;
}

static void
nouveau_fifo_irq_handler(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	uint32_t status, reassign;
	int cnt = 0;

	reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
	while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
		struct nouveau_channel *chan = NULL;
		uint32_t chid, get;

		nv_wr32(dev, NV03_PFIFO_CACHES, 0);

		chid = engine->fifo.channel_id(dev);
		if (chid < engine->fifo.channels)	/* chid is unsigned */
			chan = dev_priv->fifos[chid];
		get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);

		if (status & NV_PFIFO_INTR_CACHE_ERROR) {
			uint32_t mthd, data;
			int ptr;

			/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
			 * wrapping on my G80 chips, but CACHE1 isn't big
			 * enough for this much data.. Tests show that it
			 * wraps around to the start at GET=0x800.. No clue
			 * as to why..
			 */
			ptr = (get & 0x7ff) >> 2;
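			/* e.g. get == 0x804 gives ptr == 1, the second
			 * 32-bit entry, consistent with the 0x800 wrap
			 * noted above */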
			if (dev_priv->card_type < NV_40) {
				mthd = nv_rd32(dev,
					NV04_PFIFO_CACHE1_METHOD(ptr));
				data = nv_rd32(dev,
					NV04_PFIFO_CACHE1_DATA(ptr));
			} else {
				mthd = nv_rd32(dev,
					NV40_PFIFO_CACHE1_METHOD(ptr));
				data = nv_rd32(dev,
					NV40_PFIFO_CACHE1_DATA(ptr));
			}

			if (!chan || !nouveau_fifo_swmthd(chan, mthd, data)) {
				NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
					     "Mthd 0x%04x Data 0x%08x\n",
					chid, (mthd >> 13) & 7, mthd & 0x1ffc,
					data);
			}

			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
			nv_wr32(dev, NV03_PFIFO_INTR_0,
				NV_PFIFO_INTR_CACHE_ERROR);

			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
			nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);

			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
				nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);

			status &= ~NV_PFIFO_INTR_CACHE_ERROR;
		}

		if (status & NV_PFIFO_INTR_DMA_PUSHER) {
			NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d\n", chid);

			status &= ~NV_PFIFO_INTR_DMA_PUSHER;
			nv_wr32(dev, NV03_PFIFO_INTR_0,
				NV_PFIFO_INTR_DMA_PUSHER);

			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000);
			if (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT) != get)
				nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET,
					get + 4);
		}

		if (status) {
			NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
				status, chid);
			nv_wr32(dev, NV03_PFIFO_INTR_0, status);
			status = 0;
		}

		nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
	}

	if (status) {
		NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
		nv_wr32(dev, 0x2140, 0);
		nv_wr32(dev, 0x140, 0);
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
}

struct nouveau_bitfield_names {
	uint32_t mask;
	const char *name;
};

static struct nouveau_bitfield_names nstatus_names[] =
{
	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
	{ NV04_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
	{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
	{ NV04_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" }
};

static struct nouveau_bitfield_names nstatus_names_nv10[] =
{
	{ NV10_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
	{ NV10_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
	{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
	{ NV10_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" }
};

static struct nouveau_bitfield_names nsource_names[] =
{
	{ NV03_PGRAPH_NSOURCE_NOTIFICATION,       "NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DATA_ERROR,         "DATA_ERROR" },
	{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR,   "PROTECTION_ERROR" },
	{ NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION,    "RANGE_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_COLOR,        "LIMIT_COLOR" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_ZETA,         "LIMIT_ZETA" },
	{ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD,       "ILLEGAL_MTHD" },
	{ NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION,   "DMA_R_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION,   "DMA_W_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION,   "FORMAT_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION,    "PATCH_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_STATE_INVALID,      "STATE_INVALID" },
	{ NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY,      "DOUBLE_NOTIFY" },
	{ NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE,      "NOTIFY_IN_USE" },
	{ NV03_PGRAPH_NSOURCE_METHOD_CNT,         "METHOD_CNT" },
	{ NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION,   "BFR_NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_A,        "DMA_WIDTH_A" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_B,        "DMA_WIDTH_B" },
};

static void
nouveau_print_bitfield_names_(uint32_t value,
			      const struct nouveau_bitfield_names *namelist,
			      const int namelist_len)
{
	/*
	 * Caller must have already printed the KERN_* log level for us.
	 * Also the caller is responsible for adding the newline.
	 */
	int i;
	for (i = 0; i < namelist_len; ++i) {
		uint32_t mask = namelist[i].mask;
		if (value & mask) {
			printk(" %s", namelist[i].name);
			value &= ~mask;
		}
	}
	if (value)
		printk(" (unknown bits 0x%08x)", value);
}

#define nouveau_print_bitfield_names(val, namelist) \
	nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))
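
/*
 * The caller supplies the log-level prefix and the trailing newline, as
 * nouveau_graph_dump_trap_info() does below:
 *
 *	NV_INFO(dev, "%s - nSource:", id);
 *	nouveau_print_bitfield_names(nsource, nsource_names);
 *	printk("\n");
 */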

static int
nouveau_graph_chid_from_grctx(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t inst;
	int i;
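
	/* 0x40032c identifies the active context: an instance address in
	 * 16-byte units on NV40 (matched against each channel's grctx),
	 * in 4 KiB units on NV50 (matched against each channel's RAMIN
	 * block). */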
	if (dev_priv->card_type < NV_40)
		return dev_priv->engine.fifo.channels;
	else
	if (dev_priv->card_type < NV_50) {
		inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4;

		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
			struct nouveau_channel *chan = dev_priv->fifos[i];

			if (!chan || !chan->ramin_grctx)
				continue;

			if (inst == chan->ramin_grctx->instance)
				break;
		}
	} else {
		inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12;

		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
			struct nouveau_channel *chan = dev_priv->fifos[i];

			if (!chan || !chan->ramin)
				continue;

			if (inst == chan->ramin->instance)
				break;
		}
	}

	return i;
}

static int
nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	int channel;

	if (dev_priv->card_type < NV_10)
		channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
	else
	if (dev_priv->card_type < NV_40)
		channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
	else
		channel = nouveau_graph_chid_from_grctx(dev);

	if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) {
		NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel);
		return -EINVAL;
	}

	*channel_ret = channel;
	return 0;
}

struct nouveau_pgraph_trap {
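	/* channel is -1 when the trapped channel cannot be resolved */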
	int channel;
	int class;
	int subc, mthd, size;
	uint32_t data, data2;
	uint32_t nsource, nstatus;
};

static void
nouveau_graph_trap_info(struct drm_device *dev,
			struct nouveau_pgraph_trap *trap)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t address;

	trap->nsource = trap->nstatus = 0;
	if (dev_priv->card_type < NV_50) {
		trap->nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
		trap->nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
	}

	if (nouveau_graph_trapped_channel(dev, &trap->channel))
		trap->channel = -1;
	address = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);

	trap->mthd = address & 0x1FFC;
	trap->data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
	if (dev_priv->card_type < NV_10) {
		trap->subc  = (address >> 13) & 0x7;
	} else {
		trap->subc  = (address >> 16) & 0x7;
		trap->data2 = nv_rd32(dev, NV10_PGRAPH_TRAPPED_DATA_HIGH);
	}

	if (dev_priv->card_type < NV_10)
		trap->class = nv_rd32(dev, 0x400180 + trap->subc*4) & 0xFF;
	else if (dev_priv->card_type < NV_40)
		trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFF;
	else if (dev_priv->card_type < NV_50)
		trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFFF;
	else
		trap->class = nv_rd32(dev, 0x400814);
}

static void
nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
			     struct nouveau_pgraph_trap *trap)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t nsource = trap->nsource, nstatus = trap->nstatus;

	NV_INFO(dev, "%s - nSource:", id);
	nouveau_print_bitfield_names(nsource, nsource_names);
	printk(", nStatus:");
	if (dev_priv->card_type < NV_10)
		nouveau_print_bitfield_names(nstatus, nstatus_names);
	else
		nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
	printk("\n");

	NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
		     "Data 0x%08x:0x%08x\n",
		id, trap->channel, trap->subc,
		trap->class, trap->mthd,
		trap->data2, trap->data);
}

static int
nouveau_pgraph_intr_swmthd(struct drm_device *dev,
			   struct nouveau_pgraph_trap *trap)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (trap->channel < 0 ||
	    trap->channel >= dev_priv->engine.fifo.channels ||
	    !dev_priv->fifos[trap->channel])
		return -ENODEV;

	return nouveau_call_method(dev_priv->fifos[trap->channel],
				   trap->class, trap->mthd, trap->data);
}

static inline void
nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
{
	struct nouveau_pgraph_trap trap;
	int unhandled = 0;

	nouveau_graph_trap_info(dev, &trap);

	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
		if (nouveau_pgraph_intr_swmthd(dev, &trap))
			unhandled = 1;
	} else {
		unhandled = 1;
	}

	if (unhandled)
		nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
}
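
/*
 * Rate-limit state for PGRAPH_ERROR dumps: __ratelimit() keeps returning
 * nonzero until 20 messages have been allowed within a 3 * HZ jiffy
 * (i.e. three-second) window, then suppresses the rest of the burst.
 */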
static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);

static int nouveau_ratelimit(void)
{
	return __ratelimit(&nouveau_ratelimit_state);
}

static inline void
nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
{
	struct nouveau_pgraph_trap trap;
	int unhandled = 0;

	nouveau_graph_trap_info(dev, &trap);
	trap.nsource = nsource;

	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
		if (nouveau_pgraph_intr_swmthd(dev, &trap))
			unhandled = 1;
	} else {
		unhandled = 1;
	}

	if (unhandled && nouveau_ratelimit())
		nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap);
}

static inline void
nouveau_pgraph_intr_context_switch(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	uint32_t chid;

	chid = engine->fifo.channel_id(dev);
	NV_DEBUG(dev, "PGRAPH context switch interrupt channel %x\n", chid);

	switch (dev_priv->card_type) {
	case NV_04:
		nv04_graph_context_switch(dev);
		break;
	case NV_10:
		nv10_graph_context_switch(dev);
		break;
	default:
		NV_ERROR(dev, "Context switch not implemented\n");
		break;
	}
}

static void
nouveau_pgraph_irq_handler(struct drm_device *dev)
{
	uint32_t status;

	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);

		if (status & NV_PGRAPH_INTR_NOTIFY) {
			nouveau_pgraph_intr_notify(dev, nsource);

			status &= ~NV_PGRAPH_INTR_NOTIFY;
			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
		}

		if (status & NV_PGRAPH_INTR_ERROR) {
			nouveau_pgraph_intr_error(dev, nsource);

			status &= ~NV_PGRAPH_INTR_ERROR;
			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR);
		}

		if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
			nouveau_pgraph_intr_context_switch(dev);

			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
			nv_wr32(dev, NV03_PGRAPH_INTR,
				NV_PGRAPH_INTR_CONTEXT_SWITCH);
		}

		if (status) {
			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
			nv_wr32(dev, NV03_PGRAPH_INTR, status);
		}

		if ((nv_rd32(dev, NV04_PGRAPH_FIFO) & (1 << 0)) == 0)
			nv_wr32(dev, NV04_PGRAPH_FIFO, 1);
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
}

static void
nv50_pgraph_irq_handler(struct drm_device *dev)
{
	uint32_t status, nsource;

	status = nv_rd32(dev, NV03_PGRAPH_INTR);
	nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);

	if (status & 0x00000001) {
		nouveau_pgraph_intr_notify(dev, nsource);
		status &= ~0x00000001;
		nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
	}

	if (status & 0x00000010) {
		nouveau_pgraph_intr_error(dev, nsource |
					  NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);

		status &= ~0x00000010;
		nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
	}

	if (status & 0x00001000) {
		nv_wr32(dev, 0x400500, 0x00000000);
		nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_CONTEXT_SWITCH);
		nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
			NV40_PGRAPH_INTR_EN) & ~NV_PGRAPH_INTR_CONTEXT_SWITCH);
		nv_wr32(dev, 0x400500, 0x00010001);

		nv50_graph_context_switch(dev);

		status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
	}

	if (status & 0x00100000) {
		nouveau_pgraph_intr_error(dev, nsource |
					  NV03_PGRAPH_NSOURCE_DATA_ERROR);

		status &= ~0x00100000;
		nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
	}

	if (status & 0x00200000) {
		int r;

		nouveau_pgraph_intr_error(dev, nsource |
					  NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);

		NV_ERROR(dev, "magic set 1:\n");
		for (r = 0x408900; r <= 0x408910; r += 4)
			NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
		nv_wr32(dev, 0x408900, nv_rd32(dev, 0x408904) | 0xc0000000);
		for (r = 0x408e08; r <= 0x408e24; r += 4)
			NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
		nv_wr32(dev, 0x408e08, nv_rd32(dev, 0x408e08) | 0xc0000000);

		NV_ERROR(dev, "magic set 2:\n");
		for (r = 0x409900; r <= 0x409910; r += 4)
			NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
		nv_wr32(dev, 0x409900, nv_rd32(dev, 0x409904) | 0xc0000000);
		for (r = 0x409e08; r <= 0x409e24; r += 4)
			NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r, nv_rd32(dev, r));
		nv_wr32(dev, 0x409e08, nv_rd32(dev, 0x409e08) | 0xc0000000);

		status &= ~0x00200000;
		nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
		nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
	}

	if (status) {
		NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
		nv_wr32(dev, NV03_PGRAPH_INTR, status);
	}

	{
		const int isb = (1 << 16) | (1 << 0);

		if ((nv_rd32(dev, 0x400500) & isb) != isb)
			nv_wr32(dev, 0x400500, nv_rd32(dev, 0x400500) | isb);
		nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
}

static void
nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
{
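	/* 'crtc' is a two-bit pending mask ((PMC status >> 24) & 3, see
	 * nouveau_irq_handler below): bit 0 is head 0, bit 1 is head 1.
	 * Ack the VBLANK interrupt on each pending head. */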
	if (crtc & 1)
		nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);

	if (crtc & 2)
		nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
}

irqreturn_t
nouveau_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t status, fbdev_flags = 0;

	status = nv_rd32(dev, NV03_PMC_INTR_0);
	if (!status)
		return IRQ_NONE;

	if (dev_priv->fbdev_info) {
		fbdev_flags = dev_priv->fbdev_info->flags;
		dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
	}

	if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
		nouveau_fifo_irq_handler(dev);
		status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
	}

	if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
		if (dev_priv->card_type >= NV_50)
			nv50_pgraph_irq_handler(dev);
		else
			nouveau_pgraph_irq_handler(dev);

		status &= ~NV_PMC_INTR_0_PGRAPH_PENDING;
	}

	if (status & NV_PMC_INTR_0_CRTCn_PENDING) {
		nouveau_crtc_irq_handler(dev, (status>>24)&3);
		status &= ~NV_PMC_INTR_0_CRTCn_PENDING;
	}

	if (status & (NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
		      NV_PMC_INTR_0_NV50_I2C_PENDING)) {
		nv50_display_irq_handler(dev);
		status &= ~(NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
			    NV_PMC_INTR_0_NV50_I2C_PENDING);
	}

	if (status)
		NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);

	if (dev_priv->fbdev_info)
		dev_priv->fbdev_info->flags = fbdev_flags;

	return IRQ_HANDLED;
}
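
/*
 * Sketch of how these hooks are assumed to be wired up (the actual
 * registration lives in the driver setup code, not in this file; field
 * names follow the drm_driver of this kernel era):
 *
 *	static struct drm_driver driver = {
 *		.driver_features = DRIVER_HAVE_IRQ | ...,
 *		.irq_preinstall  = nouveau_irq_preinstall,
 *		.irq_postinstall = nouveau_irq_postinstall,
 *		.irq_uninstall   = nouveau_irq_uninstall,
 *		.irq_handler     = nouveau_irq_handler,
 *	};
 *
 * drm_irq_install() then requests the interrupt line, calling the
 * preinstall/postinstall hooks around request_irq().
 */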