nouveau_irq.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742
  1. /*
  2. * Copyright (C) 2006 Ben Skeggs.
  3. *
  4. * All Rights Reserved.
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining
  7. * a copy of this software and associated documentation files (the
  8. * "Software"), to deal in the Software without restriction, including
  9. * without limitation the rights to use, copy, modify, merge, publish,
  10. * distribute, sublicense, and/or sell copies of the Software, and to
  11. * permit persons to whom the Software is furnished to do so, subject to
  12. * the following conditions:
  13. *
  14. * The above copyright notice and this permission notice (including the
  15. * next paragraph) shall be included in all copies or substantial
  16. * portions of the Software.
  17. *
  18. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  19. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  20. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
  21. * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
  22. * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  23. * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  24. * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  25. *
  26. */
  27. /*
  28. * Authors:
  29. * Ben Skeggs <darktama@iinet.net.au>
  30. */
  31. #include "drmP.h"
  32. #include "drm.h"
  33. #include "nouveau_drm.h"
  34. #include "nouveau_drv.h"
  35. #include "nouveau_reg.h"
  36. #include <linux/ratelimit.h>
  37. /* needed for hotplug irq */
  38. #include "nouveau_connector.h"
  39. #include "nv50_display.h"
  40. void
  41. nouveau_irq_preinstall(struct drm_device *dev)
  42. {
  43. struct drm_nouveau_private *dev_priv = dev->dev_private;
  44. /* Master disable */
  45. nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
  46. if (dev_priv->card_type == NV_50) {
  47. INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
  48. INIT_LIST_HEAD(&dev_priv->vbl_waiting);
  49. }
  50. }
/* Enable interrupt delivery once the handler is installed.
 * Returns 0 (the DRM postinstall hook expects an int status). */
int
nouveau_irq_postinstall(struct drm_device *dev)
{
	/* Master enable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
	return 0;
}
/* Mask all interrupts again when the IRQ handler is being removed. */
void
nouveau_irq_uninstall(struct drm_device *dev)
{
	/* Master disable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
}
  64. static int
  65. nouveau_call_method(struct nouveau_channel *chan, int class, int mthd, int data)
  66. {
  67. struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
  68. struct nouveau_pgraph_object_method *grm;
  69. struct nouveau_pgraph_object_class *grc;
  70. grc = dev_priv->engine.graph.grclass;
  71. while (grc->id) {
  72. if (grc->id == class)
  73. break;
  74. grc++;
  75. }
  76. if (grc->id != class || !grc->methods)
  77. return -ENOENT;
  78. grm = grc->methods;
  79. while (grm->id) {
  80. if (grm->id == mthd)
  81. return grm->exec(chan, class, mthd, data);
  82. grm++;
  83. }
  84. return -ENOENT;
  85. }
/* Try to handle a PFIFO CACHE_ERROR method as a software method.
 * @addr encodes the subchannel (bits 13-15) and method offset (bits 2-12);
 * @data is the method argument.  Returns true if the method was consumed
 * in software, false if it must be reported as a real error. */
static bool
nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data)
{
	struct drm_device *dev = chan->dev;
	const int subc = (addr >> 13) & 0x7;
	const int mthd = addr & 0x1ffc;

	if (mthd == 0x0000) {
		/* Method 0 binds an object to the subchannel; @data is the
		 * object handle.  Only software-engine objects are handled. */
		struct nouveau_gpuobj_ref *ref = NULL;

		if (nouveau_gpuobj_ref_find(chan, data, &ref))
			return false;

		if (ref->gpuobj->engine != NVOBJ_ENGINE_SW)
			return false;

		/* Remember the class bound to this subchannel and clear the
		 * subchannel's engine bits so PFIFO traps its methods to us. */
		chan->sw_subchannel[subc] = ref->gpuobj->class;
		nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev,
			NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4));
		return true;
	}

	/* hw object */
	if (nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE) & (1 << (subc*4)))
		return false;

	if (nouveau_call_method(chan, chan->sw_subchannel[subc], mthd, data))
		return false;

	return true;
}
  110. static void
  111. nouveau_fifo_irq_handler(struct drm_device *dev)
  112. {
  113. struct drm_nouveau_private *dev_priv = dev->dev_private;
  114. struct nouveau_engine *engine = &dev_priv->engine;
  115. uint32_t status, reassign;
  116. int cnt = 0;
  117. reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
  118. while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
  119. struct nouveau_channel *chan = NULL;
  120. uint32_t chid, get;
  121. nv_wr32(dev, NV03_PFIFO_CACHES, 0);
  122. chid = engine->fifo.channel_id(dev);
  123. if (chid >= 0 && chid < engine->fifo.channels)
  124. chan = dev_priv->fifos[chid];
  125. get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
  126. if (status & NV_PFIFO_INTR_CACHE_ERROR) {
  127. uint32_t mthd, data;
  128. int ptr;
  129. /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
  130. * wrapping on my G80 chips, but CACHE1 isn't big
  131. * enough for this much data.. Tests show that it
  132. * wraps around to the start at GET=0x800.. No clue
  133. * as to why..
  134. */
  135. ptr = (get & 0x7ff) >> 2;
  136. if (dev_priv->card_type < NV_40) {
  137. mthd = nv_rd32(dev,
  138. NV04_PFIFO_CACHE1_METHOD(ptr));
  139. data = nv_rd32(dev,
  140. NV04_PFIFO_CACHE1_DATA(ptr));
  141. } else {
  142. mthd = nv_rd32(dev,
  143. NV40_PFIFO_CACHE1_METHOD(ptr));
  144. data = nv_rd32(dev,
  145. NV40_PFIFO_CACHE1_DATA(ptr));
  146. }
  147. if (!chan || !nouveau_fifo_swmthd(chan, mthd, data)) {
  148. NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
  149. "Mthd 0x%04x Data 0x%08x\n",
  150. chid, (mthd >> 13) & 7, mthd & 0x1ffc,
  151. data);
  152. }
  153. nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
  154. nv_wr32(dev, NV03_PFIFO_INTR_0,
  155. NV_PFIFO_INTR_CACHE_ERROR);
  156. nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
  157. nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
  158. nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
  159. nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
  160. nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
  161. nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
  162. nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
  163. nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
  164. nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
  165. status &= ~NV_PFIFO_INTR_CACHE_ERROR;
  166. }
  167. if (status & NV_PFIFO_INTR_DMA_PUSHER) {
  168. NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d\n", chid);
  169. status &= ~NV_PFIFO_INTR_DMA_PUSHER;
  170. nv_wr32(dev, NV03_PFIFO_INTR_0,
  171. NV_PFIFO_INTR_DMA_PUSHER);
  172. nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000);
  173. if (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT) != get)
  174. nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET,
  175. get + 4);
  176. }
  177. if (status & NV_PFIFO_INTR_SEMAPHORE) {
  178. uint32_t sem;
  179. status &= ~NV_PFIFO_INTR_SEMAPHORE;
  180. nv_wr32(dev, NV03_PFIFO_INTR_0,
  181. NV_PFIFO_INTR_SEMAPHORE);
  182. sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
  183. nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
  184. nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
  185. nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
  186. }
  187. if (status) {
  188. NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
  189. status, chid);
  190. nv_wr32(dev, NV03_PFIFO_INTR_0, status);
  191. status = 0;
  192. }
  193. nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
  194. }
  195. if (status) {
  196. NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
  197. nv_wr32(dev, 0x2140, 0);
  198. nv_wr32(dev, 0x140, 0);
  199. }
  200. nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
  201. }
/* Maps one status/source bit to a human-readable name for log output. */
struct nouveau_bitfield_names {
	uint32_t mask;		/* bit this entry describes */
	const char *name;	/* printed when (value & mask) is set */
};
/* PGRAPH NSTATUS bit names for pre-NV10 chips. */
static struct nouveau_bitfield_names nstatus_names[] =
{
	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
	{ NV04_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
	{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
	{ NV04_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" }
};

/* PGRAPH NSTATUS bit names for NV10 and later (same conditions, the
 * bits moved). */
static struct nouveau_bitfield_names nstatus_names_nv10[] =
{
	{ NV10_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
	{ NV10_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
	{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
	{ NV10_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" }
};

/* PGRAPH NSOURCE (error source) bit names, common across chip types. */
static struct nouveau_bitfield_names nsource_names[] =
{
	{ NV03_PGRAPH_NSOURCE_NOTIFICATION,       "NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DATA_ERROR,         "DATA_ERROR" },
	{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR,   "PROTECTION_ERROR" },
	{ NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION,    "RANGE_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_COLOR,        "LIMIT_COLOR" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_ZETA,         "LIMIT_ZETA" },
	{ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD,       "ILLEGAL_MTHD" },
	{ NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION,   "DMA_R_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION,   "DMA_W_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION,   "FORMAT_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION,    "PATCH_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_STATE_INVALID,      "STATE_INVALID" },
	{ NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY,      "DOUBLE_NOTIFY" },
	{ NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE,      "NOTIFY_IN_USE" },
	{ NV03_PGRAPH_NSOURCE_METHOD_CNT,         "METHOD_CNT" },
	{ NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION,   "BFR_NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_A,        "DMA_WIDTH_A" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_B,        "DMA_WIDTH_B" },
};
  242. static void
  243. nouveau_print_bitfield_names_(uint32_t value,
  244. const struct nouveau_bitfield_names *namelist,
  245. const int namelist_len)
  246. {
  247. /*
  248. * Caller must have already printed the KERN_* log level for us.
  249. * Also the caller is responsible for adding the newline.
  250. */
  251. int i;
  252. for (i = 0; i < namelist_len; ++i) {
  253. uint32_t mask = namelist[i].mask;
  254. if (value & mask) {
  255. printk(" %s", namelist[i].name);
  256. value &= ~mask;
  257. }
  258. }
  259. if (value)
  260. printk(" (unknown bits 0x%08x)", value);
  261. }
  262. #define nouveau_print_bitfield_names(val, namelist) \
  263. nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))
  264. static int
  265. nouveau_graph_chid_from_grctx(struct drm_device *dev)
  266. {
  267. struct drm_nouveau_private *dev_priv = dev->dev_private;
  268. uint32_t inst;
  269. int i;
  270. if (dev_priv->card_type < NV_40)
  271. return dev_priv->engine.fifo.channels;
  272. else
  273. if (dev_priv->card_type < NV_50) {
  274. inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4;
  275. for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
  276. struct nouveau_channel *chan = dev_priv->fifos[i];
  277. if (!chan || !chan->ramin_grctx)
  278. continue;
  279. if (inst == chan->ramin_grctx->instance)
  280. break;
  281. }
  282. } else {
  283. inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12;
  284. for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
  285. struct nouveau_channel *chan = dev_priv->fifos[i];
  286. if (!chan || !chan->ramin)
  287. continue;
  288. if (inst == chan->ramin->instance)
  289. break;
  290. }
  291. }
  292. return i;
  293. }
  294. static int
  295. nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
  296. {
  297. struct drm_nouveau_private *dev_priv = dev->dev_private;
  298. struct nouveau_engine *engine = &dev_priv->engine;
  299. int channel;
  300. if (dev_priv->card_type < NV_10)
  301. channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
  302. else
  303. if (dev_priv->card_type < NV_40)
  304. channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
  305. else
  306. channel = nouveau_graph_chid_from_grctx(dev);
  307. if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) {
  308. NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel);
  309. return -EINVAL;
  310. }
  311. *channel_ret = channel;
  312. return 0;
  313. }
/* Snapshot of PGRAPH trap state, filled by nouveau_graph_trap_info(). */
struct nouveau_pgraph_trap {
	int channel;		/* trapped channel id, or -1 if unknown */
	int class;		/* object class on the trapped subchannel */
	int subc, mthd, size;	/* subchannel and method offset;
				 * NOTE(review): 'size' is never written in
				 * this file - confirm it is used elsewhere */
	uint32_t data, data2;	/* method data; data2 only filled on >= NV10 */
	uint32_t nsource, nstatus; /* error source/status; zero on NV50+ */
};
/* Read the PGRAPH trap registers into *trap.  Register layout varies by
 * chip generation; nsource/nstatus only exist pre-NV50, and the class is
 * read from a per-generation register/width. */
static void
nouveau_graph_trap_info(struct drm_device *dev,
			struct nouveau_pgraph_trap *trap)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t address;

	trap->nsource = trap->nstatus = 0;
	if (dev_priv->card_type < NV_50) {
		trap->nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
		trap->nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
	}

	/* -1 flags an invalid/inactive channel to the consumers */
	if (nouveau_graph_trapped_channel(dev, &trap->channel))
		trap->channel = -1;
	address = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);

	trap->mthd = address & 0x1FFC;
	trap->data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
	if (dev_priv->card_type < NV_10) {
		trap->subc = (address >> 13) & 0x7;
	} else {
		/* subchannel moved up in TRAPPED_ADDR and a second data
		 * word became available on NV10+ */
		trap->subc = (address >> 16) & 0x7;
		trap->data2 = nv_rd32(dev, NV10_PGRAPH_TRAPPED_DATA_HIGH);
	}

	/* per-generation class register and field width */
	if (dev_priv->card_type < NV_10)
		trap->class = nv_rd32(dev, 0x400180 + trap->subc*4) & 0xFF;
	else if (dev_priv->card_type < NV_40)
		trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFF;
	else if (dev_priv->card_type < NV_50)
		trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFFF;
	else
		trap->class = nv_rd32(dev, 0x400814);
}
  352. static void
  353. nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
  354. struct nouveau_pgraph_trap *trap)
  355. {
  356. struct drm_nouveau_private *dev_priv = dev->dev_private;
  357. uint32_t nsource = trap->nsource, nstatus = trap->nstatus;
  358. NV_INFO(dev, "%s - nSource:", id);
  359. nouveau_print_bitfield_names(nsource, nsource_names);
  360. printk(", nStatus:");
  361. if (dev_priv->card_type < NV_10)
  362. nouveau_print_bitfield_names(nstatus, nstatus_names);
  363. else
  364. nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
  365. printk("\n");
  366. NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
  367. "Data 0x%08x:0x%08x\n",
  368. id, trap->channel, trap->subc,
  369. trap->class, trap->mthd,
  370. trap->data2, trap->data);
  371. }
  372. static int
  373. nouveau_pgraph_intr_swmthd(struct drm_device *dev,
  374. struct nouveau_pgraph_trap *trap)
  375. {
  376. struct drm_nouveau_private *dev_priv = dev->dev_private;
  377. if (trap->channel < 0 ||
  378. trap->channel >= dev_priv->engine.fifo.channels ||
  379. !dev_priv->fifos[trap->channel])
  380. return -ENODEV;
  381. return nouveau_call_method(dev_priv->fifos[trap->channel],
  382. trap->class, trap->mthd, trap->data);
  383. }
  384. static inline void
  385. nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
  386. {
  387. struct nouveau_pgraph_trap trap;
  388. int unhandled = 0;
  389. nouveau_graph_trap_info(dev, &trap);
  390. if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
  391. if (nouveau_pgraph_intr_swmthd(dev, &trap))
  392. unhandled = 1;
  393. } else {
  394. unhandled = 1;
  395. }
  396. if (unhandled)
  397. nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
  398. }
/* Rate-limit noisy PGRAPH error dumps: at most 20 bursts per 3 seconds. */
static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);

/* Returns nonzero while the ratelimit still permits printing. */
static int nouveau_ratelimit(void)
{
	return __ratelimit(&nouveau_ratelimit_state);
}
  404. static inline void
  405. nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
  406. {
  407. struct nouveau_pgraph_trap trap;
  408. int unhandled = 0;
  409. nouveau_graph_trap_info(dev, &trap);
  410. trap.nsource = nsource;
  411. if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
  412. if (nouveau_pgraph_intr_swmthd(dev, &trap))
  413. unhandled = 1;
  414. } else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
  415. uint32_t v = nv_rd32(dev, 0x402000);
  416. nv_wr32(dev, 0x402000, v);
  417. /* dump the error anyway for now: it's useful for
  418. Gallium development */
  419. unhandled = 1;
  420. } else {
  421. unhandled = 1;
  422. }
  423. if (unhandled && nouveau_ratelimit())
  424. nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap);
  425. }
  426. static inline void
  427. nouveau_pgraph_intr_context_switch(struct drm_device *dev)
  428. {
  429. struct drm_nouveau_private *dev_priv = dev->dev_private;
  430. struct nouveau_engine *engine = &dev_priv->engine;
  431. uint32_t chid;
  432. chid = engine->fifo.channel_id(dev);
  433. NV_DEBUG(dev, "PGRAPH context switch interrupt channel %x\n", chid);
  434. switch (dev_priv->card_type) {
  435. case NV_04:
  436. nv04_graph_context_switch(dev);
  437. break;
  438. case NV_10:
  439. nv10_graph_context_switch(dev);
  440. break;
  441. default:
  442. NV_ERROR(dev, "Context switch not implemented\n");
  443. break;
  444. }
  445. }
/* Service all pending PGRAPH interrupts on pre-NV50 hardware.  Each
 * handled condition is cleared from the local status copy and acked in
 * NV03_PGRAPH_INTR; leftovers are logged and acked wholesale.  The
 * PGRAPH pending bit is then acked at the PMC. */
static void
nouveau_pgraph_irq_handler(struct drm_device *dev)
{
	uint32_t status;

	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);

		if (status & NV_PGRAPH_INTR_NOTIFY) {
			nouveau_pgraph_intr_notify(dev, nsource);

			status &= ~NV_PGRAPH_INTR_NOTIFY;
			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
		}

		if (status & NV_PGRAPH_INTR_ERROR) {
			nouveau_pgraph_intr_error(dev, nsource);

			status &= ~NV_PGRAPH_INTR_ERROR;
			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR);
		}

		if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
			nouveau_pgraph_intr_context_switch(dev);

			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
			nv_wr32(dev, NV03_PGRAPH_INTR,
				NV_PGRAPH_INTR_CONTEXT_SWITCH);
		}

		if (status) {
			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
			nv_wr32(dev, NV03_PGRAPH_INTR, status);
		}

		/* re-enable PGRAPH fifo access if something turned it off */
		if ((nv_rd32(dev, NV04_PGRAPH_FIFO) & (1 << 0)) == 0)
			nv_wr32(dev, NV04_PGRAPH_FIFO, 1);
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
}
/* Service all pending PGRAPH interrupts on NV50 hardware.  Interrupt
 * bits are handled by raw value here (the NV03 names don't all apply);
 * each handled bit is cleared locally and acked in NV03_PGRAPH_INTR. */
static void
nv50_pgraph_irq_handler(struct drm_device *dev)
{
	uint32_t status;

	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);

		/* bit 0: notify */
		if (status & 0x00000001) {
			nouveau_pgraph_intr_notify(dev, nsource);
			status &= ~0x00000001;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
		}

		/* bit 4: reported as an illegal-method error */
		if (status & 0x00000010) {
			nouveau_pgraph_intr_error(dev, nsource |
						  NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);

			status &= ~0x00000010;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
		}

		/* bit 12: context switch request.  Ack it and mask further
		 * context-switch interrupts before running the switch, with
		 * 0x400500 toggled around the register updates. */
		if (status & 0x00001000) {
			nv_wr32(dev, 0x400500, 0x00000000);
			nv_wr32(dev, NV03_PGRAPH_INTR,
				NV_PGRAPH_INTR_CONTEXT_SWITCH);
			nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
				NV40_PGRAPH_INTR_EN) &
				~NV_PGRAPH_INTR_CONTEXT_SWITCH);
			nv_wr32(dev, 0x400500, 0x00010001);

			nv50_graph_context_switch(dev);

			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
		}

		/* bit 20: reported as a data error */
		if (status & 0x00100000) {
			nouveau_pgraph_intr_error(dev, nsource |
						  NV03_PGRAPH_NSOURCE_DATA_ERROR);

			status &= ~0x00100000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
		}

		/* bit 21: reported as a protection error; additionally dump
		 * and poke two undocumented ("magic") register sets */
		if (status & 0x00200000) {
			int r;

			nouveau_pgraph_intr_error(dev, nsource |
						  NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);

			NV_ERROR(dev, "magic set 1:\n");
			for (r = 0x408900; r <= 0x408910; r += 4)
				NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
					 nv_rd32(dev, r));
			/* NOTE(review): reads 0x408904 but writes 0x408900,
			 * unlike the 0x408e08 case below - confirm intended */
			nv_wr32(dev, 0x408900,
				nv_rd32(dev, 0x408904) | 0xc0000000);
			for (r = 0x408e08; r <= 0x408e24; r += 4)
				NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
					 nv_rd32(dev, r));
			nv_wr32(dev, 0x408e08,
				nv_rd32(dev, 0x408e08) | 0xc0000000);

			NV_ERROR(dev, "magic set 2:\n");
			for (r = 0x409900; r <= 0x409910; r += 4)
				NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
					 nv_rd32(dev, r));
			nv_wr32(dev, 0x409900,
				nv_rd32(dev, 0x409904) | 0xc0000000);
			for (r = 0x409e08; r <= 0x409e24; r += 4)
				NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
					 nv_rd32(dev, r));
			nv_wr32(dev, 0x409e08,
				nv_rd32(dev, 0x409e08) | 0xc0000000);

			status &= ~0x00200000;
			nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
		}

		if (status) {
			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
				status);
			nv_wr32(dev, NV03_PGRAPH_INTR, status);
		}

		/* make sure bits 0 and 16 of 0x400500 are both set */
		{
			const int isb = (1 << 16) | (1 << 0);

			if ((nv_rd32(dev, 0x400500) & isb) != isb)
				nv_wr32(dev, 0x400500,
					nv_rd32(dev, 0x400500) | isb);
		}
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
	nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
}
/* Acknowledge CRTC vblank interrupts.  @crtc is the 2-bit pending mask
 * taken from PMC_INTR_0 bits 24-25: bit 0 = CRTC0, bit 1 = CRTC1. */
static void
nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
{
	if (crtc & 1)
		nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);

	if (crtc & 2)
		nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
}
/* Top-level interrupt handler.  Reads the PMC pending mask and fans out
 * to the PFIFO, PGRAPH, CRTC and (NV50) display sub-handlers, each of
 * which acks its own pending bit.  Runs under context_switch_lock with
 * fbdev acceleration temporarily disabled so the fb console doesn't
 * touch the hardware mid-service. */
irqreturn_t
nouveau_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t status, fbdev_flags = 0;
	unsigned long flags;

	status = nv_rd32(dev, NV03_PMC_INTR_0);
	if (!status)
		return IRQ_NONE;	/* not our interrupt */

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);

	/* save fbdev flags and disable hw acceleration for the duration */
	if (dev_priv->fbdev_info) {
		fbdev_flags = dev_priv->fbdev_info->flags;
		dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
	}

	if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
		nouveau_fifo_irq_handler(dev);
		status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
	}

	if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
		if (dev_priv->card_type >= NV_50)
			nv50_pgraph_irq_handler(dev);
		else
			nouveau_pgraph_irq_handler(dev);

		status &= ~NV_PMC_INTR_0_PGRAPH_PENDING;
	}

	/* bits 24-25 carry the per-CRTC vblank pending flags */
	if (status & NV_PMC_INTR_0_CRTCn_PENDING) {
		nouveau_crtc_irq_handler(dev, (status>>24)&3);
		status &= ~NV_PMC_INTR_0_CRTCn_PENDING;
	}

	if (status & (NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
		      NV_PMC_INTR_0_NV50_I2C_PENDING)) {
		nv50_display_irq_handler(dev);
		status &= ~(NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
			    NV_PMC_INTR_0_NV50_I2C_PENDING);
	}

	if (status)
		NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);

	/* restore the saved fbdev flags */
	if (dev_priv->fbdev_info)
		dev_priv->fbdev_info->flags = fbdev_flags;

	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	return IRQ_HANDLED;
}
  606. }