/* nouveau_irq.c */
  1. /*
  2. * Copyright (C) 2006 Ben Skeggs.
  3. *
  4. * All Rights Reserved.
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining
  7. * a copy of this software and associated documentation files (the
  8. * "Software"), to deal in the Software without restriction, including
  9. * without limitation the rights to use, copy, modify, merge, publish,
  10. * distribute, sublicense, and/or sell copies of the Software, and to
  11. * permit persons to whom the Software is furnished to do so, subject to
  12. * the following conditions:
  13. *
  14. * The above copyright notice and this permission notice (including the
  15. * next paragraph) shall be included in all copies or substantial
  16. * portions of the Software.
  17. *
  18. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  19. * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  20. * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
  21. * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
  22. * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
  23. * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
  24. * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  25. *
  26. */
  27. /*
  28. * Authors:
  29. * Ben Skeggs <darktama@iinet.net.au>
  30. */
  31. #include "drmP.h"
  32. #include "drm.h"
  33. #include "nouveau_drm.h"
  34. #include "nouveau_drv.h"
  35. #include "nouveau_reg.h"
  36. #include <linux/ratelimit.h>
  37. /* needed for hotplug irq */
  38. #include "nouveau_connector.h"
  39. #include "nv50_display.h"
/* Called before the IRQ line is hooked up: mask all interrupts at the
 * master (PMC) level and, on NV50, set up the work items the display
 * IRQ handlers defer their heavy lifting to. */
void
nouveau_irq_preinstall(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        /* Master disable */
        nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);

        if (dev_priv->card_type == NV_50) {
                /* NV50 display/hotplug interrupts are finished in
                 * process context via these bottom halves. */
                INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
                INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh);
                INIT_LIST_HEAD(&dev_priv->vbl_waiting);
        }
}
/* Called once the IRQ line is installed: unmask interrupts at the
 * master (PMC) level.  Always succeeds. */
int
nouveau_irq_postinstall(struct drm_device *dev)
{
        /* Master enable */
        nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);

        return 0;
}
/* Called when the IRQ line is being torn down: mask all interrupts at
 * the master (PMC) level again. */
void
nouveau_irq_uninstall(struct drm_device *dev)
{
        /* Master disable */
        nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
}
  65. static int
  66. nouveau_call_method(struct nouveau_channel *chan, int class, int mthd, int data)
  67. {
  68. struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
  69. struct nouveau_pgraph_object_method *grm;
  70. struct nouveau_pgraph_object_class *grc;
  71. grc = dev_priv->engine.graph.grclass;
  72. while (grc->id) {
  73. if (grc->id == class)
  74. break;
  75. grc++;
  76. }
  77. if (grc->id != class || !grc->methods)
  78. return -ENOENT;
  79. grm = grc->methods;
  80. while (grm->id) {
  81. if (grm->id == mthd)
  82. return grm->exec(chan, class, mthd, data);
  83. grm++;
  84. }
  85. return -ENOENT;
  86. }
/* Try to consume a PFIFO method in software.
 *
 * addr encodes the subchannel (bits 15:13) and method offset (bits
 * 12:2).  Method 0x0000 binds an object to a subchannel: if the
 * referenced object belongs to the software engine, its class is
 * remembered in chan->sw_subchannel[] and the subchannel's nibble in
 * CACHE1_ENGINE is cleared so later methods on it trap to us as well.
 * Any other method is forwarded to the registered SW handler, but only
 * if the subchannel is not currently bound to a hardware object.
 *
 * Returns true if the method was handled in software.
 */
static bool
nouveau_fifo_swmthd(struct nouveau_channel *chan, uint32_t addr, uint32_t data)
{
        struct drm_device *dev = chan->dev;
        const int subc = (addr >> 13) & 0x7;
        const int mthd = addr & 0x1ffc;

        if (mthd == 0x0000) {
                struct nouveau_gpuobj_ref *ref = NULL;

                if (nouveau_gpuobj_ref_find(chan, data, &ref))
                        return false;

                if (ref->gpuobj->engine != NVOBJ_ENGINE_SW)
                        return false;

                /* Bind: record the SW class and clear the subchannel's
                 * engine bits so the hardware keeps trapping to us. */
                chan->sw_subchannel[subc] = ref->gpuobj->class;
                nv_wr32(dev, NV04_PFIFO_CACHE1_ENGINE, nv_rd32(dev,
                        NV04_PFIFO_CACHE1_ENGINE) & ~(0xf << subc * 4));
                return true;
        }

        /* hw object */
        if (nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE) & (1 << (subc*4)))
                return false;

        if (nouveau_call_method(chan, chan->sw_subchannel[subc], mthd, data))
                return false;

        return true;
}
  111. static void
  112. nouveau_fifo_irq_handler(struct drm_device *dev)
  113. {
  114. struct drm_nouveau_private *dev_priv = dev->dev_private;
  115. struct nouveau_engine *engine = &dev_priv->engine;
  116. uint32_t status, reassign;
  117. int cnt = 0;
  118. reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
  119. while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
  120. struct nouveau_channel *chan = NULL;
  121. uint32_t chid, get;
  122. nv_wr32(dev, NV03_PFIFO_CACHES, 0);
  123. chid = engine->fifo.channel_id(dev);
  124. if (chid >= 0 && chid < engine->fifo.channels)
  125. chan = dev_priv->fifos[chid];
  126. get = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);
  127. if (status & NV_PFIFO_INTR_CACHE_ERROR) {
  128. uint32_t mthd, data;
  129. int ptr;
  130. /* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
  131. * wrapping on my G80 chips, but CACHE1 isn't big
  132. * enough for this much data.. Tests show that it
  133. * wraps around to the start at GET=0x800.. No clue
  134. * as to why..
  135. */
  136. ptr = (get & 0x7ff) >> 2;
  137. if (dev_priv->card_type < NV_40) {
  138. mthd = nv_rd32(dev,
  139. NV04_PFIFO_CACHE1_METHOD(ptr));
  140. data = nv_rd32(dev,
  141. NV04_PFIFO_CACHE1_DATA(ptr));
  142. } else {
  143. mthd = nv_rd32(dev,
  144. NV40_PFIFO_CACHE1_METHOD(ptr));
  145. data = nv_rd32(dev,
  146. NV40_PFIFO_CACHE1_DATA(ptr));
  147. }
  148. if (!chan || !nouveau_fifo_swmthd(chan, mthd, data)) {
  149. NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
  150. "Mthd 0x%04x Data 0x%08x\n",
  151. chid, (mthd >> 13) & 7, mthd & 0x1ffc,
  152. data);
  153. }
  154. nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
  155. nv_wr32(dev, NV03_PFIFO_INTR_0,
  156. NV_PFIFO_INTR_CACHE_ERROR);
  157. nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
  158. nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
  159. nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
  160. nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
  161. nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
  162. nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);
  163. nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
  164. nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
  165. nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
  166. status &= ~NV_PFIFO_INTR_CACHE_ERROR;
  167. }
  168. if (status & NV_PFIFO_INTR_DMA_PUSHER) {
  169. NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d\n", chid);
  170. status &= ~NV_PFIFO_INTR_DMA_PUSHER;
  171. nv_wr32(dev, NV03_PFIFO_INTR_0,
  172. NV_PFIFO_INTR_DMA_PUSHER);
  173. nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_STATE, 0x00000000);
  174. if (nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUT) != get)
  175. nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_GET,
  176. get + 4);
  177. }
  178. if (status & NV_PFIFO_INTR_SEMAPHORE) {
  179. uint32_t sem;
  180. status &= ~NV_PFIFO_INTR_SEMAPHORE;
  181. nv_wr32(dev, NV03_PFIFO_INTR_0,
  182. NV_PFIFO_INTR_SEMAPHORE);
  183. sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
  184. nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);
  185. nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
  186. nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
  187. }
  188. if (status) {
  189. NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
  190. status, chid);
  191. nv_wr32(dev, NV03_PFIFO_INTR_0, status);
  192. status = 0;
  193. }
  194. nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
  195. }
  196. if (status) {
  197. NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
  198. nv_wr32(dev, 0x2140, 0);
  199. nv_wr32(dev, 0x140, 0);
  200. }
  201. nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
  202. }
/* One (bit mask, human-readable name) pair for decoding status words. */
struct nouveau_bitfield_names {
        uint32_t mask;
        const char *name;
};
/* PGRAPH NSTATUS bit names for pre-NV10 cards. */
static struct nouveau_bitfield_names nstatus_names[] =
{
        { NV04_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
        { NV04_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
        { NV04_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
        { NV04_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" }
};
/* PGRAPH NSTATUS bit names for NV10 and later (bits moved). */
static struct nouveau_bitfield_names nstatus_names_nv10[] =
{
        { NV10_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
        { NV10_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
        { NV10_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
        { NV10_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" }
};
/* PGRAPH NSOURCE bit names (cause of the exception). */
static struct nouveau_bitfield_names nsource_names[] =
{
        { NV03_PGRAPH_NSOURCE_NOTIFICATION,       "NOTIFICATION" },
        { NV03_PGRAPH_NSOURCE_DATA_ERROR,         "DATA_ERROR" },
        { NV03_PGRAPH_NSOURCE_PROTECTION_ERROR,   "PROTECTION_ERROR" },
        { NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION,    "RANGE_EXCEPTION" },
        { NV03_PGRAPH_NSOURCE_LIMIT_COLOR,        "LIMIT_COLOR" },
        { NV03_PGRAPH_NSOURCE_LIMIT_ZETA,         "LIMIT_ZETA" },
        { NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD,       "ILLEGAL_MTHD" },
        { NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION,   "DMA_R_PROTECTION" },
        { NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION,   "DMA_W_PROTECTION" },
        { NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION,   "FORMAT_EXCEPTION" },
        { NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION,    "PATCH_EXCEPTION" },
        { NV03_PGRAPH_NSOURCE_STATE_INVALID,      "STATE_INVALID" },
        { NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY,      "DOUBLE_NOTIFY" },
        { NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE,      "NOTIFY_IN_USE" },
        { NV03_PGRAPH_NSOURCE_METHOD_CNT,         "METHOD_CNT" },
        { NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION,   "BFR_NOTIFICATION" },
        { NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
        { NV03_PGRAPH_NSOURCE_DMA_WIDTH_A,        "DMA_WIDTH_A" },
        { NV03_PGRAPH_NSOURCE_DMA_WIDTH_B,        "DMA_WIDTH_B" },
};
  243. static void
  244. nouveau_print_bitfield_names_(uint32_t value,
  245. const struct nouveau_bitfield_names *namelist,
  246. const int namelist_len)
  247. {
  248. /*
  249. * Caller must have already printed the KERN_* log level for us.
  250. * Also the caller is responsible for adding the newline.
  251. */
  252. int i;
  253. for (i = 0; i < namelist_len; ++i) {
  254. uint32_t mask = namelist[i].mask;
  255. if (value & mask) {
  256. printk(" %s", namelist[i].name);
  257. value &= ~mask;
  258. }
  259. }
  260. if (value)
  261. printk(" (unknown bits 0x%08x)", value);
  262. }
  263. #define nouveau_print_bitfield_names(val, namelist) \
  264. nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))
/* One (value, human-readable name) pair for decoding enumerated fields. */
struct nouveau_enum_names {
        uint32_t value;
        const char *name;
};
  269. static void
  270. nouveau_print_enum_names_(uint32_t value,
  271. const struct nouveau_enum_names *namelist,
  272. const int namelist_len)
  273. {
  274. /*
  275. * Caller must have already printed the KERN_* log level for us.
  276. * Also the caller is responsible for adding the newline.
  277. */
  278. int i;
  279. for (i = 0; i < namelist_len; ++i) {
  280. if (value == namelist[i].value) {
  281. printk("%s", namelist[i].name);
  282. return;
  283. }
  284. }
  285. printk("unknown value 0x%08x", value);
  286. }
  287. #define nouveau_print_enum_names(val, namelist) \
  288. nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist))
/* Find the channel owning the currently-loaded graphics context by
 * matching register 0x40032c against each channel's context instance.
 *
 * Returns a channel id, or engine.fifo.channels (an out-of-range
 * sentinel, also returned when the loop finds no match) — callers
 * validate the result.  Pre-NV40 cards have no usable context pointer
 * here, so the sentinel is returned immediately. */
static int
nouveau_graph_chid_from_grctx(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t inst;
        int i;

        if (dev_priv->card_type < NV_40)
                return dev_priv->engine.fifo.channels;
        else
        if (dev_priv->card_type < NV_50) {
                /* NV40: compare against the grctx instance (<< 4). */
                inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4;

                for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
                        struct nouveau_channel *chan = dev_priv->fifos[i];

                        if (!chan || !chan->ramin_grctx)
                                continue;

                        if (inst == chan->ramin_grctx->instance)
                                break;
                }
        } else {
                /* NV50: compare against the channel's ramin instance (<< 12). */
                inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12;

                for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
                        struct nouveau_channel *chan = dev_priv->fifos[i];

                        if (!chan || !chan->ramin)
                                continue;

                        if (inst == chan->ramin->instance)
                                break;
                }
        }

        return i;
}
/* Determine which channel caused the current PGRAPH trap and store it
 * in *channel_ret.  The channel id field of TRAPPED_ADDR moved between
 * generations; NV40+ must instead derive it from the loaded context.
 *
 * Returns 0 on success, -EINVAL when the id is out of range or the
 * channel is not active (in which case *channel_ret is untouched). */
static int
nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_engine *engine = &dev_priv->engine;
        int channel;

        if (dev_priv->card_type < NV_10)
                channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
        else
        if (dev_priv->card_type < NV_40)
                channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
        else
                channel = nouveau_graph_chid_from_grctx(dev);

        if (channel >= engine->fifo.channels || !dev_priv->fifos[channel]) {
                NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel);
                return -EINVAL;
        }

        *channel_ret = channel;
        return 0;
}
/* Decoded snapshot of a PGRAPH trap, filled in from the TRAPPED_ADDR/
 * TRAPPED_DATA registers (see nouveau_graph_trap_info). */
struct nouveau_pgraph_trap {
        int channel;            /* trapping channel id; -1 if unknown */
        int class;
        int subc, mthd, size;
        uint32_t data, data2;
        uint32_t nsource, nstatus;
};
/* Fill *trap with the current PGRAPH exception state.  Register layout
 * (subchannel position, class register address/width, high data word)
 * differs per card generation.  trap->channel is set to -1 when the
 * trapping channel cannot be identified. */
static void
nouveau_graph_trap_info(struct drm_device *dev,
                        struct nouveau_pgraph_trap *trap)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t address;

        trap->nsource = trap->nstatus = 0;
        /* NSOURCE/NSTATUS only exist before NV50. */
        if (dev_priv->card_type < NV_50) {
                trap->nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
                trap->nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
        }

        if (nouveau_graph_trapped_channel(dev, &trap->channel))
                trap->channel = -1;
        address = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);

        trap->mthd = address & 0x1FFC;
        trap->data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);

        if (dev_priv->card_type < NV_10) {
                trap->subc = (address >> 13) & 0x7;
        } else {
                trap->subc = (address >> 16) & 0x7;
                /* second data word only exists on NV10+ */
                trap->data2 = nv_rd32(dev, NV10_PGRAPH_TRAPPED_DATA_HIGH);
        }

        /* Object class register: location and valid width vary. */
        if (dev_priv->card_type < NV_10)
                trap->class = nv_rd32(dev, 0x400180 + trap->subc*4) & 0xFF;
        else if (dev_priv->card_type < NV_40)
                trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFF;
        else if (dev_priv->card_type < NV_50)
                trap->class = nv_rd32(dev, 0x400160 + trap->subc*4) & 0xFFFF;
        else
                trap->class = nv_rd32(dev, 0x400814);
}
/* Log a decoded trap record prefixed with 'id'.  The nSource/nStatus
 * bitfield dump only applies before NV50; the summary line with
 * channel/subchannel/class/method/data is printed for all cards. */
static void
nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
                             struct nouveau_pgraph_trap *trap)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t nsource = trap->nsource, nstatus = trap->nstatus;

        if (dev_priv->card_type < NV_50) {
                NV_INFO(dev, "%s - nSource:", id);
                nouveau_print_bitfield_names(nsource, nsource_names);
                printk(", nStatus:");
                /* NSTATUS bit layout changed at NV10. */
                if (dev_priv->card_type < NV_10)
                        nouveau_print_bitfield_names(nstatus, nstatus_names);
                else
                        nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
                printk("\n");
        }

        NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
                        "Data 0x%08x:0x%08x\n",
                id, trap->channel, trap->subc,
                trap->class, trap->mthd,
                trap->data2, trap->data);
}
  399. static int
  400. nouveau_pgraph_intr_swmthd(struct drm_device *dev,
  401. struct nouveau_pgraph_trap *trap)
  402. {
  403. struct drm_nouveau_private *dev_priv = dev->dev_private;
  404. if (trap->channel < 0 ||
  405. trap->channel >= dev_priv->engine.fifo.channels ||
  406. !dev_priv->fifos[trap->channel])
  407. return -ENODEV;
  408. return nouveau_call_method(dev_priv->fifos[trap->channel],
  409. trap->class, trap->mthd, trap->data);
  410. }
  411. static inline void
  412. nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
  413. {
  414. struct nouveau_pgraph_trap trap;
  415. int unhandled = 0;
  416. nouveau_graph_trap_info(dev, &trap);
  417. if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
  418. if (nouveau_pgraph_intr_swmthd(dev, &trap))
  419. unhandled = 1;
  420. } else {
  421. unhandled = 1;
  422. }
  423. if (unhandled)
  424. nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
  425. }
/* Limit trap/error log spew: at most 20 messages per 3 seconds. */
static DEFINE_RATELIMIT_STATE(nouveau_ratelimit_state, 3 * HZ, 20);

/* Returns nonzero when another message may be printed right now. */
static int nouveau_ratelimit(void)
{
        return __ratelimit(&nouveau_ratelimit_state);
}
/* Handle a PGRAPH ERROR interrupt.  ILLEGAL_MTHD errors may be
 * software methods; DMA_VTX_PROTECTION gets a register poke (read/
 * write-back of 0x402000) but is still reported.  Unhandled errors are
 * dumped, rate-limited. */
static inline void
nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
{
        struct nouveau_pgraph_trap trap;
        int unhandled = 0;

        nouveau_graph_trap_info(dev, &trap);
        trap.nsource = nsource;

        if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
                if (nouveau_pgraph_intr_swmthd(dev, &trap))
                        unhandled = 1;
        } else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
                uint32_t v = nv_rd32(dev, 0x402000);
                nv_wr32(dev, 0x402000, v);

                /* dump the error anyway for now: it's useful for
                   Gallium development */
                unhandled = 1;
        } else {
                unhandled = 1;
        }

        if (unhandled && nouveau_ratelimit())
                nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap);
}
/* Handle a PGRAPH CONTEXT_SWITCH interrupt.  Only the NV04 and NV10
 * families perform the switch in software here; other generations log
 * an error. */
static inline void
nouveau_pgraph_intr_context_switch(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_engine *engine = &dev_priv->engine;
        uint32_t chid;

        chid = engine->fifo.channel_id(dev);
        NV_DEBUG(dev, "PGRAPH context switch interrupt channel %x\n", chid);

        switch (dev_priv->card_type) {
        case NV_04:
                nv04_graph_context_switch(dev);
                break;
        case NV_10:
                nv10_graph_context_switch(dev);
                break;
        default:
                NV_ERROR(dev, "Context switch not implemented\n");
                break;
        }
}
/* PGRAPH interrupt handler: dispatches NOTIFY, ERROR and
 * CONTEXT_SWITCH, acking each handled bit in NV03_PGRAPH_INTR, then
 * un-stalls the PGRAPH FIFO if needed and acks the PGRAPH bit in PMC.
 * Loops until the interrupt status register reads back clear. */
static void
nouveau_pgraph_irq_handler(struct drm_device *dev)
{
        uint32_t status;

        while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
                uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);

                if (status & NV_PGRAPH_INTR_NOTIFY) {
                        nouveau_pgraph_intr_notify(dev, nsource);

                        status &= ~NV_PGRAPH_INTR_NOTIFY;
                        nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
                }

                if (status & NV_PGRAPH_INTR_ERROR) {
                        nouveau_pgraph_intr_error(dev, nsource);

                        status &= ~NV_PGRAPH_INTR_ERROR;
                        nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR);
                }

                if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
                        nouveau_pgraph_intr_context_switch(dev);

                        status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
                        nv_wr32(dev, NV03_PGRAPH_INTR,
                                NV_PGRAPH_INTR_CONTEXT_SWITCH);
                }

                /* Anything left is unknown; ack it so we don't loop. */
                if (status) {
                        NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
                        nv_wr32(dev, NV03_PGRAPH_INTR, status);
                }

                /* Re-enable the PGRAPH FIFO if bit 0 dropped. */
                if ((nv_rd32(dev, NV04_PGRAPH_FIFO) & (1 << 0)) == 0)
                        nv_wr32(dev, NV04_PGRAPH_FIFO, 1);
        }

        nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
}
/* Report a pending NV50 VM fault via the PFB trap window at 0x100c90/
 * 0x100c94 (bit 31 of 0x100c90 indicates a pending trap; writing it
 * back appears to ack it).  When 'display' is set, the six trap words
 * are read out, the faulting channel is looked up by matching trap[1]
 * against each channel's ramin instance >> 12, and a summary is
 * logged. */
static void
nv50_pfb_vm_trap(struct drm_device *dev, int display, const char *name)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t trap[6];
        int i, ch;
        uint32_t idx = nv_rd32(dev, 0x100c90);

        if (idx & 0x80000000) {
                idx &= 0xffffff;
                if (display) {
                        /* Read all six trap info words (selected via
                         * bits 26:24 of 0x100c90). */
                        for (i = 0; i < 6; i++) {
                                nv_wr32(dev, 0x100c90, idx | i << 24);
                                trap[i] = nv_rd32(dev, 0x100c94);
                        }
                        for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
                                struct nouveau_channel *chan = dev_priv->fifos[ch];

                                if (!chan || !chan->ramin)
                                        continue;

                                if (trap[1] == chan->ramin->instance >> 12)
                                        break;
                        }
                        /* NOTE(review): if no channel matched, 'ch' is
                         * the channel count here. */
                        NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x %08x channel %d\n",
                                name, (trap[5]&0x100?"read":"write"),
                                trap[5]&0xff, trap[4]&0xffff,
                                trap[3]&0xffff, trap[0], trap[2], ch);
                }
                nv_wr32(dev, 0x100c90, idx | 0x80000000);
        } else if (display) {
                NV_INFO(dev, "%s - no VM fault?\n", name);
        }
}
/* Known NV50 MP execution-error status codes. */
static struct nouveau_enum_names nv50_mp_exec_error_names[] =
{
        { 3,    "STACK_UNDERFLOW" },
        { 4,    "QUADON_ACTIVE" },
        { 8,    "TIMEOUT" },
        { 0x10, "INVALID_OPCODE" },
        { 0x40, "BREAKPOINT" },
};
/* Report and clear MP execution errors for one TP.  Each of up to four
 * MPs is checked (enable bits 27:24 of 0x1540); the per-MP register
 * base differs before/after the 0xa0 chipsets.  Errors are decoded via
 * nv50_mp_exec_error_names when 'display' is set, then acked by
 * writing back +0x10 and clearing +0x14. */
static void
nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t units = nv_rd32(dev, 0x1540);
        uint32_t addr, mp10, status, pc, oplow, ophigh;
        int i;
        int mps = 0;

        for (i = 0; i < 4; i++) {
                if (!(units & 1 << (i+24)))
                        continue;

                if (dev_priv->chipset < 0xa0)
                        addr = 0x408200 + (tpid << 12) + (i << 7);
                else
                        addr = 0x408100 + (tpid << 11) + (i << 7);

                mp10 = nv_rd32(dev, addr + 0x10);
                status = nv_rd32(dev, addr + 0x14);
                if (!status)
                        continue;

                if (display) {
                        /* dummy read of +0x20; value is discarded */
                        nv_rd32(dev, addr + 0x20);
                        pc = nv_rd32(dev, addr + 0x24);
                        oplow = nv_rd32(dev, addr + 0x70);
                        ophigh = nv_rd32(dev, addr + 0x74);
                        NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
                                        "TP %d MP %d: ", tpid, i);
                        nouveau_print_enum_names(status,
                                        nv50_mp_exec_error_names);
                        printk(" at %06x warp %d, opcode %08x %08x\n",
                                        pc&0xffffff, pc >> 24,
                                        oplow, ophigh);
                }

                /* ack the error */
                nv_wr32(dev, addr + 0x10, mp10);
                nv_wr32(dev, addr + 0x14, 0);
                mps++;
        }

        if (!mps && display)
                NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
                                "No MPs claiming errors?\n", tpid);
}
/* Walk all 16 possible TPs (enable mask in 0x1540) and decode the trap
 * status of each: type 6 = texture, type 7 = MP, type 8 = TPDMA.  The
 * per-TP status register base is ustatus_old before chipset 0xa0
 * (stride 1 << 12) and ustatus_new from 0xa0 on (stride 1 << 11).
 * Each unit's status is acked by writing 0xc0000000 back. */
static void
nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
                uint32_t ustatus_new, int display, const char *name)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        int tps = 0;
        uint32_t units = nv_rd32(dev, 0x1540);
        int i, r;
        uint32_t ustatus_addr, ustatus;

        for (i = 0; i < 16; i++) {
                if (!(units & (1 << i)))
                        continue;

                if (dev_priv->chipset < 0xa0)
                        ustatus_addr = ustatus_old + (i << 12);
                else
                        ustatus_addr = ustatus_new + (i << 11);

                ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
                if (!ustatus)
                        continue;
                tps++;

                switch (type) {
                case 6: /* texture error... unknown for now */
                        nv50_pfb_vm_trap(dev, display, name);
                        if (display) {
                                /* dump registers +4 .. +0x10 for debugging */
                                NV_ERROR(dev, "magic set %d:\n", i);
                                for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
                                        NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
                                                nv_rd32(dev, r));
                        }
                        break;
                case 7: /* MP error */
                        if (ustatus & 0x00010000) {
                                nv50_pgraph_mp_trap(dev, i, display);
                                ustatus &= ~0x00010000;
                        }
                        break;
                case 8: /* TPDMA error */
                        {
                        uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
                        uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
                        uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
                        uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
                        uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
                        uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
                        uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);

                        nv50_pfb_vm_trap(dev, display, name);

                        /* 2d engine destination */
                        if (ustatus & 0x00000010) {
                                if (display) {
                                        NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
                                                        i, e14, e10);
                                        NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
                                                        i, e0c, e18, e1c, e20, e24);
                                }
                                ustatus &= ~0x00000010;
                        }

                        /* Render target */
                        if (ustatus & 0x00000040) {
                                if (display) {
                                        NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
                                                        i, e14, e10);
                                        NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
                                                        i, e0c, e18, e1c, e20, e24);
                                }
                                ustatus &= ~0x00000040;
                        }

                        /* CUDA memory: l[], g[] or stack. */
                        if (ustatus & 0x00000080) {
                                if (display) {
                                        if (e18 & 0x80000000) {
                                                /* g[] read fault? */
                                                NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
                                                                i, e14, e10 | ((e18 >> 24) & 0x1f));
                                                e18 &= ~0x1f000000;
                                        } else if (e18 & 0xc) {
                                                /* g[] write fault? */
                                                NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
                                                                i, e14, e10 | ((e18 >> 7) & 0x1f));
                                                e18 &= ~0x00000f80;
                                        } else {
                                                NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
                                                                i, e14, e10);
                                        }
                                        NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
                                                        i, e0c, e18, e1c, e20, e24);
                                }
                                ustatus &= ~0x00000080;
                        }
                        }
                        break;
                }

                if (ustatus) {
                        if (display)
                                NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
                }

                /* ack/clear this unit's trap status */
                nv_wr32(dev, ustatus_addr, 0xc0000000);
        }

        if (!tps && display)
                NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
}
  683. static void
  684. nv50_pgraph_trap_handler(struct drm_device *dev)
  685. {
  686. struct nouveau_pgraph_trap trap;
  687. uint32_t status = nv_rd32(dev, 0x400108);
  688. uint32_t ustatus;
  689. int display = nouveau_ratelimit();
  690. if (!status && display) {
  691. nouveau_graph_trap_info(dev, &trap);
  692. nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap);
  693. NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n");
  694. }
  695. /* DISPATCH: Relays commands to other units and handles NOTIFY,
  696. * COND, QUERY. If you get a trap from it, the command is still stuck
  697. * in DISPATCH and you need to do something about it. */
  698. if (status & 0x001) {
  699. ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
  700. if (!ustatus && display) {
  701. NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
  702. }
  703. /* Known to be triggered by screwed up NOTIFY and COND... */
  704. if (ustatus & 0x00000001) {
  705. nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
  706. nv_wr32(dev, 0x400500, 0);
  707. if (nv_rd32(dev, 0x400808) & 0x80000000) {
  708. if (display) {
  709. if (nouveau_graph_trapped_channel(dev, &trap.channel))
  710. trap.channel = -1;
  711. trap.class = nv_rd32(dev, 0x400814);
  712. trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc;
  713. trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7;
  714. trap.data = nv_rd32(dev, 0x40080c);
  715. trap.data2 = nv_rd32(dev, 0x400810);
  716. nouveau_graph_dump_trap_info(dev,
  717. "PGRAPH_TRAP_DISPATCH_FAULT", &trap);
  718. NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808));
  719. NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848));
  720. }
  721. nv_wr32(dev, 0x400808, 0);
  722. } else if (display) {
  723. NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n");
  724. }
  725. nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
  726. nv_wr32(dev, 0x400848, 0);
  727. ustatus &= ~0x00000001;
  728. }
  729. if (ustatus & 0x00000002) {
  730. nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
  731. nv_wr32(dev, 0x400500, 0);
  732. if (nv_rd32(dev, 0x40084c) & 0x80000000) {
  733. if (display) {
  734. if (nouveau_graph_trapped_channel(dev, &trap.channel))
  735. trap.channel = -1;
  736. trap.class = nv_rd32(dev, 0x400814);
  737. trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc;
  738. trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7;
  739. trap.data = nv_rd32(dev, 0x40085c);
  740. trap.data2 = 0;
  741. nouveau_graph_dump_trap_info(dev,
  742. "PGRAPH_TRAP_DISPATCH_QUERY", &trap);
  743. NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c));
  744. }
  745. nv_wr32(dev, 0x40084c, 0);
  746. } else if (display) {
  747. NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n");
  748. }
  749. ustatus &= ~0x00000002;
  750. }
  751. if (ustatus && display)
  752. NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus);
  753. nv_wr32(dev, 0x400804, 0xc0000000);
  754. nv_wr32(dev, 0x400108, 0x001);
  755. status &= ~0x001;
  756. }
  757. /* TRAPs other than dispatch use the "normal" trap regs. */
  758. if (status && display) {
  759. nouveau_graph_trap_info(dev, &trap);
  760. nouveau_graph_dump_trap_info(dev,
  761. "PGRAPH_TRAP", &trap);
  762. }
  763. /* M2MF: Memory to memory copy engine. */
  764. if (status & 0x002) {
  765. ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
  766. if (!ustatus && display) {
  767. NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n");
  768. }
  769. if (ustatus & 0x00000001) {
  770. nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
  771. ustatus &= ~0x00000001;
  772. }
  773. if (ustatus & 0x00000002) {
  774. nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
  775. ustatus &= ~0x00000002;
  776. }
  777. if (ustatus & 0x00000004) {
  778. nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
  779. ustatus &= ~0x00000004;
  780. }
  781. NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n",
  782. nv_rd32(dev, 0x406804),
  783. nv_rd32(dev, 0x406808),
  784. nv_rd32(dev, 0x40680c),
  785. nv_rd32(dev, 0x406810));
  786. if (ustatus && display)
  787. NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus);
  788. /* No sane way found yet -- just reset the bugger. */
  789. nv_wr32(dev, 0x400040, 2);
  790. nv_wr32(dev, 0x400040, 0);
  791. nv_wr32(dev, 0x406800, 0xc0000000);
  792. nv_wr32(dev, 0x400108, 0x002);
  793. status &= ~0x002;
  794. }
  795. /* VFETCH: Fetches data from vertex buffers. */
  796. if (status & 0x004) {
  797. ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
  798. if (!ustatus && display) {
  799. NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n");
  800. }
  801. if (ustatus & 0x00000001) {
  802. nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
  803. NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n",
  804. nv_rd32(dev, 0x400c00),
  805. nv_rd32(dev, 0x400c08),
  806. nv_rd32(dev, 0x400c0c),
  807. nv_rd32(dev, 0x400c10));
  808. ustatus &= ~0x00000001;
  809. }
  810. if (ustatus && display)
  811. NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus);
  812. nv_wr32(dev, 0x400c04, 0xc0000000);
  813. nv_wr32(dev, 0x400108, 0x004);
  814. status &= ~0x004;
  815. }
  816. /* STRMOUT: DirectX streamout / OpenGL transform feedback. */
  817. if (status & 0x008) {
  818. ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
  819. if (!ustatus && display) {
  820. NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n");
  821. }
  822. if (ustatus & 0x00000001) {
  823. nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
  824. NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n",
  825. nv_rd32(dev, 0x401804),
  826. nv_rd32(dev, 0x401808),
  827. nv_rd32(dev, 0x40180c),
  828. nv_rd32(dev, 0x401810));
  829. ustatus &= ~0x00000001;
  830. }
  831. if (ustatus && display)
  832. NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus);
  833. /* No sane way found yet -- just reset the bugger. */
  834. nv_wr32(dev, 0x400040, 0x80);
  835. nv_wr32(dev, 0x400040, 0);
  836. nv_wr32(dev, 0x401800, 0xc0000000);
  837. nv_wr32(dev, 0x400108, 0x008);
  838. status &= ~0x008;
  839. }
  840. /* CCACHE: Handles code and c[] caches and fills them. */
  841. if (status & 0x010) {
  842. ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
  843. if (!ustatus && display) {
  844. NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n");
  845. }
  846. if (ustatus & 0x00000001) {
  847. nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
  848. NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n",
  849. nv_rd32(dev, 0x405800),
  850. nv_rd32(dev, 0x405804),
  851. nv_rd32(dev, 0x405808),
  852. nv_rd32(dev, 0x40580c),
  853. nv_rd32(dev, 0x405810),
  854. nv_rd32(dev, 0x405814),
  855. nv_rd32(dev, 0x40581c));
  856. ustatus &= ~0x00000001;
  857. }
  858. if (ustatus && display)
  859. NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus);
  860. nv_wr32(dev, 0x405018, 0xc0000000);
  861. nv_wr32(dev, 0x400108, 0x010);
  862. status &= ~0x010;
  863. }
  864. /* Unknown, not seen yet... 0x402000 is the only trap status reg
  865. * remaining, so try to handle it anyway. Perhaps related to that
  866. * unknown DMA slot on tesla? */
  867. if (status & 0x20) {
  868. nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
  869. ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
  870. if (display)
  871. NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus);
  872. nv_wr32(dev, 0x402000, 0xc0000000);
  873. /* no status modifiction on purpose */
  874. }
  875. /* TEXTURE: CUDA texturing units */
  876. if (status & 0x040) {
  877. nv50_pgraph_tp_trap (dev, 6, 0x408900, 0x408600, display,
  878. "PGRAPH_TRAP_TEXTURE");
  879. nv_wr32(dev, 0x400108, 0x040);
  880. status &= ~0x040;
  881. }
  882. /* MP: CUDA execution engines. */
  883. if (status & 0x080) {
  884. nv50_pgraph_tp_trap (dev, 7, 0x408314, 0x40831c, display,
  885. "PGRAPH_TRAP_MP");
  886. nv_wr32(dev, 0x400108, 0x080);
  887. status &= ~0x080;
  888. }
  889. /* TPDMA: Handles TP-initiated uncached memory accesses:
  890. * l[], g[], stack, 2d surfaces, render targets. */
  891. if (status & 0x100) {
  892. nv50_pgraph_tp_trap (dev, 8, 0x408e08, 0x408708, display,
  893. "PGRAPH_TRAP_TPDMA");
  894. nv_wr32(dev, 0x400108, 0x100);
  895. status &= ~0x100;
  896. }
  897. if (status) {
  898. if (display)
  899. NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n",
  900. status);
  901. nv_wr32(dev, 0x400108, status);
  902. }
  903. }
  904. /* There must be a *lot* of these. Will take some time to gather them up. */
  905. static struct nouveau_enum_names nv50_data_error_names[] =
  906. {
  907. { 4, "INVALID_VALUE" },
  908. { 5, "INVALID_ENUM" },
  909. { 8, "INVALID_OBJECT" },
  910. { 0xc, "INVALID_BITFIELD" },
  911. { 0x28, "MP_NO_REG_SPACE" },
  912. { 0x2b, "MP_BLOCK_SIZE_MISMATCH" },
  913. };
/*
 * PGRAPH interrupt handler for NV50-family cards.
 *
 * Loops on NV03_PGRAPH_INTR until no interrupt bits remain pending.
 * For each known bit: gather trap state, optionally dump it (rate
 * limited), clear the bit in the local copy, then acknowledge it by
 * writing it back to NV03_PGRAPH_INTR.  Any leftover bits are reported
 * and acknowledged wholesale.  Finally the PGRAPH-pending bit is
 * cleared at the PMC level.
 */
static void
nv50_pgraph_irq_handler(struct drm_device *dev)
{
	struct nouveau_pgraph_trap trap;
	int unhandled = 0;
	uint32_t status;

	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		/* NOTIFY: You've set a NOTIFY on a command and it's done. */
		if (status & 0x00000001) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_NOTIFY", &trap);
			status &= ~0x00000001;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
		}

		/* COMPUTE_QUERY: Purpose and exact cause unknown, happens
		 * when you write 0x200 to 0x50c0 method 0x31c. */
		if (status & 0x00000002) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_COMPUTE_QUERY", &trap);
			status &= ~0x00000002;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002);
		}

		/* Unknown, never seen: 0x4 */

		/* ILLEGAL_MTHD: You used a wrong method for this class. */
		if (status & 0x00000010) {
			nouveau_graph_trap_info(dev, &trap);
			/* swmthd returning nonzero means no software method
			 * consumed the trap; only then is it worth dumping. */
			if (nouveau_pgraph_intr_swmthd(dev, &trap))
				unhandled = 1;
			if (unhandled && nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_ILLEGAL_MTHD", &trap);
			status &= ~0x00000010;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
		}

		/* ILLEGAL_CLASS: You used a wrong class. */
		if (status & 0x00000020) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_ILLEGAL_CLASS", &trap);
			status &= ~0x00000020;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020);
		}

		/* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. */
		if (status & 0x00000040) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_DOUBLE_NOTIFY", &trap);
			status &= ~0x00000040;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040);
		}

		/* CONTEXT_SWITCH: PGRAPH needs us to load a new context */
		if (status & 0x00001000) {
			/* Halt fetching (0x400500), ack the interrupt, mask
			 * further CONTEXT_SWITCH interrupts, then re-enable
			 * fetching before performing the switch.  The order
			 * of these writes matters. */
			nv_wr32(dev, 0x400500, 0x00000000);
			nv_wr32(dev, NV03_PGRAPH_INTR,
				NV_PGRAPH_INTR_CONTEXT_SWITCH);
			nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
				NV40_PGRAPH_INTR_EN) &
				~NV_PGRAPH_INTR_CONTEXT_SWITCH);
			nv_wr32(dev, 0x400500, 0x00010001);

			nv50_graph_context_switch(dev);

			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
		}

		/* BUFFER_NOTIFY: Your m2mf transfer finished */
		if (status & 0x00010000) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_BUFFER_NOTIFY", &trap);
			status &= ~0x00010000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000);
		}

		/* DATA_ERROR: Invalid value for this method, or invalid
		 * state in current PGRAPH context for this operation */
		if (status & 0x00100000) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit()) {
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_DATA_ERROR", &trap);
				/* 0x400110 holds the error code; decode it
				 * via the nv50_data_error_names table. */
				NV_INFO (dev, "PGRAPH_DATA_ERROR - ");
				nouveau_print_enum_names(nv_rd32(dev, 0x400110),
						nv50_data_error_names);
				printk("\n");
			}
			status &= ~0x00100000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
		}

		/* TRAP: Something bad happened in the middle of command
		 * execution.  Has a billion types, subtypes, and even
		 * subsubtypes. */
		if (status & 0x00200000) {
			nv50_pgraph_trap_handler(dev);
			status &= ~0x00200000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
		}

		/* Unknown, never seen: 0x00400000 */

		/* SINGLE_STEP: Happens on every method if you turned on
		 * single stepping in 40008c */
		if (status & 0x01000000) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_SINGLE_STEP", &trap);
			status &= ~0x01000000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000);
		}

		/* 0x02000000 happens when you pause a ctxprog...
		 * but the only way this can happen that I know is by
		 * poking the relevant MMIO register, and we don't
		 * do that. */

		if (status) {
			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
				status);
			nv_wr32(dev, NV03_PGRAPH_INTR, status);
		}

		{
			/* Re-enable FIFO fetch if either enable bit of
			 * 0x400500 got cleared while handling the above. */
			const int isb = (1 << 16) | (1 << 0);

			if ((nv_rd32(dev, 0x400500) & isb) != isb)
				nv_wr32(dev, 0x400500,
					nv_rd32(dev, 0x400500) | isb);
		}
	}

	/* Ack the PGRAPH-pending bit at the master interrupt controller. */
	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
	/* NOTE(review): 0x400824 bit 31 is cleared here if set — purpose not
	 * evident from this file; presumably a sticky error/halt flag. */
	if (nv_rd32(dev, 0x400824) & (1 << 31))
		nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
}
  1045. static void
  1046. nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
  1047. {
  1048. if (crtc & 1)
  1049. nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
  1050. if (crtc & 2)
  1051. nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
  1052. }
/*
 * Top-level interrupt handler for the nouveau driver.
 *
 * Reads the master interrupt status (NV03_PMC_INTR_0) and dispatches
 * each pending source to its engine-specific handler: PFIFO, PGRAPH
 * (NV50+ gets its own path), CRTC vblank, and the NV50 display/I2C
 * block.  Any bits left over after dispatch are reported as unhandled.
 *
 * Returns IRQ_NONE when no interrupt was pending (shared-IRQ protocol),
 * IRQ_HANDLED otherwise.
 */
irqreturn_t
nouveau_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t status, fbdev_flags = 0;
	unsigned long flags;

	status = nv_rd32(dev, NV03_PMC_INTR_0);
	if (!status)
		return IRQ_NONE;

	/* Handlers below may switch contexts; serialize against that. */
	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);

	/* Temporarily mark fbcon acceleration as disabled so fbdev code
	 * won't touch the hardware while we service the interrupt; the
	 * saved flags are restored before returning. */
	if (dev_priv->fbdev_info) {
		fbdev_flags = dev_priv->fbdev_info->flags;
		dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
	}

	if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
		nouveau_fifo_irq_handler(dev);
		status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
	}

	if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
		/* NV50 and later have a different PGRAPH interrupt layout. */
		if (dev_priv->card_type >= NV_50)
			nv50_pgraph_irq_handler(dev);
		else
			nouveau_pgraph_irq_handler(dev);

		status &= ~NV_PMC_INTR_0_PGRAPH_PENDING;
	}

	if (status & NV_PMC_INTR_0_CRTCn_PENDING) {
		/* Bits 24-25 carry the per-head CRTC pending mask. */
		nouveau_crtc_irq_handler(dev, (status>>24)&3);
		status &= ~NV_PMC_INTR_0_CRTCn_PENDING;
	}

	if (status & (NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
		      NV_PMC_INTR_0_NV50_I2C_PENDING)) {
		nv50_display_irq_handler(dev);
		status &= ~(NV_PMC_INTR_0_NV50_DISPLAY_PENDING |
			    NV_PMC_INTR_0_NV50_I2C_PENDING);
	}

	if (status)
		NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);

	if (dev_priv->fbdev_info)
		dev_priv->fbdev_info->flags = fbdev_flags;

	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	return IRQ_HANDLED;
}