nouveau_irq.c

/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * Authors:
 *	Ben Skeggs <darktama@iinet.net.au>
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_reg.h"
#include "nouveau_ramht.h"
#include "nouveau_util.h"

/* needed for hotplug irq */
#include "nouveau_connector.h"
#include "nv50_display.h"
void
nouveau_irq_preinstall(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* Master disable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);

	if (dev_priv->card_type >= NV_50) {
		INIT_WORK(&dev_priv->irq_work, nv50_display_irq_handler_bh);
		INIT_WORK(&dev_priv->hpd_work, nv50_display_irq_hotplug_bh);
		spin_lock_init(&dev_priv->hpd_state.lock);
		INIT_LIST_HEAD(&dev_priv->vbl_waiting);
	}
}

int
nouveau_irq_postinstall(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/* Master enable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, NV_PMC_INTR_EN_0_MASTER_ENABLE);
	if (dev_priv->msi_enabled)
		nv_wr08(dev, 0x00088068, 0xff);

	return 0;
}

void
nouveau_irq_uninstall(struct drm_device *dev)
{
	/* Master disable */
	nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
}
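
/*
 * Try to handle a trapped method as a software method.  Method 0x0000
 * (object bind) is intercepted when the object found in RAMHT belongs
 * to the software engine; any other method is forwarded to the class
 * bound on the subchannel, provided no hardware engine is bound there.
 * Returns true if the method was consumed.
 */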
static bool
nouveau_fifo_swmthd(struct drm_device *dev, u32 chid, u32 addr, u32 data)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = NULL;
	struct nouveau_gpuobj *obj;
	unsigned long flags;
	const int subc = (addr >> 13) & 0x7;
	const int mthd = addr & 0x1ffc;
	bool handled = false;
	u32 engine;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	if (likely(chid >= 0 && chid < dev_priv->engine.fifo.channels))
		chan = dev_priv->channels.ptr[chid];
	if (unlikely(!chan))
		goto out;

	switch (mthd) {
	case 0x0000: /* bind object to subchannel */
		obj = nouveau_ramht_find(chan, data);
		if (unlikely(!obj || obj->engine != NVOBJ_ENGINE_SW))
			break;

		chan->sw_subchannel[subc] = obj->class;
		engine = 0x0000000f << (subc * 4);

		nv_mask(dev, NV04_PFIFO_CACHE1_ENGINE, engine, 0x00000000);
		handled = true;
		break;
	default:
		engine = nv_rd32(dev, NV04_PFIFO_CACHE1_ENGINE);
		if (unlikely(((engine >> (subc * 4)) & 0xf) != 0))
			break;

		if (!nouveau_gpuobj_mthd_call(chan, chan->sw_subchannel[subc],
					      mthd, data))
			handled = true;
		break;
	}

out:
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	return handled;
}
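
/*
 * PFIFO interrupt service.  CACHE_ERROR, DMA_PUSHER and SEMAPHORE
 * conditions are acknowledged and, where possible, skipped past so the
 * FIFO can resume; anything still pending after 100 iterations is
 * treated as fatal and interrupts are masked off.
 */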
static void
nouveau_fifo_irq_handler(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	uint32_t status, reassign;
	int cnt = 0;

	reassign = nv_rd32(dev, NV03_PFIFO_CACHES) & 1;
	while ((status = nv_rd32(dev, NV03_PFIFO_INTR_0)) && (cnt++ < 100)) {
		uint32_t chid, get;

		nv_wr32(dev, NV03_PFIFO_CACHES, 0);

		chid = engine->fifo.channel_id(dev);
		get  = nv_rd32(dev, NV03_PFIFO_CACHE1_GET);

		if (status & NV_PFIFO_INTR_CACHE_ERROR) {
			uint32_t mthd, data;
			int ptr;

			/* NV_PFIFO_CACHE1_GET actually goes to 0xffc before
			 * wrapping on my G80 chips, but CACHE1 isn't big
			 * enough for this much data.. Tests show that it
			 * wraps around to the start at GET=0x800.. No clue
			 * as to why..
			 */
			ptr = (get & 0x7ff) >> 2;

			if (dev_priv->card_type < NV_40) {
				mthd = nv_rd32(dev,
					NV04_PFIFO_CACHE1_METHOD(ptr));
				data = nv_rd32(dev,
					NV04_PFIFO_CACHE1_DATA(ptr));
			} else {
				mthd = nv_rd32(dev,
					NV40_PFIFO_CACHE1_METHOD(ptr));
				data = nv_rd32(dev,
					NV40_PFIFO_CACHE1_DATA(ptr));
			}

			if (!nouveau_fifo_swmthd(dev, chid, mthd, data)) {
				NV_INFO(dev, "PFIFO_CACHE_ERROR - Ch %d/%d "
					     "Mthd 0x%04x Data 0x%08x\n",
					chid, (mthd >> 13) & 7, mthd & 0x1ffc,
					data);
			}

			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH, 0);
			nv_wr32(dev, NV03_PFIFO_INTR_0,
						NV_PFIFO_INTR_CACHE_ERROR);

			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) & ~1);
			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
			nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH0,
				nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH0) | 1);
			nv_wr32(dev, NV04_PFIFO_CACHE1_HASH, 0);

			nv_wr32(dev, NV04_PFIFO_CACHE1_DMA_PUSH,
				nv_rd32(dev, NV04_PFIFO_CACHE1_DMA_PUSH) | 1);
			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);

			status &= ~NV_PFIFO_INTR_CACHE_ERROR;
		}

		if (status & NV_PFIFO_INTR_DMA_PUSHER) {
			u32 dma_get = nv_rd32(dev, 0x003244);
			u32 dma_put = nv_rd32(dev, 0x003240);
			u32 push = nv_rd32(dev, 0x003220);
			u32 state = nv_rd32(dev, 0x003228);

			if (dev_priv->card_type == NV_50) {
				u32 ho_get = nv_rd32(dev, 0x003328);
				u32 ho_put = nv_rd32(dev, 0x003320);
				u32 ib_get = nv_rd32(dev, 0x003334);
				u32 ib_put = nv_rd32(dev, 0x003330);

				if (nouveau_ratelimit())
					NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%02x%08x "
					     "Put 0x%02x%08x IbGet 0x%08x IbPut 0x%08x "
					     "State 0x%08x Push 0x%08x\n",
						chid, ho_get, dma_get, ho_put,
						dma_put, ib_get, ib_put, state,
						push);

				/* METHOD_COUNT, in DMA_STATE on earlier chipsets */
				nv_wr32(dev, 0x003364, 0x00000000);
				if (dma_get != dma_put || ho_get != ho_put) {
					nv_wr32(dev, 0x003244, dma_put);
					nv_wr32(dev, 0x003328, ho_put);
				} else
				if (ib_get != ib_put) {
					nv_wr32(dev, 0x003334, ib_put);
				}
			} else {
				NV_INFO(dev, "PFIFO_DMA_PUSHER - Ch %d Get 0x%08x "
					     "Put 0x%08x State 0x%08x Push 0x%08x\n",
					chid, dma_get, dma_put, state, push);

				if (dma_get != dma_put)
					nv_wr32(dev, 0x003244, dma_put);
			}

			nv_wr32(dev, 0x003228, 0x00000000);
			nv_wr32(dev, 0x003220, 0x00000001);
			nv_wr32(dev, 0x002100, NV_PFIFO_INTR_DMA_PUSHER);
			status &= ~NV_PFIFO_INTR_DMA_PUSHER;
		}

		if (status & NV_PFIFO_INTR_SEMAPHORE) {
			uint32_t sem;

			status &= ~NV_PFIFO_INTR_SEMAPHORE;
			nv_wr32(dev, NV03_PFIFO_INTR_0,
				NV_PFIFO_INTR_SEMAPHORE);

			sem = nv_rd32(dev, NV10_PFIFO_CACHE1_SEMAPHORE);
			nv_wr32(dev, NV10_PFIFO_CACHE1_SEMAPHORE, sem | 0x1);

			nv_wr32(dev, NV03_PFIFO_CACHE1_GET, get + 4);
			nv_wr32(dev, NV04_PFIFO_CACHE1_PULL0, 1);
		}

		if (dev_priv->card_type == NV_50) {
			if (status & 0x00000010) {
				nv50_fb_vm_trap(dev, 1, "PFIFO_BAR_FAULT");
				status &= ~0x00000010;
				nv_wr32(dev, 0x002100, 0x00000010);
			}
		}

		if (status) {
			if (nouveau_ratelimit())
				NV_INFO(dev, "PFIFO_INTR 0x%08x - Ch %d\n",
					status, chid);
			nv_wr32(dev, NV03_PFIFO_INTR_0, status);
			status = 0;
		}

		nv_wr32(dev, NV03_PFIFO_CACHES, reassign);
	}

	if (status) {
		NV_INFO(dev, "PFIFO still angry after %d spins, halt\n", cnt);
		nv_wr32(dev, 0x2140, 0);
		nv_wr32(dev, 0x140, 0);
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PFIFO_PENDING);
}
struct nouveau_bitfield_names {
	uint32_t mask;
	const char *name;
};

static struct nouveau_bitfield_names nstatus_names[] =
{
	{ NV04_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
	{ NV04_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
	{ NV04_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
	{ NV04_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" }
};

static struct nouveau_bitfield_names nstatus_names_nv10[] =
{
	{ NV10_PGRAPH_NSTATUS_STATE_IN_USE,       "STATE_IN_USE" },
	{ NV10_PGRAPH_NSTATUS_INVALID_STATE,      "INVALID_STATE" },
	{ NV10_PGRAPH_NSTATUS_BAD_ARGUMENT,       "BAD_ARGUMENT" },
	{ NV10_PGRAPH_NSTATUS_PROTECTION_FAULT,   "PROTECTION_FAULT" }
};

static struct nouveau_bitfield_names nsource_names[] =
{
	{ NV03_PGRAPH_NSOURCE_NOTIFICATION,       "NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DATA_ERROR,         "DATA_ERROR" },
	{ NV03_PGRAPH_NSOURCE_PROTECTION_ERROR,   "PROTECTION_ERROR" },
	{ NV03_PGRAPH_NSOURCE_RANGE_EXCEPTION,    "RANGE_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_COLOR,        "LIMIT_COLOR" },
	{ NV03_PGRAPH_NSOURCE_LIMIT_ZETA,         "LIMIT_ZETA" },
	{ NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD,       "ILLEGAL_MTHD" },
	{ NV03_PGRAPH_NSOURCE_DMA_R_PROTECTION,   "DMA_R_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_W_PROTECTION,   "DMA_W_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_FORMAT_EXCEPTION,   "FORMAT_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_PATCH_EXCEPTION,    "PATCH_EXCEPTION" },
	{ NV03_PGRAPH_NSOURCE_STATE_INVALID,      "STATE_INVALID" },
	{ NV03_PGRAPH_NSOURCE_DOUBLE_NOTIFY,      "DOUBLE_NOTIFY" },
	{ NV03_PGRAPH_NSOURCE_NOTIFY_IN_USE,      "NOTIFY_IN_USE" },
	{ NV03_PGRAPH_NSOURCE_METHOD_CNT,         "METHOD_CNT" },
	{ NV03_PGRAPH_NSOURCE_BFR_NOTIFICATION,   "BFR_NOTIFICATION" },
	{ NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION, "DMA_VTX_PROTECTION" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_A,        "DMA_WIDTH_A" },
	{ NV03_PGRAPH_NSOURCE_DMA_WIDTH_B,        "DMA_WIDTH_B" },
};

static void
nouveau_print_bitfield_names_(uint32_t value,
			      const struct nouveau_bitfield_names *namelist,
			      const int namelist_len)
{
	/*
	 * Caller must have already printed the KERN_* log level for us.
	 * Also the caller is responsible for adding the newline.
	 */
	int i;
	for (i = 0; i < namelist_len; ++i) {
		uint32_t mask = namelist[i].mask;
		if (value & mask) {
			printk(" %s", namelist[i].name);
			value &= ~mask;
		}
	}
	if (value)
		printk(" (unknown bits 0x%08x)", value);
}

#define nouveau_print_bitfield_names(val, namelist) \
				nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))
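
/*
 * Usage sketch (see nouveau_graph_dump_trap_info() below): the caller
 * prints the log level prefix and the trailing newline itself, e.g.
 *
 *	NV_INFO(dev, "nSource:");
 *	nouveau_print_bitfield_names(nsource, nsource_names);
 *	printk("\n");
 */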
struct nouveau_enum_names {
	uint32_t value;
	const char *name;
};

static void
nouveau_print_enum_names_(uint32_t value,
			  const struct nouveau_enum_names *namelist,
			  const int namelist_len)
{
	/*
	 * Caller must have already printed the KERN_* log level for us.
	 * Also the caller is responsible for adding the newline.
	 */
	int i;
	for (i = 0; i < namelist_len; ++i) {
		if (value == namelist[i].value) {
			printk("%s", namelist[i].name);
			return;
		}
	}
	printk("unknown value 0x%08x", value);
}

#define nouveau_print_enum_names(val, namelist) \
				nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist))

static int
nouveau_graph_chid_from_grctx(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan;
	unsigned long flags;
	uint32_t inst;
	int i;

	if (dev_priv->card_type < NV_40)
		return dev_priv->engine.fifo.channels;
	else
	if (dev_priv->card_type < NV_50) {
		inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 4;

		spin_lock_irqsave(&dev_priv->channels.lock, flags);
		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
			chan = dev_priv->channels.ptr[i];
			if (!chan || !chan->ramin_grctx)
				continue;

			if (inst == chan->ramin_grctx->pinst)
				break;
		}
		spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	} else {
		inst = (nv_rd32(dev, 0x40032c) & 0xfffff) << 12;

		spin_lock_irqsave(&dev_priv->channels.lock, flags);
		for (i = 0; i < dev_priv->engine.fifo.channels; i++) {
			chan = dev_priv->channels.ptr[i];
			if (!chan || !chan->ramin)
				continue;

			if (inst == chan->ramin->vinst)
				break;
		}
		spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	}

	return i;
}
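
/*
 * Work out which channel the current PGRAPH trap belongs to: pre-NV40
 * it is read straight out of NV04_PGRAPH_TRAPPED_ADDR, on NV40+ the
 * active grctx/ramin instance is matched against each channel via
 * nouveau_graph_chid_from_grctx() above.
 */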
static int
nouveau_graph_trapped_channel(struct drm_device *dev, int *channel_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	int channel;

	if (dev_priv->card_type < NV_10)
		channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 24) & 0xf;
	else
	if (dev_priv->card_type < NV_40)
		channel = (nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR) >> 20) & 0x1f;
	else
		channel = nouveau_graph_chid_from_grctx(dev);

	if (channel >= engine->fifo.channels ||
	    !dev_priv->channels.ptr[channel]) {
		NV_ERROR(dev, "AIII, invalid/inactive channel id %d\n", channel);
		return -EINVAL;
	}

	*channel_ret = channel;
	return 0;
}

struct nouveau_pgraph_trap {
	int channel;
	int class;
	int subc, mthd, size;
	uint32_t data, data2;
	uint32_t nsource, nstatus;
};

static void
nouveau_graph_trap_info(struct drm_device *dev,
			struct nouveau_pgraph_trap *trap)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t address;

	trap->nsource = trap->nstatus = 0;
	if (dev_priv->card_type < NV_50) {
		trap->nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
		trap->nstatus = nv_rd32(dev, NV03_PGRAPH_NSTATUS);
	}

	if (nouveau_graph_trapped_channel(dev, &trap->channel))
		trap->channel = -1;
	address = nv_rd32(dev, NV04_PGRAPH_TRAPPED_ADDR);

	trap->mthd = address & 0x1FFC;
	trap->data = nv_rd32(dev, NV04_PGRAPH_TRAPPED_DATA);
	if (dev_priv->card_type < NV_10) {
		trap->subc = (address >> 13) & 0x7;
	} else {
		trap->subc = (address >> 16) & 0x7;
		trap->data2 = nv_rd32(dev, NV10_PGRAPH_TRAPPED_DATA_HIGH);
	}

	if (dev_priv->card_type < NV_10)
		trap->class = nv_rd32(dev, 0x400180 + trap->subc * 4) & 0xFF;
	else if (dev_priv->card_type < NV_40)
		trap->class = nv_rd32(dev, 0x400160 + trap->subc * 4) & 0xFFF;
	else if (dev_priv->card_type < NV_50)
		trap->class = nv_rd32(dev, 0x400160 + trap->subc * 4) & 0xFFFF;
	else
		trap->class = nv_rd32(dev, 0x400814);
}

static void
nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
			     struct nouveau_pgraph_trap *trap)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t nsource = trap->nsource, nstatus = trap->nstatus;

	if (dev_priv->card_type < NV_50) {
		NV_INFO(dev, "%s - nSource:", id);
		nouveau_print_bitfield_names(nsource, nsource_names);
		printk(", nStatus:");
		if (dev_priv->card_type < NV_10)
			nouveau_print_bitfield_names(nstatus, nstatus_names);
		else
			nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
		printk("\n");
	}

	NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
		     "Data 0x%08x:0x%08x\n",
		id, trap->channel, trap->subc,
		trap->class, trap->mthd,
		trap->data2, trap->data);
}
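
/*
 * Hand an ILLEGAL_MTHD trap to the software engine: if the trapped
 * channel is alive, try nouveau_gpuobj_mthd_call() on its class.
 * Returns 0 if a software method consumed it, -EINVAL otherwise.
 */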
static int
nouveau_pgraph_intr_swmthd(struct drm_device *dev,
			   struct nouveau_pgraph_trap *trap)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	if (trap->channel > 0 &&
	    trap->channel < dev_priv->engine.fifo.channels &&
	    dev_priv->channels.ptr[trap->channel]) {
		chan = dev_priv->channels.ptr[trap->channel];
		ret = nouveau_gpuobj_mthd_call(chan, trap->class, trap->mthd, trap->data);
	}
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

	return ret;
}

static inline void
nouveau_pgraph_intr_notify(struct drm_device *dev, uint32_t nsource)
{
	struct nouveau_pgraph_trap trap;
	int unhandled = 0;

	nouveau_graph_trap_info(dev, &trap);

	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
		if (nouveau_pgraph_intr_swmthd(dev, &trap))
			unhandled = 1;
	} else {
		unhandled = 1;
	}

	if (unhandled)
		nouveau_graph_dump_trap_info(dev, "PGRAPH_NOTIFY", &trap);
}

static inline void
nouveau_pgraph_intr_error(struct drm_device *dev, uint32_t nsource)
{
	struct nouveau_pgraph_trap trap;
	int unhandled = 0;

	nouveau_graph_trap_info(dev, &trap);
	trap.nsource = nsource;

	if (nsource & NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD) {
		if (nouveau_pgraph_intr_swmthd(dev, &trap))
			unhandled = 1;
	} else if (nsource & NV03_PGRAPH_NSOURCE_DMA_VTX_PROTECTION) {
		uint32_t v = nv_rd32(dev, 0x402000);
		nv_wr32(dev, 0x402000, v);

		/* dump the error anyway for now: it's useful for
		   Gallium development */
		unhandled = 1;
	} else {
		unhandled = 1;
	}

	if (unhandled && nouveau_ratelimit())
		nouveau_graph_dump_trap_info(dev, "PGRAPH_ERROR", &trap);
}

static inline void
nouveau_pgraph_intr_context_switch(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	uint32_t chid;

	chid = engine->fifo.channel_id(dev);
	NV_DEBUG(dev, "PGRAPH context switch interrupt channel %x\n", chid);

	switch (dev_priv->card_type) {
	case NV_04:
		nv04_graph_context_switch(dev);
		break;
	case NV_10:
		nv10_graph_context_switch(dev);
		break;
	default:
		NV_ERROR(dev, "Context switch not implemented\n");
		break;
	}
}
static void
nouveau_pgraph_irq_handler(struct drm_device *dev)
{
	uint32_t status;

	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);

		if (status & NV_PGRAPH_INTR_NOTIFY) {
			nouveau_pgraph_intr_notify(dev, nsource);

			status &= ~NV_PGRAPH_INTR_NOTIFY;
			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_NOTIFY);
		}

		if (status & NV_PGRAPH_INTR_ERROR) {
			nouveau_pgraph_intr_error(dev, nsource);

			status &= ~NV_PGRAPH_INTR_ERROR;
			nv_wr32(dev, NV03_PGRAPH_INTR, NV_PGRAPH_INTR_ERROR);
		}

		if (status & NV_PGRAPH_INTR_CONTEXT_SWITCH) {
			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
			nv_wr32(dev, NV03_PGRAPH_INTR,
				NV_PGRAPH_INTR_CONTEXT_SWITCH);

			nouveau_pgraph_intr_context_switch(dev);
		}

		if (status) {
			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n", status);
			nv_wr32(dev, NV03_PGRAPH_INTR, status);
		}

		if ((nv_rd32(dev, NV04_PGRAPH_FIFO) & (1 << 0)) == 0)
			nv_wr32(dev, NV04_PGRAPH_FIFO, 1);
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
}

static struct nouveau_enum_names nv50_mp_exec_error_names[] =
{
	{ 3,	"STACK_UNDERFLOW" },
	{ 4,	"QUADON_ACTIVE" },
	{ 8,	"TIMEOUT" },
	{ 0x10,	"INVALID_OPCODE" },
	{ 0x40,	"BREAKPOINT" },
};
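
/*
 * Decode MP (CUDA multiprocessor) exec traps for one TP.  The per-MP
 * register blocks sit at different base addresses and strides before
 * and after NVA0, hence the two address formulas below.
 */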
static void
nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t units = nv_rd32(dev, 0x1540);
	uint32_t addr, mp10, status, pc, oplow, ophigh;
	int i;
	int mps = 0;
	for (i = 0; i < 4; i++) {
		if (!(units & 1 << (i + 24)))
			continue;
		if (dev_priv->chipset < 0xa0)
			addr = 0x408200 + (tpid << 12) + (i << 7);
		else
			addr = 0x408100 + (tpid << 11) + (i << 7);
		mp10 = nv_rd32(dev, addr + 0x10);
		status = nv_rd32(dev, addr + 0x14);
		if (!status)
			continue;
		if (display) {
			nv_rd32(dev, addr + 0x20);
			pc = nv_rd32(dev, addr + 0x24);
			oplow = nv_rd32(dev, addr + 0x70);
			ophigh = nv_rd32(dev, addr + 0x74);
			NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
					"TP %d MP %d: ", tpid, i);
			nouveau_print_enum_names(status,
					nv50_mp_exec_error_names);
			printk(" at %06x warp %d, opcode %08x %08x\n",
					pc & 0xffffff, pc >> 24,
					oplow, ophigh);
		}
		nv_wr32(dev, addr + 0x10, mp10);
		nv_wr32(dev, addr + 0x14, 0);
		mps++;
	}
	if (!mps && display)
		NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
				"No MPs claiming errors?\n", tpid);
}

static void
nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
		uint32_t ustatus_new, int display, const char *name)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int tps = 0;
	uint32_t units = nv_rd32(dev, 0x1540);
	int i, r;
	uint32_t ustatus_addr, ustatus;
	for (i = 0; i < 16; i++) {
		if (!(units & (1 << i)))
			continue;
		if (dev_priv->chipset < 0xa0)
			ustatus_addr = ustatus_old + (i << 12);
		else
			ustatus_addr = ustatus_new + (i << 11);
		ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
		if (!ustatus)
			continue;
		tps++;
		switch (type) {
		case 6: /* texture error... unknown for now */
			nv50_fb_vm_trap(dev, display, name);
			if (display) {
				NV_ERROR(dev, "magic set %d:\n", i);
				for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
					NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
						nv_rd32(dev, r));
			}
			break;
		case 7: /* MP error */
			if (ustatus & 0x00010000) {
				nv50_pgraph_mp_trap(dev, i, display);
				ustatus &= ~0x00010000;
			}
			break;
		case 8: /* TPDMA error */
			{
			uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
			uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
			uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
			uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
			uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
			uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
			uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
			nv50_fb_vm_trap(dev, display, name);
			/* 2d engine destination */
			if (ustatus & 0x00000010) {
				if (display) {
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
							i, e14, e10);
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000010;
			}
			/* Render target */
			if (ustatus & 0x00000040) {
				if (display) {
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
							i, e14, e10);
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000040;
			}
			/* CUDA memory: l[], g[] or stack. */
			if (ustatus & 0x00000080) {
				if (display) {
					if (e18 & 0x80000000) {
						/* g[] read fault? */
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
								i, e14, e10 | ((e18 >> 24) & 0x1f));
						e18 &= ~0x1f000000;
					} else if (e18 & 0xc) {
						/* g[] write fault? */
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
								i, e14, e10 | ((e18 >> 7) & 0x1f));
						e18 &= ~0x00000f80;
					} else {
						NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
								i, e14, e10);
					}
					NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
							i, e0c, e18, e1c, e20, e24);
				}
				ustatus &= ~0x00000080;
			}
			}
			break;
		}
		if (ustatus) {
			if (display)
				NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
		}
		nv_wr32(dev, ustatus_addr, 0xc0000000);
	}

	if (!tps && display)
		NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
}
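
/*
 * Walk the NV50 trap status register (0x400108) and service each
 * trapping unit in turn.  Each handled unit gets its ustatus register
 * written with 0xc0000000 to re-arm it and its bit cleared in 0x400108.
 */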
static void
nv50_pgraph_trap_handler(struct drm_device *dev)
{
	struct nouveau_pgraph_trap trap;
	uint32_t status = nv_rd32(dev, 0x400108);
	uint32_t ustatus;
	int display = nouveau_ratelimit();

	if (!status && display) {
		nouveau_graph_trap_info(dev, &trap);
		nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap);
		NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n");
	}

	/* DISPATCH: Relays commands to other units and handles NOTIFY,
	 * COND, QUERY. If you get a trap from it, the command is still stuck
	 * in DISPATCH and you need to do something about it. */
	if (status & 0x001) {
		ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
		}

		/* Known to be triggered by screwed up NOTIFY and COND... */
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
			nv_wr32(dev, 0x400500, 0);
			if (nv_rd32(dev, 0x400808) & 0x80000000) {
				if (display) {
					if (nouveau_graph_trapped_channel(dev, &trap.channel))
						trap.channel = -1;
					trap.class = nv_rd32(dev, 0x400814);
					trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc;
					trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7;
					trap.data = nv_rd32(dev, 0x40080c);
					trap.data2 = nv_rd32(dev, 0x400810);
					nouveau_graph_dump_trap_info(dev,
							"PGRAPH_TRAP_DISPATCH_FAULT", &trap);
					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808));
					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848));
				}
				nv_wr32(dev, 0x400808, 0);
			} else if (display) {
				NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n");
			}
			nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
			nv_wr32(dev, 0x400848, 0);
			ustatus &= ~0x00000001;
		}
		if (ustatus & 0x00000002) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
			nv_wr32(dev, 0x400500, 0);
			if (nv_rd32(dev, 0x40084c) & 0x80000000) {
				if (display) {
					if (nouveau_graph_trapped_channel(dev, &trap.channel))
						trap.channel = -1;
					trap.class = nv_rd32(dev, 0x400814);
					trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc;
					trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7;
					trap.data = nv_rd32(dev, 0x40085c);
					trap.data2 = 0;
					nouveau_graph_dump_trap_info(dev,
							"PGRAPH_TRAP_DISPATCH_QUERY", &trap);
					NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c));
				}
				nv_wr32(dev, 0x40084c, 0);
			} else if (display) {
				NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n");
			}
			ustatus &= ~0x00000002;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x400804, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x001);
		status &= ~0x001;
	}

	/* TRAPs other than dispatch use the "normal" trap regs. */
	if (status && display) {
		nouveau_graph_trap_info(dev, &trap);
		nouveau_graph_dump_trap_info(dev,
				"PGRAPH_TRAP", &trap);
	}

	/* M2MF: Memory to memory copy engine. */
	if (status & 0x002) {
		ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
			ustatus &= ~0x00000001;
		}
		if (ustatus & 0x00000002) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
			ustatus &= ~0x00000002;
		}
		if (ustatus & 0x00000004) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
			ustatus &= ~0x00000004;
		}
		NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n",
			nv_rd32(dev, 0x406804),
			nv_rd32(dev, 0x406808),
			nv_rd32(dev, 0x40680c),
			nv_rd32(dev, 0x406810));
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus);
		/* No sane way found yet -- just reset the bugger. */
		nv_wr32(dev, 0x400040, 2);
		nv_wr32(dev, 0x400040, 0);
		nv_wr32(dev, 0x406800, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x002);
		status &= ~0x002;
	}

	/* VFETCH: Fetches data from vertex buffers. */
	if (status & 0x004) {
		ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
			NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n",
				nv_rd32(dev, 0x400c00),
				nv_rd32(dev, 0x400c08),
				nv_rd32(dev, 0x400c0c),
				nv_rd32(dev, 0x400c10));
			ustatus &= ~0x00000001;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x400c04, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x004);
		status &= ~0x004;
	}

	/* STRMOUT: DirectX streamout / OpenGL transform feedback. */
	if (status & 0x008) {
		ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
			NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n",
				nv_rd32(dev, 0x401804),
				nv_rd32(dev, 0x401808),
				nv_rd32(dev, 0x40180c),
				nv_rd32(dev, 0x401810));
			ustatus &= ~0x00000001;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus);
		/* No sane way found yet -- just reset the bugger. */
		nv_wr32(dev, 0x400040, 0x80);
		nv_wr32(dev, 0x400040, 0);
		nv_wr32(dev, 0x401800, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x008);
		status &= ~0x008;
	}

	/* CCACHE: Handles code and c[] caches and fills them. */
	if (status & 0x010) {
		ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
		if (!ustatus && display) {
			NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n");
		}
		if (ustatus & 0x00000001) {
			nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
			NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n",
				nv_rd32(dev, 0x405800),
				nv_rd32(dev, 0x405804),
				nv_rd32(dev, 0x405808),
				nv_rd32(dev, 0x40580c),
				nv_rd32(dev, 0x405810),
				nv_rd32(dev, 0x405814),
				nv_rd32(dev, 0x40581c));
			ustatus &= ~0x00000001;
		}
		if (ustatus && display)
			NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x405018, 0xc0000000);
		nv_wr32(dev, 0x400108, 0x010);
		status &= ~0x010;
	}

	/* Unknown, not seen yet... 0x402000 is the only trap status reg
	 * remaining, so try to handle it anyway. Perhaps related to that
	 * unknown DMA slot on tesla? */
	if (status & 0x20) {
		nv50_fb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
		ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
		if (display)
			NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus);
		nv_wr32(dev, 0x402000, 0xc0000000);
		/* no status modification on purpose */
	}

	/* TEXTURE: CUDA texturing units */
	if (status & 0x040) {
		nv50_pgraph_tp_trap (dev, 6, 0x408900, 0x408600, display,
				"PGRAPH_TRAP_TEXTURE");
		nv_wr32(dev, 0x400108, 0x040);
		status &= ~0x040;
	}

	/* MP: CUDA execution engines. */
	if (status & 0x080) {
		nv50_pgraph_tp_trap (dev, 7, 0x408314, 0x40831c, display,
				"PGRAPH_TRAP_MP");
		nv_wr32(dev, 0x400108, 0x080);
		status &= ~0x080;
	}

	/* TPDMA: Handles TP-initiated uncached memory accesses:
	 * l[], g[], stack, 2d surfaces, render targets. */
	if (status & 0x100) {
		nv50_pgraph_tp_trap (dev, 8, 0x408e08, 0x408708, display,
				"PGRAPH_TRAP_TPDMA");
		nv_wr32(dev, 0x400108, 0x100);
		status &= ~0x100;
	}

	if (status) {
		if (display)
			NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n",
				status);
		nv_wr32(dev, 0x400108, status);
	}
}
/* There must be a *lot* of these. Will take some time to gather them up. */
static struct nouveau_enum_names nv50_data_error_names[] =
{
	{ 4,	"INVALID_VALUE" },
	{ 5,	"INVALID_ENUM" },
	{ 8,	"INVALID_OBJECT" },
	{ 0xc,	"INVALID_BITFIELD" },
	{ 0x28,	"MP_NO_REG_SPACE" },
	{ 0x2b,	"MP_BLOCK_SIZE_MISMATCH" },
};

static void
nv50_pgraph_irq_handler(struct drm_device *dev)
{
	struct nouveau_pgraph_trap trap;
	int unhandled = 0;
	uint32_t status;

	while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
		/* NOTIFY: You've set a NOTIFY on a command and it's done. */
		if (status & 0x00000001) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_NOTIFY", &trap);
			status &= ~0x00000001;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
		}

		/* COMPUTE_QUERY: Purpose and exact cause unknown, happens
		 * when you write 0x200 to 0x50c0 method 0x31c. */
		if (status & 0x00000002) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_COMPUTE_QUERY", &trap);
			status &= ~0x00000002;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002);
		}

		/* Unknown, never seen: 0x4 */

		/* ILLEGAL_MTHD: You used a wrong method for this class. */
		if (status & 0x00000010) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_pgraph_intr_swmthd(dev, &trap))
				unhandled = 1;
			if (unhandled && nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_ILLEGAL_MTHD", &trap);
			status &= ~0x00000010;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
		}

		/* ILLEGAL_CLASS: You used a wrong class. */
		if (status & 0x00000020) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_ILLEGAL_CLASS", &trap);
			status &= ~0x00000020;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020);
		}

		/* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. */
		if (status & 0x00000040) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_DOUBLE_NOTIFY", &trap);
			status &= ~0x00000040;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040);
		}

		/* CONTEXT_SWITCH: PGRAPH needs us to load a new context */
		if (status & 0x00001000) {
			nv_wr32(dev, 0x400500, 0x00000000);
			nv_wr32(dev, NV03_PGRAPH_INTR,
				NV_PGRAPH_INTR_CONTEXT_SWITCH);
			nv_wr32(dev, NV40_PGRAPH_INTR_EN, nv_rd32(dev,
				NV40_PGRAPH_INTR_EN) &
				~NV_PGRAPH_INTR_CONTEXT_SWITCH);
			nv_wr32(dev, 0x400500, 0x00010001);

			nv50_graph_context_switch(dev);

			status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
		}

		/* BUFFER_NOTIFY: Your m2mf transfer finished */
		if (status & 0x00010000) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_BUFFER_NOTIFY", &trap);
			status &= ~0x00010000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000);
		}

		/* DATA_ERROR: Invalid value for this method, or invalid
		 * state in current PGRAPH context for this operation */
		if (status & 0x00100000) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit()) {
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_DATA_ERROR", &trap);
				NV_INFO (dev, "PGRAPH_DATA_ERROR - ");
				nouveau_print_enum_names(nv_rd32(dev, 0x400110),
						nv50_data_error_names);
				printk("\n");
			}
			status &= ~0x00100000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
		}

		/* TRAP: Something bad happened in the middle of command
		 * execution. Has a billion types, subtypes, and even
		 * subsubtypes. */
		if (status & 0x00200000) {
			nv50_pgraph_trap_handler(dev);
			status &= ~0x00200000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
		}

		/* Unknown, never seen: 0x00400000 */

		/* SINGLE_STEP: Happens on every method if you turned on
		 * single stepping in 40008c */
		if (status & 0x01000000) {
			nouveau_graph_trap_info(dev, &trap);
			if (nouveau_ratelimit())
				nouveau_graph_dump_trap_info(dev,
						"PGRAPH_SINGLE_STEP", &trap);
			status &= ~0x01000000;
			nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000);
		}

		/* 0x02000000 happens when you pause a ctxprog...
		 * but the only way this can happen that I know is by
		 * poking the relevant MMIO register, and we don't
		 * do that. */

		if (status) {
			NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
				status);
			nv_wr32(dev, NV03_PGRAPH_INTR, status);
		}

		{
			const int isb = (1 << 16) | (1 << 0);

			if ((nv_rd32(dev, 0x400500) & isb) != isb)
				nv_wr32(dev, 0x400500,
					nv_rd32(dev, 0x400500) | isb);
		}
	}

	nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
	if (nv_rd32(dev, 0x400824) & (1 << 31))
		nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
}
static void
nouveau_crtc_irq_handler(struct drm_device *dev, int crtc)
{
	if (crtc & 1) {
		nv_wr32(dev, NV_CRTC0_INTSTAT, NV_CRTC_INTR_VBLANK);
		drm_handle_vblank(dev, 0);
	}

	if (crtc & 2) {
		nv_wr32(dev, NV_CRTC1_INTSTAT, NV_CRTC_INTR_VBLANK);
		drm_handle_vblank(dev, 1);
	}
}
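
/*
 * Top-level PMC interrupt dispatch.  Hardwired PFIFO/PGRAPH/CRTC/
 * display handling comes first; any remaining status bits are offered
 * to per-bit handlers registered via nouveau_irq_register() below.
 */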
irqreturn_t
nouveau_irq_handler(DRM_IRQ_ARGS)
{
	struct drm_device *dev = (struct drm_device *)arg;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned long flags;
	u32 status;
	int i;

	status = nv_rd32(dev, NV03_PMC_INTR_0);
	if (!status)
		return IRQ_NONE;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);

	if (status & NV_PMC_INTR_0_PFIFO_PENDING) {
		nouveau_fifo_irq_handler(dev);
		status &= ~NV_PMC_INTR_0_PFIFO_PENDING;
	}

	if (status & NV_PMC_INTR_0_PGRAPH_PENDING) {
		if (dev_priv->card_type >= NV_50)
			nv50_pgraph_irq_handler(dev);
		else
			nouveau_pgraph_irq_handler(dev);

		status &= ~NV_PMC_INTR_0_PGRAPH_PENDING;
	}

	if (status & NV_PMC_INTR_0_CRTCn_PENDING) {
		nouveau_crtc_irq_handler(dev, (status >> 24) & 3);
		status &= ~NV_PMC_INTR_0_CRTCn_PENDING;
	}

	if (status & NV_PMC_INTR_0_NV50_DISPLAY_PENDING) {
		nv50_display_irq_handler(dev);
		status &= ~NV_PMC_INTR_0_NV50_DISPLAY_PENDING;
	}

	for (i = 0; i < 32 && status; i++) {
		if (!(status & (1 << i)) || !dev_priv->irq_handler[i])
			continue;

		dev_priv->irq_handler[i](dev);
		status &= ~(1 << i);
	}

	if (status)
		NV_ERROR(dev, "Unhandled PMC INTR status bits 0x%08x\n", status);

	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

	if (dev_priv->msi_enabled)
		nv_wr08(dev, 0x00088068, 0xff);

	return IRQ_HANDLED;
}

int
nouveau_irq_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	if (nouveau_msi != 0 && dev_priv->card_type >= NV_50) {
		ret = pci_enable_msi(dev->pdev);
		if (ret == 0) {
			NV_INFO(dev, "enabled MSI\n");
			dev_priv->msi_enabled = true;
		}
	}

	return drm_irq_install(dev);
}

void
nouveau_irq_fini(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	drm_irq_uninstall(dev);

	if (dev_priv->msi_enabled)
		pci_disable_msi(dev->pdev);
}

void
nouveau_irq_register(struct drm_device *dev, int status_bit,
		     void (*handler)(struct drm_device *))
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	dev_priv->irq_handler[status_bit] = handler;
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
}

void
nouveau_irq_unregister(struct drm_device *dev, int status_bit)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	unsigned long flags;

	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	dev_priv->irq_handler[status_bit] = NULL;
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
}
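
/*
 * Usage sketch (hypothetical engine code, not a caller in this file):
 * hook a spare PMC status bit with a handler of the matching signature
 * and drop it again on teardown:
 *
 *	static void myengine_isr(struct drm_device *dev);
 *
 *	nouveau_irq_register(dev, bit, myengine_isr);
 *	...
 *	nouveau_irq_unregister(dev, bit);
 */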