nv50_display.c

/*
 * Copyright (C) 2008 Maarten Maathuis.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "nv50_display.h"
#include "nouveau_crtc.h"
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
#include "nouveau_fb.h"
#include "nouveau_fbcon.h"
#include "drm_crtc_helper.h"

static void
nv50_evo_channel_del(struct nouveau_channel **pchan)
{
        struct nouveau_channel *chan = *pchan;

        if (!chan)
                return;
        *pchan = NULL;

        nouveau_gpuobj_channel_takedown(chan);
        nouveau_bo_ref(NULL, &chan->pushbuf_bo);

        if (chan->user)
                iounmap(chan->user);

        kfree(chan);
}

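/* Build a DMA object for the EVO channel: six dwords of instance memory
 * describing a linear VRAM window (class/flags, limit, offset), added to
 * the channel's RAMHT under 'name' so it can be referenced by handle.
 */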
static int
nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name,
                    uint32_t tile_flags, uint32_t magic_flags,
                    uint32_t offset, uint32_t limit)
{
        struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
        struct drm_device *dev = evo->dev;
        struct nouveau_gpuobj *obj = NULL;
        int ret;

        ret = nouveau_gpuobj_new(dev, evo, 6*4, 32, 0, &obj);
        if (ret)
                return ret;
        obj->engine = NVOBJ_ENGINE_DISPLAY;

        ret = nouveau_gpuobj_ref_add(dev, evo, name, obj, NULL);
        if (ret) {
                nouveau_gpuobj_del(dev, &obj);
                return ret;
        }

        dev_priv->engine.instmem.prepare_access(dev, true);
        nv_wo32(dev, obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
        nv_wo32(dev, obj, 1, limit);
        nv_wo32(dev, obj, 2, offset);
        nv_wo32(dev, obj, 3, 0x00000000);
        nv_wo32(dev, obj, 4, 0x00000000);
        nv_wo32(dev, obj, 5, 0x00010000);
        dev_priv->engine.instmem.finish_access(dev);

        return 0;
}

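/* Create the EVO channel itself: its own PRAMIN block and RAMHT, DMA
 * objects covering VRAM (plus the FB16/FB32 variants on chipsets other
 * than 0x50), a 4 KiB push buffer pinned in VRAM, and a mapping of the
 * channel's user control registers.
 */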
static int
nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_channel *chan;
        int ret;

        chan = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
        if (!chan)
                return -ENOMEM;
        *pchan = chan;

        chan->id = -1;
        chan->dev = dev;
        chan->user_get = 4;
        chan->user_put = 0;

        INIT_LIST_HEAD(&chan->ramht_refs);

        ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32768, 0x1000,
                                     NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
        if (ret) {
                NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
                nv50_evo_channel_del(pchan);
                return ret;
        }

        ret = drm_mm_init(&chan->ramin_heap,
                          chan->ramin->gpuobj->im_pramin->start, 32768);
        if (ret) {
                NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
                nv50_evo_channel_del(pchan);
                return ret;
        }

        ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 4096, 16,
                                     0, &chan->ramht);
        if (ret) {
                NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
                nv50_evo_channel_del(pchan);
                return ret;
        }

        if (dev_priv->chipset != 0x50) {
                ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19,
                                          0, 0xffffffff);
                if (ret) {
                        nv50_evo_channel_del(pchan);
                        return ret;
                }

                ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB32, 0x7a, 0x19,
                                          0, 0xffffffff);
                if (ret) {
                        nv50_evo_channel_del(pchan);
                        return ret;
                }
        }

        ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19,
                                  0, dev_priv->vram_size);
        if (ret) {
                nv50_evo_channel_del(pchan);
                return ret;
        }

        ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
                             false, true, &chan->pushbuf_bo);
        if (ret == 0)
                ret = nouveau_bo_pin(chan->pushbuf_bo, TTM_PL_FLAG_VRAM);
        if (ret) {
                NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
                nv50_evo_channel_del(pchan);
                return ret;
        }

        ret = nouveau_bo_map(chan->pushbuf_bo);
        if (ret) {
                NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
                nv50_evo_channel_del(pchan);
                return ret;
        }

        chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
                             NV50_PDISPLAY_USER(0), PAGE_SIZE);
        if (!chan->user) {
                NV_ERROR(dev, "Error mapping EVO control regs.\n");
                nv50_evo_channel_del(pchan);
                return -ENOMEM;
        }

        return 0;
}

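/* Bring up the display engine: copy the per-CRTC/DAC/SOR config into the
 * 0x6101xx control area, size the framebuffer aperture, un-wedge the EVO
 * channel if necessary, point it at the push buffer, submit an initial
 * batch of methods, and unmask the clock-change and hotplug interrupts.
 */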
int
nv50_display_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
        struct nouveau_channel *evo = dev_priv->evo;
        struct drm_connector *connector;
        uint32_t val, ram_amount, hpd_en[2];
        uint64_t start;
        int ret, i;

        NV_DEBUG_KMS(dev, "\n");

        nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004));
        /*
         * I think the 0x006101XX range is some kind of main control area
         * that enables things.
         */
        /* CRTC? */
        for (i = 0; i < 2; i++) {
                val = nv_rd32(dev, 0x00616100 + (i * 0x800));
                nv_wr32(dev, 0x00610190 + (i * 0x10), val);
                val = nv_rd32(dev, 0x00616104 + (i * 0x800));
                nv_wr32(dev, 0x00610194 + (i * 0x10), val);
                val = nv_rd32(dev, 0x00616108 + (i * 0x800));
                nv_wr32(dev, 0x00610198 + (i * 0x10), val);
                val = nv_rd32(dev, 0x0061610c + (i * 0x800));
                nv_wr32(dev, 0x0061019c + (i * 0x10), val);
        }
        /* DAC */
        for (i = 0; i < 3; i++) {
                val = nv_rd32(dev, 0x0061a000 + (i * 0x800));
                nv_wr32(dev, 0x006101d0 + (i * 0x04), val);
        }
        /* SOR */
        for (i = 0; i < 4; i++) {
                val = nv_rd32(dev, 0x0061c000 + (i * 0x800));
                nv_wr32(dev, 0x006101e0 + (i * 0x04), val);
        }
        /* Something not yet in use, tv-out maybe. */
        for (i = 0; i < 3; i++) {
                val = nv_rd32(dev, 0x0061e000 + (i * 0x800));
                nv_wr32(dev, 0x006101f0 + (i * 0x04), val);
        }

        for (i = 0; i < 3; i++) {
                nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(i), 0x00550000 |
                        NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
                nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001);
        }

        /* This used to be in crtc unblank, but seems out of place there. */
        nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0);
        /* RAM is clamped to 256 MiB. */
        ram_amount = dev_priv->vram_size;
        NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount);
        if (ram_amount > 256*1024*1024)
                ram_amount = 256*1024*1024;
        nv_wr32(dev, NV50_PDISPLAY_RAM_AMOUNT, ram_amount - 1);
        nv_wr32(dev, NV50_PDISPLAY_UNK_388, 0x150000);
        nv_wr32(dev, NV50_PDISPLAY_UNK_38C, 0);

        /* The precise purpose is unknown; I suspect it has something to do
         * with text mode.
         */
        if (nv_rd32(dev, NV50_PDISPLAY_INTR_1) & 0x100) {
                nv_wr32(dev, NV50_PDISPLAY_INTR_1, 0x100);
                nv_wr32(dev, 0x006194e8, nv_rd32(dev, 0x006194e8) & ~1);
                if (!nv_wait(0x006194e8, 2, 0)) {
                        NV_ERROR(dev, "timeout: (0x6194e8 & 2) != 0\n");
                        NV_ERROR(dev, "0x6194e8 = 0x%08x\n",
                                 nv_rd32(dev, 0x6194e8));
                        return -EBUSY;
                }
        }

        /* taken from nv bug #12637, attempts to un-wedge the hw if it's
         * stuck in some unspecified state
         */
        start = ptimer->read(dev);
        nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x2b00);
        while ((val = nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))) & 0x1e0000) {
                if ((val & 0x9f0000) == 0x20000)
                        nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
                                val | 0x800000);

                if ((val & 0x3f0000) == 0x30000)
                        nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
                                val | 0x200000);

                if (ptimer->read(dev) - start > 1000000000ULL) {
                        NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) != 0\n");
                        NV_ERROR(dev, "0x610200 = 0x%08x\n", val);
                        return -EBUSY;
                }
        }

        nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, NV50_PDISPLAY_CTRL_STATE_ENABLE);
        nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1000b03);
        if (!nv_wait(NV50_PDISPLAY_CHANNEL_STAT(0), 0x40000000, 0x40000000)) {
                NV_ERROR(dev, "timeout: (0x610200 & 0x40000000) == 0x40000000\n");
                NV_ERROR(dev, "0x610200 = 0x%08x\n",
                         nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
                return -EBUSY;
        }

        for (i = 0; i < 2; i++) {
                nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
                if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
                             NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
                        NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
                        NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
                                 nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
                        return -EBUSY;
                }

                nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
                        NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON);
                if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
                             NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS,
                             NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) {
                        NV_ERROR(dev, "timeout: "
                                 "CURSOR_CTRL2_STATUS_ACTIVE(%d)\n", i);
                        NV_ERROR(dev, "CURSOR_CTRL2(%d) = 0x%08x\n", i,
                                 nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
                        return -EBUSY;
                }
        }

        nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->instance >> 8) | 9);

        /* initialise fifo */
        nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0),
                ((evo->pushbuf_bo->bo.mem.mm_node->start << PAGE_SHIFT) >> 8) |
                NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM |
                NV50_PDISPLAY_CHANNEL_DMA_CB_VALID);
        nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK2(0), 0x00010000);
        nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK3(0), 0x00000002);
        if (!nv_wait(0x610200, 0x80000000, 0x00000000)) {
                NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n");
                NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, 0x610200));
                return -EBUSY;
        }
        nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
                (nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)) & ~0x00000003) |
                NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
        nv_wr32(dev, NV50_PDISPLAY_USER_PUT(0), 0);
        nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x01000003 |
                NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
        nv_wr32(dev, 0x610300, nv_rd32(dev, 0x610300) & ~1);

        evo->dma.max = (4096/4) - 2;
        evo->dma.put = 0;
        evo->dma.cur = evo->dma.put;
        evo->dma.free = evo->dma.max - evo->dma.cur;

        ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
        if (ret)
                return ret;

        for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
                OUT_RING(evo, 0);

        ret = RING_SPACE(evo, 11);
        if (ret)
                return ret;
        BEGIN_RING(evo, 0, NV50_EVO_UNK84, 2);
        OUT_RING(evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
        OUT_RING(evo, NV50_EVO_DMA_NOTIFY_HANDLE_NONE);
        BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, FB_DMA), 1);
        OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
        BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK0800), 1);
        OUT_RING(evo, 0);
        BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, DISPLAY_START), 1);
        OUT_RING(evo, 0);
        BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK082C), 1);
        OUT_RING(evo, 0);
        FIRE_RING(evo);
        if (!nv_wait(0x640004, 0xffffffff, evo->dma.put << 2))
                NV_ERROR(dev, "evo pushbuf stalled\n");

        /* enable clock change interrupts. */
        nv_wr32(dev, 0x610028, 0x00010001);
        nv_wr32(dev, NV50_PDISPLAY_INTR_EN, (NV50_PDISPLAY_INTR_EN_CLK_UNK10 |
                                             NV50_PDISPLAY_INTR_EN_CLK_UNK20 |
                                             NV50_PDISPLAY_INTR_EN_CLK_UNK40));

        /* enable hotplug interrupts */
        hpd_en[0] = hpd_en[1] = 0;
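        /* Each enable register packs plug events in its low 16 bits and
         * unplug events in its high 16 bits, one bit per GPIO line; collect
         * both for every connector that has a hotplug GPIO assigned.
         */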
        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                struct nouveau_connector *conn = nouveau_connector(connector);
                struct dcb_gpio_entry *gpio;

                if (conn->dcb->gpio_tag == 0xff)
                        continue;

                gpio = nouveau_bios_gpio_entry(dev, conn->dcb->gpio_tag);
                if (!gpio)
                        continue;

                hpd_en[gpio->line >> 4] |= (0x00010001 << (gpio->line & 0xf));
        }

        nv_wr32(dev, 0xe054, 0xffffffff);
        nv_wr32(dev, 0xe050, hpd_en[0]);
        if (dev_priv->chipset >= 0x90) {
                nv_wr32(dev, 0xe074, 0xffffffff);
                nv_wr32(dev, 0xe070, hpd_en[1]);
        }

        return 0;
}

static int nv50_display_disable(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct drm_crtc *drm_crtc;
        int ret, i;

        NV_DEBUG_KMS(dev, "\n");

        list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
                struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);

                nv50_crtc_blank(crtc, true);
        }

        ret = RING_SPACE(dev_priv->evo, 2);
        if (ret == 0) {
                BEGIN_RING(dev_priv->evo, 0, NV50_EVO_UPDATE, 1);
                OUT_RING(dev_priv->evo, 0);
        }
        FIRE_RING(dev_priv->evo);

        /* Almost like ack'ing a vblank interrupt, maybe in the spirit of
         * cleaning up?
         */
        list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
                struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
                uint32_t mask = NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(crtc->index);

                if (!crtc->base.enabled)
                        continue;

                nv_wr32(dev, NV50_PDISPLAY_INTR_1, mask);
                if (!nv_wait(NV50_PDISPLAY_INTR_1, mask, mask)) {
                        NV_ERROR(dev, "timeout: (0x610024 & 0x%08x) == "
                                 "0x%08x\n", mask, mask);
                        NV_ERROR(dev, "0x610024 = 0x%08x\n",
                                 nv_rd32(dev, NV50_PDISPLAY_INTR_1));
                }
        }

        nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0);
        nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, 0);
        if (!nv_wait(NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) {
                NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n");
                NV_ERROR(dev, "0x610200 = 0x%08x\n",
                         nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
        }

        for (i = 0; i < 3; i++) {
                if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_STATE(i),
                             NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
                        NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i);
                        NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", i,
                                 nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i)));
                }
        }

        /* disable interrupts. */
        nv_wr32(dev, NV50_PDISPLAY_INTR_EN, 0x00000000);

        /* disable hotplug interrupts */
        nv_wr32(dev, 0xe054, 0xffffffff);
        nv_wr32(dev, 0xe050, 0x00000000);
        if (dev_priv->chipset >= 0x90) {
                nv_wr32(dev, 0xe074, 0xffffffff);
                nv_wr32(dev, 0xe070, 0x00000000);
        }

        return 0;
}

int nv50_display_create(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct dcb_table *dcb = &dev_priv->vbios.dcb;
        struct drm_connector *connector, *ct;
        int ret, i;

        NV_DEBUG_KMS(dev, "\n");

        /* init basic kernel modesetting */
        drm_mode_config_init(dev);

        /* Initialise some optional connector properties. */
        drm_mode_create_scaling_mode_property(dev);
        drm_mode_create_dithering_property(dev);

        dev->mode_config.min_width = 0;
        dev->mode_config.min_height = 0;

        dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;

        dev->mode_config.max_width = 8192;
        dev->mode_config.max_height = 8192;

        dev->mode_config.fb_base = dev_priv->fb_phys;

        /* Create EVO channel */
        ret = nv50_evo_channel_new(dev, &dev_priv->evo);
        if (ret) {
                NV_ERROR(dev, "Error creating EVO channel: %d\n", ret);
                return ret;
        }

        /* Create CRTC objects */
        for (i = 0; i < 2; i++)
                nv50_crtc_create(dev, i);

        /* We set up the encoders from the BIOS table */
        for (i = 0 ; i < dcb->entries; i++) {
                struct dcb_entry *entry = &dcb->entry[i];

                if (entry->location != DCB_LOC_ON_CHIP) {
                        NV_WARN(dev, "Off-chip encoder %d/%d unsupported\n",
                                entry->type, ffs(entry->or) - 1);
                        continue;
                }

                connector = nouveau_connector_create(dev, entry->connector);
                if (IS_ERR(connector))
                        continue;

                switch (entry->type) {
                case OUTPUT_TMDS:
                case OUTPUT_LVDS:
                case OUTPUT_DP:
                        nv50_sor_create(connector, entry);
                        break;
                case OUTPUT_ANALOG:
                        nv50_dac_create(connector, entry);
                        break;
                default:
                        NV_WARN(dev, "DCB encoder %d unknown\n", entry->type);
                        continue;
                }
        }

        list_for_each_entry_safe(connector, ct,
                                 &dev->mode_config.connector_list, head) {
                if (!connector->encoder_ids[0]) {
                        NV_WARN(dev, "%s has no encoders, removing\n",
                                drm_get_connector_name(connector));
                        connector->funcs->destroy(connector);
                }
        }

        ret = nv50_display_init(dev);
        if (ret) {
                nv50_display_destroy(dev);
                return ret;
        }

        return 0;
}

int nv50_display_destroy(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        NV_DEBUG_KMS(dev, "\n");

        drm_mode_config_cleanup(dev);

        nv50_display_disable(dev);
        nv50_evo_channel_del(&dev_priv->evo);

        return 0;
}

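/* Read back the MODE_CTRL value last submitted for a DAC or SOR; the SOR
 * copy sits at a different offset on nv90+ chipsets, except 0x92 and 0xa0.
 */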
static inline uint32_t
nv50_display_mode_ctrl(struct drm_device *dev, bool sor, int or)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t mc;

        if (sor) {
                if (dev_priv->chipset < 0x90 ||
                    dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0)
                        mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_P(or));
                else
                        mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_P(or));
        } else {
                mc = nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_P(or));
        }

        return mc;
}

static int
nv50_display_irq_head(struct drm_device *dev, int *phead,
                      struct dcb_entry **pdcbent)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t unk30 = nv_rd32(dev, NV50_PDISPLAY_UNK30_CTRL);
        uint32_t dac = 0, sor = 0;
        int head, i, or = 0, type = OUTPUT_ANY;

        /* We're assuming that head 0 *or* head 1 will be active here,
         * and not both.  I'm not sure if the hw will even signal both
         * ever, but it definitely shouldn't for us as we commit each
         * CRTC separately, and submission will be blocked by the GPU
         * until we handle each in turn.
         */
        NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
        head = ffs((unk30 >> 9) & 3) - 1;
        if (head < 0)
                return -EINVAL;

        /* This assumes CRTCs are never bound to multiple encoders, which
         * should be the case.
         */
        for (i = 0; i < 3 && type == OUTPUT_ANY; i++) {
                uint32_t mc = nv50_display_mode_ctrl(dev, false, i);

                if (!(mc & (1 << head)))
                        continue;

                switch ((mc >> 8) & 0xf) {
                case 0: type = OUTPUT_ANALOG; break;
                case 1: type = OUTPUT_TV; break;
                default:
                        NV_ERROR(dev, "unknown dac mode_ctrl: 0x%08x\n", dac);
                        return -1;
                }

                or = i;
        }

        for (i = 0; i < 4 && type == OUTPUT_ANY; i++) {
                uint32_t mc = nv50_display_mode_ctrl(dev, true, i);

                if (!(mc & (1 << head)))
                        continue;

                switch ((mc >> 8) & 0xf) {
                case 0: type = OUTPUT_LVDS; break;
                case 1: type = OUTPUT_TMDS; break;
                case 2: type = OUTPUT_TMDS; break;
                case 5: type = OUTPUT_TMDS; break;
                case 8: type = OUTPUT_DP; break;
                case 9: type = OUTPUT_DP; break;
                default:
                        NV_ERROR(dev, "unknown sor mode_ctrl: 0x%08x\n", sor);
                        return -1;
                }

                or = i;
        }

        NV_DEBUG_KMS(dev, "type %d, or %d\n", type, or);
        if (type == OUTPUT_ANY) {
                NV_ERROR(dev, "unknown encoder!!\n");
                return -1;
        }

        for (i = 0; i < dev_priv->vbios.dcb.entries; i++) {
                struct dcb_entry *dcbent = &dev_priv->vbios.dcb.entry[i];

                if (dcbent->type != type)
                        continue;

                if (!(dcbent->or & (1 << or)))
                        continue;

                *phead = head;
                *pdcbent = dcbent;
                return 0;
        }

        NV_ERROR(dev, "no DCB entry for %d %d\n", dac != 0, or);
        /* Callers treat a zero return as success and dereference *pdcbent,
         * so report the missing DCB entry as an error.
         */
        return -EINVAL;
}

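/* Choose the VBIOS output script for this encoder: the low bits come from
 * the MODE_CTRL value, LVDS gains dual-link (0x0100) and 24-bit (0x0200)
 * flags, TMDS gains the dual-link flag above 165 MHz, and DAC outputs use
 * script 0xff.
 */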
static uint32_t
nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
                           int pxclk)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_connector *nv_connector = NULL;
        struct drm_encoder *encoder;
        struct nvbios *bios = &dev_priv->vbios;
        uint32_t mc, script = 0, or;

        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);

                if (nv_encoder->dcb != dcbent)
                        continue;

                nv_connector = nouveau_encoder_connector_get(nv_encoder);
                break;
        }

        or = ffs(dcbent->or) - 1;
        mc = nv50_display_mode_ctrl(dev, dcbent->type != OUTPUT_ANALOG, or);
        switch (dcbent->type) {
        case OUTPUT_LVDS:
                script = (mc >> 8) & 0xf;
                if (bios->fp_no_ddc) {
                        if (bios->fp.dual_link)
                                script |= 0x0100;
                        if (bios->fp.if_is_24bit)
                                script |= 0x0200;
                } else {
                        if (pxclk >= bios->fp.duallink_transition_clk) {
                                script |= 0x0100;
                                if (bios->fp.strapless_is_24bit & 2)
                                        script |= 0x0200;
                        } else
                        if (bios->fp.strapless_is_24bit & 1)
                                script |= 0x0200;

                        if (nv_connector && nv_connector->edid &&
                            (nv_connector->edid->revision >= 4) &&
                            (nv_connector->edid->input & 0x70) >= 0x20)
                                script |= 0x0200;
                }

                if (nouveau_uscript_lvds >= 0) {
                        NV_INFO(dev, "override script 0x%04x with 0x%04x "
                                "for output LVDS-%d\n", script,
                                nouveau_uscript_lvds, or);
                        script = nouveau_uscript_lvds;
                }
                break;
        case OUTPUT_TMDS:
                script = (mc >> 8) & 0xf;
                if (pxclk >= 165000)
                        script |= 0x0100;

                if (nouveau_uscript_tmds >= 0) {
                        NV_INFO(dev, "override script 0x%04x with 0x%04x "
                                "for output TMDS-%d\n", script,
                                nouveau_uscript_tmds, or);
                        script = nouveau_uscript_tmds;
                }
                break;
        case OUTPUT_DP:
                script = (mc >> 8) & 0xf;
                break;
        case OUTPUT_ANALOG:
                script = 0xff;
                break;
        default:
                NV_ERROR(dev, "modeset on unsupported output type!\n");
                break;
        }

        return script;
}

static void
nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_channel *chan;
        struct list_head *entry, *tmp;

        list_for_each_safe(entry, tmp, &dev_priv->vbl_waiting) {
                chan = list_entry(entry, struct nouveau_channel, nvsw.vbl_wait);

                nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset,
                                chan->nvsw.vblsem_rval);
                list_del(&chan->nvsw.vbl_wait);
        }
}

static void
nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr)
{
        intr &= NV50_PDISPLAY_INTR_1_VBLANK_CRTC;

        if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0)
                nv50_display_vblank_crtc_handler(dev, 0);

        if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1)
                nv50_display_vblank_crtc_handler(dev, 1);

        nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev,
                NV50_PDISPLAY_INTR_EN) & ~intr);
        nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr);
}

static void
nv50_display_unk10_handler(struct drm_device *dev)
{
        struct dcb_entry *dcbent;
        int head, ret;

        ret = nv50_display_irq_head(dev, &head, &dcbent);
        if (ret)
                goto ack;

        nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) & ~8);

        nouveau_bios_run_display_table(dev, dcbent, 0, -1);

ack:
        nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK10);
        nv_wr32(dev, 0x610030, 0x80000000);
}

static void
nv50_display_unk20_dp_hack(struct drm_device *dev, struct dcb_entry *dcb)
{
        int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1);
        struct drm_encoder *encoder;
        uint32_t tmp, unk0 = 0, unk1 = 0;

        if (dcb->type != OUTPUT_DP)
                return;

        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);

                if (nv_encoder->dcb == dcb) {
                        unk0 = nv_encoder->dp.unk0;
                        unk1 = nv_encoder->dp.unk1;
                        break;
                }
        }

        if (unk0 || unk1) {
                tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
                tmp &= 0xfffffe03;
                nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp | unk0);

                tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link));
                tmp &= 0xfef080c0;
                nv_wr32(dev, NV50_SOR_DP_UNK128(or, link), tmp | unk1);
        }
}

/* If programming a TMDS output on a SOR that can also be configured for
 * DisplayPort, make sure NV50_SOR_DP_CTRL_ENABLE is forced off.
 *
 * It looks like the VBIOS TMDS scripts make an attempt at this, however,
 * the VBIOS scripts on at least one board I have only switch it off on
 * link 0, causing a blank display if the output has previously been
 * programmed for DisplayPort.
 */
static void
nv50_display_unk20_dp_set_tmds(struct drm_device *dev, struct dcb_entry *dcb)
{
        int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1);
        struct drm_encoder *encoder;
        u32 tmp;

        if (dcb->type != OUTPUT_TMDS)
                return;

        list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
                struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);

                if (nv_encoder->dcb->type == OUTPUT_DP) {
                        tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
                        tmp &= ~NV50_SOR_DP_CTRL_ENABLED;
                        nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp);
                        break;
                }
        }
}

static void
nv50_display_unk20_handler(struct drm_device *dev)
{
        struct dcb_entry *dcbent;
        uint32_t tmp, pclk, script;
        int head, or, ret;

        ret = nv50_display_irq_head(dev, &head, &dcbent);
        if (ret)
                goto ack;
        or = ffs(dcbent->or) - 1;
        pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff;
        script = nv50_display_script_select(dev, dcbent, pclk);

        NV_DEBUG_KMS(dev, "head %d pxclk: %dKHz\n", head, pclk);

        if (dcbent->type != OUTPUT_DP)
                nouveau_bios_run_display_table(dev, dcbent, 0, -2);

        nv50_crtc_set_clock(dev, head, pclk);

        nouveau_bios_run_display_table(dev, dcbent, script, pclk);

        nv50_display_unk20_dp_hack(dev, dcbent);
        nv50_display_unk20_dp_set_tmds(dev, dcbent);

        tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head));
        tmp &= ~0x000000f;
        nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head), tmp);

        if (dcbent->type != OUTPUT_ANALOG) {
                tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or));
                tmp &= ~0x00000f0f;
                if (script & 0x0100)
                        tmp |= 0x00000101;
                nv_wr32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or), tmp);
        } else {
                nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0);
        }

ack:
        nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20);
        nv_wr32(dev, 0x610030, 0x80000000);
}

static void
nv50_display_unk40_handler(struct drm_device *dev)
{
        struct dcb_entry *dcbent;
        int head, pclk, script, ret;

        ret = nv50_display_irq_head(dev, &head, &dcbent);
        if (ret)
                goto ack;
        pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff;
        script = nv50_display_script_select(dev, dcbent, pclk);

        nouveau_bios_run_display_table(dev, dcbent, script, -pclk);

ack:
        nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK40);
        nv_wr32(dev, 0x610030, 0x80000000);
        nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) | 8);
}

void
nv50_display_irq_handler_bh(struct work_struct *work)
{
        struct drm_nouveau_private *dev_priv =
                container_of(work, struct drm_nouveau_private, irq_work);
        struct drm_device *dev = dev_priv->dev;

        for (;;) {
                uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
                uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);

                NV_DEBUG_KMS(dev, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1);

                if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10)
                        nv50_display_unk10_handler(dev);
                else
                if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK20)
                        nv50_display_unk20_handler(dev);
                else
                if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK40)
                        nv50_display_unk40_handler(dev);
                else
                        break;
        }

        nv_wr32(dev, NV03_PMC_INTR_EN_0, 1);
}

static void
nv50_display_error_handler(struct drm_device *dev)
{
        uint32_t addr, data;

        nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000);
        addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR);
        data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA);

        NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x (0x%04x 0x%02x)\n",
                 0, addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);

        nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000);
}

void
nv50_display_irq_hotplug_bh(struct work_struct *work)
{
        struct drm_nouveau_private *dev_priv =
                container_of(work, struct drm_nouveau_private, hpd_work);
        struct drm_device *dev = dev_priv->dev;
        struct drm_connector *connector;
        const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
        uint32_t unplug_mask, plug_mask, change_mask;
        uint32_t hpd0, hpd1 = 0;

        hpd0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
        if (dev_priv->chipset >= 0x90)
                hpd1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);
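
        /* Each status register packs plug events in its low 16 bits and
         * unplug events in its high 16 bits; fold the nv90+ second register
         * pair in so each GPIO line maps to a single bit in each mask.
         */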
        plug_mask = (hpd0 & 0x0000ffff) | (hpd1 << 16);
        unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000);
        change_mask = plug_mask | unplug_mask;

        list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
                struct drm_encoder_helper_funcs *helper;
                struct nouveau_connector *nv_connector =
                        nouveau_connector(connector);
                struct nouveau_encoder *nv_encoder;
                struct dcb_gpio_entry *gpio;
                uint32_t reg;
                bool plugged;

                if (!nv_connector->dcb)
                        continue;

                gpio = nouveau_bios_gpio_entry(dev, nv_connector->dcb->gpio_tag);
                if (!gpio || !(change_mask & (1 << gpio->line)))
                        continue;

                reg = nv_rd32(dev, gpio_reg[gpio->line >> 3]);
                plugged = !!(reg & (4 << ((gpio->line & 7) << 2)));
                NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un",
                        drm_get_connector_name(connector));

                if (!connector->encoder || !connector->encoder->crtc ||
                    !connector->encoder->crtc->enabled)
                        continue;

                nv_encoder = nouveau_encoder(connector->encoder);
                helper = connector->encoder->helper_private;

                if (nv_encoder->dcb->type != OUTPUT_DP)
                        continue;

                if (plugged)
                        helper->dpms(connector->encoder, DRM_MODE_DPMS_ON);
                else
                        helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
        }

        nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054));
        if (dev_priv->chipset >= 0x90)
                nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074));

        drm_helper_hpd_irq_event(dev);
}

void
nv50_display_irq_handler(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t delayed = 0;

        if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) {
                if (!work_pending(&dev_priv->hpd_work))
                        queue_work(dev_priv->wq, &dev_priv->hpd_work);
        }

        while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
                uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
                uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
                uint32_t clock;

                NV_DEBUG_KMS(dev, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1);

                if (!intr0 && !(intr1 & ~delayed))
                        break;

                if (intr0 & 0x00010000) {
                        nv50_display_error_handler(dev);
                        intr0 &= ~0x00010000;
                }

                if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) {
                        nv50_display_vblank_handler(dev, intr1);
                        intr1 &= ~NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
                }
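
                /* Clock-change interrupts are deferred to the irq_work bottom
                 * half, which runs the VBIOS modeset scripts; the PMC master
                 * interrupt enable stays off until that work has completed.
                 */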
                clock = (intr1 & (NV50_PDISPLAY_INTR_1_CLK_UNK10 |
                                  NV50_PDISPLAY_INTR_1_CLK_UNK20 |
                                  NV50_PDISPLAY_INTR_1_CLK_UNK40));
                if (clock) {
                        nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
                        if (!work_pending(&dev_priv->irq_work))
                                queue_work(dev_priv->wq, &dev_priv->irq_work);
                        delayed |= clock;
                        intr1 &= ~clock;
                }

                if (intr0) {
                        NV_ERROR(dev, "unknown PDISPLAY_INTR_0: 0x%08x\n", intr0);
                        nv_wr32(dev, NV50_PDISPLAY_INTR_0, intr0);
                }

                if (intr1) {
                        NV_ERROR(dev,
                                 "unknown PDISPLAY_INTR_1: 0x%08x\n", intr1);
                        nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr1);
                }
        }
}