nv50_display.c

/*
 * Copyright (C) 2008 Maarten Maathuis.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "nv50_display.h"
#include "nouveau_crtc.h"
#include "nouveau_encoder.h"
#include "nouveau_connector.h"
#include "nouveau_fb.h"
#include "nouveau_fbcon.h"
#include "drm_crtc_helper.h"

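/* Tear down an EVO channel: take down its gpuobjs, drop the push buffer
 * reference, unmap the user control registers and free the structure.
 */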
static void
nv50_evo_channel_del(struct nouveau_channel **pchan)
{
	struct nouveau_channel *chan = *pchan;

	if (!chan)
		return;
	*pchan = NULL;

	nouveau_gpuobj_channel_takedown(chan);
	nouveau_bo_ref(NULL, &chan->pushbuf_bo);

	if (chan->user)
		iounmap(chan->user);

	kfree(chan);
}

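/* Create a DMA object in the EVO channel's hash table describing a VRAM
 * window (offset/limit) with the given class and tiling flags, for the
 * display engine to scan out from.
 */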
static int
nv50_evo_dmaobj_new(struct nouveau_channel *evo, uint32_t class, uint32_t name,
		    uint32_t tile_flags, uint32_t magic_flags,
		    uint32_t offset, uint32_t limit)
{
	struct drm_nouveau_private *dev_priv = evo->dev->dev_private;
	struct drm_device *dev = evo->dev;
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	ret = nouveau_gpuobj_new(dev, evo, 6*4, 32, 0, &obj);
	if (ret)
		return ret;
	obj->engine = NVOBJ_ENGINE_DISPLAY;

	ret = nouveau_gpuobj_ref_add(dev, evo, name, obj, NULL);
	if (ret) {
		nouveau_gpuobj_del(dev, &obj);
		return ret;
	}

	dev_priv->engine.instmem.prepare_access(dev, true);
	nv_wo32(dev, obj, 0, (tile_flags << 22) | (magic_flags << 16) | class);
	nv_wo32(dev, obj, 1, limit);
	nv_wo32(dev, obj, 2, offset);
	nv_wo32(dev, obj, 3, 0x00000000);
	nv_wo32(dev, obj, 4, 0x00000000);
	nv_wo32(dev, obj, 5, 0x00010000);
	dev_priv->engine.instmem.finish_access(dev);

	return 0;
}

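/* Allocate and set up the EVO channel: instance memory and a RAMHT, the
 * DMA objects the display methods reference, a pinned VRAM push buffer,
 * and a mapping of the channel's user control registers.
 */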
static int
nv50_evo_channel_new(struct drm_device *dev, struct nouveau_channel **pchan)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan;
	int ret;

	chan = kzalloc(sizeof(struct nouveau_channel), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;
	*pchan = chan;

	chan->id = -1;
	chan->dev = dev;
	chan->user_get = 4;
	chan->user_put = 0;

	INIT_LIST_HEAD(&chan->ramht_refs);

	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 32768, 0x1000,
				     NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
	if (ret) {
		NV_ERROR(dev, "Error allocating EVO channel memory: %d\n", ret);
		nv50_evo_channel_del(pchan);
		return ret;
	}

	ret = nouveau_mem_init_heap(&chan->ramin_heap, chan->ramin->gpuobj->
				    im_pramin->start, 32768);
	if (ret) {
		NV_ERROR(dev, "Error initialising EVO PRAMIN heap: %d\n", ret);
		nv50_evo_channel_del(pchan);
		return ret;
	}

	ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 4096, 16,
				     0, &chan->ramht);
	if (ret) {
		NV_ERROR(dev, "Unable to allocate EVO RAMHT: %d\n", ret);
		nv50_evo_channel_del(pchan);
		return ret;
	}

	if (dev_priv->chipset != 0x50) {
		ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB16, 0x70, 0x19,
					  0, 0xffffffff);
		if (ret) {
			nv50_evo_channel_del(pchan);
			return ret;
		}

		ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoFB32, 0x7a, 0x19,
					  0, 0xffffffff);
		if (ret) {
			nv50_evo_channel_del(pchan);
			return ret;
		}
	}

	ret = nv50_evo_dmaobj_new(chan, 0x3d, NvEvoVRAM, 0, 0x19,
				  0, dev_priv->vram_size);
	if (ret) {
		nv50_evo_channel_del(pchan);
		return ret;
	}

	ret = nouveau_bo_new(dev, NULL, 4096, 0, TTM_PL_FLAG_VRAM, 0, 0,
			     false, true, &chan->pushbuf_bo);
	if (ret == 0)
		ret = nouveau_bo_pin(chan->pushbuf_bo, TTM_PL_FLAG_VRAM);
	if (ret) {
		NV_ERROR(dev, "Error creating EVO DMA push buffer: %d\n", ret);
		nv50_evo_channel_del(pchan);
		return ret;
	}

	ret = nouveau_bo_map(chan->pushbuf_bo);
	if (ret) {
		NV_ERROR(dev, "Error mapping EVO DMA push buffer: %d\n", ret);
		nv50_evo_channel_del(pchan);
		return ret;
	}

	chan->user = ioremap(pci_resource_start(dev->pdev, 0) +
			     NV50_PDISPLAY_USER(0), PAGE_SIZE);
	if (!chan->user) {
		NV_ERROR(dev, "Error mapping EVO control regs.\n");
		nv50_evo_channel_del(pchan);
		return -ENOMEM;
	}

	return 0;
}

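/* Bring the display engine up: copy the per-CRTC/DAC/SOR configuration
 * into the 0x6101xx control area, un-wedge the EVO channel if it is stuck,
 * point it at our push buffer, submit an initial state, then enable the
 * clock-change and hotplug interrupts we care about.
 */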
int
nv50_display_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_timer_engine *ptimer = &dev_priv->engine.timer;
	struct nouveau_channel *evo = dev_priv->evo;
	struct drm_connector *connector;
	uint32_t val, ram_amount, hpd_en[2];
	uint64_t start;
	int ret, i;

	NV_DEBUG_KMS(dev, "\n");

	nv_wr32(dev, 0x00610184, nv_rd32(dev, 0x00614004));
	/*
	 * I think the 0x006101XX range is some kind of main control area
	 * that enables things.
	 */
	/* CRTC? */
	for (i = 0; i < 2; i++) {
		val = nv_rd32(dev, 0x00616100 + (i * 0x800));
		nv_wr32(dev, 0x00610190 + (i * 0x10), val);
		val = nv_rd32(dev, 0x00616104 + (i * 0x800));
		nv_wr32(dev, 0x00610194 + (i * 0x10), val);
		val = nv_rd32(dev, 0x00616108 + (i * 0x800));
		nv_wr32(dev, 0x00610198 + (i * 0x10), val);
		val = nv_rd32(dev, 0x0061610c + (i * 0x800));
		nv_wr32(dev, 0x0061019c + (i * 0x10), val);
	}
	/* DAC */
	for (i = 0; i < 3; i++) {
		val = nv_rd32(dev, 0x0061a000 + (i * 0x800));
		nv_wr32(dev, 0x006101d0 + (i * 0x04), val);
	}
	/* SOR */
	for (i = 0; i < 4; i++) {
		val = nv_rd32(dev, 0x0061c000 + (i * 0x800));
		nv_wr32(dev, 0x006101e0 + (i * 0x04), val);
	}
	/* Something not yet in use, tv-out maybe. */
	for (i = 0; i < 3; i++) {
		val = nv_rd32(dev, 0x0061e000 + (i * 0x800));
		nv_wr32(dev, 0x006101f0 + (i * 0x04), val);
	}

	for (i = 0; i < 3; i++) {
		nv_wr32(dev, NV50_PDISPLAY_DAC_DPMS_CTRL(i), 0x00550000 |
			NV50_PDISPLAY_DAC_DPMS_CTRL_PENDING);
		nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL1(i), 0x00000001);
	}

	/* This used to be in crtc unblank, but seems out of place there. */
	nv_wr32(dev, NV50_PDISPLAY_UNK_380, 0);
	/* RAM is clamped to 256 MiB. */
	ram_amount = dev_priv->vram_size;
	NV_DEBUG_KMS(dev, "ram_amount %d\n", ram_amount);
	if (ram_amount > 256*1024*1024)
		ram_amount = 256*1024*1024;
	nv_wr32(dev, NV50_PDISPLAY_RAM_AMOUNT, ram_amount - 1);
	nv_wr32(dev, NV50_PDISPLAY_UNK_388, 0x150000);
	nv_wr32(dev, NV50_PDISPLAY_UNK_38C, 0);

	/* The precise purpose is unknown, I suspect it has something to do
	 * with text mode.
	 */
	if (nv_rd32(dev, NV50_PDISPLAY_INTR_1) & 0x100) {
		nv_wr32(dev, NV50_PDISPLAY_INTR_1, 0x100);
		nv_wr32(dev, 0x006194e8, nv_rd32(dev, 0x006194e8) & ~1);
		if (!nv_wait(0x006194e8, 2, 0)) {
			NV_ERROR(dev, "timeout: (0x6194e8 & 2) != 0\n");
			NV_ERROR(dev, "0x6194e8 = 0x%08x\n",
				 nv_rd32(dev, 0x6194e8));
			return -EBUSY;
		}
	}

	/* taken from nv bug #12637, attempts to un-wedge the hw if it's
	 * stuck in some unspecified state
	 */
	start = ptimer->read(dev);
	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x2b00);
	while ((val = nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0))) & 0x1e0000) {
		if ((val & 0x9f0000) == 0x20000)
			nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
				val | 0x800000);

		if ((val & 0x3f0000) == 0x30000)
			nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
				val | 0x200000);

		if (ptimer->read(dev) - start > 1000000000ULL) {
			NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) != 0\n");
			NV_ERROR(dev, "0x610200 = 0x%08x\n", val);
			return -EBUSY;
		}
	}

	nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, NV50_PDISPLAY_CTRL_STATE_ENABLE);
	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x1000b03);
	if (!nv_wait(NV50_PDISPLAY_CHANNEL_STAT(0), 0x40000000, 0x40000000)) {
		NV_ERROR(dev, "timeout: (0x610200 & 0x40000000) == 0x40000000\n");
		NV_ERROR(dev, "0x610200 = 0x%08x\n",
			 nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
		return -EBUSY;
	}

	for (i = 0; i < 2; i++) {
		nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i), 0x2000);
		if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
			     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS, 0)) {
			NV_ERROR(dev, "timeout: CURSOR_CTRL2_STATUS == 0\n");
			NV_ERROR(dev, "CURSOR_CTRL2 = 0x%08x\n",
				 nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
			return -EBUSY;
		}

		nv_wr32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
			NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_ON);
		if (!nv_wait(NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i),
			     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS,
			     NV50_PDISPLAY_CURSOR_CURSOR_CTRL2_STATUS_ACTIVE)) {
			NV_ERROR(dev, "timeout: "
				 "CURSOR_CTRL2_STATUS_ACTIVE(%d)\n", i);
			NV_ERROR(dev, "CURSOR_CTRL2(%d) = 0x%08x\n", i,
				 nv_rd32(dev, NV50_PDISPLAY_CURSOR_CURSOR_CTRL2(i)));
			return -EBUSY;
		}
	}

	nv_wr32(dev, NV50_PDISPLAY_OBJECTS, (evo->ramin->instance >> 8) | 9);

	/* initialise fifo */
	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_DMA_CB(0),
		((evo->pushbuf_bo->bo.mem.mm_node->start << PAGE_SHIFT) >> 8) |
		NV50_PDISPLAY_CHANNEL_DMA_CB_LOCATION_VRAM |
		NV50_PDISPLAY_CHANNEL_DMA_CB_VALID);
	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK2(0), 0x00010000);
	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_UNK3(0), 0x00000002);
	if (!nv_wait(0x610200, 0x80000000, 0x00000000)) {
		NV_ERROR(dev, "timeout: (0x610200 & 0x80000000) == 0\n");
		NV_ERROR(dev, "0x610200 = 0x%08x\n", nv_rd32(dev, 0x610200));
		return -EBUSY;
	}
	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0),
		(nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)) & ~0x00000003) |
		NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
	nv_wr32(dev, NV50_PDISPLAY_USER_PUT(0), 0);
	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0x01000003 |
		NV50_PDISPLAY_CHANNEL_STAT_DMA_ENABLED);
	nv_wr32(dev, 0x610300, nv_rd32(dev, 0x610300) & ~1);

	evo->dma.max = (4096/4) - 2;
	evo->dma.put = 0;
	evo->dma.cur = evo->dma.put;
	evo->dma.free = evo->dma.max - evo->dma.cur;

	ret = RING_SPACE(evo, NOUVEAU_DMA_SKIPS);
	if (ret)
		return ret;

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING(evo, 0);

	ret = RING_SPACE(evo, 11);
	if (ret)
		return ret;
	BEGIN_RING(evo, 0, NV50_EVO_UNK84, 2);
	OUT_RING(evo, NV50_EVO_UNK84_NOTIFY_DISABLED);
	OUT_RING(evo, NV50_EVO_DMA_NOTIFY_HANDLE_NONE);
	BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, FB_DMA), 1);
	OUT_RING(evo, NV50_EVO_CRTC_FB_DMA_HANDLE_NONE);
	BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK0800), 1);
	OUT_RING(evo, 0);
	BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, DISPLAY_START), 1);
	OUT_RING(evo, 0);
	BEGIN_RING(evo, 0, NV50_EVO_CRTC(0, UNK082C), 1);
	OUT_RING(evo, 0);
	FIRE_RING(evo);
	if (!nv_wait(0x640004, 0xffffffff, evo->dma.put << 2))
		NV_ERROR(dev, "evo pushbuf stalled\n");

	/* enable clock change interrupts. */
	nv_wr32(dev, 0x610028, 0x00010001);
	nv_wr32(dev, NV50_PDISPLAY_INTR_EN, (NV50_PDISPLAY_INTR_EN_CLK_UNK10 |
					     NV50_PDISPLAY_INTR_EN_CLK_UNK20 |
					     NV50_PDISPLAY_INTR_EN_CLK_UNK40));

	/* enable hotplug interrupts */
	hpd_en[0] = hpd_en[1] = 0;
	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct nouveau_connector *conn = nouveau_connector(connector);
		struct dcb_gpio_entry *gpio;

		if (conn->dcb->gpio_tag == 0xff)
			continue;

		gpio = nouveau_bios_gpio_entry(dev, conn->dcb->gpio_tag);
		if (!gpio)
			continue;

		hpd_en[gpio->line >> 4] |= (0x00010001 << (gpio->line & 0xf));
	}

	nv_wr32(dev, 0xe054, 0xffffffff);
	nv_wr32(dev, 0xe050, hpd_en[0]);
	if (dev_priv->chipset >= 0x90) {
		nv_wr32(dev, 0xe074, 0xffffffff);
		nv_wr32(dev, 0xe070, hpd_en[1]);
	}

	return 0;
}

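/* Blank all CRTCs, push a final update through EVO, wait for a vblank on
 * each enabled CRTC and for the SORs to go idle, then stop the display
 * channel and mask the display and hotplug interrupts.
 */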
static int nv50_display_disable(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_crtc *drm_crtc;
	int ret, i;

	NV_DEBUG_KMS(dev, "\n");

	list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
		struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);

		nv50_crtc_blank(crtc, true);
	}

	ret = RING_SPACE(dev_priv->evo, 2);
	if (ret == 0) {
		BEGIN_RING(dev_priv->evo, 0, NV50_EVO_UPDATE, 1);
		OUT_RING(dev_priv->evo, 0);
	}
	FIRE_RING(dev_priv->evo);

	/* Almost like ack'ing a vblank interrupt, maybe in the spirit of
	 * cleaning up?
	 */
	list_for_each_entry(drm_crtc, &dev->mode_config.crtc_list, head) {
		struct nouveau_crtc *crtc = nouveau_crtc(drm_crtc);
		uint32_t mask = NV50_PDISPLAY_INTR_1_VBLANK_CRTC_(crtc->index);

		if (!crtc->base.enabled)
			continue;

		nv_wr32(dev, NV50_PDISPLAY_INTR_1, mask);
		if (!nv_wait(NV50_PDISPLAY_INTR_1, mask, mask)) {
			NV_ERROR(dev, "timeout: (0x610024 & 0x%08x) == "
				      "0x%08x\n", mask, mask);
			NV_ERROR(dev, "0x610024 = 0x%08x\n",
				 nv_rd32(dev, NV50_PDISPLAY_INTR_1));
		}
	}

	nv_wr32(dev, NV50_PDISPLAY_CHANNEL_STAT(0), 0);
	nv_wr32(dev, NV50_PDISPLAY_CTRL_STATE, 0);
	if (!nv_wait(NV50_PDISPLAY_CHANNEL_STAT(0), 0x1e0000, 0)) {
		NV_ERROR(dev, "timeout: (0x610200 & 0x1e0000) == 0\n");
		NV_ERROR(dev, "0x610200 = 0x%08x\n",
			 nv_rd32(dev, NV50_PDISPLAY_CHANNEL_STAT(0)));
	}

	for (i = 0; i < 3; i++) {
		if (!nv_wait(NV50_PDISPLAY_SOR_DPMS_STATE(i),
			     NV50_PDISPLAY_SOR_DPMS_STATE_WAIT, 0)) {
			NV_ERROR(dev, "timeout: SOR_DPMS_STATE_WAIT(%d) == 0\n", i);
			NV_ERROR(dev, "SOR_DPMS_STATE(%d) = 0x%08x\n", i,
				 nv_rd32(dev, NV50_PDISPLAY_SOR_DPMS_STATE(i)));
		}
	}

	/* disable interrupts. */
	nv_wr32(dev, NV50_PDISPLAY_INTR_EN, 0x00000000);

	/* disable hotplug interrupts */
	nv_wr32(dev, 0xe054, 0xffffffff);
	nv_wr32(dev, 0xe050, 0x00000000);
	if (dev_priv->chipset >= 0x90) {
		nv_wr32(dev, 0xe074, 0xffffffff);
		nv_wr32(dev, 0xe070, 0x00000000);
	}

	return 0;
}

int nv50_display_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct dcb_table *dcb = &dev_priv->vbios.dcb;
	int ret, i;

	NV_DEBUG_KMS(dev, "\n");

	/* init basic kernel modesetting */
	drm_mode_config_init(dev);

	/* Initialise some optional connector properties. */
	drm_mode_create_scaling_mode_property(dev);
	drm_mode_create_dithering_property(dev);

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;

	dev->mode_config.funcs = (void *)&nouveau_mode_config_funcs;

	dev->mode_config.max_width = 8192;
	dev->mode_config.max_height = 8192;

	dev->mode_config.fb_base = dev_priv->fb_phys;

	/* Create EVO channel */
	ret = nv50_evo_channel_new(dev, &dev_priv->evo);
	if (ret) {
		NV_ERROR(dev, "Error creating EVO channel: %d\n", ret);
		return ret;
	}

	/* Create CRTC objects */
	for (i = 0; i < 2; i++)
		nv50_crtc_create(dev, i);

	/* We set up the encoders from the BIOS table */
	for (i = 0 ; i < dcb->entries; i++) {
		struct dcb_entry *entry = &dcb->entry[i];

		if (entry->location != DCB_LOC_ON_CHIP) {
			NV_WARN(dev, "Off-chip encoder %d/%d unsupported\n",
				entry->type, ffs(entry->or) - 1);
			continue;
		}

		switch (entry->type) {
		case OUTPUT_TMDS:
		case OUTPUT_LVDS:
		case OUTPUT_DP:
			nv50_sor_create(dev, entry);
			break;
		case OUTPUT_ANALOG:
			nv50_dac_create(dev, entry);
			break;
		default:
			NV_WARN(dev, "DCB encoder %d unknown\n", entry->type);
			continue;
		}
	}

	for (i = 0 ; i < dcb->connector.entries; i++) {
		if (i != 0 && dcb->connector.entry[i].index2 ==
			      dcb->connector.entry[i - 1].index2)
			continue;
		nouveau_connector_create(dev, &dcb->connector.entry[i]);
	}

	ret = nv50_display_init(dev);
	if (ret) {
		nv50_display_destroy(dev);
		return ret;
	}

	return 0;
}

int nv50_display_destroy(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_DEBUG_KMS(dev, "\n");

	drm_mode_config_cleanup(dev);

	nv50_display_disable(dev);
	nv50_evo_channel_del(&dev_priv->evo);

	return 0;
}

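/* Read the MODE_CTRL value for a DAC or SOR, accounting for the register
 * layout change on nv90+ (the 0x92 and 0xa0 chipsets keep the nv50 layout
 * for SORs).
 */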
static inline uint32_t
nv50_display_mode_ctrl(struct drm_device *dev, bool sor, int or)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t mc;

	if (sor) {
		if (dev_priv->chipset < 0x90 ||
		    dev_priv->chipset == 0x92 || dev_priv->chipset == 0xa0)
			mc = nv_rd32(dev, NV50_PDISPLAY_SOR_MODE_CTRL_P(or));
		else
			mc = nv_rd32(dev, NV90_PDISPLAY_SOR_MODE_CTRL_P(or));
	} else {
		mc = nv_rd32(dev, NV50_PDISPLAY_DAC_MODE_CTRL_P(or));
	}

	return mc;
}

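/* Work out which head the current display interrupt is for, and find the
 * DCB entry of the encoder driving it, by scanning the DAC and SOR
 * MODE_CTRL registers.
 */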
static int
nv50_display_irq_head(struct drm_device *dev, int *phead,
		      struct dcb_entry **pdcbent)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t unk30 = nv_rd32(dev, NV50_PDISPLAY_UNK30_CTRL);
	uint32_t dac = 0, sor = 0;
	int head, i, or = 0, type = OUTPUT_ANY;

	/* We're assuming that head 0 *or* head 1 will be active here,
	 * and not both.  I'm not sure if the hw will even signal both
	 * ever, but it definitely shouldn't for us as we commit each
	 * CRTC separately, and submission will be blocked by the GPU
	 * until we handle each in turn.
	 */
	NV_DEBUG_KMS(dev, "0x610030: 0x%08x\n", unk30);
	head = ffs((unk30 >> 9) & 3) - 1;
	if (head < 0)
		return -EINVAL;

	/* This assumes CRTCs are never bound to multiple encoders, which
	 * should be the case.
	 */
	for (i = 0; i < 3 && type == OUTPUT_ANY; i++) {
		uint32_t mc = nv50_display_mode_ctrl(dev, false, i);
		if (!(mc & (1 << head)))
			continue;

		switch ((mc >> 8) & 0xf) {
		case 0: type = OUTPUT_ANALOG; break;
		case 1: type = OUTPUT_TV; break;
		default:
			NV_ERROR(dev, "unknown dac mode_ctrl: 0x%08x\n", dac);
			return -1;
		}

		or = i;
	}

	for (i = 0; i < 4 && type == OUTPUT_ANY; i++) {
		uint32_t mc = nv50_display_mode_ctrl(dev, true, i);
		if (!(mc & (1 << head)))
			continue;

		switch ((mc >> 8) & 0xf) {
		case 0: type = OUTPUT_LVDS; break;
		case 1: type = OUTPUT_TMDS; break;
		case 2: type = OUTPUT_TMDS; break;
		case 5: type = OUTPUT_TMDS; break;
		case 8: type = OUTPUT_DP; break;
		case 9: type = OUTPUT_DP; break;
		default:
			NV_ERROR(dev, "unknown sor mode_ctrl: 0x%08x\n", sor);
			return -1;
		}

		or = i;
	}

	NV_DEBUG_KMS(dev, "type %d, or %d\n", type, or);
	if (type == OUTPUT_ANY) {
		NV_ERROR(dev, "unknown encoder!!\n");
		return -1;
	}

	for (i = 0; i < dev_priv->vbios.dcb.entries; i++) {
		struct dcb_entry *dcbent = &dev_priv->vbios.dcb.entry[i];

		if (dcbent->type != type)
			continue;

		if (!(dcbent->or & (1 << or)))
			continue;

		*phead = head;
		*pdcbent = dcbent;
		return 0;
	}

	NV_ERROR(dev, "no DCB entry for %d %d\n", dac != 0, or);
	return 0;
}

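/* Select the BIOS script to run for this output: the low bits come from
 * MODE_CTRL, with extra flags for dual-link (0x0100) and 24-bit (0x0200)
 * LVDS panels and high-clock TMDS, plus optional user script overrides.
 */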
static uint32_t
nv50_display_script_select(struct drm_device *dev, struct dcb_entry *dcbent,
			   int pxclk)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_connector *nv_connector = NULL;
	struct drm_encoder *encoder;
	struct nvbios *bios = &dev_priv->vbios;
	uint32_t mc, script = 0, or;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);

		if (nv_encoder->dcb != dcbent)
			continue;

		nv_connector = nouveau_encoder_connector_get(nv_encoder);
		break;
	}

	or = ffs(dcbent->or) - 1;
	mc = nv50_display_mode_ctrl(dev, dcbent->type != OUTPUT_ANALOG, or);
	switch (dcbent->type) {
	case OUTPUT_LVDS:
		script = (mc >> 8) & 0xf;
		if (bios->fp_no_ddc) {
			if (bios->fp.dual_link)
				script |= 0x0100;
			if (bios->fp.if_is_24bit)
				script |= 0x0200;
		} else {
			if (pxclk >= bios->fp.duallink_transition_clk) {
				script |= 0x0100;
				if (bios->fp.strapless_is_24bit & 2)
					script |= 0x0200;
			} else
			if (bios->fp.strapless_is_24bit & 1)
				script |= 0x0200;

			if (nv_connector && nv_connector->edid &&
			    (nv_connector->edid->revision >= 4) &&
			    (nv_connector->edid->input & 0x70) >= 0x20)
				script |= 0x0200;
		}

		if (nouveau_uscript_lvds >= 0) {
			NV_INFO(dev, "override script 0x%04x with 0x%04x "
				     "for output LVDS-%d\n", script,
				     nouveau_uscript_lvds, or);
			script = nouveau_uscript_lvds;
		}
		break;
	case OUTPUT_TMDS:
		script = (mc >> 8) & 0xf;
		if (pxclk >= 165000)
			script |= 0x0100;

		if (nouveau_uscript_tmds >= 0) {
			NV_INFO(dev, "override script 0x%04x with 0x%04x "
				     "for output TMDS-%d\n", script,
				     nouveau_uscript_tmds, or);
			script = nouveau_uscript_tmds;
		}
		break;
	case OUTPUT_DP:
		script = (mc >> 8) & 0xf;
		break;
	case OUTPUT_ANALOG:
		script = 0xff;
		break;
	default:
		NV_ERROR(dev, "modeset on unsupported output type!\n");
		break;
	}

	return script;
}

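/* On vblank, release every channel waiting on a vblank semaphore by
 * writing its release value into its notifier buffer.
 */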
static void
nv50_display_vblank_crtc_handler(struct drm_device *dev, int crtc)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan;
	struct list_head *entry, *tmp;

	list_for_each_safe(entry, tmp, &dev_priv->vbl_waiting) {
		chan = list_entry(entry, struct nouveau_channel, nvsw.vbl_wait);

		nouveau_bo_wr32(chan->notifier_bo, chan->nvsw.vblsem_offset,
				chan->nvsw.vblsem_rval);
		list_del(&chan->nvsw.vbl_wait);
	}
}

static void
nv50_display_vblank_handler(struct drm_device *dev, uint32_t intr)
{
	intr &= NV50_PDISPLAY_INTR_1_VBLANK_CRTC;

	if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_0)
		nv50_display_vblank_crtc_handler(dev, 0);

	if (intr & NV50_PDISPLAY_INTR_1_VBLANK_CRTC_1)
		nv50_display_vblank_crtc_handler(dev, 1);

	nv_wr32(dev, NV50_PDISPLAY_INTR_EN, nv_rd32(dev,
		NV50_PDISPLAY_INTR_EN) & ~intr);
	nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr);
}

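/* The CLK_UNK10/20/40 interrupts appear to be stages of a modeset: UNK10
 * runs the pre-disconnect script, UNK20 programs the new pixel clock and
 * runs the main output script, and UNK40 runs the post-clock-change
 * script.
 */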
static void
nv50_display_unk10_handler(struct drm_device *dev)
{
	struct dcb_entry *dcbent;
	int head, ret;

	ret = nv50_display_irq_head(dev, &head, &dcbent);
	if (ret)
		goto ack;

	nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) & ~8);

	nouveau_bios_run_display_table(dev, dcbent, 0, -1);

ack:
	nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK10);
	nv_wr32(dev, 0x610030, 0x80000000);
}

static void
nv50_display_unk20_dp_hack(struct drm_device *dev, struct dcb_entry *dcb)
{
	int or = ffs(dcb->or) - 1, link = !(dcb->dpconf.sor.link & 1);
	struct drm_encoder *encoder;
	uint32_t tmp, unk0 = 0, unk1 = 0;

	if (dcb->type != OUTPUT_DP)
		return;

	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
		struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);

		if (nv_encoder->dcb == dcb) {
			unk0 = nv_encoder->dp.unk0;
			unk1 = nv_encoder->dp.unk1;
			break;
		}
	}

	if (unk0 || unk1) {
		tmp = nv_rd32(dev, NV50_SOR_DP_CTRL(or, link));
		tmp &= 0xfffffe03;
		nv_wr32(dev, NV50_SOR_DP_CTRL(or, link), tmp | unk0);

		tmp = nv_rd32(dev, NV50_SOR_DP_UNK128(or, link));
		tmp &= 0xfef080c0;
		nv_wr32(dev, NV50_SOR_DP_UNK128(or, link), tmp | unk1);
	}
}

static void
nv50_display_unk20_handler(struct drm_device *dev)
{
	struct dcb_entry *dcbent;
	uint32_t tmp, pclk, script;
	int head, or, ret;

	ret = nv50_display_irq_head(dev, &head, &dcbent);
	if (ret)
		goto ack;
	or = ffs(dcbent->or) - 1;
	pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff;
	script = nv50_display_script_select(dev, dcbent, pclk);

	NV_DEBUG_KMS(dev, "head %d pxclk: %dKHz\n", head, pclk);

	if (dcbent->type != OUTPUT_DP)
		nouveau_bios_run_display_table(dev, dcbent, 0, -2);

	nv50_crtc_set_clock(dev, head, pclk);

	nouveau_bios_run_display_table(dev, dcbent, script, pclk);

	nv50_display_unk20_dp_hack(dev, dcbent);

	tmp = nv_rd32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head));
	tmp &= ~0x000000f;
	nv_wr32(dev, NV50_PDISPLAY_CRTC_CLK_CTRL2(head), tmp);

	if (dcbent->type != OUTPUT_ANALOG) {
		tmp = nv_rd32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or));
		tmp &= ~0x00000f0f;
		if (script & 0x0100)
			tmp |= 0x00000101;
		nv_wr32(dev, NV50_PDISPLAY_SOR_CLK_CTRL2(or), tmp);
	} else {
		nv_wr32(dev, NV50_PDISPLAY_DAC_CLK_CTRL2(or), 0);
	}

ack:
	nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK20);
	nv_wr32(dev, 0x610030, 0x80000000);
}

static void
nv50_display_unk40_handler(struct drm_device *dev)
{
	struct dcb_entry *dcbent;
	int head, pclk, script, ret;

	ret = nv50_display_irq_head(dev, &head, &dcbent);
	if (ret)
		goto ack;
	pclk = nv_rd32(dev, NV50_PDISPLAY_CRTC_P(head, CLOCK)) & 0x3fffff;
	script = nv50_display_script_select(dev, dcbent, pclk);

	nouveau_bios_run_display_table(dev, dcbent, script, -pclk);

ack:
	nv_wr32(dev, NV50_PDISPLAY_INTR_1, NV50_PDISPLAY_INTR_1_CLK_UNK40);
	nv_wr32(dev, 0x610030, 0x80000000);
	nv_wr32(dev, 0x619494, nv_rd32(dev, 0x619494) | 8);
}

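/* Bottom half for the clock-change interrupts: keep servicing the
 * UNK10/20/40 stages until none are pending, then re-enable PMC
 * interrupt delivery.
 */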
void
nv50_display_irq_handler_bh(struct work_struct *work)
{
	struct drm_nouveau_private *dev_priv =
		container_of(work, struct drm_nouveau_private, irq_work);
	struct drm_device *dev = dev_priv->dev;

	for (;;) {
		uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
		uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);

		NV_DEBUG_KMS(dev, "PDISPLAY_INTR_BH 0x%08x 0x%08x\n", intr0, intr1);

		if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK10)
			nv50_display_unk10_handler(dev);
		else
		if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK20)
			nv50_display_unk20_handler(dev);
		else
		if (intr1 & NV50_PDISPLAY_INTR_1_CLK_UNK40)
			nv50_display_unk40_handler(dev);
		else
			break;
	}

	nv_wr32(dev, NV03_PMC_INTR_EN_0, 1);
}

static void
nv50_display_error_handler(struct drm_device *dev)
{
	uint32_t addr, data;

	nv_wr32(dev, NV50_PDISPLAY_INTR_0, 0x00010000);
	addr = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_ADDR);
	data = nv_rd32(dev, NV50_PDISPLAY_TRAPPED_DATA);

	NV_ERROR(dev, "EvoCh %d Mthd 0x%04x Data 0x%08x (0x%04x 0x%02x)\n",
		 0, addr & 0xffc, data, addr >> 16, (addr >> 12) & 0xf);

	nv_wr32(dev, NV50_PDISPLAY_TRAPPED_ADDR, 0x90000000);
}

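/* Hotplug bottom half: read the pending hotplug bits, log plug/unplug for
 * each affected connector, force active DisplayPort encoders on or off to
 * match, ack the events and kick the DRM hotplug helper.
 */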
void
nv50_display_irq_hotplug_bh(struct work_struct *work)
{
	struct drm_nouveau_private *dev_priv =
		container_of(work, struct drm_nouveau_private, hpd_work);
	struct drm_device *dev = dev_priv->dev;
	struct drm_connector *connector;
	const uint32_t gpio_reg[4] = { 0xe104, 0xe108, 0xe280, 0xe284 };
	uint32_t unplug_mask, plug_mask, change_mask;
	uint32_t hpd0, hpd1 = 0;

	hpd0 = nv_rd32(dev, 0xe054) & nv_rd32(dev, 0xe050);
	if (dev_priv->chipset >= 0x90)
		hpd1 = nv_rd32(dev, 0xe074) & nv_rd32(dev, 0xe070);

	plug_mask   = (hpd0 & 0x0000ffff) | (hpd1 << 16);
	unplug_mask = (hpd0 >> 16) | (hpd1 & 0xffff0000);
	change_mask = plug_mask | unplug_mask;

	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
		struct drm_encoder_helper_funcs *helper;
		struct nouveau_connector *nv_connector =
			nouveau_connector(connector);
		struct nouveau_encoder *nv_encoder;
		struct dcb_gpio_entry *gpio;
		uint32_t reg;
		bool plugged;

		if (!nv_connector->dcb)
			continue;

		gpio = nouveau_bios_gpio_entry(dev, nv_connector->dcb->gpio_tag);
		if (!gpio || !(change_mask & (1 << gpio->line)))
			continue;

		reg = nv_rd32(dev, gpio_reg[gpio->line >> 3]);
		plugged = !!(reg & (4 << ((gpio->line & 7) << 2)));
		NV_INFO(dev, "%splugged %s\n", plugged ? "" : "un",
			drm_get_connector_name(connector));

		if (!connector->encoder || !connector->encoder->crtc ||
		    !connector->encoder->crtc->enabled)
			continue;

		nv_encoder = nouveau_encoder(connector->encoder);
		helper = connector->encoder->helper_private;

		if (nv_encoder->dcb->type != OUTPUT_DP)
			continue;

		if (plugged)
			helper->dpms(connector->encoder, DRM_MODE_DPMS_ON);
		else
			helper->dpms(connector->encoder, DRM_MODE_DPMS_OFF);
	}

	nv_wr32(dev, 0xe054, nv_rd32(dev, 0xe054));
	if (dev_priv->chipset >= 0x90)
		nv_wr32(dev, 0xe074, nv_rd32(dev, 0xe074));

	drm_helper_hpd_irq_event(dev);
}

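/* Top-level PDISPLAY interrupt handler: queue the hotplug and clock-change
 * work items, handle EVO errors and vblanks directly, and report anything
 * we do not recognise.
 */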
void
nv50_display_irq_handler(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t delayed = 0;

	if (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_HOTPLUG) {
		if (!work_pending(&dev_priv->hpd_work))
			queue_work(dev_priv->wq, &dev_priv->hpd_work);
	}

	while (nv_rd32(dev, NV50_PMC_INTR_0) & NV50_PMC_INTR_0_DISPLAY) {
		uint32_t intr0 = nv_rd32(dev, NV50_PDISPLAY_INTR_0);
		uint32_t intr1 = nv_rd32(dev, NV50_PDISPLAY_INTR_1);
		uint32_t clock;

		NV_DEBUG_KMS(dev, "PDISPLAY_INTR 0x%08x 0x%08x\n", intr0, intr1);

		if (!intr0 && !(intr1 & ~delayed))
			break;

		if (intr0 & 0x00010000) {
			nv50_display_error_handler(dev);
			intr0 &= ~0x00010000;
		}

		if (intr1 & NV50_PDISPLAY_INTR_1_VBLANK_CRTC) {
			nv50_display_vblank_handler(dev, intr1);
			intr1 &= ~NV50_PDISPLAY_INTR_1_VBLANK_CRTC;
		}

		clock = (intr1 & (NV50_PDISPLAY_INTR_1_CLK_UNK10 |
				  NV50_PDISPLAY_INTR_1_CLK_UNK20 |
				  NV50_PDISPLAY_INTR_1_CLK_UNK40));
		if (clock) {
			nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
			if (!work_pending(&dev_priv->irq_work))
				queue_work(dev_priv->wq, &dev_priv->irq_work);
			delayed |= clock;
			intr1 &= ~clock;
		}

		if (intr0) {
			NV_ERROR(dev, "unknown PDISPLAY_INTR_0: 0x%08x\n", intr0);
			nv_wr32(dev, NV50_PDISPLAY_INTR_0, intr0);
		}

		if (intr1) {
			NV_ERROR(dev,
				 "unknown PDISPLAY_INTR_1: 0x%08x\n", intr1);
			nv_wr32(dev, NV50_PDISPLAY_INTR_1, intr1);
		}
	}
}