nvd0_display.c

/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include <linux/dma-mapping.h>

#include "drmP.h"
#include "drm_crtc_helper.h"

#include "nouveau_drv.h"
#include "nouveau_connector.h"
#include "nouveau_encoder.h"
#include "nouveau_crtc.h"
#include "nouveau_fb.h"
#include "nv50_display.h"

#define MEM_SYNC  0xe0000001
#define MEM_VRAM  0xe0010000
#include "nouveau_dma.h"

struct nvd0_display {
        struct nouveau_gpuobj *mem;
        struct {
                dma_addr_t handle;
                u32 *ptr;
        } evo[1];
        struct tasklet_struct tasklet;
        u32 modeset;
};

static struct nvd0_display *
nvd0_display(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        return dev_priv->engine.display.priv;
}
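
/* Submit a single method/data pair through the channel's PIO "immediate
 * command" registers (0x610700+) instead of the DMA push buffer.  Bit 31
 * of the method register appears to act as a busy flag; the wait below
 * polls for the hardware to consume the command.
 */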

static int
evo_icmd(struct drm_device *dev, int id, u32 mthd, u32 data)
{
        int ret = 0;
        nv_mask(dev, 0x610700 + (id * 0x10), 0x00000001, 0x00000001);
        nv_wr32(dev, 0x610704 + (id * 0x10), data);
        nv_mask(dev, 0x610704 + (id * 0x10), 0x80000ffc, 0x80000000 | mthd);
        if (!nv_wait(dev, 0x610704 + (id * 0x10), 0x80000000, 0x00000000))
                ret = -EBUSY;
        nv_mask(dev, 0x610700 + (id * 0x10), 0x00000001, 0x00000000);
        return ret;
}
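
/* The EVO channel is fed through a page-sized DMA push buffer.  evo_wait()
 * returns a pointer to at least "nr" free dwords, wrapping to the start of
 * the buffer when necessary (0x20000000 appears to be a jump-to-start
 * command) and waiting for the hardware GET pointer (0x640004) to return to
 * zero.  evo_kick() then advances the PUT pointer (0x640000) to submit
 * whatever was written.
 */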

static u32 *
evo_wait(struct drm_device *dev, int id, int nr)
{
        struct nvd0_display *disp = nvd0_display(dev);
        u32 put = nv_rd32(dev, 0x640000 + (id * 0x1000)) / 4;

        if (put + nr >= (PAGE_SIZE / 4)) {
                disp->evo[id].ptr[put] = 0x20000000;

                nv_wr32(dev, 0x640000 + (id * 0x1000), 0x00000000);
                if (!nv_wait(dev, 0x640004 + (id * 0x1000), ~0, 0x00000000)) {
                        NV_ERROR(dev, "evo %d dma stalled\n", id);
                        return NULL;
                }

                put = 0;
        }

        return disp->evo[id].ptr + put;
}

static void
evo_kick(u32 *push, struct drm_device *dev, int id)
{
        struct nvd0_display *disp = nvd0_display(dev);
        nv_wr32(dev, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2);
}
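
/* Method header layout used by the macros below: the method offset sits in
 * the low bits and the data word count above bit 18; each evo_data() then
 * appends one data dword after the header.
 */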

#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
#define evo_data(p,d)   *((p)++) = (d)

static struct drm_crtc *
nvd0_display_crtc_get(struct drm_encoder *encoder)
{
        return nouveau_encoder(encoder)->crtc;
}

/******************************************************************************
 * CRTC
 *****************************************************************************/
static int
nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
{
        struct drm_device *dev = nv_crtc->base.dev;
        u32 *push, mode;

        mode = 0x00000000;
        if (on) {
                /* 0x11: 6bpc dynamic 2x2
                 * 0x13: 8bpc dynamic 2x2
                 * 0x19: 6bpc static 2x2
                 * 0x1b: 8bpc static 2x2
                 * 0x21: 6bpc temporal
                 * 0x23: 8bpc temporal
                 */
                mode = 0x00000011;
        }

        push = evo_wait(dev, 0, 4);
        if (push) {
                evo_mthd(push, 0x0490 + (nv_crtc->index * 0x300), 1);
                evo_data(push, mode);
                if (update) {
                        evo_mthd(push, 0x0080, 1);
                        evo_data(push, 0x00000000);
                }
                evo_kick(push, dev, 0);
        }

        return 0;
}
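
/* The ratios below are fixed point with 19 fractional bits:
 * (native / requested) << 19.  For aspect-preserving scaling the smaller of
 * the two ratios is applied to both axes, so the scaled image always fits
 * inside the panel's native mode.
 */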

static int
nvd0_crtc_set_scale(struct nouveau_crtc *nv_crtc, int type, bool update)
{
        struct drm_display_mode *mode = &nv_crtc->base.mode;
        struct drm_device *dev = nv_crtc->base.dev;
        struct nouveau_connector *nv_connector;
        u32 *push, outX, outY;

        outX = mode->hdisplay;
        outY = mode->vdisplay;

        nv_connector = nouveau_crtc_connector_get(nv_crtc);
        if (nv_connector && nv_connector->native_mode) {
                struct drm_display_mode *native = nv_connector->native_mode;
                u32 xratio = (native->hdisplay << 19) / mode->hdisplay;
                u32 yratio = (native->vdisplay << 19) / mode->vdisplay;

                switch (type) {
                case DRM_MODE_SCALE_ASPECT:
                        if (xratio > yratio) {
                                outX = (mode->hdisplay * yratio) >> 19;
                                outY = (mode->vdisplay * yratio) >> 19;
                        } else {
                                outX = (mode->hdisplay * xratio) >> 19;
                                outY = (mode->vdisplay * xratio) >> 19;
                        }
                        break;
                case DRM_MODE_SCALE_FULLSCREEN:
                        outX = native->hdisplay;
                        outY = native->vdisplay;
                        break;
                default:
                        break;
                }
        }

        push = evo_wait(dev, 0, 16);
        if (push) {
                evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
                evo_data(push, (outY << 16) | outX);
                evo_data(push, (outY << 16) | outX);
                evo_data(push, (outY << 16) | outX);
                evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
                evo_data(push, 0x00000000);
                evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
                evo_data(push, (mode->vdisplay << 16) | mode->hdisplay);
                if (update) {
                        evo_mthd(push, 0x0080, 1);
                        evo_data(push, 0x00000000);
                }
                evo_kick(push, dev, 0);
        }

        return 0;
}

static int
nvd0_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
                    int x, int y, bool update)
{
        struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
        u32 *push;

        push = evo_wait(fb->dev, 0, 16);
        if (push) {
                evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
                evo_data(push, nvfb->nvbo->bo.offset >> 8);
                evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4);
                evo_data(push, (fb->height << 16) | fb->width);
                evo_data(push, nvfb->r_pitch);
                evo_data(push, nvfb->r_format);
                evo_data(push, nvfb->r_dma);
                evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1);
                evo_data(push, (y << 16) | x);
                if (update) {
                        evo_mthd(push, 0x0080, 1);
                        evo_data(push, 0x00000000);
                }
                evo_kick(push, fb->dev, 0);
        }

        nv_crtc->fb.tile_flags = nvfb->r_dma;
        return 0;
}

static void
nvd0_crtc_cursor_show(struct nouveau_crtc *nv_crtc, bool show, bool update)
{
        struct drm_device *dev = nv_crtc->base.dev;
        u32 *push = evo_wait(dev, 0, 16);
        if (push) {
                if (show) {
                        evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
                        evo_data(push, 0x85000000);
                        evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
                        evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
                        evo_data(push, MEM_VRAM);
                } else {
                        evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1);
                        evo_data(push, 0x05000000);
                        evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
                        evo_data(push, 0x00000000);
                }

                if (update) {
                        evo_mthd(push, 0x0080, 1);
                        evo_data(push, 0x00000000);
                }

                evo_kick(push, dev, 0);
        }
}

static void
nvd0_crtc_dpms(struct drm_crtc *crtc, int mode)
{
}

static void
nvd0_crtc_prepare(struct drm_crtc *crtc)
{
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
        u32 *push;

        push = evo_wait(crtc->dev, 0, 2);
        if (push) {
                evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
                evo_data(push, 0x00000000);
                evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1);
                evo_data(push, 0x03000000);
                evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
                evo_data(push, 0x00000000);
                evo_kick(push, crtc->dev, 0);
        }

        nvd0_crtc_cursor_show(nv_crtc, false, false);
}

static void
nvd0_crtc_commit(struct drm_crtc *crtc)
{
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
        u32 *push;

        push = evo_wait(crtc->dev, 0, 32);
        if (push) {
                evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
                evo_data(push, nv_crtc->fb.tile_flags);
                evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4);
                evo_data(push, 0x83000000);
                evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
                evo_data(push, 0x00000000);
                evo_data(push, 0x00000000);
                evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
                evo_data(push, MEM_VRAM);
                evo_mthd(push, 0x0430 + (nv_crtc->index * 0x300), 1);
                evo_data(push, 0xffffff00);
                evo_kick(push, crtc->dev, 0);
        }

        nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, true);
}

static bool
nvd0_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
                     struct drm_display_mode *adjusted_mode)
{
        return true;
}

static int
nvd0_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
{
        struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
        int ret;

        ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
        if (ret)
                return ret;

        if (old_fb) {
                nvfb = nouveau_framebuffer(old_fb);
                nouveau_bo_unpin(nvfb->nvbo);
        }

        return 0;
}
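
/* Derive the EVO timing parameters from the DRM mode: sync widths (minus
 * one), front and back porches, and the sync-start-to-blank-end ("ss2be")
 * and sync-start-to-display-end ("ss2de") distances the hardware wants.
 * The syncs word carries the sync polarity flags.
 */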

static int
nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
                   struct drm_display_mode *mode, int x, int y,
                   struct drm_framebuffer *old_fb)
{
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
        struct nouveau_connector *nv_connector;
        u32 htotal = mode->htotal;
        u32 vtotal = mode->vtotal;
        u32 hsyncw = mode->hsync_end - mode->hsync_start - 1;
        u32 vsyncw = mode->vsync_end - mode->vsync_start - 1;
        u32 hfrntp = mode->hsync_start - mode->hdisplay;
        u32 vfrntp = mode->vsync_start - mode->vdisplay;
        u32 hbackp = mode->htotal - mode->hsync_end;
        u32 vbackp = mode->vtotal - mode->vsync_end;
        u32 hss2be = hsyncw + hbackp;
        u32 vss2be = vsyncw + vbackp;
        u32 hss2de = htotal - hfrntp;
        u32 vss2de = vtotal - vfrntp;
        u32 syncs, *push;
        int ret;

        syncs = 0x00000001;
        if (mode->flags & DRM_MODE_FLAG_NHSYNC)
                syncs |= 0x00000008;
        if (mode->flags & DRM_MODE_FLAG_NVSYNC)
                syncs |= 0x00000010;

        ret = nvd0_crtc_swap_fbs(crtc, old_fb);
        if (ret)
                return ret;

        push = evo_wait(crtc->dev, 0, 64);
        if (push) {
                evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 5);
                evo_data(push, 0x00000000);
                evo_data(push, (vtotal << 16) | htotal);
                evo_data(push, (vsyncw << 16) | hsyncw);
                evo_data(push, (vss2be << 16) | hss2be);
                evo_data(push, (vss2de << 16) | hss2de);
                evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
                evo_data(push, 0x00000000); /* ??? */
                evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
                evo_data(push, mode->clock * 1000);
                evo_data(push, 0x00200000); /* ??? */
                evo_data(push, mode->clock * 1000);
                evo_mthd(push, 0x0404 + (nv_crtc->index * 0x300), 1);
                evo_data(push, syncs);
                evo_kick(push, crtc->dev, 0);
        }

        nv_connector = nouveau_crtc_connector_get(nv_crtc);
        nvd0_crtc_set_dither(nv_crtc, nv_connector->use_dithering, false);
        nvd0_crtc_set_scale(nv_crtc, nv_connector->scaling_mode, false);
        nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
        return 0;
}

static int
nvd0_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
                        struct drm_framebuffer *old_fb)
{
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
        int ret;

        ret = nvd0_crtc_swap_fbs(crtc, old_fb);
        if (ret)
                return ret;

        nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
        return 0;
}

static int
nvd0_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
                               struct drm_framebuffer *fb, int x, int y,
                               enum mode_set_atomic state)
{
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
        nvd0_crtc_set_image(nv_crtc, fb, x, y, true);
        return 0;
}

static void
nvd0_crtc_lut_load(struct drm_crtc *crtc)
{
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
        void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
        int i;

        for (i = 0; i < 256; i++) {
                writew(0x6000 + (nv_crtc->lut.r[i] >> 2), lut + (i * 0x20) + 0);
                writew(0x6000 + (nv_crtc->lut.g[i] >> 2), lut + (i * 0x20) + 2);
                writew(0x6000 + (nv_crtc->lut.b[i] >> 2), lut + (i * 0x20) + 4);
        }
}

static int
nvd0_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
                     uint32_t handle, uint32_t width, uint32_t height)
{
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
        struct drm_device *dev = crtc->dev;
        struct drm_gem_object *gem;
        struct nouveau_bo *nvbo;
        bool visible = (handle != 0);
        int i, ret = 0;

        if (visible) {
                if (width != 64 || height != 64)
                        return -EINVAL;

                gem = drm_gem_object_lookup(dev, file_priv, handle);
                if (unlikely(!gem))
                        return -ENOENT;
                nvbo = nouveau_gem_object(gem);

                ret = nouveau_bo_map(nvbo);
                if (ret == 0) {
                        for (i = 0; i < 64 * 64; i++) {
                                u32 v = nouveau_bo_rd32(nvbo, i);
                                nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v);
                        }
                        nouveau_bo_unmap(nvbo);
                }

                drm_gem_object_unreference_unlocked(gem);
        }

        if (visible != nv_crtc->cursor.visible) {
                nvd0_crtc_cursor_show(nv_crtc, visible, true);
                nv_crtc->cursor.visible = visible;
        }

        return ret;
}

static int
nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
        const u32 data = (y << 16) | x;

        nv_wr32(crtc->dev, 0x64d084 + (nv_crtc->index * 0x1000), data);
        nv_wr32(crtc->dev, 0x64d080 + (nv_crtc->index * 0x1000), 0x00000000);
        return 0;
}

static void
nvd0_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
                    uint32_t start, uint32_t size)
{
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
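        /* Clamp to the 256 hardware LUT entries; iterating beyond
         * start + size would run off the caller's r/g/b arrays.
         */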
        u32 end = min_t(u32, start + size, 256);
        u32 i;

        for (i = start; i < end; i++) {
                nv_crtc->lut.r[i] = r[i];
                nv_crtc->lut.g[i] = g[i];
                nv_crtc->lut.b[i] = b[i];
        }

        nvd0_crtc_lut_load(crtc);
}

static void
nvd0_crtc_destroy(struct drm_crtc *crtc)
{
        struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
        nouveau_bo_unmap(nv_crtc->cursor.nvbo);
        nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
        nouveau_bo_unmap(nv_crtc->lut.nvbo);
        nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
        drm_crtc_cleanup(crtc);
        kfree(crtc);
}

static const struct drm_crtc_helper_funcs nvd0_crtc_hfunc = {
        .dpms = nvd0_crtc_dpms,
        .prepare = nvd0_crtc_prepare,
        .commit = nvd0_crtc_commit,
        .mode_fixup = nvd0_crtc_mode_fixup,
        .mode_set = nvd0_crtc_mode_set,
        .mode_set_base = nvd0_crtc_mode_set_base,
        .mode_set_base_atomic = nvd0_crtc_mode_set_base_atomic,
        .load_lut = nvd0_crtc_lut_load,
};

static const struct drm_crtc_funcs nvd0_crtc_func = {
        .cursor_set = nvd0_crtc_cursor_set,
        .cursor_move = nvd0_crtc_cursor_move,
        .gamma_set = nvd0_crtc_gamma_set,
        .set_config = drm_crtc_helper_set_config,
        .destroy = nvd0_crtc_destroy,
};

static int
nvd0_crtc_create(struct drm_device *dev, int index)
{
        struct nouveau_crtc *nv_crtc;
        struct drm_crtc *crtc;
        int ret, i;

        nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
        if (!nv_crtc)
                return -ENOMEM;

        nv_crtc->index = index;
        nv_crtc->set_dither = nvd0_crtc_set_dither;
        nv_crtc->set_scale = nvd0_crtc_set_scale;
        for (i = 0; i < 256; i++) {
                nv_crtc->lut.r[i] = i << 8;
                nv_crtc->lut.g[i] = i << 8;
                nv_crtc->lut.b[i] = i << 8;
        }

        crtc = &nv_crtc->base;
        drm_crtc_init(dev, crtc, &nvd0_crtc_func);
        drm_crtc_helper_add(crtc, &nvd0_crtc_hfunc);
        drm_mode_crtc_set_gamma_size(crtc, 256);

        ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
                             0, 0x0000, &nv_crtc->cursor.nvbo);
        if (!ret) {
                ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
                if (!ret)
                        ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
                if (ret)
                        nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
        }

        if (ret)
                goto out;

        ret = nouveau_bo_new(dev, 8192, 0x100, TTM_PL_FLAG_VRAM,
                             0, 0x0000, &nv_crtc->lut.nvbo);
        if (!ret) {
                ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
                if (!ret)
                        ret = nouveau_bo_map(nv_crtc->lut.nvbo);
                if (ret)
                        nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
        }

        if (ret)
                goto out;

        nvd0_crtc_lut_load(crtc);

out:
        if (ret)
                nvd0_crtc_destroy(crtc);
        return ret;
}

/******************************************************************************
 * DAC
 *****************************************************************************/
static void
nvd0_dac_dpms(struct drm_encoder *encoder, int mode)
{
        struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
        struct drm_device *dev = encoder->dev;
        int or = nv_encoder->or;
        u32 dpms_ctrl;

        dpms_ctrl = 0x80000000;
        if (mode == DRM_MODE_DPMS_STANDBY || mode == DRM_MODE_DPMS_OFF)
                dpms_ctrl |= 0x00000001;
        if (mode == DRM_MODE_DPMS_SUSPEND || mode == DRM_MODE_DPMS_OFF)
                dpms_ctrl |= 0x00000004;

        nv_wait(dev, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
        nv_mask(dev, 0x61a004 + (or * 0x0800), 0xc000007f, dpms_ctrl);
        nv_wait(dev, 0x61a004 + (or * 0x0800), 0x80000000, 0x00000000);
}

static bool
nvd0_dac_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
                    struct drm_display_mode *adjusted_mode)
{
        struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
        struct nouveau_connector *nv_connector;

        nv_connector = nouveau_encoder_connector_get(nv_encoder);
        if (nv_connector && nv_connector->native_mode) {
                if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
                        int id = adjusted_mode->base.id;
                        *adjusted_mode = *nv_connector->native_mode;
                        adjusted_mode->base.id = id;
                }
        }

        return true;
}

static void
nvd0_dac_prepare(struct drm_encoder *encoder)
{
}

static void
nvd0_dac_commit(struct drm_encoder *encoder)
{
}

static void
nvd0_dac_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
                  struct drm_display_mode *adjusted_mode)
{
        struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
        struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
        u32 *push;

        nvd0_dac_dpms(encoder, DRM_MODE_DPMS_ON);

        push = evo_wait(encoder->dev, 0, 4);
        if (push) {
                evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 2);
                evo_data(push, 1 << nv_crtc->index);
                evo_data(push, 0x00ff);
                evo_kick(push, encoder->dev, 0);
        }

        nv_encoder->crtc = encoder->crtc;
}

static void
nvd0_dac_disconnect(struct drm_encoder *encoder)
{
        struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
        struct drm_device *dev = encoder->dev;
        u32 *push;

        if (nv_encoder->crtc) {
                nvd0_crtc_prepare(nv_encoder->crtc);

                push = evo_wait(dev, 0, 4);
                if (push) {
                        evo_mthd(push, 0x0180 + (nv_encoder->or * 0x20), 1);
                        evo_data(push, 0x00000000);
                        evo_mthd(push, 0x0080, 1);
                        evo_data(push, 0x00000000);
                        evo_kick(push, dev, 0);
                }

                nv_encoder->crtc = NULL;
        }
}
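
/* Analog load detection: switch on the test load, give the sense circuit
 * time to settle, then read back the load state.  0x38000000 appears to
 * mean all three RGB lines saw a termination, i.e. a monitor is attached.
 */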

static enum drm_connector_status
nvd0_dac_detect(struct drm_encoder *encoder, struct drm_connector *connector)
{
        enum drm_connector_status status = connector_status_disconnected;
        struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
        struct drm_device *dev = encoder->dev;
        int or = nv_encoder->or;
        u32 load;

        nv_wr32(dev, 0x61a00c + (or * 0x800), 0x00100000);
        udelay(9500);
        nv_wr32(dev, 0x61a00c + (or * 0x800), 0x80000000);

        load = nv_rd32(dev, 0x61a00c + (or * 0x800));
        if ((load & 0x38000000) == 0x38000000)
                status = connector_status_connected;

        nv_wr32(dev, 0x61a00c + (or * 0x800), 0x00000000);
        return status;
}

static void
nvd0_dac_destroy(struct drm_encoder *encoder)
{
        drm_encoder_cleanup(encoder);
        kfree(encoder);
}

static const struct drm_encoder_helper_funcs nvd0_dac_hfunc = {
        .dpms = nvd0_dac_dpms,
        .mode_fixup = nvd0_dac_mode_fixup,
        .prepare = nvd0_dac_prepare,
        .commit = nvd0_dac_commit,
        .mode_set = nvd0_dac_mode_set,
        .disable = nvd0_dac_disconnect,
        .get_crtc = nvd0_display_crtc_get,
        .detect = nvd0_dac_detect
};

static const struct drm_encoder_funcs nvd0_dac_func = {
        .destroy = nvd0_dac_destroy,
};

static int
nvd0_dac_create(struct drm_connector *connector, struct dcb_entry *dcbe)
{
        struct drm_device *dev = connector->dev;
        struct nouveau_encoder *nv_encoder;
        struct drm_encoder *encoder;

        nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
        if (!nv_encoder)
                return -ENOMEM;
        nv_encoder->dcb = dcbe;
        nv_encoder->or = ffs(dcbe->or) - 1;

        encoder = to_drm_encoder(nv_encoder);
        encoder->possible_crtcs = dcbe->heads;
        encoder->possible_clones = 0;
        drm_encoder_init(dev, encoder, &nvd0_dac_func, DRM_MODE_ENCODER_DAC);
        drm_encoder_helper_add(encoder, &nvd0_dac_hfunc);

        drm_mode_connector_attach_encoder(connector, encoder);
        return 0;
}

/******************************************************************************
 * SOR
 *****************************************************************************/
static void
nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
{
        struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
        struct drm_device *dev = encoder->dev;
        struct drm_encoder *partner;
        int or = nv_encoder->or;
        u32 dpms_ctrl;

        nv_encoder->last_dpms = mode;
        list_for_each_entry(partner, &dev->mode_config.encoder_list, head) {
                struct nouveau_encoder *nv_partner = nouveau_encoder(partner);

                if (partner->encoder_type != DRM_MODE_ENCODER_TMDS)
                        continue;

                if (nv_partner != nv_encoder &&
                    nv_partner->dcb->or == nv_encoder->or) {
                        if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
                                return;
                        break;
                }
        }

        dpms_ctrl = (mode == DRM_MODE_DPMS_ON);
        dpms_ctrl |= 0x80000000;

        nv_wait(dev, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
        nv_mask(dev, 0x61c004 + (or * 0x0800), 0x80000001, dpms_ctrl);
        nv_wait(dev, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
        nv_wait(dev, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
}

static bool
nvd0_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
                    struct drm_display_mode *adjusted_mode)
{
        struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
        struct nouveau_connector *nv_connector;

        nv_connector = nouveau_encoder_connector_get(nv_encoder);
        if (nv_connector && nv_connector->native_mode) {
                if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
                        int id = adjusted_mode->base.id;
                        *adjusted_mode = *nv_connector->native_mode;
                        adjusted_mode->base.id = id;
                }
        }

        return true;
}

static void
nvd0_sor_prepare(struct drm_encoder *encoder)
{
}

static void
nvd0_sor_commit(struct drm_encoder *encoder)
{
}
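
/* mode_ctrl selects the head in its low bits and, for TMDS, what appears
 * to be the link configuration in bits 11:8 (0x100 link A, 0x200 link B,
 * 0x500 dual-link for pixel clocks >= 165MHz).  or_config mirrors those
 * link bits and adds panel properties for LVDS (0x0100 dual-link, 0x0200
 * 24-bit), taken from VBIOS straps, an SPWG EDID byte, or the dual-link
 * transition clock.
 */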

static void
nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *umode,
                  struct drm_display_mode *mode)
{
        struct drm_nouveau_private *dev_priv = encoder->dev->dev_private;
        struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
        struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
        struct nouveau_connector *nv_connector;
        struct nvbios *bios = &dev_priv->vbios;
        u32 mode_ctrl = (1 << nv_crtc->index);
        u32 *push, or_config;

        nv_connector = nouveau_encoder_connector_get(nv_encoder);
        switch (nv_encoder->dcb->type) {
        case OUTPUT_TMDS:
                if (nv_encoder->dcb->sorconf.link & 1) {
                        if (mode->clock < 165000)
                                mode_ctrl |= 0x00000100;
                        else
                                mode_ctrl |= 0x00000500;
                } else {
                        mode_ctrl |= 0x00000200;
                }

                or_config = (mode_ctrl & 0x00000f00) >> 8;
                if (mode->clock >= 165000)
                        or_config |= 0x0100;
                break;
        case OUTPUT_LVDS:
                or_config = (mode_ctrl & 0x00000f00) >> 8;
                if (bios->fp_no_ddc) {
                        if (bios->fp.dual_link)
                                or_config |= 0x0100;
                        if (bios->fp.if_is_24bit)
                                or_config |= 0x0200;
                } else {
                        if (nv_connector->dcb->type == DCB_CONNECTOR_LVDS_SPWG) {
                                if (((u8 *)nv_connector->edid)[121] == 2)
                                        or_config |= 0x0100;
                        } else
                        if (mode->clock >= bios->fp.duallink_transition_clk) {
                                or_config |= 0x0100;
                        }

                        if (or_config & 0x0100) {
                                if (bios->fp.strapless_is_24bit & 2)
                                        or_config |= 0x0200;
                        } else {
                                if (bios->fp.strapless_is_24bit & 1)
                                        or_config |= 0x0200;
                        }

                        if (nv_connector->base.display_info.bpc == 8)
                                or_config |= 0x0200;
                }
                break;
        default:
                BUG_ON(1);
                break;
        }

        nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON);

        push = evo_wait(encoder->dev, 0, 4);
        if (push) {
                evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 2);
                evo_data(push, mode_ctrl);
                evo_data(push, or_config);
                evo_kick(push, encoder->dev, 0);
        }

        nv_encoder->crtc = encoder->crtc;
}

static void
nvd0_sor_disconnect(struct drm_encoder *encoder)
{
        struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
        struct drm_device *dev = encoder->dev;
        u32 *push;

        if (nv_encoder->crtc) {
                nvd0_crtc_prepare(nv_encoder->crtc);

                push = evo_wait(dev, 0, 4);
                if (push) {
                        evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
                        evo_data(push, 0x00000000);
                        evo_mthd(push, 0x0080, 1);
                        evo_data(push, 0x00000000);
                        evo_kick(push, dev, 0);
                }

                nv_encoder->crtc = NULL;
                nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
        }
}

static void
nvd0_sor_destroy(struct drm_encoder *encoder)
{
        drm_encoder_cleanup(encoder);
        kfree(encoder);
}

static const struct drm_encoder_helper_funcs nvd0_sor_hfunc = {
        .dpms = nvd0_sor_dpms,
        .mode_fixup = nvd0_sor_mode_fixup,
        .prepare = nvd0_sor_prepare,
        .commit = nvd0_sor_commit,
        .mode_set = nvd0_sor_mode_set,
        .disable = nvd0_sor_disconnect,
        .get_crtc = nvd0_display_crtc_get,
};

static const struct drm_encoder_funcs nvd0_sor_func = {
        .destroy = nvd0_sor_destroy,
};

static int
nvd0_sor_create(struct drm_connector *connector, struct dcb_entry *dcbe)
{
        struct drm_device *dev = connector->dev;
        struct nouveau_encoder *nv_encoder;
        struct drm_encoder *encoder;

        nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
        if (!nv_encoder)
                return -ENOMEM;
        nv_encoder->dcb = dcbe;
        nv_encoder->or = ffs(dcbe->or) - 1;
        nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;

        encoder = to_drm_encoder(nv_encoder);
        encoder->possible_crtcs = dcbe->heads;
        encoder->possible_clones = 0;
        drm_encoder_init(dev, encoder, &nvd0_sor_func, DRM_MODE_ENCODER_TMDS);
        drm_encoder_helper_add(encoder, &nvd0_sor_hfunc);

        drm_mode_connector_attach_encoder(connector, encoder);
        return 0;
}

/******************************************************************************
 * IRQ
 *****************************************************************************/
static void
debug_irq(struct drm_device *dev, int i)
{
        if (drm_debug & (DRM_UT_DRIVER | DRM_UT_KMS)) {
                NV_INFO(dev, "PDISP: modeset req %d\n", i);
                NV_INFO(dev, " STAT: 0x%08x 0x%08x 0x%08x\n",
                        nv_rd32(dev, 0x6101d0),
                        nv_rd32(dev, 0x6101d4), nv_rd32(dev, 0x6109d4));
                for (i = 0; i < 8; i++) {
                        NV_INFO(dev, " %s%d: 0x%08x 0x%08x\n",
                                i < 4 ? "DAC" : "SOR", i,
                                nv_rd32(dev, 0x640180 + (i * 0x20)),
                                nv_rd32(dev, 0x660180 + (i * 0x20)));
                }
        }
}
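
/* Map an output resource id back to its DCB entry: ids 0-3 are DACs and
 * 4-7 are SORs; for SORs the protocol field of the method control value
 * (bits 11:8) distinguishes LVDS from the TMDS link configurations.
 */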

static struct dcb_entry *
lookup_dcb(struct drm_device *dev, int id, u32 mc)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        int type, or, i;

        if (id < 4) {
                type = OUTPUT_ANALOG;
                or = id;
        } else {
                switch (mc & 0x00000f00) {
                case 0x00000000: type = OUTPUT_LVDS; break;
                case 0x00000100: type = OUTPUT_TMDS; break;
                case 0x00000200: type = OUTPUT_TMDS; break;
                case 0x00000500: type = OUTPUT_TMDS; break;
                default:
                        NV_ERROR(dev, "PDISP: unknown SOR mc 0x%08x\n", mc);
                        return NULL;
                }

                or = id - 4;
        }

        for (i = 0; i < dev_priv->vbios.dcb.entries; i++) {
                struct dcb_entry *dcb = &dev_priv->vbios.dcb.entry[i];
                if (dcb->type == type && (dcb->or & (1 << or)))
                        return dcb;
        }

        NV_ERROR(dev, "PDISP: DCB for %d/0x%08x not found\n", id, mc);
        return NULL;
}
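
/* The modeset "supervisor" interrupt fires in three stages, dispatched
 * from the tasklet below.  Stage 1 runs the VBIOS script that tears down
 * the previous output configuration, stage 2 reprograms the pixel clock
 * and output routing, and stage 4 runs the post-modeset script once the
 * new configuration is up (the unk1/2/4 names simply follow the status
 * bits; the exact hardware semantics aren't fully understood).  Each
 * handler acks its stage by writing 0x80000000 to 0x6101d0.
 */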

static void
nvd0_display_unk1_handler(struct drm_device *dev)
{
        struct dcb_entry *dcb;
        u32 mask, crtc;
        int i;

        mask = nv_rd32(dev, 0x6101d4);
        crtc = 0;
        if (!mask) {
                mask = nv_rd32(dev, 0x6109d4);
                crtc = 1;
        }

        debug_irq(dev, 1);

        for (i = 0; mask && i < 8; i++) {
                u32 mcc = nv_rd32(dev, 0x640180 + (i * 0x20));
                if (!(mcc & (1 << crtc)))
                        continue;

                dcb = lookup_dcb(dev, i, mcc);
                if (!dcb)
                        continue;

                nouveau_bios_run_display_table(dev, 0x0000, -1, dcb, crtc);
        }

        nv_wr32(dev, 0x6101d4, 0x00000000);
        nv_wr32(dev, 0x6109d4, 0x00000000);
        nv_wr32(dev, 0x6101d0, 0x80000000);
}

static void
nvd0_display_unk2_handler(struct drm_device *dev)
{
        struct dcb_entry *dcb;
        u32 mask, crtc, pclk;
        u32 or, tmp;
        int i;

        mask = nv_rd32(dev, 0x6101d4);
        crtc = 0;
        if (!mask) {
                mask = nv_rd32(dev, 0x6109d4);
                crtc = 1;
        }

        debug_irq(dev, 2);

        for (i = 0; mask && i < 8; i++) {
                u32 mcc = nv_rd32(dev, 0x640180 + (i * 0x20));
                if (!(mcc & (1 << crtc)))
                        continue;

                dcb = lookup_dcb(dev, i, mcc);
                if (!dcb)
                        continue;

                nouveau_bios_run_display_table(dev, 0x0000, -2, dcb, crtc);
        }

        pclk = nv_rd32(dev, 0x660450 + (crtc * 0x300)) / 1000;
        if (mask & 0x00010000) {
                nv50_crtc_set_clock(dev, crtc, pclk);
        }

        for (i = 0; mask && i < 8; i++) {
                u32 mcp = nv_rd32(dev, 0x660180 + (i * 0x20));
                u32 cfg = nv_rd32(dev, 0x660184 + (i * 0x20));
                if (!(mcp & (1 << crtc)))
                        continue;

                dcb = lookup_dcb(dev, i, mcp);
                if (!dcb)
                        continue;
                or = ffs(dcb->or) - 1;

                nouveau_bios_run_display_table(dev, cfg, pclk, dcb, crtc);

                nv_wr32(dev, 0x612200 + (crtc * 0x800), 0x00000000);
                switch (dcb->type) {
                case OUTPUT_ANALOG:
                        nv_wr32(dev, 0x612280 + (or * 0x800), 0x00000000);
                        break;
                case OUTPUT_TMDS:
                case OUTPUT_LVDS:
                        if (cfg & 0x00000100)
                                tmp = 0x00000101;
                        else
                                tmp = 0x00000000;

                        nv_mask(dev, 0x612300 + (or * 0x800), 0x00000707, tmp);
                        break;
                default:
                        break;
                }

                break;
        }

        nv_wr32(dev, 0x6101d4, 0x00000000);
        nv_wr32(dev, 0x6109d4, 0x00000000);
        nv_wr32(dev, 0x6101d0, 0x80000000);
}

static void
nvd0_display_unk4_handler(struct drm_device *dev)
{
        struct dcb_entry *dcb;
        u32 mask, crtc;
        int pclk, i;

        mask = nv_rd32(dev, 0x6101d4);
        crtc = 0;
        if (!mask) {
                mask = nv_rd32(dev, 0x6109d4);
                crtc = 1;
        }

        debug_irq(dev, 4);

        pclk = nv_rd32(dev, 0x660450 + (crtc * 0x300)) / 1000;

        for (i = 0; mask && i < 8; i++) {
                u32 mcp = nv_rd32(dev, 0x660180 + (i * 0x20));
                u32 cfg = nv_rd32(dev, 0x660184 + (i * 0x20));
                if (!(mcp & (1 << crtc)))
                        continue;

                dcb = lookup_dcb(dev, i, mcp);
                if (!dcb)
                        continue;

                nouveau_bios_run_display_table(dev, cfg, -pclk, dcb, crtc);
        }

        nv_wr32(dev, 0x6101d4, 0x00000000);
        nv_wr32(dev, 0x6109d4, 0x00000000);
        nv_wr32(dev, 0x6101d0, 0x80000000);
}

static void
nvd0_display_bh(unsigned long data)
{
        struct drm_device *dev = (struct drm_device *)data;
        struct nvd0_display *disp = nvd0_display(dev);

        if (disp->modeset & 0x00000001)
                nvd0_display_unk1_handler(dev);
        if (disp->modeset & 0x00000002)
                nvd0_display_unk2_handler(dev);
        if (disp->modeset & 0x00000004)
                nvd0_display_unk4_handler(dev);
}
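
/* Top-half interrupt handler.  Bit 1 reports an EVO channel exception (the
 * offending method/data are logged and the channel told to resume), bit 20
 * is the modeset supervisor (deferred to the tasklet), and bits 24/25 look
 * like per-head status, which is simply acknowledged here.
 */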

static void
nvd0_display_intr(struct drm_device *dev)
{
        struct nvd0_display *disp = nvd0_display(dev);
        u32 intr = nv_rd32(dev, 0x610088);

        if (intr & 0x00000002) {
                u32 stat = nv_rd32(dev, 0x61009c);
                int chid = ffs(stat) - 1;
                if (chid >= 0) {
                        u32 mthd = nv_rd32(dev, 0x6101f0 + (chid * 12));
                        u32 data = nv_rd32(dev, 0x6101f4 + (chid * 12));
                        u32 unkn = nv_rd32(dev, 0x6101f8 + (chid * 12));

                        NV_INFO(dev, "EvoCh: chid %d mthd 0x%04x data 0x%08x "
                                     "0x%08x 0x%08x\n",
                                chid, (mthd & 0x0000ffc), data, mthd, unkn);
                        nv_wr32(dev, 0x61009c, (1 << chid));
                        nv_wr32(dev, 0x6101f0 + (chid * 12), 0x90000000);
                }

                intr &= ~0x00000002;
        }

        if (intr & 0x00100000) {
                u32 stat = nv_rd32(dev, 0x6100ac);

                if (stat & 0x00000007) {
                        disp->modeset = stat;
                        tasklet_schedule(&disp->tasklet);

                        nv_wr32(dev, 0x6100ac, (stat & 0x00000007));
                        stat &= ~0x00000007;
                }

                if (stat) {
                        NV_INFO(dev, "PDISP: unknown intr24 0x%08x\n", stat);
                        nv_wr32(dev, 0x6100ac, stat);
                }

                intr &= ~0x00100000;
        }

        if (intr & 0x01000000) {
                u32 stat = nv_rd32(dev, 0x6100bc);
                nv_wr32(dev, 0x6100bc, stat);
                intr &= ~0x01000000;
        }

        if (intr & 0x02000000) {
                u32 stat = nv_rd32(dev, 0x6108bc);
                nv_wr32(dev, 0x6108bc, stat);
                intr &= ~0x02000000;
        }

        if (intr)
                NV_INFO(dev, "PDISP: unknown intr 0x%08x\n", intr);
}

/******************************************************************************
 * Init
 *****************************************************************************/
static void
nvd0_display_fini(struct drm_device *dev)
{
        int i;

        /* fini cursors */
        for (i = 14; i >= 13; i--) {
                if (!(nv_rd32(dev, 0x610490 + (i * 0x10)) & 0x00000001))
                        continue;

                nv_mask(dev, 0x610490 + (i * 0x10), 0x00000001, 0x00000000);
                nv_wait(dev, 0x610490 + (i * 0x10), 0x00010000, 0x00000000);
                nv_mask(dev, 0x610090, 1 << i, 0x00000000);
                nv_mask(dev, 0x6100a0, 1 << i, 0x00000000);
        }

        /* fini master */
        if (nv_rd32(dev, 0x610490) & 0x00000010) {
                nv_mask(dev, 0x610490, 0x00000010, 0x00000000);
                nv_mask(dev, 0x610490, 0x00000003, 0x00000000);
                nv_wait(dev, 0x610490, 0x80000000, 0x00000000);
                nv_mask(dev, 0x610090, 0x00000001, 0x00000000);
                nv_mask(dev, 0x6100a0, 0x00000001, 0x00000000);
        }
}
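
/* Bring up the display engine: mirror the current DAC/SOR/head state into
 * the shadow area the core channel reads from, point the hardware at our
 * hash table and DMA objects, start the core (master) channel on its push
 * buffer and the two cursor channels, then submit an initial batch that
 * appears to bind the MEM_SYNC object.
 */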

int
nvd0_display_init(struct drm_device *dev)
{
        struct nvd0_display *disp = nvd0_display(dev);
        u32 *push;
        int i;

        if (nv_rd32(dev, 0x6100ac) & 0x00000100) {
                nv_wr32(dev, 0x6100ac, 0x00000100);
                nv_mask(dev, 0x6194e8, 0x00000001, 0x00000000);
                if (!nv_wait(dev, 0x6194e8, 0x00000002, 0x00000000)) {
                        NV_ERROR(dev, "PDISP: 0x6194e8 0x%08x\n",
                                 nv_rd32(dev, 0x6194e8));
                        return -EBUSY;
                }
        }

        /* nfi what these are exactly, i do know that SOR_MODE_CTRL won't
         * work at all unless you do the SOR part below.
         */
        for (i = 0; i < 3; i++) {
                u32 dac = nv_rd32(dev, 0x61a000 + (i * 0x800));
                nv_wr32(dev, 0x6101c0 + (i * 0x800), dac);
        }

        for (i = 0; i < 4; i++) {
                u32 sor = nv_rd32(dev, 0x61c000 + (i * 0x800));
                nv_wr32(dev, 0x6301c4 + (i * 0x800), sor);
        }

        for (i = 0; i < 2; i++) {
                u32 crtc0 = nv_rd32(dev, 0x616104 + (i * 0x800));
                u32 crtc1 = nv_rd32(dev, 0x616108 + (i * 0x800));
                u32 crtc2 = nv_rd32(dev, 0x61610c + (i * 0x800));
                nv_wr32(dev, 0x6101b4 + (i * 0x800), crtc0);
                nv_wr32(dev, 0x6101b8 + (i * 0x800), crtc1);
                nv_wr32(dev, 0x6101bc + (i * 0x800), crtc2);
        }

        /* point at our hash table / objects, enable interrupts */
        nv_wr32(dev, 0x610010, (disp->mem->vinst >> 8) | 9);
        nv_mask(dev, 0x6100b0, 0x00000307, 0x00000307);

        /* init master */
        nv_wr32(dev, 0x610494, (disp->evo[0].handle >> 8) | 3);
        nv_wr32(dev, 0x610498, 0x00010000);
        nv_wr32(dev, 0x61049c, 0x00000001);
        nv_mask(dev, 0x610490, 0x00000010, 0x00000010);
        nv_wr32(dev, 0x640000, 0x00000000);
        nv_wr32(dev, 0x610490, 0x01000013);
        if (!nv_wait(dev, 0x610490, 0x80000000, 0x00000000)) {
                NV_ERROR(dev, "PDISP: master 0x%08x\n",
                         nv_rd32(dev, 0x610490));
                return -EBUSY;
        }
        nv_mask(dev, 0x610090, 0x00000001, 0x00000001);
        nv_mask(dev, 0x6100a0, 0x00000001, 0x00000001);

        /* init cursors */
        for (i = 13; i <= 14; i++) {
                nv_wr32(dev, 0x610490 + (i * 0x10), 0x00000001);
                if (!nv_wait(dev, 0x610490 + (i * 0x10), 0x00010000, 0x00010000)) {
                        NV_ERROR(dev, "PDISP: curs%d 0x%08x\n", i,
                                 nv_rd32(dev, 0x610490 + (i * 0x10)));
                        return -EBUSY;
                }

                nv_mask(dev, 0x610090, 1 << i, 1 << i);
                nv_mask(dev, 0x6100a0, 1 << i, 1 << i);
        }

        push = evo_wait(dev, 0, 32);
        if (!push)
                return -EBUSY;
        evo_mthd(push, 0x0088, 1);
        evo_data(push, MEM_SYNC);
        evo_mthd(push, 0x0084, 1);
        evo_data(push, 0x00000000);
        evo_mthd(push, 0x0084, 1);
        evo_data(push, 0x80000000);
        evo_mthd(push, 0x008c, 1);
        evo_data(push, 0x00000000);
        evo_kick(push, dev, 0);

        return 0;
}

void
nvd0_display_destroy(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nvd0_display *disp = nvd0_display(dev);
        struct pci_dev *pdev = dev->pdev;

        nvd0_display_fini(dev);

        pci_free_consistent(pdev, PAGE_SIZE, disp->evo[0].ptr, disp->evo[0].handle);
        nouveau_gpuobj_ref(NULL, &disp->mem);
        nouveau_irq_unregister(dev, 26);

        dev_priv->engine.display.priv = NULL;
        kfree(disp);
}

int
nvd0_display_create(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
        struct dcb_table *dcb = &dev_priv->vbios.dcb;
        struct drm_connector *connector, *tmp;
        struct pci_dev *pdev = dev->pdev;
        struct nvd0_display *disp;
        struct dcb_entry *dcbe;
        int ret, i;

        disp = kzalloc(sizeof(*disp), GFP_KERNEL);
        if (!disp)
                return -ENOMEM;
        dev_priv->engine.display.priv = disp;

        /* create crtc objects to represent the hw heads */
        for (i = 0; i < 2; i++) {
                ret = nvd0_crtc_create(dev, i);
                if (ret)
                        goto out;
        }

        /* create encoder/connector objects based on VBIOS DCB table */
        for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
                connector = nouveau_connector_create(dev, dcbe->connector);
                if (IS_ERR(connector))
                        continue;

                if (dcbe->location != DCB_LOC_ON_CHIP) {
                        NV_WARN(dev, "skipping off-chip encoder %d/%d\n",
                                dcbe->type, ffs(dcbe->or) - 1);
                        continue;
                }

                switch (dcbe->type) {
                case OUTPUT_TMDS:
                case OUTPUT_LVDS:
                        nvd0_sor_create(connector, dcbe);
                        break;
                case OUTPUT_ANALOG:
                        nvd0_dac_create(connector, dcbe);
                        break;
                default:
                        NV_WARN(dev, "skipping unsupported encoder %d/%d\n",
                                dcbe->type, ffs(dcbe->or) - 1);
                        continue;
                }
        }

        /* cull any connectors we created that don't have an encoder */
        list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
                if (connector->encoder_ids[0])
                        continue;

                NV_WARN(dev, "%s has no encoders, removing\n",
                        drm_get_connector_name(connector));
                connector->funcs->destroy(connector);
        }

        /* setup interrupt handling */
        tasklet_init(&disp->tasklet, nvd0_display_bh, (unsigned long)dev);
        nouveau_irq_register(dev, 26, nvd0_display_intr);

        /* hash table and dma objects for the memory areas we care about */
        ret = nouveau_gpuobj_new(dev, NULL, 0x4000, 0x10000,
                                 NVOBJ_FLAG_ZERO_ALLOC, &disp->mem);
        if (ret)
                goto out;
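
        /* Layout of disp->mem, as written below: hash-table entries live at
         * offset 0x0000 as pairs of object handle and, apparently,
         * (dma-object offset << 9) | valid bit, while the DMA objects
         * themselves (class 0x49/0x09, base/limit in 256-byte units) start
         * at 0x1000.  MEM_SYNC covers the page at vinst + 0x2000; the
         * others span VRAM with differing target/flag bits.
         */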
        nv_wo32(disp->mem, 0x1000, 0x00000049);
        nv_wo32(disp->mem, 0x1004, (disp->mem->vinst + 0x2000) >> 8);
        nv_wo32(disp->mem, 0x1008, (disp->mem->vinst + 0x2fff) >> 8);
        nv_wo32(disp->mem, 0x100c, 0x00000000);
        nv_wo32(disp->mem, 0x1010, 0x00000000);
        nv_wo32(disp->mem, 0x1014, 0x00000000);
        nv_wo32(disp->mem, 0x0000, MEM_SYNC);
        nv_wo32(disp->mem, 0x0004, (0x1000 << 9) | 0x00000001);

        nv_wo32(disp->mem, 0x1020, 0x00000049);
        nv_wo32(disp->mem, 0x1024, 0x00000000);
        nv_wo32(disp->mem, 0x1028, (dev_priv->vram_size - 1) >> 8);
        nv_wo32(disp->mem, 0x102c, 0x00000000);
        nv_wo32(disp->mem, 0x1030, 0x00000000);
        nv_wo32(disp->mem, 0x1034, 0x00000000);
        nv_wo32(disp->mem, 0x0008, MEM_VRAM);
        nv_wo32(disp->mem, 0x000c, (0x1020 << 9) | 0x00000001);

        nv_wo32(disp->mem, 0x1040, 0x00000009);
        nv_wo32(disp->mem, 0x1044, 0x00000000);
        nv_wo32(disp->mem, 0x1048, (dev_priv->vram_size - 1) >> 8);
        nv_wo32(disp->mem, 0x104c, 0x00000000);
        nv_wo32(disp->mem, 0x1050, 0x00000000);
        nv_wo32(disp->mem, 0x1054, 0x00000000);
        nv_wo32(disp->mem, 0x0010, NvEvoVRAM_LP);
        nv_wo32(disp->mem, 0x0014, (0x1040 << 9) | 0x00000001);

        nv_wo32(disp->mem, 0x1060, 0x0fe00009);
        nv_wo32(disp->mem, 0x1064, 0x00000000);
        nv_wo32(disp->mem, 0x1068, (dev_priv->vram_size - 1) >> 8);
        nv_wo32(disp->mem, 0x106c, 0x00000000);
        nv_wo32(disp->mem, 0x1070, 0x00000000);
        nv_wo32(disp->mem, 0x1074, 0x00000000);
        nv_wo32(disp->mem, 0x0018, NvEvoFB32);
        nv_wo32(disp->mem, 0x001c, (0x1060 << 9) | 0x00000001);

        pinstmem->flush(dev);

        /* push buffers for evo channels */
        disp->evo[0].ptr =
                pci_alloc_consistent(pdev, PAGE_SIZE, &disp->evo[0].handle);
        if (!disp->evo[0].ptr) {
                ret = -ENOMEM;
                goto out;
        }

        ret = nvd0_display_init(dev);
        if (ret)
                goto out;

out:
        if (ret)
                nvd0_display_destroy(dev);
        return ret;
}