/* nvd0_display.c */
/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
  24. #include <linux/dma-mapping.h>
  25. #include "drmP.h"
  26. #include "drm_crtc_helper.h"
  27. #include "nouveau_drv.h"
  28. #include "nouveau_connector.h"
  29. #include "nouveau_encoder.h"
  30. #include "nouveau_crtc.h"
  31. #include "nouveau_fb.h"
  32. #define MEM_SYNC 0xe0000001
  33. #define MEM_VRAM 0xe0010000
/* Driver-private state for the NVD0 (Fermi) display engine. */
struct nvd0_display {
	struct nouveau_gpuobj *mem;	/* hash table + DMA objects used by EVO */
	struct {
		dma_addr_t handle;	/* bus address of the push buffer page */
		u32 *ptr;		/* CPU mapping of the push buffer */
	} evo[1];			/* per-EVO-channel state (master only) */
};
  41. static struct nvd0_display *
  42. nvd0_display(struct drm_device *dev)
  43. {
  44. struct drm_nouveau_private *dev_priv = dev->dev_private;
  45. return dev_priv->engine.display.priv;
  46. }
/* Submit one method/data pair to EVO channel @id through the "immediate
 * command" interface and wait for the hardware to consume it.
 * Returns 0 on success, -EBUSY if the command never completed (the
 * interface is disabled again in either case).
 */
static int
evo_icmd(struct drm_device *dev, int id, u32 mthd, u32 data)
{
	int ret = 0;
	/* enable the immediate-command interface for this channel */
	nv_mask(dev, 0x610700 + (id * 0x10), 0x00000001, 0x00000001);
	nv_wr32(dev, 0x610704 + (id * 0x10), data);
	/* write method + trigger bit, then poll the trigger bit clearing */
	nv_mask(dev, 0x610704 + (id * 0x10), 0x80000ffc, 0x80000000 | mthd);
	if (!nv_wait(dev, 0x610704 + (id * 0x10), 0x80000000, 0x00000000))
		ret = -EBUSY;
	nv_mask(dev, 0x610700 + (id * 0x10), 0x00000001, 0x00000000);
	return ret;
}
/* Reserve space for @nr words in EVO channel @id's push buffer and return
 * a pointer to write them at, or NULL if the channel's DMA engine stalled.
 * If the request would run off the end of the page, a wrap command is
 * written (0x20000000 — presumably a jump-to-start opcode, unverified),
 * PUT is reset to 0 and we wait for GET to follow before reusing the page.
 */
static u32 *
evo_wait(struct drm_device *dev, int id, int nr)
{
	struct nvd0_display *disp = nvd0_display(dev);
	/* 0x640000 is the channel PUT pointer in bytes; index in words */
	u32 put = nv_rd32(dev, 0x640000 + (id * 0x1000)) / 4;
	if (put + nr >= (PAGE_SIZE / 4)) {
		disp->evo[id].ptr[put] = 0x20000000;
		nv_wr32(dev, 0x640000 + (id * 0x1000), 0x00000000);
		/* 0x640004: GET pointer — wait for hw to return to start */
		if (!nv_wait(dev, 0x640004 + (id * 0x1000), ~0, 0x00000000)) {
			NV_ERROR(dev, "evo %d dma stalled\n", id);
			return NULL;
		}
		put = 0;
	}
	return disp->evo[id].ptr + put;
}
  75. static void
  76. evo_kick(u32 *push, struct drm_device *dev, int id)
  77. {
  78. struct nvd0_display *disp = nvd0_display(dev);
  79. nv_wr32(dev, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2);
  80. }
/* Append a method header (word count in bits 18+, method offset below)
 * or a data word to the push buffer, advancing the write pointer. */
#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
#define evo_data(p,d) *((p)++) = (d)
  83. static struct drm_crtc *
  84. nvd0_display_crtc_get(struct drm_encoder *encoder)
  85. {
  86. return nouveau_encoder(encoder)->crtc;
  87. }
  88. /******************************************************************************
  89. * CRTC
  90. *****************************************************************************/
/* Enable/disable dithering on a head.  When @on, 6bpc dynamic 2x2 mode
 * is selected unconditionally (see table below for other encodings).
 * @update: if true, also push an UPDATE (0x0080) so it takes effect now.
 */
static int
nvd0_crtc_set_dither(struct nouveau_crtc *nv_crtc, bool on, bool update)
{
	struct drm_device *dev = nv_crtc->base.dev;
	u32 *push, mode;
	mode = 0x00000000;
	if (on) {
		/* 0x11: 6bpc dynamic 2x2
		 * 0x13: 8bpc dynamic 2x2
		 * 0x19: 6bpc static 2x2
		 * 0x1b: 8bpc static 2x2
		 * 0x21: 6bpc temporal
		 * 0x23: 8bpc temporal
		 */
		mode = 0x00000011;
	}
	push = evo_wait(dev, 0, 4);
	if (push) {
		evo_mthd(push, 0x0490 + (nv_crtc->index * 0x300), 1);
		evo_data(push, mode);
		if (update) {
			evo_mthd(push, 0x0080, 1);
			evo_data(push, 0x00000000);
		}
		evo_kick(push, dev, 0);
	}
	return 0;
}
/* Program the head's scaler/viewport.  Currently scaling is not actually
 * implemented: all three viewport sizes are set to the mode's resolution
 * (i.e. 1:1), regardless of @type.
 * @update: if true, push an UPDATE (0x0080) so the change applies now.
 */
static int
nvd0_crtc_set_scale(struct nouveau_crtc *nv_crtc, int type, bool update)
{
	struct drm_display_mode *mode = &nv_crtc->base.mode;
	struct drm_device *dev = nv_crtc->base.dev;
	u32 *push;
	/*XXX: actually handle scaling */
	push = evo_wait(dev, 0, 16);
	if (push) {
		/* viewport sizes: all forced to the native mode size */
		evo_mthd(push, 0x04c0 + (nv_crtc->index * 0x300), 3);
		evo_data(push, (mode->vdisplay << 16) | mode->hdisplay);
		evo_data(push, (mode->vdisplay << 16) | mode->hdisplay);
		evo_data(push, (mode->vdisplay << 16) | mode->hdisplay);
		evo_mthd(push, 0x0494 + (nv_crtc->index * 0x300), 1);
		evo_data(push, 0x00000000);
		evo_mthd(push, 0x04b0 + (nv_crtc->index * 0x300), 1);
		evo_data(push, 0x00000000);
		evo_mthd(push, 0x04b8 + (nv_crtc->index * 0x300), 1);
		evo_data(push, (mode->vdisplay << 16) | mode->hdisplay);
		if (update) {
			evo_mthd(push, 0x0080, 1);
			evo_data(push, 0x00000000);
		}
		evo_kick(push, dev, 0);
	}
	return 0;
}
/* Point the head's scanout surface at @fb: programs the surface offset,
 * size, pitch, format and storage type.  @x/@y are currently unused
 * (no panning support yet).  Note: no UPDATE is pushed even when
 * @update is true — NOTE(review): looks unfinished, confirm intent.
 */
static int
nvd0_crtc_set_image(struct nouveau_crtc *nv_crtc, struct drm_framebuffer *fb,
		    int x, int y, bool update)
{
	struct nouveau_framebuffer *nvfb = nouveau_framebuffer(fb);
	u32 *push;
	/*XXX*/
	nv_crtc->fb.tile_flags = MEM_VRAM;
	push = evo_wait(fb->dev, 0, 16);
	if (push) {
		evo_mthd(push, 0x0460 + (nv_crtc->index * 0x300), 1);
		evo_data(push, nvfb->nvbo->bo.offset >> 8);	/* 256B units */
		evo_mthd(push, 0x0468 + (nv_crtc->index * 0x300), 4);
		evo_data(push, (fb->height << 16) | fb->width);
		evo_data(push, nvfb->r_pitch);
		evo_data(push, nvfb->r_format);
		evo_data(push, nv_crtc->fb.tile_flags);
		evo_kick(push, fb->dev, 0);
	}
	return 0;
}
/* Show or hide the hardware cursor on a head.  When showing, the cursor
 * image is sourced from the preallocated per-CRTC cursor buffer object.
 * @update: if true, push an UPDATE (0x0080) so it takes effect now.
 */
static void
nvd0_crtc_cursor_show(struct nouveau_crtc *nv_crtc, bool show, bool update)
{
	struct drm_device *dev = nv_crtc->base.dev;
	u32 *push = evo_wait(dev, 0, 16);
	if (push) {
		if (show) {
			evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 2);
			evo_data(push, 0x85000000);	/* enable + format bits */
			evo_data(push, nv_crtc->cursor.nvbo->bo.offset >> 8);
			evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
			evo_data(push, MEM_VRAM);	/* cursor DMA object */
		} else {
			evo_mthd(push, 0x0480 + (nv_crtc->index * 0x300), 1);
			evo_data(push, 0x05000000);	/* disabled */
			evo_mthd(push, 0x048c + (nv_crtc->index * 0x300), 1);
			evo_data(push, 0x00000000);
		}
		if (update) {
			evo_mthd(push, 0x0080, 1);
			evo_data(push, 0x00000000);
		}
		evo_kick(push, dev, 0);
	}
}
/* CRTC DPMS is a no-op here; head state is driven through prepare/commit. */
static void
nvd0_crtc_dpms(struct drm_crtc *crtc, int mode)
{
}
/* Helper .prepare hook: blank the head before a modeset — detach the
 * scanout DMA object, disable the LUT and clear the core channel's
 * surface context, then hide the cursor.
 */
static void
nvd0_crtc_prepare(struct drm_crtc *crtc)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	u32 *push;
	push = evo_wait(crtc->dev, 0, 2);
	if (push) {
		evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
		evo_data(push, 0x00000000);	/* no scanout DMA object */
		evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 1);
		evo_data(push, 0x03000000);	/* LUT disabled */
		evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
		evo_data(push, 0x00000000);
		evo_kick(push, crtc->dev, 0);
	}
	nvd0_crtc_cursor_show(nv_crtc, false, false);
}
/* Helper .commit hook: re-enable the head after a modeset — reattach the
 * scanout DMA object, enable the LUT from its buffer object, and restore
 * cursor visibility (with an UPDATE so everything takes effect).
 */
static void
nvd0_crtc_commit(struct drm_crtc *crtc)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	u32 *push;
	push = evo_wait(crtc->dev, 0, 32);
	if (push) {
		evo_mthd(push, 0x0474 + (nv_crtc->index * 0x300), 1);
		evo_data(push, nv_crtc->fb.tile_flags);
		evo_mthd(push, 0x0440 + (nv_crtc->index * 0x300), 4);
		evo_data(push, 0x83000000);	/* LUT enabled */
		evo_data(push, nv_crtc->lut.nvbo->bo.offset >> 8);
		evo_data(push, 0x00000000);
		evo_data(push, 0x00000000);
		evo_mthd(push, 0x045c + (nv_crtc->index * 0x300), 1);
		evo_data(push, MEM_VRAM);
		evo_kick(push, crtc->dev, 0);
	}
	nvd0_crtc_cursor_show(nv_crtc, nv_crtc->cursor.visible, true);
}
/* No per-CRTC mode fixups are needed; accept the mode unchanged. */
static bool
nvd0_crtc_mode_fixup(struct drm_crtc *crtc, struct drm_display_mode *mode,
		     struct drm_display_mode *adjusted_mode)
{
	return true;
}
  239. static int
  240. nvd0_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb)
  241. {
  242. struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb);
  243. int ret;
  244. ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM);
  245. if (ret)
  246. return ret;
  247. if (old_fb) {
  248. nvfb = nouveau_framebuffer(old_fb);
  249. nouveau_bo_unpin(nvfb->nvbo);
  250. }
  251. return 0;
  252. }
/* Full modeset for a head: pin the new framebuffer, program timings and
 * pixel clock, then apply dither/scale/image state (without individual
 * UPDATEs; the caller's commit pushes one).  The derived timing values
 * are expressed relative to the start of sync, hence names like
 * "hss2be" = horizontal sync-start to blanking-end.
 */
static int
nvd0_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
		   struct drm_display_mode *mode, int x, int y,
		   struct drm_framebuffer *old_fb)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	struct nouveau_connector *nv_connector;
	u32 htotal = mode->htotal;
	u32 vtotal = mode->vtotal;
	u32 hsyncw = mode->hsync_end - mode->hsync_start - 1;	/* sync width - 1 */
	u32 vsyncw = mode->vsync_end - mode->vsync_start - 1;
	u32 hfrntp = mode->hsync_start - mode->hdisplay;	/* front porch */
	u32 vfrntp = mode->vsync_start - mode->vdisplay;
	u32 hbackp = mode->htotal - mode->hsync_end;		/* back porch */
	u32 vbackp = mode->vtotal - mode->vsync_end;
	u32 hss2be = hsyncw + hbackp;	/* sync start -> blanking end */
	u32 vss2be = vsyncw + vbackp;
	u32 hss2de = htotal - hfrntp;	/* sync start -> display end */
	u32 vss2de = vtotal - vfrntp;
	u32 hstart = 0;
	u32 vstart = 0;
	u32 *push;
	int ret;
	/* make sure the new scanout buffer is resident in VRAM first */
	ret = nvd0_crtc_swap_fbs(crtc, old_fb);
	if (ret)
		return ret;
	push = evo_wait(crtc->dev, 0, 64);
	if (push) {
		evo_mthd(push, 0x0410 + (nv_crtc->index * 0x300), 5);
		evo_data(push, (vstart << 16) | hstart);
		evo_data(push, (vtotal << 16) | htotal);
		evo_data(push, (vsyncw << 16) | hsyncw);
		evo_data(push, (vss2be << 16) | hss2be);
		evo_data(push, (vss2de << 16) | hss2de);
		evo_mthd(push, 0x042c + (nv_crtc->index * 0x300), 1);
		evo_data(push, 0x00000000); /* ??? */
		evo_mthd(push, 0x0450 + (nv_crtc->index * 0x300), 3);
		evo_data(push, mode->clock * 1000);	/* pixel clock, Hz */
		evo_data(push, 0x00200000); /* ??? */
		evo_data(push, mode->clock * 1000);
		evo_mthd(push, 0x0408 + (nv_crtc->index * 0x300), 1);
		evo_data(push, 0x31ec6000); /* ??? */
		evo_kick(push, crtc->dev, 0);
	}
	/* apply per-connector properties, deferring the UPDATE */
	nv_connector = nouveau_crtc_connector_get(nv_crtc);
	nvd0_crtc_set_dither(nv_crtc, nv_connector->use_dithering, false);
	nvd0_crtc_set_scale(nv_crtc, nv_connector->scaling_mode, false);
	nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, false);
	return 0;
}
  303. static int
  304. nvd0_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
  305. struct drm_framebuffer *old_fb)
  306. {
  307. struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
  308. int ret;
  309. ret = nvd0_crtc_swap_fbs(crtc, old_fb);
  310. if (ret)
  311. return ret;
  312. nvd0_crtc_set_image(nv_crtc, crtc->fb, x, y, true);
  313. return 0;
  314. }
  315. static int
  316. nvd0_crtc_mode_set_base_atomic(struct drm_crtc *crtc,
  317. struct drm_framebuffer *fb, int x, int y,
  318. enum mode_set_atomic state)
  319. {
  320. struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
  321. nvd0_crtc_set_image(nv_crtc, fb, x, y, true);
  322. return 0;
  323. }
/* Upload the CRTC's software gamma table into the hardware LUT buffer.
 * Each of the 256 entries occupies 8 bytes (R/G/B 16-bit words plus
 * padding); the stored 16-bit values are narrowed to 14 bits (>> 2).
 */
static void
nvd0_crtc_lut_load(struct drm_crtc *crtc)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	void __iomem *lut = nvbo_kmap_obj_iovirtual(nv_crtc->lut.nvbo);
	int i;
	for (i = 0; i < 256; i++) {
		writew(nv_crtc->lut.r[i] >> 2, lut + 8*i + 0);
		writew(nv_crtc->lut.g[i] >> 2, lut + 8*i + 2);
		writew(nv_crtc->lut.b[i] >> 2, lut + 8*i + 4);
	}
}
/* Legacy cursor_set ioctl: copy the user-supplied 64x64 cursor image
 * (looked up by GEM @handle) into the preallocated cursor buffer, then
 * show/hide the cursor if visibility changed.  @handle == 0 hides it.
 * Returns 0 on success, -EINVAL for unsupported sizes, -ENOENT for a
 * bad handle, or the nouveau_bo_map() error.
 */
static int
nvd0_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
		     uint32_t handle, uint32_t width, uint32_t height)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	struct drm_device *dev = crtc->dev;
	struct drm_gem_object *gem;
	struct nouveau_bo *nvbo;
	bool visible = (handle != 0);
	int i, ret = 0;
	if (visible) {
		/* hardware cursor is fixed at 64x64 */
		if (width != 64 || height != 64)
			return -EINVAL;
		gem = drm_gem_object_lookup(dev, file_priv, handle);
		if (unlikely(!gem))
			return -ENOENT;
		nvbo = nouveau_gem_object(gem);
		ret = nouveau_bo_map(nvbo);
		if (ret == 0) {
			/* word-by-word copy into the cursor bo */
			for (i = 0; i < 64 * 64; i++) {
				u32 v = nouveau_bo_rd32(nvbo, i);
				nouveau_bo_wr32(nv_crtc->cursor.nvbo, i, v);
			}
			nouveau_bo_unmap(nvbo);
		}
		drm_gem_object_unreference_unlocked(gem);
	}
	if (visible != nv_crtc->cursor.visible) {
		nvd0_crtc_cursor_show(nv_crtc, visible, true);
		nv_crtc->cursor.visible = visible;
	}
	return ret;
}
/* Move the hardware cursor: write the packed (y,x) position, then poke
 * the companion register to latch it.
 */
static int
nvd0_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	const u32 data = (y << 16) | x;
	nv_wr32(crtc->dev, 0x64d084 + (nv_crtc->index * 0x1000), data);
	nv_wr32(crtc->dev, 0x64d080 + (nv_crtc->index * 0x1000), 0x00000000);
	return 0;
}
  378. static void
  379. nvd0_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
  380. uint32_t start, uint32_t size)
  381. {
  382. struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
  383. u32 end = max(start + size, (u32)256);
  384. u32 i;
  385. for (i = start; i < end; i++) {
  386. nv_crtc->lut.r[i] = r[i];
  387. nv_crtc->lut.g[i] = g[i];
  388. nv_crtc->lut.b[i] = b[i];
  389. }
  390. nvd0_crtc_lut_load(crtc);
  391. }
/* Tear down a CRTC: release the cursor and LUT buffer objects, detach
 * from the DRM core, and free the wrapper allocation.
 */
static void
nvd0_crtc_destroy(struct drm_crtc *crtc)
{
	struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
	nouveau_bo_unmap(nv_crtc->cursor.nvbo);
	nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
	nouveau_bo_unmap(nv_crtc->lut.nvbo);
	nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
	drm_crtc_cleanup(crtc);
	kfree(crtc);
}
/* CRTC helper vtable (modeset plumbing used by drm_crtc_helper). */
static const struct drm_crtc_helper_funcs nvd0_crtc_hfunc = {
	.dpms = nvd0_crtc_dpms,
	.prepare = nvd0_crtc_prepare,
	.commit = nvd0_crtc_commit,
	.mode_fixup = nvd0_crtc_mode_fixup,
	.mode_set = nvd0_crtc_mode_set,
	.mode_set_base = nvd0_crtc_mode_set_base,
	.mode_set_base_atomic = nvd0_crtc_mode_set_base_atomic,
	.load_lut = nvd0_crtc_lut_load,
};
/* CRTC core vtable (userspace-facing cursor/gamma/config entry points). */
static const struct drm_crtc_funcs nvd0_crtc_func = {
	.cursor_set = nvd0_crtc_cursor_set,
	.cursor_move = nvd0_crtc_cursor_move,
	.gamma_set = nvd0_crtc_gamma_set,
	.set_config = drm_crtc_helper_set_config,
	.destroy = nvd0_crtc_destroy,
};
/* Create and register the CRTC for hardware head @index: initialise a
 * linear gamma ramp, register with the DRM core, then allocate, pin and
 * map the cursor (64x64x4) and LUT (4KiB) VRAM buffers.  On any failure
 * everything allocated so far is torn down via nvd0_crtc_destroy().
 * Returns 0 on success or a negative errno.
 */
static int
nvd0_crtc_create(struct drm_device *dev, int index)
{
	struct nouveau_crtc *nv_crtc;
	struct drm_crtc *crtc;
	int ret, i;
	nv_crtc = kzalloc(sizeof(*nv_crtc), GFP_KERNEL);
	if (!nv_crtc)
		return -ENOMEM;
	nv_crtc->index = index;
	nv_crtc->set_dither = nvd0_crtc_set_dither;
	nv_crtc->set_scale = nvd0_crtc_set_scale;
	/* identity gamma ramp: 8-bit index widened to 16 bits */
	for (i = 0; i < 256; i++) {
		nv_crtc->lut.r[i] = i << 8;
		nv_crtc->lut.g[i] = i << 8;
		nv_crtc->lut.b[i] = i << 8;
	}
	crtc = &nv_crtc->base;
	drm_crtc_init(dev, crtc, &nvd0_crtc_func);
	drm_crtc_helper_add(crtc, &nvd0_crtc_hfunc);
	drm_mode_crtc_set_gamma_size(crtc, 256);
	/* cursor image buffer, 256-byte aligned in VRAM */
	ret = nouveau_bo_new(dev, 64 * 64 * 4, 0x100, TTM_PL_FLAG_VRAM,
			     0, 0x0000, &nv_crtc->cursor.nvbo);
	if (!ret) {
		ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
		if (!ret)
			ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
		if (ret)
			nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo);
	}
	if (ret)
		goto out;
	/* hardware LUT buffer */
	ret = nouveau_bo_new(dev, 4096, 0x100, TTM_PL_FLAG_VRAM,
			     0, 0x0000, &nv_crtc->lut.nvbo);
	if (!ret) {
		ret = nouveau_bo_pin(nv_crtc->lut.nvbo, TTM_PL_FLAG_VRAM);
		if (!ret)
			ret = nouveau_bo_map(nv_crtc->lut.nvbo);
		if (ret)
			nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo);
	}
	if (ret)
		goto out;
	nvd0_crtc_lut_load(crtc);
out:
	if (ret)
		nvd0_crtc_destroy(crtc);
	return ret;
}
  469. /******************************************************************************
  470. * DAC
  471. *****************************************************************************/
  472. /******************************************************************************
  473. * SOR
  474. *****************************************************************************/
/* Set the SOR's power state.  If another encoder shares the same OR and
 * is still DPMS_ON, the hardware is left untouched (only the recorded
 * state changes).  Otherwise the OR control register is updated with
 * bit0 = powered, bit31 presumably a "pending" strobe, bracketed by
 * waits for the previous/new operation to settle.
 */
static void
nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct drm_encoder *partner;
	int or = nv_encoder->or;
	u32 dpms_ctrl;
	nv_encoder->last_dpms = mode;
	/* don't power down an OR another active encoder is using */
	list_for_each_entry(partner, &dev->mode_config.encoder_list, head) {
		struct nouveau_encoder *nv_partner = nouveau_encoder(partner);
		if (partner->encoder_type != DRM_MODE_ENCODER_TMDS)
			continue;
		if (nv_partner != nv_encoder &&
		    nv_partner->dcb->or == nv_encoder->or) {
			if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
				return;
			break;
		}
	}
	dpms_ctrl = (mode == DRM_MODE_DPMS_ON);
	dpms_ctrl |= 0x80000000;
	nv_wait(dev, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
	nv_mask(dev, 0x61c004 + (or * 0x0800), 0x80000001, dpms_ctrl);
	nv_wait(dev, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
	nv_wait(dev, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
}
  502. static bool
  503. nvd0_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
  504. struct drm_display_mode *adjusted_mode)
  505. {
  506. struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
  507. struct nouveau_connector *nv_connector;
  508. nv_connector = nouveau_encoder_connector_get(nv_encoder);
  509. if (nv_connector && nv_connector->native_mode) {
  510. if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
  511. int id = adjusted_mode->base.id;
  512. *adjusted_mode = *nv_connector->native_mode;
  513. adjusted_mode->base.id = id;
  514. }
  515. }
  516. return true;
  517. }
/* Nothing to do before a SOR modeset. */
static void
nvd0_sor_prepare(struct drm_encoder *encoder)
{
}
/* Nothing to do after a SOR modeset; mode_set already enabled the link. */
static void
nvd0_sor_commit(struct drm_encoder *encoder)
{
}
/* Program the SOR for the new mode: build the SOR_MODE_CTRL word (owner
 * head bit plus TMDS link/protocol selection — dual-link is chosen for
 * pixel clocks >= 165MHz on link A), power the encoder on, and push the
 * control word through the EVO channel.
 */
static void
nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
		  struct drm_display_mode *adjusted_mode)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
	u32 mode_ctrl = (1 << nv_crtc->index);	/* head ownership bit */
	u32 *push;
	if (nv_encoder->dcb->sorconf.link & 1) {
		if (adjusted_mode->clock < 165000)
			mode_ctrl |= 0x00000100;	/* single-link TMDS */
		else
			mode_ctrl |= 0x00000500;	/* dual-link TMDS */
	} else {
		mode_ctrl |= 0x00000200;	/* TMDS on link B */
	}
	nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON);
	push = evo_wait(encoder->dev, 0, 2);
	if (push) {
		evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
		evo_data(push, mode_ctrl);
		evo_kick(push, encoder->dev, 0);
	}
	nv_encoder->crtc = encoder->crtc;
}
/* Disable the SOR (helper .disable hook): blank the head it was driving,
 * clear its SOR_MODE_CTRL and push an UPDATE, then record the encoder
 * as unbound and powered off.
 */
static void
nvd0_sor_disconnect(struct drm_encoder *encoder)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	u32 *push;
	if (nv_encoder->crtc) {
		nvd0_crtc_prepare(nv_encoder->crtc);
		push = evo_wait(dev, 0, 4);
		if (push) {
			evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
			evo_data(push, 0x00000000);
			evo_mthd(push, 0x0080, 1);	/* UPDATE */
			evo_data(push, 0x00000000);
			evo_kick(push, dev, 0);
		}
		nv_encoder->crtc = NULL;
		nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
	}
}
/* Detach the encoder from the DRM core and free its allocation. */
static void
nvd0_sor_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
/* SOR encoder helper vtable. */
static const struct drm_encoder_helper_funcs nvd0_sor_hfunc = {
	.dpms = nvd0_sor_dpms,
	.mode_fixup = nvd0_sor_mode_fixup,
	.prepare = nvd0_sor_prepare,
	.commit = nvd0_sor_commit,
	.mode_set = nvd0_sor_mode_set,
	.disable = nvd0_sor_disconnect,
	.get_crtc = nvd0_display_crtc_get,
};
/* SOR encoder core vtable. */
static const struct drm_encoder_funcs nvd0_sor_func = {
	.destroy = nvd0_sor_destroy,
};
  589. static int
  590. nvd0_sor_create(struct drm_connector *connector, struct dcb_entry *dcbe)
  591. {
  592. struct drm_device *dev = connector->dev;
  593. struct nouveau_encoder *nv_encoder;
  594. struct drm_encoder *encoder;
  595. nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
  596. if (!nv_encoder)
  597. return -ENOMEM;
  598. nv_encoder->dcb = dcbe;
  599. nv_encoder->or = ffs(dcbe->or) - 1;
  600. nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
  601. encoder = to_drm_encoder(nv_encoder);
  602. encoder->possible_crtcs = dcbe->heads;
  603. encoder->possible_clones = 0;
  604. drm_encoder_init(dev, encoder, &nvd0_sor_func, DRM_MODE_ENCODER_TMDS);
  605. drm_encoder_helper_add(encoder, &nvd0_sor_hfunc);
  606. drm_mode_connector_attach_encoder(connector, encoder);
  607. return 0;
  608. }
  609. /******************************************************************************
  610. * IRQ
  611. *****************************************************************************/
/* Handle supervisor interrupt stage 1: log the state word and ack the
 * three acknowledge registers so the hardware can continue.  Actual
 * modeset sequencing for these stages is not implemented yet.
 */
static void
nvd0_display_unk1_handler(struct drm_device *dev)
{
	u32 unk0 = nv_rd32(dev, 0x6101d0);
	NV_INFO(dev, "PDISP: unk1 0x%08x\n", unk0);
	nv_wr32(dev, 0x6101d4, 0x00000000);
	nv_wr32(dev, 0x6109d4, 0x00000000);
	nv_wr32(dev, 0x6101d0, 0x80000000);
}
/* Handle supervisor interrupt stage 2 — same ack-only handling as stage 1. */
static void
nvd0_display_unk2_handler(struct drm_device *dev)
{
	u32 unk0 = nv_rd32(dev, 0x6101d0);
	NV_INFO(dev, "PDISP: unk2 0x%08x\n", unk0);
	nv_wr32(dev, 0x6101d4, 0x00000000);
	nv_wr32(dev, 0x6109d4, 0x00000000);
	nv_wr32(dev, 0x6101d0, 0x80000000);
}
/* Handle supervisor interrupt stage 4 — same ack-only handling as stage 1. */
static void
nvd0_display_unk4_handler(struct drm_device *dev)
{
	u32 unk0 = nv_rd32(dev, 0x6101d0);
	NV_INFO(dev, "PDISP: unk4 0x%08x\n", unk0);
	nv_wr32(dev, 0x6101d4, 0x00000000);
	nv_wr32(dev, 0x6109d4, 0x00000000);
	nv_wr32(dev, 0x6101d0, 0x80000000);
}
/* PDISP interrupt dispatcher.  Decodes the top-level interrupt register:
 * bit 1  = EVO channel error (log the offending method and ack),
 * bit 20 = supervisor interrupts (stages 1/2/4 dispatched by status),
 * bits 24/25 = per-head status (acked, presumably vblank — unverified).
 * Anything left over is logged as unknown.
 */
static void
nvd0_display_intr(struct drm_device *dev)
{
	u32 intr = nv_rd32(dev, 0x610088);
	if (intr & 0x00000002) {
		u32 stat = nv_rd32(dev, 0x61009c);
		int chid = ffs(stat) - 1;	/* lowest pending channel */
		if (chid >= 0) {
			u32 mthd = nv_rd32(dev, 0x6101f0 + (chid * 12));
			u32 data = nv_rd32(dev, 0x6101f4 + (chid * 12));
			u32 unkn = nv_rd32(dev, 0x6101f8 + (chid * 12));
			NV_INFO(dev, "EvoCh: chid %d mthd 0x%04x data 0x%08x "
				     "0x%08x 0x%08x\n",
				chid, (mthd & 0x0000ffc), data, mthd, unkn);
			nv_wr32(dev, 0x61009c, (1 << chid));
			nv_wr32(dev, 0x6101f0 + (chid * 12), 0x90000000);
		}
		intr &= ~0x00000002;
	}
	if (intr & 0x00100000) {
		u32 stat = nv_rd32(dev, 0x6100ac);
		if (stat & 0x00000007) {
			/* ack first, then run the stage handlers */
			nv_wr32(dev, 0x6100ac, (stat & 0x00000007));
			if (stat & 0x00000001)
				nvd0_display_unk1_handler(dev);
			if (stat & 0x00000002)
				nvd0_display_unk2_handler(dev);
			if (stat & 0x00000004)
				nvd0_display_unk4_handler(dev);
			stat &= ~0x00000007;
		}
		if (stat) {
			NV_INFO(dev, "PDISP: unknown intr24 0x%08x\n", stat);
			nv_wr32(dev, 0x6100ac, stat);
		}
		intr &= ~0x00100000;
	}
	/* per-head interrupt status, acked without further handling */
	if (intr & 0x01000000) {
		u32 stat = nv_rd32(dev, 0x6100bc);
		nv_wr32(dev, 0x6100bc, stat);
		intr &= ~0x01000000;
	}
	if (intr & 0x02000000) {
		u32 stat = nv_rd32(dev, 0x6108bc);
		nv_wr32(dev, 0x6108bc, stat);
		intr &= ~0x02000000;
	}
	if (intr)
		NV_INFO(dev, "PDISP: unknown intr 0x%08x\n", intr);
}
  689. /******************************************************************************
  690. * Init
  691. *****************************************************************************/
/* Shut the display engine down: stop the two cursor channels (13 and 14)
 * if active, then halt the master EVO channel, masking the associated
 * interrupt enables as each channel goes down.
 */
static void
nvd0_display_fini(struct drm_device *dev)
{
	int i;
	/* fini cursors */
	for (i = 14; i >= 13; i--) {
		if (!(nv_rd32(dev, 0x610490 + (i * 0x10)) & 0x00000001))
			continue;
		nv_mask(dev, 0x610490 + (i * 0x10), 0x00000001, 0x00000000);
		nv_wait(dev, 0x610490 + (i * 0x10), 0x00010000, 0x00000000);
		nv_mask(dev, 0x610090, 1 << i, 0x00000000);
		nv_mask(dev, 0x6100a0, 1 << i, 0x00000000);
	}
	/* fini master */
	if (nv_rd32(dev, 0x610490) & 0x00000010) {
		nv_mask(dev, 0x610490, 0x00000010, 0x00000000);
		nv_mask(dev, 0x610490, 0x00000003, 0x00000000);
		nv_wait(dev, 0x610490, 0x80000000, 0x00000000);
		nv_mask(dev, 0x610090, 0x00000001, 0x00000000);
		nv_mask(dev, 0x6100a0, 0x00000001, 0x00000000);
	}
}
/* Bring the display engine up: take it out of VBIOS/power-on ownership
 * if needed, point the hardware at our instance memory, start the master
 * EVO channel on its push buffer, enable the cursor channels, and push
 * the initial method stream (bind the sync DMA object at 0x0088).
 * Returns 0 on success or -EBUSY if the hardware fails to respond.
 */
int
nvd0_display_init(struct drm_device *dev)
{
	struct nvd0_display *disp = nvd0_display(dev);
	u32 *push;
	int i;
	/* hand-over from whatever owned the display before us */
	if (nv_rd32(dev, 0x6100ac) & 0x00000100) {
		nv_wr32(dev, 0x6100ac, 0x00000100);
		nv_mask(dev, 0x6194e8, 0x00000001, 0x00000000);
		if (!nv_wait(dev, 0x6194e8, 0x00000002, 0x00000000)) {
			NV_ERROR(dev, "PDISP: 0x6194e8 0x%08x\n",
				 nv_rd32(dev, 0x6194e8));
			return -EBUSY;
		}
	}
	/* instance memory base for the display engine's hash/DMA objects */
	nv_wr32(dev, 0x610010, (disp->mem->vinst >> 8) | 9);
	nv_mask(dev, 0x6100b0, 0x00000307, 0x00000307);
	/* init master */
	nv_wr32(dev, 0x610494, (disp->evo[0].handle >> 8) | 3);
	nv_wr32(dev, 0x610498, 0x00010000);
	nv_wr32(dev, 0x61049c, 0x00000001);
	nv_mask(dev, 0x610490, 0x00000010, 0x00000010);
	nv_wr32(dev, 0x640000, 0x00000000);	/* PUT = 0 */
	nv_wr32(dev, 0x610490, 0x01000013);
	if (!nv_wait(dev, 0x610490, 0x80000000, 0x00000000)) {
		NV_ERROR(dev, "PDISP: master 0x%08x\n",
			 nv_rd32(dev, 0x610490));
		return -EBUSY;
	}
	nv_mask(dev, 0x610090, 0x00000001, 0x00000001);
	nv_mask(dev, 0x6100a0, 0x00000001, 0x00000001);
	/* init cursors */
	for (i = 13; i <= 14; i++) {
		nv_wr32(dev, 0x610490 + (i * 0x10), 0x00000001);
		if (!nv_wait(dev, 0x610490 + (i * 0x10), 0x00010000, 0x00010000)) {
			NV_ERROR(dev, "PDISP: curs%d 0x%08x\n", i,
				 nv_rd32(dev, 0x610490 + (i * 0x10)));
			return -EBUSY;
		}
		nv_mask(dev, 0x610090, 1 << i, 1 << i);
		nv_mask(dev, 0x6100a0, 1 << i, 1 << i);
	}
	/* initial method stream: attach the sync DMA object */
	push = evo_wait(dev, 0, 32);
	if (!push)
		return -EBUSY;
	evo_mthd(push, 0x0088, 1);
	evo_data(push, MEM_SYNC);
	evo_mthd(push, 0x0084, 1);
	evo_data(push, 0x00000000);
	evo_mthd(push, 0x0084, 1);
	evo_data(push, 0x80000000);
	evo_mthd(push, 0x008c, 1);
	evo_data(push, 0x00000000);
	evo_kick(push, dev, 0);
	return 0;
}
/* Tear down the display engine: stop the hardware channels, free the EVO
 * push buffer and instance memory, unhook the IRQ handler and release
 * the private state.
 */
void
nvd0_display_destroy(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nvd0_display *disp = nvd0_display(dev);
	struct pci_dev *pdev = dev->pdev;
	nvd0_display_fini(dev);
	pci_free_consistent(pdev, PAGE_SIZE, disp->evo[0].ptr, disp->evo[0].handle);
	nouveau_gpuobj_ref(NULL, &disp->mem);
	nouveau_irq_unregister(dev, 26);
	dev_priv->engine.display.priv = NULL;
	kfree(disp);
}
/* Create the display engine: CRTCs for both heads, encoders/connectors
 * from the VBIOS DCB table (on-chip TMDS only for now), the IRQ handler,
 * the instance-memory hash table with the SYNC and VRAM DMA objects, and
 * the master EVO push buffer — then start the hardware.  On any failure
 * everything is unwound through nvd0_display_destroy().
 */
int
nvd0_display_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	struct dcb_table *dcb = &dev_priv->vbios.dcb;
	struct drm_connector *connector, *tmp;
	struct pci_dev *pdev = dev->pdev;
	struct nvd0_display *disp;
	struct dcb_entry *dcbe;
	int ret, i;
	disp = kzalloc(sizeof(*disp), GFP_KERNEL);
	if (!disp)
		return -ENOMEM;
	dev_priv->engine.display.priv = disp;
	/* create crtc objects to represent the hw heads */
	for (i = 0; i < 2; i++) {
		ret = nvd0_crtc_create(dev, i);
		if (ret)
			goto out;
	}
	/* create encoder/connector objects based on VBIOS DCB table */
	for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
		connector = nouveau_connector_create(dev, dcbe->connector);
		if (IS_ERR(connector))
			continue;
		if (dcbe->location != DCB_LOC_ON_CHIP) {
			NV_WARN(dev, "skipping off-chip encoder %d/%d\n",
				dcbe->type, ffs(dcbe->or) - 1);
			continue;
		}
		switch (dcbe->type) {
		case OUTPUT_TMDS:
			nvd0_sor_create(connector, dcbe);
			break;
		default:
			NV_WARN(dev, "skipping unsupported encoder %d/%d\n",
				dcbe->type, ffs(dcbe->or) - 1);
			continue;
		}
	}
	/* cull any connectors we created that don't have an encoder */
	list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
		if (connector->encoder_ids[0])
			continue;
		NV_WARN(dev, "%s has no encoders, removing\n",
			drm_get_connector_name(connector));
		connector->funcs->destroy(connector);
	}
	/* setup interrupt handling */
	nouveau_irq_register(dev, 26, nvd0_display_intr);
	/* hash table and dma objects for the memory areas we care about */
	ret = nouveau_gpuobj_new(dev, NULL, 0x4000, 0x10000,
				 NVOBJ_FLAG_ZERO_ALLOC, &disp->mem);
	if (ret)
		goto out;
	/* DMA object at 0x1000: the sync area (0x2000..0x2fff) */
	nv_wo32(disp->mem, 0x1000, 0x00000049);
	nv_wo32(disp->mem, 0x1004, (disp->mem->vinst + 0x2000) >> 8);
	nv_wo32(disp->mem, 0x1008, (disp->mem->vinst + 0x2fff) >> 8);
	nv_wo32(disp->mem, 0x100c, 0x00000000);
	nv_wo32(disp->mem, 0x1010, 0x00000000);
	nv_wo32(disp->mem, 0x1014, 0x00000000);
	/* hash entry binding handle MEM_SYNC to the object at 0x1000 */
	nv_wo32(disp->mem, 0x0000, MEM_SYNC);
	nv_wo32(disp->mem, 0x0004, (0x1000 << 9) | 0x00000001);
	/* DMA object at 0x1020: all of VRAM */
	nv_wo32(disp->mem, 0x1020, 0x00000009);
	nv_wo32(disp->mem, 0x1024, 0x00000000);
	nv_wo32(disp->mem, 0x1028, (dev_priv->vram_size - 1) >> 8);
	nv_wo32(disp->mem, 0x102c, 0x00000000);
	nv_wo32(disp->mem, 0x1030, 0x00000000);
	nv_wo32(disp->mem, 0x1034, 0x00000000);
	/* hash entry binding handle MEM_VRAM to the object at 0x1020 */
	nv_wo32(disp->mem, 0x0008, MEM_VRAM);
	nv_wo32(disp->mem, 0x000c, (0x1020 << 9) | 0x00000001);
	pinstmem->flush(dev);
	/* push buffers for evo channels */
	disp->evo[0].ptr =
		pci_alloc_consistent(pdev, PAGE_SIZE, &disp->evo[0].handle);
	if (!disp->evo[0].ptr) {
		ret = -ENOMEM;
		goto out;
	}
	ret = nvd0_display_init(dev);
	if (ret)
		goto out;
out:
	if (ret)
		nvd0_display_destroy(dev);
	return ret;
}