  1. /*
  2. * Copyright 2011 Red Hat Inc.
  3. *
  4. * Permission is hereby granted, free of charge, to any person obtaining a
  5. * copy of this software and associated documentation files (the "Software"),
  6. * to deal in the Software without restriction, including without limitation
  7. * the rights to use, copy, modify, merge, publish, distribute, sublicense,
  8. * and/or sell copies of the Software, and to permit persons to whom the
  9. * Software is furnished to do so, subject to the following conditions:
  10. *
  11. * The above copyright notice and this permission notice shall be included in
  12. * all copies or substantial portions of the Software.
  13. *
  14. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  15. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  16. * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  17. * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
  18. * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
  19. * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
  20. * OTHER DEALINGS IN THE SOFTWARE.
  21. *
  22. * Authors: Ben Skeggs
  23. */
  24. #include <linux/dma-mapping.h>
  25. #include "drmP.h"
  26. #include "drm_crtc_helper.h"
  27. #include "nouveau_drv.h"
  28. #include "nouveau_connector.h"
  29. #include "nouveau_encoder.h"
  30. #include "nouveau_crtc.h"
  31. #define MEM_SYNC 0xe0000001
  32. #define MEM_VRAM 0xe0010000
/* Per-device state for the NVD0 display engine.
 *
 * @mem: GPU object holding the hash table and DMA objects built in
 *       nvd0_display_create() (sync area at MEM_SYNC, VRAM at MEM_VRAM).
 * @evo: per-EVO-channel push buffer state; @handle is the bus (DMA)
 *       address handed to the hardware, @ptr the CPU mapping of the
 *       PAGE_SIZE push buffer.  Only one channel is used so far.
 */
struct nvd0_display {
	struct nouveau_gpuobj *mem;
	struct {
		dma_addr_t handle;
		u32 *ptr;
	} evo[1];
};
  40. static struct nvd0_display *
  41. nvd0_display(struct drm_device *dev)
  42. {
  43. struct drm_nouveau_private *dev_priv = dev->dev_private;
  44. return dev_priv->engine.display.priv;
  45. }
  46. static int
  47. evo_icmd(struct drm_device *dev, int id, u32 mthd, u32 data)
  48. {
  49. int ret = 0;
  50. nv_mask(dev, 0x610700 + (id * 0x10), 0x00000001, 0x00000001);
  51. nv_wr32(dev, 0x610704 + (id * 0x10), data);
  52. nv_mask(dev, 0x610704 + (id * 0x10), 0x80000ffc, 0x80000000 | mthd);
  53. if (!nv_wait(dev, 0x610704 + (id * 0x10), 0x80000000, 0x00000000))
  54. ret = -EBUSY;
  55. nv_mask(dev, 0x610700 + (id * 0x10), 0x00000001, 0x00000000);
  56. return ret;
  57. }
  58. static u32 *
  59. evo_wait(struct drm_device *dev, int id, int nr)
  60. {
  61. struct nvd0_display *disp = nvd0_display(dev);
  62. u32 put = nv_rd32(dev, 0x640000 + (id * 0x1000)) / 4;
  63. if (put + nr >= (PAGE_SIZE / 4)) {
  64. disp->evo[id].ptr[put] = 0x20000000;
  65. nv_wr32(dev, 0x640000 + (id * 0x1000), 0x00000000);
  66. if (!nv_wait(dev, 0x640004 + (id * 0x1000), ~0, 0x00000000)) {
  67. NV_ERROR(dev, "evo %d dma stalled\n", id);
  68. return NULL;
  69. }
  70. put = 0;
  71. }
  72. return disp->evo[id].ptr + put;
  73. }
  74. static void
  75. evo_kick(u32 *push, struct drm_device *dev, int id)
  76. {
  77. struct nvd0_display *disp = nvd0_display(dev);
  78. nv_wr32(dev, 0x640000 + (id * 0x1000), (push - disp->evo[id].ptr) << 2);
  79. }
/* Queue a method header ((count << 18) | method) / a data word into a
 * push buffer pointer obtained from evo_wait(), advancing it. */
#define evo_mthd(p,m,s) *((p)++) = (((s) << 18) | (m))
#define evo_data(p,d) *((p)++) = (d)
  82. static struct drm_crtc *
  83. nvd0_display_crtc_get(struct drm_encoder *encoder)
  84. {
  85. return nouveau_encoder(encoder)->crtc;
  86. }
  87. /******************************************************************************
  88. * DAC
  89. *****************************************************************************/
  90. /******************************************************************************
  91. * SOR
  92. *****************************************************************************/
/* DPMS hook for SOR encoders: power the OR up or down.
 *
 * If another TMDS encoder shares this OR and was last set to ON, the
 * hardware is left untouched so the partner keeps its output.
 */
static void
nvd0_sor_dpms(struct drm_encoder *encoder, int mode)
{
	struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
	struct drm_device *dev = encoder->dev;
	struct drm_encoder *partner;
	int or = nv_encoder->or;
	u32 dpms_ctrl;

	nv_encoder->last_dpms = mode;

	/* bail out if an encoder sharing this OR is still lit */
	list_for_each_entry(partner, &dev->mode_config.encoder_list, head) {
		struct nouveau_encoder *nv_partner = nouveau_encoder(partner);

		if (partner->encoder_type != DRM_MODE_ENCODER_TMDS)
			continue;

		if (nv_partner != nv_encoder &&
		    nv_partner->dcb->or == nv_encoder->or) {
			if (nv_partner->last_dpms == DRM_MODE_DPMS_ON)
				return;
			break;
		}
	}

	/* bit 0 = power on; 0x80000000 appears to be an update/pending
	 * flag (the surrounding waits poll it) - NOTE(review): confirm
	 * against hw docs */
	dpms_ctrl = (mode == DRM_MODE_DPMS_ON);
	dpms_ctrl |= 0x80000000;

	/* wait idle, apply, then wait for the update to complete */
	nv_wait(dev, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
	nv_mask(dev, 0x61c004 + (or * 0x0800), 0x80000001, dpms_ctrl);
	nv_wait(dev, 0x61c004 + (or * 0x0800), 0x80000000, 0x00000000);
	nv_wait(dev, 0x61c030 + (or * 0x0800), 0x10000000, 0x00000000);
}
  120. static bool
  121. nvd0_sor_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
  122. struct drm_display_mode *adjusted_mode)
  123. {
  124. struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
  125. struct nouveau_connector *nv_connector;
  126. nv_connector = nouveau_encoder_connector_get(nv_encoder);
  127. if (nv_connector && nv_connector->native_mode) {
  128. if (nv_connector->scaling_mode != DRM_MODE_SCALE_NONE) {
  129. int id = adjusted_mode->base.id;
  130. *adjusted_mode = *nv_connector->native_mode;
  131. adjusted_mode->base.id = id;
  132. }
  133. }
  134. return true;
  135. }
/* Nothing to do before mode_set; hook required by the DRM helpers. */
static void
nvd0_sor_prepare(struct drm_encoder *encoder)
{
}
/* Nothing to do after mode_set; hook required by the DRM helpers. */
static void
nvd0_sor_commit(struct drm_encoder *encoder)
{
}
  144. static void
  145. nvd0_sor_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode,
  146. struct drm_display_mode *adjusted_mode)
  147. {
  148. struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
  149. struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc);
  150. u32 mode_ctrl = (1 << nv_crtc->index);
  151. u32 *push;
  152. if (nv_encoder->dcb->sorconf.link & 1) {
  153. if (adjusted_mode->clock < 165000)
  154. mode_ctrl |= 0x00000100;
  155. else
  156. mode_ctrl |= 0x00000500;
  157. } else {
  158. mode_ctrl |= 0x00000200;
  159. }
  160. nvd0_sor_dpms(encoder, DRM_MODE_DPMS_ON);
  161. push = evo_wait(encoder->dev, 0, 2);
  162. if (push) {
  163. evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
  164. evo_data(push, mode_ctrl);
  165. }
  166. nv_encoder->crtc = encoder->crtc;
  167. }
  168. static void
  169. nvd0_sor_disconnect(struct drm_encoder *encoder)
  170. {
  171. struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder);
  172. struct drm_device *dev = encoder->dev;
  173. if (nv_encoder->crtc) {
  174. u32 *push = evo_wait(dev, 0, 4);
  175. if (push) {
  176. evo_mthd(push, 0x0200 + (nv_encoder->or * 0x20), 1);
  177. evo_data(push, 0x00000000);
  178. evo_mthd(push, 0x0080, 1);
  179. evo_data(push, 0x00000000);
  180. evo_kick(push, dev, 0);
  181. }
  182. nv_encoder->crtc = NULL;
  183. nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
  184. }
  185. }
/* Undo nvd0_sor_create(): unregister from the DRM core, then free the
 * containing nouveau_encoder allocation. */
static void
nvd0_sor_destroy(struct drm_encoder *encoder)
{
	drm_encoder_cleanup(encoder);
	kfree(encoder);
}
/* Helper vtable for SOR encoders (mode-setting flow). */
static const struct drm_encoder_helper_funcs nvd0_sor_hfunc = {
	.dpms = nvd0_sor_dpms,
	.mode_fixup = nvd0_sor_mode_fixup,
	.prepare = nvd0_sor_prepare,
	.commit = nvd0_sor_commit,
	.mode_set = nvd0_sor_mode_set,
	.disable = nvd0_sor_disconnect,
	.get_crtc = nvd0_display_crtc_get,
};

/* Core vtable for SOR encoders (lifetime only). */
static const struct drm_encoder_funcs nvd0_sor_func = {
	.destroy = nvd0_sor_destroy,
};
  204. static int
  205. nvd0_sor_create(struct drm_connector *connector, struct dcb_entry *dcbe)
  206. {
  207. struct drm_device *dev = connector->dev;
  208. struct nouveau_encoder *nv_encoder;
  209. struct drm_encoder *encoder;
  210. nv_encoder = kzalloc(sizeof(*nv_encoder), GFP_KERNEL);
  211. if (!nv_encoder)
  212. return -ENOMEM;
  213. nv_encoder->dcb = dcbe;
  214. nv_encoder->or = ffs(dcbe->or) - 1;
  215. nv_encoder->last_dpms = DRM_MODE_DPMS_OFF;
  216. encoder = to_drm_encoder(nv_encoder);
  217. encoder->possible_crtcs = dcbe->heads;
  218. encoder->possible_clones = 0;
  219. drm_encoder_init(dev, encoder, &nvd0_sor_func, DRM_MODE_ENCODER_TMDS);
  220. drm_encoder_helper_add(encoder, &nvd0_sor_hfunc);
  221. drm_mode_connector_attach_encoder(connector, encoder);
  222. return 0;
  223. }
  224. /******************************************************************************
  225. * IRQ
  226. *****************************************************************************/
/* PDISP interrupt handler (registered on irq line 26 by
 * nvd0_display_create()).  Decodes and acknowledges the status bits we
 * know about; anything left over is logged as unknown.
 */
static void
nvd0_display_intr(struct drm_device *dev)
{
	u32 intr = nv_rd32(dev, 0x610088);

	/* EVO channel error: report the offending method, then ack and
	 * resume the channel */
	if (intr & 0x00000002) {
		u32 stat = nv_rd32(dev, 0x61009c);
		int chid = ffs(stat) - 1;
		if (chid >= 0) {
			u32 mthd = nv_rd32(dev, 0x6101f0 + (chid * 12));
			u32 data = nv_rd32(dev, 0x6101f4 + (chid * 12));
			u32 unkn = nv_rd32(dev, 0x6101f8 + (chid * 12));

			NV_INFO(dev, "EvoCh: chid %d mthd 0x%04x data 0x%08x "
				     "0x%08x 0x%08x\n",
				chid, (mthd & 0x0000ffc), data, mthd, unkn);
			nv_wr32(dev, 0x61009c, (1 << chid));
			nv_wr32(dev, 0x6101f0 + (chid * 12), 0x90000000);
		}
		intr &= ~0x00000002;
	}

	/* ack-only status blocks; their contents are not decoded here */
	if (intr & 0x01000000) {
		u32 stat = nv_rd32(dev, 0x6100bc);
		nv_wr32(dev, 0x6100bc, stat);
		intr &= ~0x01000000;
	}

	if (intr & 0x02000000) {
		u32 stat = nv_rd32(dev, 0x6108bc);
		nv_wr32(dev, 0x6108bc, stat);
		intr &= ~0x02000000;
	}

	if (intr)
		NV_INFO(dev, "PDISP: unknown intr 0x%08x\n", intr);
}
  259. /******************************************************************************
  260. * Init
  261. *****************************************************************************/
/* Quiesce the display engine: stop the cursor channels (13/14) and the
 * core (master) channel if active, masking their interrupts.  Reverse
 * of nvd0_display_init(); channels already inactive are skipped.
 */
static void
nvd0_display_fini(struct drm_device *dev)
{
	int i;

	/* fini cursors */
	for (i = 14; i >= 13; i--) {
		if (!(nv_rd32(dev, 0x610490 + (i * 0x10)) & 0x00000001))
			continue;

		nv_mask(dev, 0x610490 + (i * 0x10), 0x00000001, 0x00000000);
		nv_wait(dev, 0x610490 + (i * 0x10), 0x00010000, 0x00000000);
		nv_mask(dev, 0x610090, 1 << i, 0x00000000);
		nv_mask(dev, 0x6100a0, 1 << i, 0x00000000);
	}

	/* fini master */
	if (nv_rd32(dev, 0x610490) & 0x00000010) {
		nv_mask(dev, 0x610490, 0x00000010, 0x00000000);
		nv_mask(dev, 0x610490, 0x00000003, 0x00000000);
		nv_wait(dev, 0x610490, 0x80000000, 0x00000000);
		nv_mask(dev, 0x610090, 0x00000001, 0x00000000);
		nv_mask(dev, 0x6100a0, 0x00000001, 0x00000000);
	}
}
/* Bring up the display engine: point it at the instance memory built
 * in nvd0_display_create(), start the core (master) EVO channel and
 * the two cursor channels, enable their interrupts, and push the
 * initial method stream (sync-area DMA object at 0x0088).
 *
 * Returns 0 on success, -EBUSY if the hardware fails to respond at any
 * stage.  Ordering of the register writes follows hardware bring-up
 * requirements - do not reorder.
 */
int
nvd0_display_init(struct drm_device *dev)
{
	struct nvd0_display *disp = nvd0_display(dev);
	u32 *push;
	int i;

	/* if the engine reports a pending condition, clear it and kick
	 * 0x6194e8 - NOTE(review): exact semantics of these registers
	 * are undocumented here, sequence kept verbatim */
	if (nv_rd32(dev, 0x6100ac) & 0x00000100) {
		nv_wr32(dev, 0x6100ac, 0x00000100);
		nv_mask(dev, 0x6194e8, 0x00000001, 0x00000000);
		if (!nv_wait(dev, 0x6194e8, 0x00000002, 0x00000000)) {
			NV_ERROR(dev, "PDISP: 0x6194e8 0x%08x\n",
				 nv_rd32(dev, 0x6194e8));
			return -EBUSY;
		}
	}

	/* instance memory base for the display's hash table / objects */
	nv_wr32(dev, 0x610010, (disp->mem->vinst >> 8) | 9);

	/* init master */
	nv_wr32(dev, 0x610494, (disp->evo[0].handle >> 8) | 3);
	nv_wr32(dev, 0x610498, 0x00010000);
	nv_wr32(dev, 0x61049c, 0x00000001);
	nv_mask(dev, 0x610490, 0x00000010, 0x00000010);
	nv_wr32(dev, 0x640000, 0x00000000);
	nv_wr32(dev, 0x610490, 0x01000013);
	if (!nv_wait(dev, 0x610490, 0x80000000, 0x00000000)) {
		NV_ERROR(dev, "PDISP: master 0x%08x\n",
			 nv_rd32(dev, 0x610490));
		return -EBUSY;
	}
	nv_mask(dev, 0x610090, 0x00000001, 0x00000001);
	nv_mask(dev, 0x6100a0, 0x00000001, 0x00000001);

	/* init cursors */
	for (i = 13; i <= 14; i++) {
		nv_wr32(dev, 0x610490 + (i * 0x10), 0x00000001);
		if (!nv_wait(dev, 0x610490 + (i * 0x10), 0x00010000, 0x00010000)) {
			NV_ERROR(dev, "PDISP: curs%d 0x%08x\n", i,
				 nv_rd32(dev, 0x610490 + (i * 0x10)));
			return -EBUSY;
		}
		nv_mask(dev, 0x610090, 1 << i, 1 << i);
		nv_mask(dev, 0x6100a0, 1 << i, 1 << i);
	}

	/* initial method stream: bind the sync-area DMA object */
	push = evo_wait(dev, 0, 32);
	if (!push)
		return -EBUSY;
	evo_mthd(push, 0x0088, 1);
	evo_data(push, MEM_SYNC);
	evo_mthd(push, 0x0084, 1);
	evo_data(push, 0x00000000);
	evo_mthd(push, 0x0084, 1);
	evo_data(push, 0x80000000);
	evo_mthd(push, 0x008c, 1);
	evo_data(push, 0x00000000);
	evo_kick(push, dev, 0);
	return 0;
}
  339. void
  340. nvd0_display_destroy(struct drm_device *dev)
  341. {
  342. struct drm_nouveau_private *dev_priv = dev->dev_private;
  343. struct nvd0_display *disp = nvd0_display(dev);
  344. struct pci_dev *pdev = dev->pdev;
  345. nvd0_display_fini(dev);
  346. pci_free_consistent(pdev, PAGE_SIZE, disp->evo[0].ptr, disp->evo[0].handle);
  347. nouveau_gpuobj_ref(NULL, &disp->mem);
  348. nouveau_irq_unregister(dev, 26);
  349. dev_priv->engine.display.priv = NULL;
  350. kfree(disp);
  351. }
/* Construct the display engine state: encoders/connectors from the
 * VBIOS DCB table, the interrupt handler, the instance-memory hash
 * table and DMA objects, the EVO push buffer, and finally the hardware
 * bring-up via nvd0_display_init().
 *
 * Returns 0 on success or a negative errno; on failure everything
 * created so far is torn down through nvd0_display_destroy().
 *
 * NOTE(review): the error path calls nvd0_display_destroy() even when
 * the push buffer was never allocated - verify destroy tolerates a
 * NULL disp->evo[0].ptr.
 */
int
nvd0_display_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	struct dcb_table *dcb = &dev_priv->vbios.dcb;
	struct drm_connector *connector, *tmp;
	struct pci_dev *pdev = dev->pdev;
	struct nvd0_display *disp;
	struct dcb_entry *dcbe;
	int ret, i;

	disp = kzalloc(sizeof(*disp), GFP_KERNEL);
	if (!disp)
		return -ENOMEM;
	dev_priv->engine.display.priv = disp;

	/* create encoder/connector objects based on VBIOS DCB table */
	for (i = 0, dcbe = &dcb->entry[0]; i < dcb->entries; i++, dcbe++) {
		connector = nouveau_connector_create(dev, dcbe->connector);
		if (IS_ERR(connector))
			continue;

		if (dcbe->location != DCB_LOC_ON_CHIP) {
			NV_WARN(dev, "skipping off-chip encoder %d/%d\n",
				dcbe->type, ffs(dcbe->or) - 1);
			continue;
		}

		/* only on-chip TMDS outputs are supported so far */
		switch (dcbe->type) {
		case OUTPUT_TMDS:
			nvd0_sor_create(connector, dcbe);
			break;
		default:
			NV_WARN(dev, "skipping unsupported encoder %d/%d\n",
				dcbe->type, ffs(dcbe->or) - 1);
			continue;
		}
	}

	/* cull any connectors we created that don't have an encoder */
	list_for_each_entry_safe(connector, tmp, &dev->mode_config.connector_list, head) {
		if (connector->encoder_ids[0])
			continue;

		NV_WARN(dev, "%s has no encoders, removing\n",
			drm_get_connector_name(connector));
		connector->funcs->destroy(connector);
	}

	/* setup interrupt handling */
	nouveau_irq_register(dev, 26, nvd0_display_intr);

	/* hash table and dma objects for the memory areas we care about */
	ret = nouveau_gpuobj_new(dev, NULL, 0x4000, 0x10000,
				 NVOBJ_FLAG_ZERO_ALLOC, &disp->mem);
	if (ret)
		goto out;

	/* DMA object for the sync area (handle MEM_SYNC, hash at 0x0000) */
	nv_wo32(disp->mem, 0x1000, 0x00000049);
	nv_wo32(disp->mem, 0x1004, (disp->mem->vinst + 0x2000) >> 8);
	nv_wo32(disp->mem, 0x1008, (disp->mem->vinst + 0x2fff) >> 8);
	nv_wo32(disp->mem, 0x100c, 0x00000000);
	nv_wo32(disp->mem, 0x1010, 0x00000000);
	nv_wo32(disp->mem, 0x1014, 0x00000000);
	nv_wo32(disp->mem, 0x0000, MEM_SYNC);
	nv_wo32(disp->mem, 0x0004, (0x1000 << 9) | 0x00000001);

	/* DMA object covering all of VRAM (handle MEM_VRAM, hash at 0x0008) */
	nv_wo32(disp->mem, 0x1020, 0x00000009);
	nv_wo32(disp->mem, 0x1024, 0x00000000);
	nv_wo32(disp->mem, 0x1028, (dev_priv->vram_size - 1) >> 8);
	nv_wo32(disp->mem, 0x102c, 0x00000000);
	nv_wo32(disp->mem, 0x1030, 0x00000000);
	nv_wo32(disp->mem, 0x1034, 0x00000000);
	nv_wo32(disp->mem, 0x0008, MEM_VRAM);
	nv_wo32(disp->mem, 0x000c, (0x1020 << 9) | 0x00000001);

	pinstmem->flush(dev);

	/* push buffers for evo channels */
	disp->evo[0].ptr =
		pci_alloc_consistent(pdev, PAGE_SIZE, &disp->evo[0].handle);
	if (!disp->evo[0].ptr) {
		ret = -ENOMEM;
		goto out;
	}

	ret = nvd0_display_init(dev);
	if (ret)
		goto out;

out:
	if (ret)
		nvd0_display_destroy(dev);
	return ret;
}