nouveau_hw.c

/*
 * Copyright 2006 Dave Airlie
 * Copyright 2007 Maarten Maathuis
 * Copyright 2007-2009 Stuart Bennett
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <drm/drmP.h>
#include "nouveau_drm.h"
#include "nouveau_hw.h"

#include <subdev/bios/pll.h>
#include <subdev/clock.h>
#include <subdev/timer.h>

#define CHIPSET_NFORCE		0x01a0
#define CHIPSET_NFORCE2		0x01f0

/*
 * misc hw access wrappers/control functions
 */

void
NVWriteVgaSeq(struct drm_device *dev, int head, uint8_t index, uint8_t value)
{
	NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index);
	NVWritePRMVIO(dev, head, NV_PRMVIO_SR, value);
}

uint8_t
NVReadVgaSeq(struct drm_device *dev, int head, uint8_t index)
{
	NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index);
	return NVReadPRMVIO(dev, head, NV_PRMVIO_SR);
}

void
NVWriteVgaGr(struct drm_device *dev, int head, uint8_t index, uint8_t value)
{
	NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index);
	NVWritePRMVIO(dev, head, NV_PRMVIO_GX, value);
}

uint8_t
NVReadVgaGr(struct drm_device *dev, int head, uint8_t index)
{
	NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index);
	return NVReadPRMVIO(dev, head, NV_PRMVIO_GX);
}

/* CR44 takes values 0 (head A), 3 (head B) and 4 (heads tied)
 * it affects only the 8 bit vga io regs, which we access using mmio at
 * 0xc{0,2}3c*, 0x60{1,3}3*, and 0x68{1,3}3d*
 * in general, the set value of cr44 does not matter: reg access works as
 * expected and values can be set for the appropriate head by using a 0x2000
 * offset as required
 * however:
 * a) pre nv40, the head B range of PRMVIO regs at 0xc23c* was not exposed and
 *    cr44 must be set to 0 or 3 for accessing values on the correct head
 *    through the common 0xc03c* addresses
 * b) in tied mode (4) head B is programmed to the values set on head A, and
 *    access using the head B addresses can have strange results, ergo we leave
 *    tied mode in init once we know to what cr44 should be restored on exit
 *
 * the owner parameter is slightly abused:
 * 0 and 1 are treated as head values and so the set value is (owner * 3)
 * other values are treated as literal values to set
 */
void
NVSetOwner(struct drm_device *dev, int owner)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (owner == 1)
		owner *= 3;

	if (nv_device(drm->device)->chipset == 0x11) {
		/* This might seem stupid, but the blob does it and
		 * omitting it often locks the system up.
		 */
		NVReadVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX);
		NVReadVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX);
	}

	/* CR44 is always changed on CRTC0 */
	NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner);

	if (nv_device(drm->device)->chipset == 0x11) {	/* set me harder */
		NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
		NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
	}
}
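
/*
 * Usage sketch for the owner mapping described above (hypothetical call
 * sites, shown only as an illustration): NVSetOwner(dev, 0) writes
 * CR44 = 0 so head A owns the VGA I/O regs, NVSetOwner(dev, 1) writes
 * CR44 = 3 for head B, and NVSetOwner(dev, 4) writes the literal value 4
 * to tie the heads.
 */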

void
NVBlankScreen(struct drm_device *dev, int head, bool blank)
{
	unsigned char seq1;

	if (nv_two_heads(dev))
		NVSetOwner(dev, head);

	seq1 = NVReadVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX);

	NVVgaSeqReset(dev, head, true);
	if (blank)
		NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 | 0x20);
	else
		NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 & ~0x20);
	NVVgaSeqReset(dev, head, false);
}
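
/*
 * The 0x20 bit toggled in NVBlankScreen() is the standard VGA
 * screen-disable bit in sequencer register SR01 (the clocking mode
 * register addressed by NV_VIO_SR_CLOCK_INDEX), so blanking here only
 * gates the display output and leaves the programmed timings intact.
 */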

/*
 * PLL getting
 */

static void
nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1,
		      uint32_t pll2, struct nouveau_pll_vals *pllvals)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	/* to force parsing as single stage (i.e. nv40 vplls) pass pll2 as 0 */

	/* log2P is & 0x7 as never more than 7, and nv30/35 only uses 3 bits */
	pllvals->log2P = (pll1 >> 16) & 0x7;
	pllvals->N2 = pllvals->M2 = 1;

	if (reg1 <= 0x405c) {
		pllvals->NM1 = pll2 & 0xffff;
		/* single stage NVPLL and VPLLs use 1 << 8, MPLL uses 1 << 12 */
		if (!(pll1 & 0x1100))
			pllvals->NM2 = pll2 >> 16;
	} else {
		pllvals->NM1 = pll1 & 0xffff;
		if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2)
			pllvals->NM2 = pll2 & 0xffff;
		else if (nv_device(drm->device)->chipset == 0x30 ||
			 nv_device(drm->device)->chipset == 0x35) {
			pllvals->M1 &= 0xf; /* only 4 bits */
			if (pll1 & NV30_RAMDAC_ENABLE_VCO2) {
				pllvals->M2 = (pll1 >> 4) & 0x7;
				pllvals->N2 = ((pll1 >> 21) & 0x18) |
					      ((pll1 >> 19) & 0x7);
			}
		}
	}
}
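
/*
 * Note on the NM1/NM2 assignments above: they rely on struct
 * nouveau_pll_vals laying out the per-stage dividers as a union, with
 * NM1 aliasing the M1/N1 byte pair and NM2 aliasing M2/N2, so storing
 * the raw 16-bit register halves also fills the fields consumed by
 * nouveau_hw_pllvals_to_clk() below.  (Assumption based on the struct
 * declaration in the clock subdev headers.)
 */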

int
nouveau_hw_get_pllvals(struct drm_device *dev, enum nvbios_pll_type plltype,
		       struct nouveau_pll_vals *pllvals)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_device *device = nv_device(drm->device);
	struct nouveau_bios *bios = nouveau_bios(device);
	uint32_t reg1, pll1, pll2 = 0;
	struct nvbios_pll pll_lim;
	int ret;

	ret = nvbios_pll_parse(bios, plltype, &pll_lim);
	if (ret || !(reg1 = pll_lim.reg))
		return -ENOENT;

	pll1 = nv_rd32(device, reg1);
	if (reg1 <= 0x405c)
		pll2 = nv_rd32(device, reg1 + 4);
	else if (nv_two_reg_pll(dev)) {
		uint32_t reg2 = reg1 + (reg1 == NV_RAMDAC_VPLL2 ? 0x5c : 0x70);

		pll2 = nv_rd32(device, reg2);
	}

	if (nv_device(drm->device)->card_type == 0x40 &&
	    reg1 >= NV_PRAMDAC_VPLL_COEFF) {
		uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580);

		/* check whether vpll has been forced into single stage mode */
		if (reg1 == NV_PRAMDAC_VPLL_COEFF) {
			if (ramdac580 & NV_RAMDAC_580_VPLL1_ACTIVE)
				pll2 = 0;
		} else
			if (ramdac580 & NV_RAMDAC_580_VPLL2_ACTIVE)
				pll2 = 0;
	}

	nouveau_hw_decode_pll(dev, reg1, pll1, pll2, pllvals);
	pllvals->refclk = pll_lim.refclk;
	return 0;
}

int
nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pv)
{
	/* Avoid divide by zero if called at an inappropriate time */
	if (!pv->M1 || !pv->M2)
		return 0;

	return pv->N1 * pv->N2 * pv->refclk / (pv->M1 * pv->M2) >> pv->log2P;
}
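
/*
 * Worked example for the expression above (illustrative numbers only):
 * with refclk = 13500 kHz, N1 = 200, M1 = 13, N2 = M2 = 1 and log2P = 1,
 * the result is 200 * 1 * 13500 / (13 * 1) >> 1 = 103846 kHz,
 * i.e. roughly 103.8 MHz.
 */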

int
nouveau_hw_get_clock(struct drm_device *dev, enum nvbios_pll_type plltype)
{
	struct nouveau_pll_vals pllvals;
	int ret;

	if (plltype == PLL_MEMORY &&
	    (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) {
		uint32_t mpllP;

		pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
		if (!mpllP)
			mpllP = 4;

		return 400000 / mpllP;
	} else
	if (plltype == PLL_MEMORY &&
	    (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) {
		uint32_t clock;

		pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
		return clock;
	}

	ret = nouveau_hw_get_pllvals(dev, plltype, &pllvals);
	if (ret)
		return ret;

	return nouveau_hw_pllvals_to_clk(&pllvals);
}
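
/*
 * Minimal usage sketch (hypothetical caller, not part of this file;
 * values are assumed to be in kHz, as elsewhere in the pre-NV50 clock
 * code):
 *
 *	int mclk = nouveau_hw_get_clock(dev, PLL_MEMORY);
 *	if (mclk > 0)
 *		NV_INFO(drm, "memory clock: %d kHz\n", mclk);
 */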

static void
nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
{
	/* the vpll on an unused head can come up with a random value, way
	 * beyond the pll limits. for some reason this causes the chip to
	 * lock up when reading the dac palette regs, so set a valid pll here
	 * when such a condition is detected. only seen on nv11 to date
	 */

	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_device *device = nv_device(drm->device);
	struct nouveau_clock *clk = nouveau_clock(device);
	struct nouveau_bios *bios = nouveau_bios(device);
	struct nvbios_pll pll_lim;
	struct nouveau_pll_vals pv;
	enum nvbios_pll_type pll = head ? PLL_VPLL1 : PLL_VPLL0;

	if (nvbios_pll_parse(bios, pll, &pll_lim))
		return;
	nouveau_hw_get_pllvals(dev, pll, &pv);

	if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m &&
	    pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n &&
	    pv.log2P <= pll_lim.max_p)
		return;

	NV_WARN(drm, "VPLL %d outwith limits, attempting to fix\n", head + 1);

	/* set lowest clock within static limits */
	pv.M1 = pll_lim.vco1.max_m;
	pv.N1 = pll_lim.vco1.min_n;
	pv.log2P = pll_lim.max_p_usable;
	clk->pll_prog(clk, pll_lim.reg, &pv);
}

/*
 * vga font save/restore
 */

static void nouveau_vga_font_io(struct drm_device *dev,
				void __iomem *iovram,
				bool save, unsigned plane)
{
	unsigned i;

	NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, 1 << plane);
	NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, plane);
	for (i = 0; i < 16384; i++) {
		if (save) {
			nv04_display(dev)->saved_vga_font[plane][i] =
					ioread32_native(iovram + i * 4);
		} else {
			iowrite32_native(nv04_display(dev)->saved_vga_font[plane][i],
					 iovram + i * 4);
		}
	}
}
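
/*
 * All four font planes are reached through the same 64 KiB VRAM window
 * mapped by the caller below: the plane-mask sequencer register selects
 * which plane a write lands in and the read-map graphics register selects
 * which plane a read comes from, which is why the loop above can copy
 * 16384 dwords per plane from a single iovram mapping.
 */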

void
nouveau_hw_save_vga_fonts(struct drm_device *dev, bool save)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	uint8_t misc, gr4, gr5, gr6, seq2, seq4;
	bool graphicsmode;
	unsigned plane;
	void __iomem *iovram;

	if (nv_two_heads(dev))
		NVSetOwner(dev, 0);

	NVSetEnablePalette(dev, 0, true);
	graphicsmode = NVReadVgaAttr(dev, 0, NV_CIO_AR_MODE_INDEX) & 1;
	NVSetEnablePalette(dev, 0, false);

	if (graphicsmode) /* graphics mode => framebuffer => no need to save */
		return;

	NV_INFO(drm, "%sing VGA fonts\n", save ? "Sav" : "Restor");

	/* map first 64KiB of VRAM, holds VGA fonts etc */
	iovram = ioremap(pci_resource_start(dev->pdev, 1), 65536);
	if (!iovram) {
		NV_ERROR(drm, "Failed to map VRAM, "
			 "cannot save/restore VGA fonts.\n");
		return;
	}

	if (nv_two_heads(dev))
		NVBlankScreen(dev, 1, true);
	NVBlankScreen(dev, 0, true);

	/* save control regs */
	misc = NVReadPRMVIO(dev, 0, NV_PRMVIO_MISC__READ);
	seq2 = NVReadVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX);
	seq4 = NVReadVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX);
	gr4 = NVReadVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX);
	gr5 = NVReadVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX);
	gr6 = NVReadVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX);

	NVWritePRMVIO(dev, 0, NV_PRMVIO_MISC__WRITE, 0x67);
	NVWriteVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX, 0x6);
	NVWriteVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX, 0x0);
	NVWriteVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX, 0x5);

	/* store font in planes 0..3 */
	for (plane = 0; plane < 4; plane++)
		nouveau_vga_font_io(dev, iovram, save, plane);

	/* restore control regs */
	NVWritePRMVIO(dev, 0, NV_PRMVIO_MISC__WRITE, misc);
	NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, gr4);
	NVWriteVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX, gr5);
	NVWriteVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX, gr6);
	NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, seq2);
	NVWriteVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX, seq4);

	if (nv_two_heads(dev))
		NVBlankScreen(dev, 1, false);
	NVBlankScreen(dev, 0, false);

	iounmap(iovram);
}

/*
 * mode state save/load
 */

static void
rd_cio_state(struct drm_device *dev, int head,
	     struct nv04_crtc_reg *crtcstate, int index)
{
	crtcstate->CRTC[index] = NVReadVgaCrtc(dev, head, index);
}

static void
wr_cio_state(struct drm_device *dev, int head,
	     struct nv04_crtc_reg *crtcstate, int index)
{
	NVWriteVgaCrtc(dev, head, index, crtcstate->CRTC[index]);
}
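
/*
 * rd_cio_state()/wr_cio_state() simply mirror one indexed CRTC register
 * into or out of the CRTC[] shadow array; the save/load "ext" helpers
 * below are essentially long lists of these calls plus the MMIO-only
 * PCRTC/PRAMDAC registers that have no CIO index.
 */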

static void
nv_save_state_ramdac(struct drm_device *dev, int head,
		     struct nv04_mode_state *state)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
	int i;

	if (nv_device(drm->device)->card_type >= NV_10)
		regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC);

	nouveau_hw_get_pllvals(dev, head ? PLL_VPLL1 : PLL_VPLL0, &regp->pllvals);
	state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT);
	if (nv_two_heads(dev))
		state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
	if (nv_device(drm->device)->chipset == 0x11)
		regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11);

	regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL);

	if (nv_gf4_disp_arch(dev))
		regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630);
	if (nv_device(drm->device)->chipset >= 0x30)
		regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634);

	regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP);
	regp->tv_vtotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL);
	regp->tv_vskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW);
	regp->tv_vsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY);
	regp->tv_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL);
	regp->tv_hskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW);
	regp->tv_hsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY);
	regp->tv_hsync_delay2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2);

	for (i = 0; i < 7; i++) {
		uint32_t ramdac_reg = NV_PRAMDAC_FP_VDISPLAY_END + (i * 4);
		regp->fp_vert_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg);
		regp->fp_horiz_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg + 0x20);
	}

	if (nv_gf4_disp_arch(dev)) {
		regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_FP_DITHER);
		for (i = 0; i < 3; i++) {
			regp->dither_regs[i] = NVReadRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4);
			regp->dither_regs[i + 3] = NVReadRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4);
		}
	}

	regp->fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
	regp->fp_debug_0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0);
	if (!nv_gf4_disp_arch(dev) && head == 0) {
		/* early chips don't allow access to PRAMDAC_TMDS_* without
		 * the head A FPCLK on (nv11 even locks up) */
		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_FP_DEBUG_0, regp->fp_debug_0 &
			      ~NV_PRAMDAC_FP_DEBUG_0_PWRDOWN_FPCLK);
	}
	regp->fp_debug_1 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1);
	regp->fp_debug_2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2);

	regp->fp_margin_color = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR);

	if (nv_gf4_disp_arch(dev))
		regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0);

	if (nv_device(drm->device)->card_type == NV_40) {
		regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20);
		regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24);
		regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34);

		for (i = 0; i < 38; i++)
			regp->ctv_regs[i] = NVReadRAMDAC(dev, head,
							 NV_PRAMDAC_CTV + 4*i);
	}
}

static void
nv_load_state_ramdac(struct drm_device *dev, int head,
		     struct nv04_mode_state *state)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_clock *clk = nouveau_clock(drm->device);
	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
	uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
	int i;

	if (nv_device(drm->device)->card_type >= NV_10)
		NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync);

	clk->pll_prog(clk, pllreg, &regp->pllvals);
	NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);
	if (nv_two_heads(dev))
		NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk);
	if (nv_device(drm->device)->chipset == 0x11)
		NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither);

	NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl);

	if (nv_gf4_disp_arch(dev))
		NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630);
	if (nv_device(drm->device)->chipset >= 0x30)
		NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634);

	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL, regp->tv_vtotal);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW, regp->tv_vskew);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY, regp->tv_vsync_delay);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL, regp->tv_htotal);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW, regp->tv_hskew);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY, regp->tv_hsync_delay);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2, regp->tv_hsync_delay2);

	for (i = 0; i < 7; i++) {
		uint32_t ramdac_reg = NV_PRAMDAC_FP_VDISPLAY_END + (i * 4);

		NVWriteRAMDAC(dev, head, ramdac_reg, regp->fp_vert_regs[i]);
		NVWriteRAMDAC(dev, head, ramdac_reg + 0x20, regp->fp_horiz_regs[i]);
	}

	if (nv_gf4_disp_arch(dev)) {
		NVWriteRAMDAC(dev, head, NV_RAMDAC_FP_DITHER, regp->dither);
		for (i = 0; i < 3; i++) {
			NVWriteRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4, regp->dither_regs[i]);
			NVWriteRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4, regp->dither_regs[i + 3]);
		}
	}

	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, regp->fp_control);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0, regp->fp_debug_0);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regp->fp_debug_1);
	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2, regp->fp_debug_2);

	NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR, regp->fp_margin_color);

	if (nv_gf4_disp_arch(dev))
		NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0);

	if (nv_device(drm->device)->card_type == NV_40) {
		NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20);
		NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24);
		NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34);

		for (i = 0; i < 38; i++)
			NVWriteRAMDAC(dev, head,
				      NV_PRAMDAC_CTV + 4*i, regp->ctv_regs[i]);
	}
}

static void
nv_save_state_vga(struct drm_device *dev, int head,
		  struct nv04_mode_state *state)
{
	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
	int i;

	regp->MiscOutReg = NVReadPRMVIO(dev, head, NV_PRMVIO_MISC__READ);

	for (i = 0; i < 25; i++)
		rd_cio_state(dev, head, regp, i);

	NVSetEnablePalette(dev, head, true);
	for (i = 0; i < 21; i++)
		regp->Attribute[i] = NVReadVgaAttr(dev, head, i);
	NVSetEnablePalette(dev, head, false);

	for (i = 0; i < 9; i++)
		regp->Graphics[i] = NVReadVgaGr(dev, head, i);

	for (i = 0; i < 5; i++)
		regp->Sequencer[i] = NVReadVgaSeq(dev, head, i);
}

static void
nv_load_state_vga(struct drm_device *dev, int head,
		  struct nv04_mode_state *state)
{
	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
	int i;

	NVWritePRMVIO(dev, head, NV_PRMVIO_MISC__WRITE, regp->MiscOutReg);

	for (i = 0; i < 5; i++)
		NVWriteVgaSeq(dev, head, i, regp->Sequencer[i]);

	nv_lock_vga_crtc_base(dev, head, false);
	for (i = 0; i < 25; i++)
		wr_cio_state(dev, head, regp, i);
	nv_lock_vga_crtc_base(dev, head, true);

	for (i = 0; i < 9; i++)
		NVWriteVgaGr(dev, head, i, regp->Graphics[i]);

	NVSetEnablePalette(dev, head, true);
	for (i = 0; i < 21; i++)
		NVWriteVgaAttr(dev, head, i, regp->Attribute[i]);
	NVSetEnablePalette(dev, head, false);
}

static void
nv_save_state_ext(struct drm_device *dev, int head,
		  struct nv04_mode_state *state)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
	int i;

	rd_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_21);

	if (nv_device(drm->device)->card_type >= NV_20)
		rd_cio_state(dev, head, regp, NV_CIO_CRE_47);

	if (nv_device(drm->device)->card_type >= NV_30)
		rd_cio_state(dev, head, regp, 0x9f);

	rd_cio_state(dev, head, regp, NV_CIO_CRE_49);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);

	if (nv_device(drm->device)->card_type >= NV_10) {
		regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830);
		regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834);

		if (nv_device(drm->device)->card_type >= NV_30)
			regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT);

		if (nv_device(drm->device)->card_type == NV_40)
			regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850);

		if (nv_two_heads(dev))
			regp->crtc_eng_ctrl = NVReadCRTC(dev, head, NV_PCRTC_ENGINE_CTRL);
		regp->cursor_cfg = NVReadCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG);
	}

	regp->crtc_cfg = NVReadCRTC(dev, head, NV_PCRTC_CONFIG);

	rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
	rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
	if (nv_device(drm->device)->card_type >= NV_10) {
		rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
		rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
		rd_cio_state(dev, head, regp, NV_CIO_CRE_4B);
		rd_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY);
	}
	/* NV11 and NV20 don't have this, they stop at 0x52. */
	if (nv_gf4_disp_arch(dev)) {
		rd_cio_state(dev, head, regp, NV_CIO_CRE_42);
		rd_cio_state(dev, head, regp, NV_CIO_CRE_53);
		rd_cio_state(dev, head, regp, NV_CIO_CRE_54);

		for (i = 0; i < 0x10; i++)
			regp->CR58[i] = NVReadVgaCrtc5758(dev, head, i);
		rd_cio_state(dev, head, regp, NV_CIO_CRE_59);
		rd_cio_state(dev, head, regp, NV_CIO_CRE_5B);

		rd_cio_state(dev, head, regp, NV_CIO_CRE_85);
		rd_cio_state(dev, head, regp, NV_CIO_CRE_86);
	}

	regp->fb_start = NVReadCRTC(dev, head, NV_PCRTC_START);
}

static void
nv_load_state_ext(struct drm_device *dev, int head,
		  struct nv04_mode_state *state)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_device *device = nv_device(drm->device);
	struct nouveau_timer *ptimer = nouveau_timer(device);
	struct nv04_crtc_reg *regp = &state->crtc_reg[head];
	uint32_t reg900;
	int i;

	if (nv_device(drm->device)->card_type >= NV_10) {
		if (nv_two_heads(dev))
			/* setting ENGINE_CTRL (EC) *must* come before
			 * CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in
			 * EC that should not be overwritten by writing stale EC
			 */
			NVWriteCRTC(dev, head, NV_PCRTC_ENGINE_CTRL, regp->crtc_eng_ctrl);

		nv_wr32(device, NV_PVIDEO_STOP, 1);
		nv_wr32(device, NV_PVIDEO_INTR_EN, 0);
		nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(0), 0);
		nv_wr32(device, NV_PVIDEO_OFFSET_BUFF(1), 0);
		nv_wr32(device, NV_PVIDEO_LIMIT(0), 0); //drm->fb_available_size - 1);
		nv_wr32(device, NV_PVIDEO_LIMIT(1), 0); //drm->fb_available_size - 1);
		nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(0), 0); //drm->fb_available_size - 1);
		nv_wr32(device, NV_PVIDEO_UVPLANE_LIMIT(1), 0); //drm->fb_available_size - 1);
		nv_wr32(device, NV_PBUS_POWERCTRL_2, 0);

		NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg);
		NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830);
		NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834);

		if (nv_device(drm->device)->card_type >= NV_30)
			NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext);

		if (nv_device(drm->device)->card_type == NV_40) {
			NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850);

			reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900);
			if (regp->crtc_cfg == NV10_PCRTC_CONFIG_START_ADDRESS_HSYNC)
				NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000);
			else
				NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000);
		}
	}

	NVWriteCRTC(dev, head, NV_PCRTC_CONFIG, regp->crtc_cfg);

	wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);

	if (nv_device(drm->device)->card_type >= NV_20)
		wr_cio_state(dev, head, regp, NV_CIO_CRE_47);

	if (nv_device(drm->device)->card_type >= NV_30)
		wr_cio_state(dev, head, regp, 0x9f);

	wr_cio_state(dev, head, regp, NV_CIO_CRE_49);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
	if (nv_device(drm->device)->card_type == NV_40)
		nv_fix_nv40_hw_cursor(dev, head);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);

	wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
	wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
	if (nv_device(drm->device)->card_type >= NV_10) {
		wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
		wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
		wr_cio_state(dev, head, regp, NV_CIO_CRE_4B);
		wr_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY);
	}
	/* NV11 and NV20 stop at 0x52. */
	if (nv_gf4_disp_arch(dev)) {
		if (nv_device(drm->device)->card_type == NV_10) {
			/* Not waiting for vertical retrace before modifying
			   CRE_53/CRE_54 causes lockups. */
			nouveau_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
			nouveau_timer_wait_eq(ptimer, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
		}

		wr_cio_state(dev, head, regp, NV_CIO_CRE_42);
		wr_cio_state(dev, head, regp, NV_CIO_CRE_53);
		wr_cio_state(dev, head, regp, NV_CIO_CRE_54);

		for (i = 0; i < 0x10; i++)
			NVWriteVgaCrtc5758(dev, head, i, regp->CR58[i]);
		wr_cio_state(dev, head, regp, NV_CIO_CRE_59);
		wr_cio_state(dev, head, regp, NV_CIO_CRE_5B);

		wr_cio_state(dev, head, regp, NV_CIO_CRE_85);
		wr_cio_state(dev, head, regp, NV_CIO_CRE_86);
	}

	NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start);
}

static void
nv_save_state_palette(struct drm_device *dev, int head,
		      struct nv04_mode_state *state)
{
	struct nouveau_device *device = nouveau_dev(dev);
	int head_offset = head * NV_PRMDIO_SIZE, i;

	nv_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
		NV_PRMDIO_PIXEL_MASK_MASK);
	nv_wr08(device, NV_PRMDIO_READ_MODE_ADDRESS + head_offset, 0x0);

	for (i = 0; i < 768; i++) {
		state->crtc_reg[head].DAC[i] = nv_rd08(device,
				NV_PRMDIO_PALETTE_DATA + head_offset);
	}

	NVSetEnablePalette(dev, head, false);
}

void
nouveau_hw_load_state_palette(struct drm_device *dev, int head,
			      struct nv04_mode_state *state)
{
	struct nouveau_device *device = nouveau_dev(dev);
	int head_offset = head * NV_PRMDIO_SIZE, i;

	nv_wr08(device, NV_PRMDIO_PIXEL_MASK + head_offset,
		NV_PRMDIO_PIXEL_MASK_MASK);
	nv_wr08(device, NV_PRMDIO_WRITE_MODE_ADDRESS + head_offset, 0x0);

	for (i = 0; i < 768; i++) {
		nv_wr08(device, NV_PRMDIO_PALETTE_DATA + head_offset,
			state->crtc_reg[head].DAC[i]);
	}

	NVSetEnablePalette(dev, head, false);
}

void nouveau_hw_save_state(struct drm_device *dev, int head,
			   struct nv04_mode_state *state)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (nv_device(drm->device)->chipset == 0x11)
		/* NB: no attempt is made to restore the bad pll later on */
		nouveau_hw_fix_bad_vpll(dev, head);
	nv_save_state_ramdac(dev, head, state);
	nv_save_state_vga(dev, head, state);
	nv_save_state_palette(dev, head, state);
	nv_save_state_ext(dev, head, state);
}

void nouveau_hw_load_state(struct drm_device *dev, int head,
			   struct nv04_mode_state *state)
{
	NVVgaProtect(dev, head, true);
	nv_load_state_ramdac(dev, head, state);
	nv_load_state_ext(dev, head, state);
	nouveau_hw_load_state_palette(dev, head, state);
	nv_load_state_vga(dev, head, state);
	NVVgaProtect(dev, head, false);
}