nouveau_hw.c

/*
 * Copyright 2006 Dave Airlie
 * Copyright 2007 Maarten Maathuis
 * Copyright 2007-2009 Stuart Bennett
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
 * OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_hw.h"

#define CHIPSET_NFORCE		0x01a0
#define CHIPSET_NFORCE2		0x01f0

/*
 * misc hw access wrappers/control functions
 */

void
NVWriteVgaSeq(struct drm_device *dev, int head, uint8_t index, uint8_t value)
{
        NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index);
        NVWritePRMVIO(dev, head, NV_PRMVIO_SR, value);
}

uint8_t
NVReadVgaSeq(struct drm_device *dev, int head, uint8_t index)
{
        NVWritePRMVIO(dev, head, NV_PRMVIO_SRX, index);
        return NVReadPRMVIO(dev, head, NV_PRMVIO_SR);
}

void
NVWriteVgaGr(struct drm_device *dev, int head, uint8_t index, uint8_t value)
{
        NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index);
        NVWritePRMVIO(dev, head, NV_PRMVIO_GX, value);
}

uint8_t
NVReadVgaGr(struct drm_device *dev, int head, uint8_t index)
{
        NVWritePRMVIO(dev, head, NV_PRMVIO_GRX, index);
        return NVReadPRMVIO(dev, head, NV_PRMVIO_GX);
}

/* CR44 takes values 0 (head A), 3 (head B) and 4 (heads tied)
 * it affects only the 8 bit vga io regs, which we access using mmio at
 * 0xc{0,2}3c*, 0x60{1,3}3*, and 0x68{1,3}3d*
 * in general, the set value of cr44 does not matter: reg access works as
 * expected and values can be set for the appropriate head by using a 0x2000
 * offset as required
 * however:
 * a) pre nv40, the head B range of PRMVIO regs at 0xc23c* was not exposed and
 *    cr44 must be set to 0 or 3 for accessing values on the correct head
 *    through the common 0xc03c* addresses
 * b) in tied mode (4) head B is programmed to the values set on head A, and
 *    access using the head B addresses can have strange results, ergo we leave
 *    tied mode in init once we know to what cr44 should be restored on exit
 *
 * the owner parameter is slightly abused:
 * 0 and 1 are treated as head values and so the set value is (owner * 3)
 * other values are treated as literal values to set
 */
void
NVSetOwner(struct drm_device *dev, int owner)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        if (owner == 1)
                owner *= 3;

        if (dev_priv->chipset == 0x11) {
                /* This might seem stupid, but the blob does it and
                 * omitting it often locks the system up.
                 */
                NVReadVgaCrtc(dev, 0, NV_CIO_SR_LOCK_INDEX);
                NVReadVgaCrtc(dev, 1, NV_CIO_SR_LOCK_INDEX);
        }

        /* CR44 is always changed on CRTC0 */
        NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_44, owner);

        if (dev_priv->chipset == 0x11) {        /* set me harder */
                NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
                NVWriteVgaCrtc(dev, 0, NV_CIO_CRE_2E, owner);
        }
}

void
NVBlankScreen(struct drm_device *dev, int head, bool blank)
{
        unsigned char seq1;

        if (nv_two_heads(dev))
                NVSetOwner(dev, head);

        seq1 = NVReadVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX);

        NVVgaSeqReset(dev, head, true);
        if (blank)
                NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 | 0x20);
        else
                NVWriteVgaSeq(dev, head, NV_VIO_SR_CLOCK_INDEX, seq1 & ~0x20);
        NVVgaSeqReset(dev, head, false);
}

/*
 * PLL setting
 */

static int
powerctrl_1_shift(int chip_version, int reg)
{
        int shift = -4;

        if (chip_version < 0x17 || chip_version == 0x1a || chip_version == 0x20)
                return shift;

        switch (reg) {
        case NV_RAMDAC_VPLL2:
                shift += 4;
        case NV_PRAMDAC_VPLL_COEFF:
                shift += 4;
        case NV_PRAMDAC_MPLL_COEFF:
                shift += 4;
        case NV_PRAMDAC_NVPLL_COEFF:
                shift += 4;
        }

        /*
         * the shift for vpll regs is only used for nv3x chips with a single
         * stage pll
         */
        if (shift > 4 && (chip_version < 0x32 || chip_version == 0x35 ||
                          chip_version == 0x36 || chip_version >= 0x40))
                shift = -4;

        return shift;
}
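
/*
 * For reference: the case statements above deliberately fall through, so the
 * shift accumulates to NVPLL -> 0, MPLL -> 4, VPLL -> 8 and VPLL2 -> 12; the
 * VPLL values are then discarded again except on the single-stage nv3x chips
 * noted in the comment above.  A negative return tells the setPLL_* callers
 * below to leave NV_PBUS_POWERCTRL_1 untouched.
 */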

static void
setPLL_single(struct drm_device *dev, uint32_t reg, struct nouveau_pll_vals *pv)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        int chip_version = dev_priv->vbios->chip_version;
        uint32_t oldpll = NVReadRAMDAC(dev, 0, reg);
        int oldN = (oldpll >> 8) & 0xff, oldM = oldpll & 0xff;
        uint32_t pll = (oldpll & 0xfff80000) | pv->log2P << 16 | pv->NM1;
        uint32_t saved_powerctrl_1 = 0;
        int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg);

        if (oldpll == pll)
                return; /* already set */

        if (shift_powerctrl_1 >= 0) {
                saved_powerctrl_1 = nvReadMC(dev, NV_PBUS_POWERCTRL_1);
                nvWriteMC(dev, NV_PBUS_POWERCTRL_1,
                          (saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
                          1 << shift_powerctrl_1);
        }

        if (oldM && pv->M1 && (oldN / oldM < pv->N1 / pv->M1))
                /* upclock -- write new post divider first */
                NVWriteRAMDAC(dev, 0, reg, pv->log2P << 16 | (oldpll & 0xffff));
        else
                /* downclock -- write new NM first */
                NVWriteRAMDAC(dev, 0, reg, (oldpll & 0xffff0000) | pv->NM1);

        if (chip_version < 0x17 && chip_version != 0x11)
                /* wait a bit on older chips */
                msleep(64);
        NVReadRAMDAC(dev, 0, reg);

        /* then write the other half as well */
        NVWriteRAMDAC(dev, 0, reg, pll);

        if (shift_powerctrl_1 >= 0)
                nvWriteMC(dev, NV_PBUS_POWERCTRL_1, saved_powerctrl_1);
}

static uint32_t
new_ramdac580(uint32_t reg1, bool ss, uint32_t ramdac580)
{
        bool head_a = (reg1 == NV_PRAMDAC_VPLL_COEFF);

        if (ss) /* single stage pll mode */
                ramdac580 |= head_a ? NV_RAMDAC_580_VPLL1_ACTIVE :
                                      NV_RAMDAC_580_VPLL2_ACTIVE;
        else
                ramdac580 &= head_a ? ~NV_RAMDAC_580_VPLL1_ACTIVE :
                                      ~NV_RAMDAC_580_VPLL2_ACTIVE;

        return ramdac580;
}

static void
setPLL_double_highregs(struct drm_device *dev, uint32_t reg1,
                       struct nouveau_pll_vals *pv)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        int chip_version = dev_priv->vbios->chip_version;
        bool nv3035 = chip_version == 0x30 || chip_version == 0x35;
        uint32_t reg2 = reg1 + ((reg1 == NV_RAMDAC_VPLL2) ? 0x5c : 0x70);
        uint32_t oldpll1 = NVReadRAMDAC(dev, 0, reg1);
        uint32_t oldpll2 = !nv3035 ? NVReadRAMDAC(dev, 0, reg2) : 0;
        uint32_t pll1 = (oldpll1 & 0xfff80000) | pv->log2P << 16 | pv->NM1;
        uint32_t pll2 = (oldpll2 & 0x7fff0000) | 1 << 31 | pv->NM2;
        uint32_t oldramdac580 = 0, ramdac580 = 0;
        bool single_stage = !pv->NM2 || pv->N2 == pv->M2;      /* nv41+ only */
        uint32_t saved_powerctrl_1 = 0, savedc040 = 0;
        int shift_powerctrl_1 = powerctrl_1_shift(chip_version, reg1);

        /* model specific additions to generic pll1 and pll2 set up above */
        if (nv3035) {
                pll1 = (pll1 & 0xfcc7ffff) | (pv->N2 & 0x18) << 21 |
                       (pv->N2 & 0x7) << 19 | 8 << 4 | (pv->M2 & 7) << 4;
                pll2 = 0;
        }
        if (chip_version > 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) { /* !nv40 */
                oldramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580);
                ramdac580 = new_ramdac580(reg1, single_stage, oldramdac580);
                if (oldramdac580 != ramdac580)
                        oldpll1 = ~0;   /* force mismatch */
                if (single_stage)
                        /* magic value used by nvidia in single stage mode */
                        pll2 |= 0x011f;
        }
        if (chip_version > 0x70)
                /* magic bits set by the blob (but not the bios) on g71-73 */
                pll1 = (pll1 & 0x7fffffff) | (single_stage ? 0x4 : 0xc) << 28;

        if (oldpll1 == pll1 && oldpll2 == pll2)
                return; /* already set */

        if (shift_powerctrl_1 >= 0) {
                saved_powerctrl_1 = nvReadMC(dev, NV_PBUS_POWERCTRL_1);
                nvWriteMC(dev, NV_PBUS_POWERCTRL_1,
                          (saved_powerctrl_1 & ~(0xf << shift_powerctrl_1)) |
                          1 << shift_powerctrl_1);
        }

        if (chip_version >= 0x40) {
                int shift_c040 = 14;

                /* the cases deliberately fall through, accumulating the shift */
                switch (reg1) {
                case NV_PRAMDAC_MPLL_COEFF:
                        shift_c040 += 2;
                case NV_PRAMDAC_NVPLL_COEFF:
                        shift_c040 += 2;
                case NV_RAMDAC_VPLL2:
                        shift_c040 += 2;
                case NV_PRAMDAC_VPLL_COEFF:
                        shift_c040 += 2;
                }

                savedc040 = nvReadMC(dev, 0xc040);
                if (shift_c040 != 14)
                        nvWriteMC(dev, 0xc040, savedc040 & ~(3 << shift_c040));
        }

        if (oldramdac580 != ramdac580)
                NVWriteRAMDAC(dev, 0, NV_PRAMDAC_580, ramdac580);

        if (!nv3035)
                NVWriteRAMDAC(dev, 0, reg2, pll2);
        NVWriteRAMDAC(dev, 0, reg1, pll1);

        if (shift_powerctrl_1 >= 0)
                nvWriteMC(dev, NV_PBUS_POWERCTRL_1, saved_powerctrl_1);
        if (chip_version >= 0x40)
                nvWriteMC(dev, 0xc040, savedc040);
}

static void
setPLL_double_lowregs(struct drm_device *dev, uint32_t NMNMreg,
                      struct nouveau_pll_vals *pv)
{
        /* When setting PLLs, there is a merry game of disabling and enabling
         * various bits of hardware during the process. This function is a
         * synthesis of six nv4x traces, nearly each card doing a subtly
         * different thing. With luck all the necessary bits for each card are
         * combined herein. Without luck it deviates from each card's formula
         * so as to not work on any :)
         */

        uint32_t Preg = NMNMreg - 4;
        bool mpll = Preg == 0x4020;
        uint32_t oldPval = nvReadMC(dev, Preg);
        uint32_t NMNM = pv->NM2 << 16 | pv->NM1;
        uint32_t Pval = (oldPval & (mpll ? ~(0x11 << 16) : ~(1 << 16))) |
                        0xc << 28 | pv->log2P << 16;
        uint32_t saved4600 = 0;
        /* some cards have different maskc040s */
        uint32_t maskc040 = ~(3 << 14), savedc040;
        bool single_stage = !pv->NM2 || pv->N2 == pv->M2;

        if (nvReadMC(dev, NMNMreg) == NMNM && (oldPval & 0xc0070000) == Pval)
                return;

        if (Preg == 0x4000)
                maskc040 = ~0x333;
        if (Preg == 0x4058)
                maskc040 = ~(0xc << 24);

        if (mpll) {
                struct pll_lims pll_lim;
                uint8_t Pval2;

                if (get_pll_limits(dev, Preg, &pll_lim))
                        return;

                Pval2 = pv->log2P + pll_lim.log2p_bias;
                if (Pval2 > pll_lim.max_log2p)
                        Pval2 = pll_lim.max_log2p;
                Pval |= 1 << 28 | Pval2 << 20;

                saved4600 = nvReadMC(dev, 0x4600);
                nvWriteMC(dev, 0x4600, saved4600 | 8 << 28);
        }
        if (single_stage)
                Pval |= mpll ? 1 << 12 : 1 << 8;

        nvWriteMC(dev, Preg, oldPval | 1 << 28);
        nvWriteMC(dev, Preg, Pval & ~(4 << 28));
        if (mpll) {
                Pval |= 8 << 20;
                nvWriteMC(dev, 0x4020, Pval & ~(0xc << 28));
                nvWriteMC(dev, 0x4038, Pval & ~(0xc << 28));
        }

        savedc040 = nvReadMC(dev, 0xc040);
        nvWriteMC(dev, 0xc040, savedc040 & maskc040);

        nvWriteMC(dev, NMNMreg, NMNM);
        if (NMNMreg == 0x4024)
                nvWriteMC(dev, 0x403c, NMNM);

        nvWriteMC(dev, Preg, Pval);
        if (mpll) {
                Pval &= ~(8 << 20);
                nvWriteMC(dev, 0x4020, Pval);
                nvWriteMC(dev, 0x4038, Pval);
                nvWriteMC(dev, 0x4600, saved4600);
        }

        nvWriteMC(dev, 0xc040, savedc040);

        if (mpll) {
                nvWriteMC(dev, 0x4020, Pval & ~(1 << 28));
                nvWriteMC(dev, 0x4038, Pval & ~(1 << 28));
        }
}

void
nouveau_hw_setpll(struct drm_device *dev, uint32_t reg1,
                  struct nouveau_pll_vals *pv)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        int cv = dev_priv->vbios->chip_version;

        if (cv == 0x30 || cv == 0x31 || cv == 0x35 || cv == 0x36 ||
            cv >= 0x40) {
                if (reg1 > 0x405c)
                        setPLL_double_highregs(dev, reg1, pv);
                else
                        setPLL_double_lowregs(dev, reg1, pv);
        } else
                setPLL_single(dev, reg1, pv);
}
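
/*
 * In other words, on the chips taking the double-register paths above, the
 * RAMDAC VPLL coefficient registers (reg1 above 0x405c) are programmed via
 * setPLL_double_highregs, while the PLLs in the 0x4000 range -- e.g. the nv40
 * core PLL at 0x4000 and memory PLL at 0x4020 listed in nv40_regs below -- go
 * through setPLL_double_lowregs.
 */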

/*
 * PLL getting
 */

static void
nouveau_hw_decode_pll(struct drm_device *dev, uint32_t reg1, uint32_t pll1,
                      uint32_t pll2, struct nouveau_pll_vals *pllvals)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        /* to force parsing as single stage (i.e. nv40 vplls) pass pll2 as 0 */

        /* log2P is & 0x7 as never more than 7, and nv30/35 only uses 3 bits */
        pllvals->log2P = (pll1 >> 16) & 0x7;
        pllvals->N2 = pllvals->M2 = 1;

        if (reg1 <= 0x405c) {
                pllvals->NM1 = pll2 & 0xffff;
                /* single stage NVPLL and VPLLs use 1 << 8, MPLL uses 1 << 12 */
                if (!(pll1 & 0x1100))
                        pllvals->NM2 = pll2 >> 16;
        } else {
                pllvals->NM1 = pll1 & 0xffff;
                if (nv_two_reg_pll(dev) && pll2 & NV31_RAMDAC_ENABLE_VCO2)
                        pllvals->NM2 = pll2 & 0xffff;
                else if (dev_priv->chipset == 0x30 || dev_priv->chipset == 0x35) {
                        pllvals->M1 &= 0xf; /* only 4 bits */
                        if (pll1 & NV30_RAMDAC_ENABLE_VCO2) {
                                pllvals->M2 = (pll1 >> 4) & 0x7;
                                pllvals->N2 = ((pll1 >> 21) & 0x18) |
                                              ((pll1 >> 19) & 0x7);
                        }
                }
        }
}

int
nouveau_hw_get_pllvals(struct drm_device *dev, enum pll_types plltype,
                       struct nouveau_pll_vals *pllvals)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        const uint32_t nv04_regs[MAX_PLL_TYPES] = { NV_PRAMDAC_NVPLL_COEFF,
                                                    NV_PRAMDAC_MPLL_COEFF,
                                                    NV_PRAMDAC_VPLL_COEFF,
                                                    NV_RAMDAC_VPLL2 };
        const uint32_t nv40_regs[MAX_PLL_TYPES] = { 0x4000,
                                                    0x4020,
                                                    NV_PRAMDAC_VPLL_COEFF,
                                                    NV_RAMDAC_VPLL2 };
        uint32_t reg1, pll1, pll2 = 0;
        struct pll_lims pll_lim;
        int ret;

        if (dev_priv->card_type < NV_40)
                reg1 = nv04_regs[plltype];
        else
                reg1 = nv40_regs[plltype];

        pll1 = nvReadMC(dev, reg1);

        if (reg1 <= 0x405c)
                pll2 = nvReadMC(dev, reg1 + 4);
        else if (nv_two_reg_pll(dev)) {
                uint32_t reg2 = reg1 + (reg1 == NV_RAMDAC_VPLL2 ? 0x5c : 0x70);

                pll2 = nvReadMC(dev, reg2);
        }

        if (dev_priv->card_type == 0x40 && reg1 >= NV_PRAMDAC_VPLL_COEFF) {
                uint32_t ramdac580 = NVReadRAMDAC(dev, 0, NV_PRAMDAC_580);

                /* check whether vpll has been forced into single stage mode */
                if (reg1 == NV_PRAMDAC_VPLL_COEFF) {
                        if (ramdac580 & NV_RAMDAC_580_VPLL1_ACTIVE)
                                pll2 = 0;
                } else
                        if (ramdac580 & NV_RAMDAC_580_VPLL2_ACTIVE)
                                pll2 = 0;
        }

        nouveau_hw_decode_pll(dev, reg1, pll1, pll2, pllvals);

        ret = get_pll_limits(dev, plltype, &pll_lim);
        if (ret)
                return ret;

        pllvals->refclk = pll_lim.refclk;

        return 0;
}

int
nouveau_hw_pllvals_to_clk(struct nouveau_pll_vals *pv)
{
        /* Avoid divide by zero if called at an inappropriate time */
        if (!pv->M1 || !pv->M2)
                return 0;

        return pv->N1 * pv->N2 * pv->refclk / (pv->M1 * pv->M2) >> pv->log2P;
}

int
nouveau_hw_get_clock(struct drm_device *dev, enum pll_types plltype)
{
        struct nouveau_pll_vals pllvals;

        if (plltype == MPLL && (dev->pci_device & 0x0ff0) == CHIPSET_NFORCE) {
                uint32_t mpllP;

                pci_read_config_dword(pci_get_bus_and_slot(0, 3), 0x6c, &mpllP);
                if (!mpllP)
                        mpllP = 4;

                return 400000 / mpllP;
        } else
        if (plltype == MPLL && (dev->pci_device & 0xff0) == CHIPSET_NFORCE2) {
                uint32_t clock;

                pci_read_config_dword(pci_get_bus_and_slot(0, 5), 0x4c, &clock);
                return clock;
        }

        nouveau_hw_get_pllvals(dev, plltype, &pllvals);

        return nouveau_hw_pllvals_to_clk(&pllvals);
}

static void
nouveau_hw_fix_bad_vpll(struct drm_device *dev, int head)
{
        /* the vpll on an unused head can come up with a random value, way
         * beyond the pll limits.  for some reason this causes the chip to
         * lock up when reading the dac palette regs, so set a valid pll here
         * when such a condition is detected.  only seen on nv11 to date
         */

        struct pll_lims pll_lim;
        struct nouveau_pll_vals pv;
        uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;

        if (get_pll_limits(dev, head ? VPLL2 : VPLL1, &pll_lim))
                return;
        nouveau_hw_get_pllvals(dev, head ? VPLL2 : VPLL1, &pv);

        if (pv.M1 >= pll_lim.vco1.min_m && pv.M1 <= pll_lim.vco1.max_m &&
            pv.N1 >= pll_lim.vco1.min_n && pv.N1 <= pll_lim.vco1.max_n &&
            pv.log2P <= pll_lim.max_log2p)
                return;

        NV_WARN(dev, "VPLL %d outwith limits, attempting to fix\n", head + 1);

        /* set lowest clock within static limits */
        pv.M1 = pll_lim.vco1.max_m;
        pv.N1 = pll_lim.vco1.min_n;
        pv.log2P = pll_lim.max_usable_log2p;
        nouveau_hw_setpll(dev, pllreg, &pv);
}

/*
 * vga font save/restore
 */

static void nouveau_vga_font_io(struct drm_device *dev,
                                void __iomem *iovram,
                                bool save, unsigned plane)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        unsigned i;

        NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, 1 << plane);
        NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, plane);
        for (i = 0; i < 16384; i++) {
                if (save) {
                        dev_priv->saved_vga_font[plane][i] =
                                        ioread32_native(iovram + i * 4);
                } else {
                        iowrite32_native(dev_priv->saved_vga_font[plane][i],
                                                        iovram + i * 4);
                }
        }
}

void
nouveau_hw_save_vga_fonts(struct drm_device *dev, bool save)
{
        uint8_t misc, gr4, gr5, gr6, seq2, seq4;
        bool graphicsmode;
        unsigned plane;
        void __iomem *iovram;

        if (nv_two_heads(dev))
                NVSetOwner(dev, 0);

        NVSetEnablePalette(dev, 0, true);
        graphicsmode = NVReadVgaAttr(dev, 0, NV_CIO_AR_MODE_INDEX) & 1;
        NVSetEnablePalette(dev, 0, false);

        if (graphicsmode) /* graphics mode => framebuffer => no need to save */
                return;

        NV_INFO(dev, "%sing VGA fonts\n", save ? "Sav" : "Restor");

        /* map first 64KiB of VRAM, holds VGA fonts etc */
        iovram = ioremap(pci_resource_start(dev->pdev, 1), 65536);
        if (!iovram) {
                NV_ERROR(dev, "Failed to map VRAM, "
                                        "cannot save/restore VGA fonts.\n");
                return;
        }

        if (nv_two_heads(dev))
                NVBlankScreen(dev, 1, true);
        NVBlankScreen(dev, 0, true);

        /* save control regs */
        misc = NVReadPRMVIO(dev, 0, NV_PRMVIO_MISC__READ);
        seq2 = NVReadVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX);
        seq4 = NVReadVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX);
        gr4 = NVReadVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX);
        gr5 = NVReadVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX);
        gr6 = NVReadVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX);

        NVWritePRMVIO(dev, 0, NV_PRMVIO_MISC__WRITE, 0x67);
        NVWriteVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX, 0x6);
        NVWriteVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX, 0x0);
        NVWriteVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX, 0x5);

        /* store font in planes 0..3 */
        for (plane = 0; plane < 4; plane++)
                nouveau_vga_font_io(dev, iovram, save, plane);

        /* restore control regs */
        NVWritePRMVIO(dev, 0, NV_PRMVIO_MISC__WRITE, misc);
        NVWriteVgaGr(dev, 0, NV_VIO_GX_READ_MAP_INDEX, gr4);
        NVWriteVgaGr(dev, 0, NV_VIO_GX_MODE_INDEX, gr5);
        NVWriteVgaGr(dev, 0, NV_VIO_GX_MISC_INDEX, gr6);
        NVWriteVgaSeq(dev, 0, NV_VIO_SR_PLANE_MASK_INDEX, seq2);
        NVWriteVgaSeq(dev, 0, NV_VIO_SR_MEM_MODE_INDEX, seq4);

        if (nv_two_heads(dev))
                NVBlankScreen(dev, 1, false);
        NVBlankScreen(dev, 0, false);

        iounmap(iovram);
}

/*
 * mode state save/load
 */

static void
rd_cio_state(struct drm_device *dev, int head,
             struct nv04_crtc_reg *crtcstate, int index)
{
        crtcstate->CRTC[index] = NVReadVgaCrtc(dev, head, index);
}

static void
wr_cio_state(struct drm_device *dev, int head,
             struct nv04_crtc_reg *crtcstate, int index)
{
        NVWriteVgaCrtc(dev, head, index, crtcstate->CRTC[index]);
}

static void
nv_save_state_ramdac(struct drm_device *dev, int head,
                     struct nv04_mode_state *state)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv04_crtc_reg *regp = &state->crtc_reg[head];
        int i;

        if (dev_priv->card_type >= NV_10)
                regp->nv10_cursync = NVReadRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC);

        nouveau_hw_get_pllvals(dev, head ? VPLL2 : VPLL1, &regp->pllvals);
        state->pllsel = NVReadRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT);
        if (nv_two_heads(dev))
                state->sel_clk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK);
        if (dev_priv->chipset == 0x11)
                regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11);

        regp->ramdac_gen_ctrl = NVReadRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL);

        if (nv_gf4_disp_arch(dev))
                regp->ramdac_630 = NVReadRAMDAC(dev, head, NV_PRAMDAC_630);
        if (dev_priv->chipset >= 0x30)
                regp->ramdac_634 = NVReadRAMDAC(dev, head, NV_PRAMDAC_634);

        regp->tv_setup = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP);
        regp->tv_vtotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL);
        regp->tv_vskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW);
        regp->tv_vsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY);
        regp->tv_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL);
        regp->tv_hskew = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW);
        regp->tv_hsync_delay = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY);
        regp->tv_hsync_delay2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2);

        for (i = 0; i < 7; i++) {
                uint32_t ramdac_reg = NV_PRAMDAC_FP_VDISPLAY_END + (i * 4);

                regp->fp_vert_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg);
                regp->fp_horiz_regs[i] = NVReadRAMDAC(dev, head, ramdac_reg + 0x20);
        }

        if (nv_gf4_disp_arch(dev)) {
                regp->dither = NVReadRAMDAC(dev, head, NV_RAMDAC_FP_DITHER);
                for (i = 0; i < 3; i++) {
                        regp->dither_regs[i] = NVReadRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4);
                        regp->dither_regs[i + 3] = NVReadRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4);
                }
        }

        regp->fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL);
        regp->fp_debug_0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0);
        if (!nv_gf4_disp_arch(dev) && head == 0) {
                /* early chips don't allow access to PRAMDAC_TMDS_* without
                 * the head A FPCLK on (nv11 even locks up) */
                NVWriteRAMDAC(dev, 0, NV_PRAMDAC_FP_DEBUG_0, regp->fp_debug_0 &
                              ~NV_PRAMDAC_FP_DEBUG_0_PWRDOWN_FPCLK);
        }
        regp->fp_debug_1 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1);
        regp->fp_debug_2 = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2);

        regp->fp_margin_color = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR);

        if (nv_gf4_disp_arch(dev))
                regp->ramdac_8c0 = NVReadRAMDAC(dev, head, NV_PRAMDAC_8C0);

        if (dev_priv->card_type == NV_40) {
                regp->ramdac_a20 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A20);
                regp->ramdac_a24 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A24);
                regp->ramdac_a34 = NVReadRAMDAC(dev, head, NV_PRAMDAC_A34);

                for (i = 0; i < 38; i++)
                        regp->ctv_regs[i] = NVReadRAMDAC(dev, head,
                                                         NV_PRAMDAC_CTV + 4*i);
        }
}

static void
nv_load_state_ramdac(struct drm_device *dev, int head,
                     struct nv04_mode_state *state)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv04_crtc_reg *regp = &state->crtc_reg[head];
        uint32_t pllreg = head ? NV_RAMDAC_VPLL2 : NV_PRAMDAC_VPLL_COEFF;
        int i;

        if (dev_priv->card_type >= NV_10)
                NVWriteRAMDAC(dev, head, NV_RAMDAC_NV10_CURSYNC, regp->nv10_cursync);

        nouveau_hw_setpll(dev, pllreg, &regp->pllvals);
        NVWriteRAMDAC(dev, 0, NV_PRAMDAC_PLL_COEFF_SELECT, state->pllsel);
        if (nv_two_heads(dev))
                NVWriteRAMDAC(dev, 0, NV_PRAMDAC_SEL_CLK, state->sel_clk);
        if (dev_priv->chipset == 0x11)
                NVWriteRAMDAC(dev, head, NV_RAMDAC_DITHER_NV11, regp->dither);

        NVWriteRAMDAC(dev, head, NV_PRAMDAC_GENERAL_CONTROL, regp->ramdac_gen_ctrl);

        if (nv_gf4_disp_arch(dev))
                NVWriteRAMDAC(dev, head, NV_PRAMDAC_630, regp->ramdac_630);
        if (dev_priv->chipset >= 0x30)
                NVWriteRAMDAC(dev, head, NV_PRAMDAC_634, regp->ramdac_634);

        NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_SETUP, regp->tv_setup);
        NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VTOTAL, regp->tv_vtotal);
        NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSKEW, regp->tv_vskew);
        NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_VSYNC_DELAY, regp->tv_vsync_delay);
        NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HTOTAL, regp->tv_htotal);
        NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSKEW, regp->tv_hskew);
        NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY, regp->tv_hsync_delay);
        NVWriteRAMDAC(dev, head, NV_PRAMDAC_TV_HSYNC_DELAY2, regp->tv_hsync_delay2);

        for (i = 0; i < 7; i++) {
                uint32_t ramdac_reg = NV_PRAMDAC_FP_VDISPLAY_END + (i * 4);

                NVWriteRAMDAC(dev, head, ramdac_reg, regp->fp_vert_regs[i]);
                NVWriteRAMDAC(dev, head, ramdac_reg + 0x20, regp->fp_horiz_regs[i]);
        }

        if (nv_gf4_disp_arch(dev)) {
                NVWriteRAMDAC(dev, head, NV_RAMDAC_FP_DITHER, regp->dither);
                for (i = 0; i < 3; i++) {
                        NVWriteRAMDAC(dev, head, NV_PRAMDAC_850 + i * 4, regp->dither_regs[i]);
                        NVWriteRAMDAC(dev, head, NV_PRAMDAC_85C + i * 4, regp->dither_regs[i + 3]);
                }
        }

        NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, regp->fp_control);
        NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_0, regp->fp_debug_0);
        NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_1, regp->fp_debug_1);
        NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_DEBUG_2, regp->fp_debug_2);

        NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_MARGIN_COLOR, regp->fp_margin_color);

        if (nv_gf4_disp_arch(dev))
                NVWriteRAMDAC(dev, head, NV_PRAMDAC_8C0, regp->ramdac_8c0);

        if (dev_priv->card_type == NV_40) {
                NVWriteRAMDAC(dev, head, NV_PRAMDAC_A20, regp->ramdac_a20);
                NVWriteRAMDAC(dev, head, NV_PRAMDAC_A24, regp->ramdac_a24);
                NVWriteRAMDAC(dev, head, NV_PRAMDAC_A34, regp->ramdac_a34);

                for (i = 0; i < 38; i++)
                        NVWriteRAMDAC(dev, head,
                                      NV_PRAMDAC_CTV + 4*i, regp->ctv_regs[i]);
        }
}

static void
nv_save_state_vga(struct drm_device *dev, int head,
                  struct nv04_mode_state *state)
{
        struct nv04_crtc_reg *regp = &state->crtc_reg[head];
        int i;

        regp->MiscOutReg = NVReadPRMVIO(dev, head, NV_PRMVIO_MISC__READ);

        for (i = 0; i < 25; i++)
                rd_cio_state(dev, head, regp, i);

        NVSetEnablePalette(dev, head, true);
        for (i = 0; i < 21; i++)
                regp->Attribute[i] = NVReadVgaAttr(dev, head, i);
        NVSetEnablePalette(dev, head, false);

        for (i = 0; i < 9; i++)
                regp->Graphics[i] = NVReadVgaGr(dev, head, i);

        for (i = 0; i < 5; i++)
                regp->Sequencer[i] = NVReadVgaSeq(dev, head, i);
}

static void
nv_load_state_vga(struct drm_device *dev, int head,
                  struct nv04_mode_state *state)
{
        struct nv04_crtc_reg *regp = &state->crtc_reg[head];
        int i;

        NVWritePRMVIO(dev, head, NV_PRMVIO_MISC__WRITE, regp->MiscOutReg);

        for (i = 0; i < 5; i++)
                NVWriteVgaSeq(dev, head, i, regp->Sequencer[i]);

        nv_lock_vga_crtc_base(dev, head, false);
        for (i = 0; i < 25; i++)
                wr_cio_state(dev, head, regp, i);
        nv_lock_vga_crtc_base(dev, head, true);

        for (i = 0; i < 9; i++)
                NVWriteVgaGr(dev, head, i, regp->Graphics[i]);

        NVSetEnablePalette(dev, head, true);
        for (i = 0; i < 21; i++)
                NVWriteVgaAttr(dev, head, i, regp->Attribute[i]);
        NVSetEnablePalette(dev, head, false);
}

static void
nv_save_state_ext(struct drm_device *dev, int head,
                  struct nv04_mode_state *state)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv04_crtc_reg *regp = &state->crtc_reg[head];
        int i;

        rd_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX);
        rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX);
        rd_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX);
        rd_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX);
        rd_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX);
        rd_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX);
        rd_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX);

        rd_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
        rd_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
        rd_cio_state(dev, head, regp, NV_CIO_CRE_21);
        if (dev_priv->card_type >= NV_30)
                rd_cio_state(dev, head, regp, NV_CIO_CRE_47);
        rd_cio_state(dev, head, regp, NV_CIO_CRE_49);
        rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
        rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
        rd_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
        rd_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);

        if (dev_priv->card_type >= NV_10) {
                regp->crtc_830 = NVReadCRTC(dev, head, NV_PCRTC_830);
                regp->crtc_834 = NVReadCRTC(dev, head, NV_PCRTC_834);

                if (dev_priv->card_type >= NV_30)
                        regp->gpio_ext = NVReadCRTC(dev, head, NV_PCRTC_GPIO_EXT);

                if (dev_priv->card_type == NV_40)
                        regp->crtc_850 = NVReadCRTC(dev, head, NV_PCRTC_850);

                if (nv_two_heads(dev))
                        regp->crtc_eng_ctrl = NVReadCRTC(dev, head, NV_PCRTC_ENGINE_CTRL);
                regp->cursor_cfg = NVReadCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG);
        }

        regp->crtc_cfg = NVReadCRTC(dev, head, NV_PCRTC_CONFIG);

        rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
        rd_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
        if (dev_priv->card_type >= NV_10) {
                rd_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
                rd_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
                rd_cio_state(dev, head, regp, NV_CIO_CRE_4B);
                rd_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY);
        }
        /* NV11 and NV20 don't have this, they stop at 0x52. */
        if (nv_gf4_disp_arch(dev)) {
                rd_cio_state(dev, head, regp, NV_CIO_CRE_53);
                rd_cio_state(dev, head, regp, NV_CIO_CRE_54);

                for (i = 0; i < 0x10; i++)
                        regp->CR58[i] = NVReadVgaCrtc5758(dev, head, i);
                rd_cio_state(dev, head, regp, NV_CIO_CRE_59);
                rd_cio_state(dev, head, regp, NV_CIO_CRE_5B);

                rd_cio_state(dev, head, regp, NV_CIO_CRE_85);
                rd_cio_state(dev, head, regp, NV_CIO_CRE_86);
        }

        regp->fb_start = NVReadCRTC(dev, head, NV_PCRTC_START);
}

static void
nv_load_state_ext(struct drm_device *dev, int head,
                  struct nv04_mode_state *state)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv04_crtc_reg *regp = &state->crtc_reg[head];
        uint32_t reg900;
        int i;

        if (dev_priv->card_type >= NV_10) {
                if (nv_two_heads(dev))
                        /* setting ENGINE_CTRL (EC) *must* come before
                         * CIO_CRE_LCD, as writing CRE_LCD sets bits 16 & 17 in
                         * EC that should not be overwritten by writing stale EC
                         */
                        NVWriteCRTC(dev, head, NV_PCRTC_ENGINE_CTRL, regp->crtc_eng_ctrl);

                nvWriteVIDEO(dev, NV_PVIDEO_STOP, 1);
                nvWriteVIDEO(dev, NV_PVIDEO_INTR_EN, 0);
                nvWriteVIDEO(dev, NV_PVIDEO_OFFSET_BUFF(0), 0);
                nvWriteVIDEO(dev, NV_PVIDEO_OFFSET_BUFF(1), 0);
                nvWriteVIDEO(dev, NV_PVIDEO_LIMIT(0), dev_priv->fb_available_size - 1);
                nvWriteVIDEO(dev, NV_PVIDEO_LIMIT(1), dev_priv->fb_available_size - 1);
                nvWriteVIDEO(dev, NV_PVIDEO_UVPLANE_LIMIT(0), dev_priv->fb_available_size - 1);
                nvWriteVIDEO(dev, NV_PVIDEO_UVPLANE_LIMIT(1), dev_priv->fb_available_size - 1);
                nvWriteMC(dev, NV_PBUS_POWERCTRL_2, 0);

                NVWriteCRTC(dev, head, NV_PCRTC_CURSOR_CONFIG, regp->cursor_cfg);
                NVWriteCRTC(dev, head, NV_PCRTC_830, regp->crtc_830);
                NVWriteCRTC(dev, head, NV_PCRTC_834, regp->crtc_834);

                if (dev_priv->card_type >= NV_30)
                        NVWriteCRTC(dev, head, NV_PCRTC_GPIO_EXT, regp->gpio_ext);

                if (dev_priv->card_type == NV_40) {
                        NVWriteCRTC(dev, head, NV_PCRTC_850, regp->crtc_850);

                        reg900 = NVReadRAMDAC(dev, head, NV_PRAMDAC_900);
                        if (regp->crtc_cfg == NV_PCRTC_CONFIG_START_ADDRESS_HSYNC)
                                NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 | 0x10000);
                        else
                                NVWriteRAMDAC(dev, head, NV_PRAMDAC_900, reg900 & ~0x10000);
                }
        }

        NVWriteCRTC(dev, head, NV_PCRTC_CONFIG, regp->crtc_cfg);

        wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC0_INDEX);
        wr_cio_state(dev, head, regp, NV_CIO_CRE_RPC1_INDEX);
        wr_cio_state(dev, head, regp, NV_CIO_CRE_LSR_INDEX);
        wr_cio_state(dev, head, regp, NV_CIO_CRE_PIXEL_INDEX);
        wr_cio_state(dev, head, regp, NV_CIO_CRE_LCD__INDEX);
        wr_cio_state(dev, head, regp, NV_CIO_CRE_HEB__INDEX);
        wr_cio_state(dev, head, regp, NV_CIO_CRE_ENH_INDEX);
        wr_cio_state(dev, head, regp, NV_CIO_CRE_FF_INDEX);
        wr_cio_state(dev, head, regp, NV_CIO_CRE_FFLWM__INDEX);
        if (dev_priv->card_type >= NV_30)
                wr_cio_state(dev, head, regp, NV_CIO_CRE_47);
        wr_cio_state(dev, head, regp, NV_CIO_CRE_49);
        wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR0_INDEX);
        wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR1_INDEX);
        wr_cio_state(dev, head, regp, NV_CIO_CRE_HCUR_ADDR2_INDEX);
        if (dev_priv->card_type == NV_40)
                nv_fix_nv40_hw_cursor(dev, head);
        wr_cio_state(dev, head, regp, NV_CIO_CRE_ILACE__INDEX);

        wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH3__INDEX);
        wr_cio_state(dev, head, regp, NV_CIO_CRE_SCRATCH4__INDEX);
        if (dev_priv->card_type >= NV_10) {
                wr_cio_state(dev, head, regp, NV_CIO_CRE_EBR_INDEX);
                wr_cio_state(dev, head, regp, NV_CIO_CRE_CSB);
                wr_cio_state(dev, head, regp, NV_CIO_CRE_4B);
                wr_cio_state(dev, head, regp, NV_CIO_CRE_TVOUT_LATENCY);
        }
        /* NV11 and NV20 stop at 0x52. */
        if (nv_gf4_disp_arch(dev)) {
                if (dev_priv->card_type == NV_10) {
                        /* Not waiting for vertical retrace before modifying
                           CRE_53/CRE_54 causes lockups. */
                        nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x8);
                        nouveau_wait_until(dev, 650000000, NV_PRMCIO_INP0__COLOR, 0x8, 0x0);
                }

                wr_cio_state(dev, head, regp, NV_CIO_CRE_53);
                wr_cio_state(dev, head, regp, NV_CIO_CRE_54);

                for (i = 0; i < 0x10; i++)
                        NVWriteVgaCrtc5758(dev, head, i, regp->CR58[i]);
                wr_cio_state(dev, head, regp, NV_CIO_CRE_59);
                wr_cio_state(dev, head, regp, NV_CIO_CRE_5B);

                wr_cio_state(dev, head, regp, NV_CIO_CRE_85);
                wr_cio_state(dev, head, regp, NV_CIO_CRE_86);
        }

        NVWriteCRTC(dev, head, NV_PCRTC_START, regp->fb_start);

        /* Setting 1 on this value gives you interrupts for every vblank period. */
        NVWriteCRTC(dev, head, NV_PCRTC_INTR_EN_0, 0);
        NVWriteCRTC(dev, head, NV_PCRTC_INTR_0, NV_PCRTC_INTR_0_VBLANK);
}

static void
nv_save_state_palette(struct drm_device *dev, int head,
                      struct nv04_mode_state *state)
{
        int head_offset = head * NV_PRMDIO_SIZE, i;

        nv_wr08(dev, NV_PRMDIO_PIXEL_MASK + head_offset,
                                NV_PRMDIO_PIXEL_MASK_MASK);
        nv_wr08(dev, NV_PRMDIO_READ_MODE_ADDRESS + head_offset, 0x0);

        for (i = 0; i < 768; i++) {
                state->crtc_reg[head].DAC[i] = nv_rd08(dev,
                                NV_PRMDIO_PALETTE_DATA + head_offset);
        }

        NVSetEnablePalette(dev, head, false);
}

void
nouveau_hw_load_state_palette(struct drm_device *dev, int head,
                              struct nv04_mode_state *state)
{
        int head_offset = head * NV_PRMDIO_SIZE, i;

        nv_wr08(dev, NV_PRMDIO_PIXEL_MASK + head_offset,
                                NV_PRMDIO_PIXEL_MASK_MASK);
        nv_wr08(dev, NV_PRMDIO_WRITE_MODE_ADDRESS + head_offset, 0x0);

        for (i = 0; i < 768; i++) {
                nv_wr08(dev, NV_PRMDIO_PALETTE_DATA + head_offset,
                                state->crtc_reg[head].DAC[i]);
        }

        NVSetEnablePalette(dev, head, false);
}

void nouveau_hw_save_state(struct drm_device *dev, int head,
                           struct nv04_mode_state *state)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        if (dev_priv->chipset == 0x11)
                /* NB: no attempt is made to restore the bad pll later on */
                nouveau_hw_fix_bad_vpll(dev, head);
        nv_save_state_ramdac(dev, head, state);
        nv_save_state_vga(dev, head, state);
        nv_save_state_palette(dev, head, state);
        nv_save_state_ext(dev, head, state);
}

void nouveau_hw_load_state(struct drm_device *dev, int head,
                           struct nv04_mode_state *state)
{
        NVVgaProtect(dev, head, true);
        nv_load_state_ramdac(dev, head, state);
        nv_load_state_ext(dev, head, state);
        nouveau_hw_load_state_palette(dev, head, state);
        nv_load_state_vga(dev, head, state);
        NVVgaProtect(dev, head, false);
}