nva3_pm.c

/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include <drm/drmP.h>
#include "nouveau_drm.h"
#include "nouveau_bios.h"
#include "nouveau_pm.h"

#include <subdev/bios/pll.h>
#include <subdev/bios.h>
#include <subdev/clock.h>
#include <subdev/timer.h>
#include <subdev/fb.h>
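
/* Engine and memory clock handling for the NVA3 (GT215) family.  Each
 * clock appears to be fed either by a dedicated PLL or by the VCO of one
 * of the two host PLLs at 0xe820/0xe8a0 through a per-clock divider; the
 * helpers below read back and reprogram both paths. */
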
static u32 read_clk(struct drm_device *, int, bool);
static u32 read_pll(struct drm_device *, int, u32);
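
/* Report the frequency of the VCO feeding a clock's divider: bits 4:5 of
 * the source-control register at 0x4120 + 4*clk select which of the two
 * 0xe8xx host PLLs is in use. */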
static u32
read_vco(struct drm_device *dev, int clk)
{
	struct nouveau_device *device = nouveau_dev(dev);
	u32 sctl = nv_rd32(device, 0x4120 + (clk * 4));
	if ((sctl & 0x00000030) != 0x00000030)
		return read_pll(dev, 0x41, 0x00e820);
	return read_pll(dev, 0x42, 0x00e8a0);
}
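
/* Read back a clock in kHz: depending on its source-select field the
 * clock runs from the crystal, a fixed 100/108MHz reference, or a host
 * PLL VCO scaled by 2/sdiv.  Clocks 0x40 and up are the fixed refclks
 * of the 0xe8xx PLLs themselves. */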
static u32
read_clk(struct drm_device *dev, int clk, bool ignore_en)
{
	struct nouveau_device *device = nouveau_dev(dev);
	struct nouveau_drm *drm = nouveau_drm(dev);
	u32 sctl, sdiv, sclk;

	/* refclk for the 0xe8xx plls is a fixed frequency */
	if (clk >= 0x40) {
		if (nv_device(drm->device)->chipset == 0xaf) {
			/* no joke.. seriously.. sigh.. */
			return nv_rd32(device, 0x00471c) * 1000;
		}

		return device->crystal;
	}

	sctl = nv_rd32(device, 0x4120 + (clk * 4));
	if (!ignore_en && !(sctl & 0x00000100))
		return 0;

	switch (sctl & 0x00003000) {
	case 0x00000000:
		return device->crystal;
	case 0x00002000:
		if (sctl & 0x00000040)
			return 108000;
		return 100000;
	case 0x00003000:
		sclk = read_vco(dev, clk);
		sdiv = ((sctl & 0x003f0000) >> 16) + 2;
		return (sclk * 2) / sdiv;
	default:
		return 0;
	}
}
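
/* Decode a PLL's current output: refclk * N / (M * P), with P forced to
 * 1 on the post-divider-less 0xe8xx PLLs.  If the PLL is bypassed (ctrl
 * bit 3) the frequency of its alternate input clock is reported
 * instead. */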
static u32
read_pll(struct drm_device *dev, int clk, u32 pll)
{
	struct nouveau_device *device = nouveau_dev(dev);
	u32 ctrl = nv_rd32(device, pll + 0);
	u32 sclk = 0, P = 1, N = 1, M = 1;

	if (!(ctrl & 0x00000008)) {
		if (ctrl & 0x00000001) {
			u32 coef = nv_rd32(device, pll + 4);
			M = (coef & 0x000000ff) >> 0;
			N = (coef & 0x0000ff00) >> 8;
			P = (coef & 0x003f0000) >> 16;

			/* no post-divider on these.. */
			if ((pll & 0x00ff00) == 0x00e800)
				P = 1;

			sclk = read_clk(dev, 0x00 + clk, false);
		}
	} else {
		sclk = read_clk(dev, 0x10 + clk, false);
	}

	if (M * P)
		return sclk * N / (M * P);
	return 0;
}
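
/* Pre-calculated register values for one clock: the source-control word
 * for the divider path and, if a PLL is to be used, its coefficients. */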
struct creg {
	u32 clk;
	u32 pll;
};
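
/* Calculate creg values for a target frequency.  27/100/108MHz map onto
 * fixed sources; anything else tries the divider first and falls back
 * to the PLL.  Illustrative numbers: with a 1620MHz VCO and khz=810000,
 * sdiv = 3240000 / 807001 = 4, which the sdiv > 4 test below rejects -
 * matching the VBIOS preference for the PLL at 810MHz noted below. */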
static int
calc_clk(struct drm_device *dev, int clk, u32 pll, u32 khz, struct creg *reg)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_device *device = nouveau_dev(dev);
	struct nouveau_bios *bios = nouveau_bios(device);
	struct nvbios_pll limits;
	u32 oclk, sclk, sdiv;
	int P, N, M, diff;
	int ret;

	reg->pll = 0;
	reg->clk = 0;
	if (!khz) {
		NV_DEBUG(drm, "no clock for 0x%04x/0x%02x\n", pll, clk);
		return 0;
	}

	switch (khz) {
	case 27000:
		reg->clk = 0x00000100;
		return khz;
	case 100000:
		reg->clk = 0x00002100;
		return khz;
	case 108000:
		reg->clk = 0x00002140;
		return khz;
	default:
		sclk = read_vco(dev, clk);
		sdiv = min((sclk * 2) / (khz - 2999), (u32)65);
		/* if the clock has a PLL attached, and we can get to within
		 * [-2, 3) MHz of the target with a divider, we'll disable
		 * the PLL and use the divider instead.
		 *
		 * divider can go as low as 2, limited here because NVIDIA
		 * and the VBIOS on my NVA8 seem to prefer using the PLL
		 * for 810MHz - is there a good reason?
		 */
		if (sdiv > 4) {
			oclk = (sclk * 2) / sdiv;
			diff = khz - oclk;
			if (!pll || (diff >= -2000 && diff < 3000)) {
				reg->clk = (((sdiv - 2) << 16) | 0x00003100);
				return oclk;
			}
		}

		if (!pll) {
			NV_ERROR(drm, "bad freq %02x: %d %d\n", clk, khz, sclk);
			return -ERANGE;
		}

		break;
	}

	ret = nvbios_pll_parse(bios, pll, &limits);
	if (ret)
		return ret;

	limits.refclk = read_clk(dev, clk - 0x10, true);
	if (!limits.refclk)
		return -EINVAL;

	ret = nva3_calc_pll(dev, &limits, khz, &N, NULL, &M, &P);
	if (ret >= 0) {
		reg->clk = nv_rd32(device, 0x4120 + (clk * 4));
		reg->pll = (P << 16) | (N << 8) | M;
	}

	return ret;
}
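
/* Program a clock from pre-calculated creg values.  For the PLL path we
 * park the input source, write the coefficients, wait for what is
 * presumably a lock indication (ctrl bit 17) and drop the bypass;
 * otherwise we program the divider and power the PLL down. */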
static void
prog_pll(struct drm_device *dev, int clk, u32 pll, struct creg *reg)
{
	struct nouveau_device *device = nouveau_dev(dev);
	struct nouveau_drm *drm = nouveau_drm(dev);
	const u32 src0 = 0x004120 + (clk * 4);
	const u32 src1 = 0x004160 + (clk * 4);
	const u32 ctrl = pll + 0;
	const u32 coef = pll + 4;

	if (!reg->clk && !reg->pll) {
		NV_DEBUG(drm, "no clock for %02x\n", clk);
		return;
	}

	if (reg->pll) {
		nv_mask(device, src0, 0x00000101, 0x00000101);
		nv_wr32(device, coef, reg->pll);
		nv_mask(device, ctrl, 0x00000015, 0x00000015);
		nv_mask(device, ctrl, 0x00000010, 0x00000000);
		nv_wait(device, ctrl, 0x00020000, 0x00020000);
		nv_mask(device, ctrl, 0x00000010, 0x00000010);
		nv_mask(device, ctrl, 0x00000008, 0x00000000);
		nv_mask(device, src1, 0x00000100, 0x00000000);
		nv_mask(device, src1, 0x00000001, 0x00000000);
	} else {
		nv_mask(device, src1, 0x003f3141, 0x00000101 | reg->clk);
		nv_mask(device, ctrl, 0x00000018, 0x00000018);
		udelay(20);
		nv_mask(device, ctrl, 0x00000001, 0x00000000);
		nv_mask(device, src0, 0x00000100, 0x00000000);
		nv_mask(device, src0, 0x00000001, 0x00000000);
	}
}
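
/* Program a divider-only clock (one without a PLL attached). */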
static void
prog_clk(struct drm_device *dev, int clk, struct creg *reg)
{
	struct nouveau_device *device = nouveau_dev(dev);
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (!reg->clk) {
		NV_DEBUG(drm, "no clock for %02x\n", clk);
		return;
	}

	nv_mask(device, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | reg->clk);
}
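
/* Read the current frequency of each clock a perflvl describes.  The
 * copy engine has no clock of its own and simply follows core. */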
int
nva3_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
	perflvl->core   = read_pll(dev, 0x00, 0x4200);
	perflvl->shader = read_pll(dev, 0x01, 0x4220);
	perflvl->memory = read_pll(dev, 0x02, 0x4000);
	perflvl->unka0  = read_clk(dev, 0x20, false);
	perflvl->vdec   = read_clk(dev, 0x21, false);
	perflvl->daemon = read_clk(dev, 0x25, false);
	perflvl->copy   = perflvl->core;
	return 0;
}
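
/* Everything calculated by nva3_pm_clocks_pre() for later use by
 * nva3_pm_clocks_set(), including the VBIOS rammap/ramcfg data that
 * steers the memory reclock. */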
struct nva3_pm_state {
	struct nouveau_pm_level *perflvl;

	struct creg nclk;
	struct creg sclk;
	struct creg vdec;
	struct creg unka0;

	struct creg mclk;
	u8 *rammap;
	u8  rammap_ver;
	u8  rammap_len;
	u8 *ramcfg;
	u8  ramcfg_len;
	u32 r004018;
	u32 r100760;
};
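
/* Preparation half of a reclock: runs in normal context, so we can fail
 * gracefully before touching the hardware.  Only version 0x10 rammap
 * entries (of at least 5 bytes) and version 0x10 ramcfg entries are
 * understood; anything else is ignored. */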
void *
nva3_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
	struct nva3_pm_state *info;
	u8 ramcfg_cnt;
	int ret;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	ret = calc_clk(dev, 0x10, 0x4200, perflvl->core, &info->nclk);
	if (ret < 0)
		goto out;

	ret = calc_clk(dev, 0x11, 0x4220, perflvl->shader, &info->sclk);
	if (ret < 0)
		goto out;

	ret = calc_clk(dev, 0x12, 0x4000, perflvl->memory, &info->mclk);
	if (ret < 0)
		goto out;

	ret = calc_clk(dev, 0x20, 0x0000, perflvl->unka0, &info->unka0);
	if (ret < 0)
		goto out;

	ret = calc_clk(dev, 0x21, 0x0000, perflvl->vdec, &info->vdec);
	if (ret < 0)
		goto out;

	info->rammap = nouveau_perf_rammap(dev, perflvl->memory,
					   &info->rammap_ver,
					   &info->rammap_len,
					   &ramcfg_cnt, &info->ramcfg_len);
	if (info->rammap_ver != 0x10 || info->rammap_len < 5)
		info->rammap = NULL;

	info->ramcfg = nouveau_perf_ramcfg(dev, perflvl->memory,
					   &info->rammap_ver,
					   &info->ramcfg_len);
	if (info->rammap_ver != 0x10)
		info->ramcfg = NULL;

	info->perflvl = perflvl;
out:
	if (ret < 0) {
		kfree(info);
		info = ERR_PTR(ret);
	}
	return info;
}
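
/* Poll callback: true once the ctxprog is idle, or is stalled on the
 * 0x0050001c wait-for-flag opcode we point it at below. */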
static bool
nva3_pm_grcp_idle(void *data)
{
	struct drm_device *dev = data;
	struct nouveau_device *device = nouveau_dev(dev);

	if (!(nv_rd32(device, 0x400304) & 0x00000001))
		return true;
	if (nv_rd32(device, 0x400308) == 0x0050001c)
		return true;
	return false;
}
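
/* nouveau_mem_exec() callbacks: the common memory-reclock sequencer
 * drives the DRAM through these small register pokes. */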
static void
mclk_precharge(struct nouveau_mem_exec_func *exec)
{
	struct nouveau_device *device = nouveau_dev(exec->dev);
	nv_wr32(device, 0x1002d4, 0x00000001);
}

static void
mclk_refresh(struct nouveau_mem_exec_func *exec)
{
	struct nouveau_device *device = nouveau_dev(exec->dev);
	nv_wr32(device, 0x1002d0, 0x00000001);
}

static void
mclk_refresh_auto(struct nouveau_mem_exec_func *exec, bool enable)
{
	struct nouveau_device *device = nouveau_dev(exec->dev);
	nv_wr32(device, 0x100210, enable ? 0x80000000 : 0x00000000);
}

static void
mclk_refresh_self(struct nouveau_mem_exec_func *exec, bool enable)
{
	struct nouveau_device *device = nouveau_dev(exec->dev);
	nv_wr32(device, 0x1002dc, enable ? 0x00000001 : 0x00000000);
}

static void
mclk_wait(struct nouveau_mem_exec_func *exec, u32 nsec)
{
	struct nouveau_device *device = nouveau_dev(exec->dev);
	/* dummy read to flush posted writes before delaying */
	volatile u32 post = nv_rd32(device, 0); (void)post;
	udelay((nsec + 500) / 1000);
}

static u32
mclk_mrg(struct nouveau_mem_exec_func *exec, int mr)
{
	struct nouveau_device *device = nouveau_dev(exec->dev);
	if (mr <= 1)
		return nv_rd32(device, 0x1002c0 + ((mr - 0) * 4));
	if (mr <= 3)
		return nv_rd32(device, 0x1002e0 + ((mr - 2) * 4));
	return 0;
}

static void
mclk_mrs(struct nouveau_mem_exec_func *exec, int mr, u32 data)
{
	struct nouveau_device *device = nouveau_dev(exec->dev);
	struct nouveau_fb *pfb = nouveau_fb(device);
	if (mr <= 1) {
		if (pfb->ram.ranks > 1)
			nv_wr32(device, 0x1002c8 + ((mr - 0) * 4), data);
		nv_wr32(device, 0x1002c0 + ((mr - 0) * 4), data);
	} else
	if (mr <= 3) {
		if (pfb->ram.ranks > 1)
			nv_wr32(device, 0x1002e8 + ((mr - 2) * 4), data);
		nv_wr32(device, 0x1002e0 + ((mr - 2) * 4), data);
	}
}
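
/* Switch the memory clock source: either bring the 0x4000 PLL up on new
 * coefficients via bypass, or move onto the divider in 0x4168, plus the
 * rammap/ramcfg-dependent setup that goes with it. */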
static void
mclk_clock_set(struct nouveau_mem_exec_func *exec)
{
	struct nouveau_device *device = nouveau_dev(exec->dev);
	struct nva3_pm_state *info = exec->priv;
	u32 ctrl;

	ctrl = nv_rd32(device, 0x004000);
	if (!(ctrl & 0x00000008) && info->mclk.pll) {
		nv_wr32(device, 0x004000, (ctrl |= 0x00000008));
		nv_mask(device, 0x1110e0, 0x00088000, 0x00088000);
		nv_wr32(device, 0x004018, 0x00001000);
		nv_wr32(device, 0x004000, (ctrl &= ~0x00000001));
		nv_wr32(device, 0x004004, info->mclk.pll);
		nv_wr32(device, 0x004000, (ctrl |= 0x00000001));
		udelay(64);
		nv_wr32(device, 0x004018, 0x00005000 | info->r004018);
		udelay(20);
	} else
	if (!info->mclk.pll) {
		nv_mask(device, 0x004168, 0x003f3040, info->mclk.clk);
		nv_wr32(device, 0x004000, (ctrl |= 0x00000008));
		nv_mask(device, 0x1110e0, 0x00088000, 0x00088000);
		nv_wr32(device, 0x004018, 0x0000d000 | info->r004018);
	}

	if (info->rammap) {
		if (info->ramcfg && (info->rammap[4] & 0x08)) {
			u32 unk5a0 = (ROM16(info->ramcfg[5]) << 8) |
				      info->ramcfg[5];
			u32 unk5a4 = ROM16(info->ramcfg[7]);
			u32 unk804 = (info->ramcfg[9] & 0xf0) << 16 |
				     (info->ramcfg[3] & 0x0f) << 16 |
				     (info->ramcfg[9] & 0x0f) |
				     0x80000000;
			nv_wr32(device, 0x1005a0, unk5a0);
			nv_wr32(device, 0x1005a4, unk5a4);
			nv_wr32(device, 0x10f804, unk804);
			nv_mask(device, 0x10053c, 0x00001000, 0x00000000);
		} else {
			nv_mask(device, 0x10053c, 0x00001000, 0x00001000);
			nv_mask(device, 0x10f804, 0x80000000, 0x00000000);
			nv_mask(device, 0x100760, 0x22222222, info->r100760);
			nv_mask(device, 0x1007a0, 0x22222222, info->r100760);
			nv_mask(device, 0x1007e0, 0x22222222, info->r100760);
		}
	}

	if (info->mclk.pll) {
		nv_mask(device, 0x1110e0, 0x00088000, 0x00011000);
		nv_wr32(device, 0x004000, (ctrl &= ~0x00000008));
	}
}
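
/* Write the nine timing registers for the target perflvl, along with
 * assorted ramcfg-dependent tweaks that are still only partly
 * understood (hence the unk714/718/71c names). */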
static void
mclk_timing_set(struct nouveau_mem_exec_func *exec)
{
	struct nouveau_device *device = nouveau_dev(exec->dev);
	struct nva3_pm_state *info = exec->priv;
	struct nouveau_pm_level *perflvl = info->perflvl;
	int i;

	for (i = 0; i < 9; i++)
		nv_wr32(device, 0x100220 + (i * 4), perflvl->timing.reg[i]);

	if (info->ramcfg) {
		u32 data = (info->ramcfg[2] & 0x08) ? 0x00000000 : 0x00001000;
		nv_mask(device, 0x100200, 0x00001000, data);
	}

	if (info->ramcfg) {
		u32 unk714 = nv_rd32(device, 0x100714) & ~0xf0000010;
		u32 unk718 = nv_rd32(device, 0x100718) & ~0x00000100;
		u32 unk71c = nv_rd32(device, 0x10071c) & ~0x00000100;
		if ( (info->ramcfg[2] & 0x20))
			unk714 |= 0xf0000000;
		if (!(info->ramcfg[2] & 0x04))
			unk714 |= 0x00000010;
		nv_wr32(device, 0x100714, unk714);

		if (info->ramcfg[2] & 0x01)
			unk71c |= 0x00000100;
		nv_wr32(device, 0x10071c, unk71c);

		if (info->ramcfg[2] & 0x02)
			unk718 |= 0x00000100;
		nv_wr32(device, 0x100718, unk718);

		if (info->ramcfg[2] & 0x10)
			nv_wr32(device, 0x111100, 0x48000000); /*XXX*/
	}
}
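
/* The memory reclock itself: pre-stage the PLL/divider, let
 * nouveau_mem_exec() run the DRAM sequence with the callbacks above,
 * then shut off whichever clock path is no longer in use. */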
static void
prog_mem(struct drm_device *dev, struct nva3_pm_state *info)
{
	struct nouveau_device *device = nouveau_dev(dev);
	struct nouveau_mem_exec_func exec = {
		.dev = dev,
		.precharge = mclk_precharge,
		.refresh = mclk_refresh,
		.refresh_auto = mclk_refresh_auto,
		.refresh_self = mclk_refresh_self,
		.wait = mclk_wait,
		.mrg = mclk_mrg,
		.mrs = mclk_mrs,
		.clock_set = mclk_clock_set,
		.timing_set = mclk_timing_set,
		.priv = info
	};
	u32 ctrl;

	/* XXX: where the fuck does 750MHz come from? */
	if (info->perflvl->memory <= 750000) {
		info->r004018 = 0x10000000;
		info->r100760 = 0x22222222;
	}

	ctrl = nv_rd32(device, 0x004000);
	if (ctrl & 0x00000008) {
		if (info->mclk.pll) {
			nv_mask(device, 0x004128, 0x00000101, 0x00000101);
			nv_wr32(device, 0x004004, info->mclk.pll);
			nv_wr32(device, 0x004000, (ctrl |= 0x00000001));
			nv_wr32(device, 0x004000, (ctrl &= 0xffffffef));
			nv_wait(device, 0x004000, 0x00020000, 0x00020000);
			nv_wr32(device, 0x004000, (ctrl |= 0x00000010));
			nv_wr32(device, 0x004018, 0x00005000 | info->r004018);
			nv_wr32(device, 0x004000, (ctrl |= 0x00000004));
		}
	} else {
		u32 ssel = 0x00000101;
		if (info->mclk.clk)
			ssel |= info->mclk.clk;
		else
			ssel |= 0x00080000; /* 324MHz, shouldn't matter... */
		nv_mask(device, 0x004168, 0x003f3141, ssel);
	}

	if (info->ramcfg) {
		if (info->ramcfg[2] & 0x10) {
			nv_mask(device, 0x111104, 0x00000600, 0x00000000);
		} else {
			nv_mask(device, 0x111100, 0x40000000, 0x40000000);
			nv_mask(device, 0x111104, 0x00000180, 0x00000000);
		}
	}
	if (info->rammap && !(info->rammap[4] & 0x02))
		nv_mask(device, 0x100200, 0x00000800, 0x00000000);
	nv_wr32(device, 0x611200, 0x00003300);
	if (info->ramcfg && !(info->ramcfg[2] & 0x10))
		nv_wr32(device, 0x111100, 0x4c020000); /*XXX*/

	nouveau_mem_exec(&exec, info->perflvl);

	nv_wr32(device, 0x611200, 0x00003330);
	if (info->rammap && (info->rammap[4] & 0x02))
		nv_mask(device, 0x100200, 0x00000800, 0x00000800);
	if (info->ramcfg) {
		if (info->ramcfg[2] & 0x10) {
			nv_mask(device, 0x111104, 0x00000180, 0x00000180);
			nv_mask(device, 0x111100, 0x40000000, 0x00000000);
		} else {
			nv_mask(device, 0x111104, 0x00000600, 0x00000600);
		}
	}

	if (info->mclk.pll) {
		nv_mask(device, 0x004168, 0x00000001, 0x00000000);
		nv_mask(device, 0x004168, 0x00000100, 0x00000000);
	} else {
		nv_mask(device, 0x004000, 0x00000001, 0x00000000);
		nv_mask(device, 0x004128, 0x00000001, 0x00000000);
		nv_mask(device, 0x004128, 0x00000100, 0x00000000);
	}
}
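
/* Commit half of a reclock: quiesce the GPU (ctxprog parked on a
 * wait-for-flag, PFIFO frozen), program everything calculated by
 * nva3_pm_clocks_pre(), then bring the GPU back.  Returns -EAGAIN if
 * the GPU refuses to idle, so the caller may retry. */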
int
nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
{
	struct nouveau_device *device = nouveau_dev(dev);
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nva3_pm_state *info = pre_state;
	int ret = -EAGAIN;

	/* prevent any new grctx switches from starting */
	nv_wr32(device, 0x400324, 0x00000000);
	nv_wr32(device, 0x400328, 0x0050001c); /* wait flag 0x1c */
	/* wait for any pending grctx switches to complete */
	if (!nv_wait_cb(device, nva3_pm_grcp_idle, dev)) {
		NV_ERROR(drm, "pm: ctxprog didn't go idle\n");
		goto cleanup;
	}
	/* freeze PFIFO */
	nv_mask(device, 0x002504, 0x00000001, 0x00000001);
	if (!nv_wait(device, 0x002504, 0x00000010, 0x00000010)) {
		NV_ERROR(drm, "pm: fifo didn't go idle\n");
		goto cleanup;
	}

	prog_pll(dev, 0x00, 0x004200, &info->nclk);
	prog_pll(dev, 0x01, 0x004220, &info->sclk);
	prog_clk(dev, 0x20, &info->unka0);
	prog_clk(dev, 0x21, &info->vdec);

	if (info->mclk.clk || info->mclk.pll)
		prog_mem(dev, info);

	ret = 0;

cleanup:
	/* unfreeze PFIFO */
	nv_mask(device, 0x002504, 0x00000001, 0x00000000);
	/* restore ctxprog to normal */
	nv_wr32(device, 0x400324, 0x00000000);
	nv_wr32(device, 0x400328, 0x0070009c); /* set flag 0x1c */
	/* unblock it if necessary */
	if (nv_rd32(device, 0x400308) == 0x0050001c)
		nv_mask(device, 0x400824, 0x10000000, 0x10000000);
	kfree(info);
	return ret;
}