nva3_pm.c

/*
 * Copyright 2010 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_bios.h"
#include "nouveau_pm.h"

static u32 read_clk(struct drm_device *, int, bool);
static u32 read_pll(struct drm_device *, int, u32);
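
/* Report the frequency (in kHz) of the VCO feeding clock source 'clk'.
 * The source control register at 0x4120 + (clk * 4) appears to select
 * between the two reference PLLs at 0xe820 and 0xe8a0.
 */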
static u32
read_vco(struct drm_device *dev, int clk)
{
	u32 sctl = nv_rd32(dev, 0x4120 + (clk * 4));
	if ((sctl & 0x00000030) != 0x00000030)
		return read_pll(dev, 0x41, 0x00e820);
	return read_pll(dev, 0x42, 0x00e8a0);
}
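
/* Report the current frequency (in kHz) of clock source 'clk'.  Sources
 * 0x40 and up are the fixed 27MHz reference.  For the rest, bits 12:13
 * of the source control register appear to pick between the crystal, a
 * 100/108MHz source and a divided-down VCO; ignore_en reads the source
 * even if its enable bit (bit 8) is clear.
 */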
static u32
read_clk(struct drm_device *dev, int clk, bool ignore_en)
{
	u32 sctl, sdiv, sclk;

	/* refclk for the 0xe8xx plls is always 27MHz */
	if (clk >= 0x40)
		return 27000;

	sctl = nv_rd32(dev, 0x4120 + (clk * 4));
	if (!ignore_en && !(sctl & 0x00000100))
		return 0;

	switch (sctl & 0x00003000) {
	case 0x00000000:
		return 27000;
	case 0x00002000:
		if (sctl & 0x00000040)
			return 108000;
		return 100000;
	case 0x00003000:
		sclk = read_vco(dev, clk);
		sdiv = ((sctl & 0x003f0000) >> 16) + 2;
		return (sclk * 2) / sdiv;
	default:
		return 0;
	}
}
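
/* Report the output frequency (in kHz) of the PLL whose control/coef
 * registers start at 'pll'.  If the PLL is bypassed (bit 3 of the
 * control register set), the frequency of the alternate source
 * (0x10 + clk) is reported instead.
 */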
static u32
read_pll(struct drm_device *dev, int clk, u32 pll)
{
	u32 ctrl = nv_rd32(dev, pll + 0);
	u32 sclk, P = 1, N = 1, M = 1;

	if (!(ctrl & 0x00000008)) {
		u32 coef = nv_rd32(dev, pll + 4);
		M = (coef & 0x000000ff) >> 0;
		N = (coef & 0x0000ff00) >> 8;
		P = (coef & 0x003f0000) >> 16;

		/* no post-divider on these.. */
		if ((pll & 0x00ff00) == 0x00e800)
			P = 1;

		sclk = read_clk(dev, 0x00 + clk, false);
	} else {
		sclk = read_clk(dev, 0x10 + clk, false);
	}

	return sclk * N / (M * P);
}
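
/* Register values calculated by calc_clk() and written back by
 * prog_pll()/prog_clk(): the clock source control word, and the PLL
 * N/M/P coefficients if a PLL is to be used.
 */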
struct creg {
	u32 clk;
	u32 pll;
};
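
/* Work out how to generate a frequency as close as possible to 'khz' on
 * clock source 'clk', preferring the plain source/divider path over the
 * PLL at 'pll' when the result is close enough.  Fills in *reg and
 * returns the frequency that will actually be programmed, or a negative
 * error code.
 */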
static int
calc_clk(struct drm_device *dev, int clk, u32 pll, u32 khz, struct creg *reg)
{
	struct pll_lims limits;
	u32 oclk, sclk, sdiv;
	int P, N, M, diff;
	int ret;

	reg->pll = 0;
	reg->clk = 0;
	if (!khz) {
		NV_DEBUG(dev, "no clock for 0x%04x/0x%02x\n", pll, clk);
		return 0;
	}

	switch (khz) {
	case 27000:
		reg->clk = 0x00000100;
		return khz;
	case 100000:
		reg->clk = 0x00002100;
		return khz;
	case 108000:
		reg->clk = 0x00002140;
		return khz;
	default:
		sclk = read_vco(dev, clk);
		sdiv = min((sclk * 2) / (khz - 2999), (u32)65);
		/* if the clock has a PLL attached, and we can get to within
		 * [-2, 3) MHz of a divider, we'll disable the PLL and use
		 * the divider instead.
		 *
		 * divider can go as low as 2, limited here because NVIDIA
		 * and the VBIOS on my NVA8 seem to prefer using the PLL
		 * for 810MHz - is there a good reason?
		 */
		if (sdiv > 4) {
			oclk = (sclk * 2) / sdiv;
			diff = khz - oclk;
			if (!pll || (diff >= -2000 && diff < 3000)) {
				reg->clk = (((sdiv - 2) << 16) | 0x00003100);
				return oclk;
			}
		}

		if (!pll) {
			NV_ERROR(dev, "bad freq %02x: %d %d\n", clk, khz, sclk);
			return -ERANGE;
		}

		break;
	}

	ret = get_pll_limits(dev, pll, &limits);
	if (ret)
		return ret;

	limits.refclk = read_clk(dev, clk - 0x10, true);
	if (!limits.refclk)
		return -EINVAL;

	ret = nva3_calc_pll(dev, &limits, khz, &N, NULL, &M, &P);
	if (ret >= 0) {
		reg->clk = nv_rd32(dev, 0x4120 + (clk * 4));
		reg->pll = (P << 16) | (N << 8) | M;
	}

	return ret;
}
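
/* Program clock source 'clk' with the values calculated earlier.  If PLL
 * coefficients were calculated, the PLL at 'pll' is reprogrammed and
 * selected; otherwise it is put into bypass and the divider/source
 * setting in reg->clk is used directly.
 */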
static void
prog_pll(struct drm_device *dev, int clk, u32 pll, struct creg *reg)
{
	const u32 src0 = 0x004120 + (clk * 4);
	const u32 src1 = 0x004160 + (clk * 4);
	const u32 ctrl = pll + 0;
	const u32 coef = pll + 4;
	u32 cntl;

	if (!reg->clk && !reg->pll) {
		NV_DEBUG(dev, "no clock for %02x\n", clk);
		return;
	}

	cntl = nv_rd32(dev, ctrl) & 0xfffffff2;
	if (reg->pll) {
		nv_mask(dev, src0, 0x00000101, 0x00000101);
		nv_wr32(dev, coef, reg->pll);
		nv_wr32(dev, ctrl, cntl | 0x00000015);
		nv_mask(dev, src1, 0x00000100, 0x00000000);
		nv_mask(dev, src1, 0x00000001, 0x00000000);
	} else {
		nv_mask(dev, src1, 0x003f3141, 0x00000101 | reg->clk);
		nv_wr32(dev, ctrl, cntl | 0x0000001d);
		nv_mask(dev, ctrl, 0x00000001, 0x00000000);
		nv_mask(dev, src0, 0x00000100, 0x00000000);
		nv_mask(dev, src0, 0x00000001, 0x00000000);
	}
}
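
/* Program a PLL-less clock source directly with the source/divider value
 * calculated by calc_clk().
 */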
static void
prog_clk(struct drm_device *dev, int clk, struct creg *reg)
{
	if (!reg->clk) {
		NV_DEBUG(dev, "no clock for %02x\n", clk);
		return;
	}

	nv_mask(dev, 0x004120 + (clk * 4), 0x003f3141, 0x00000101 | reg->clk);
}
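
/* Read back the currently programmed core, shader, memory, vdec, etc.
 * frequencies into the performance level structure.
 */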
int
nva3_pm_clocks_get(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
	perflvl->core   = read_pll(dev, 0x00, 0x4200);
	perflvl->shader = read_pll(dev, 0x01, 0x4220);
	perflvl->memory = read_pll(dev, 0x02, 0x4000);
	perflvl->unka0  = read_clk(dev, 0x20, false);
	perflvl->vdec   = read_clk(dev, 0x21, false);
	perflvl->daemon = read_clk(dev, 0x25, false);
	perflvl->copy   = perflvl->core;
	return 0;
}
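
/* Pre-calculated register state for a reclock, produced by
 * nva3_pm_clocks_pre() and consumed by nva3_pm_clocks_set().
 */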
struct nva3_pm_state {
	struct creg nclk;
	struct creg sclk;
	struct creg mclk;
	struct creg vdec;
	struct creg unka0;
};
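
/* Calculate all register values needed for 'perflvl' without touching the
 * hardware, so the actual switch in nva3_pm_clocks_set() can be done with
 * the GPU stalled for as short a time as possible.  Returns the state to
 * hand to nva3_pm_clocks_set(), or an ERR_PTR() on failure.
 */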
void *
nva3_pm_clocks_pre(struct drm_device *dev, struct nouveau_pm_level *perflvl)
{
	struct nva3_pm_state *info;
	int ret;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return ERR_PTR(-ENOMEM);

	ret = calc_clk(dev, 0x10, 0x4200, perflvl->core, &info->nclk);
	if (ret < 0)
		goto out;

	ret = calc_clk(dev, 0x11, 0x4220, perflvl->shader, &info->sclk);
	if (ret < 0)
		goto out;

	ret = calc_clk(dev, 0x12, 0x4000, perflvl->memory, &info->mclk);
	if (ret < 0)
		goto out;

	ret = calc_clk(dev, 0x20, 0x0000, perflvl->unka0, &info->unka0);
	if (ret < 0)
		goto out;

	ret = calc_clk(dev, 0x21, 0x0000, perflvl->vdec, &info->vdec);
	if (ret < 0)
		goto out;

out:
	if (ret < 0) {
		kfree(info);
		info = ERR_PTR(ret);
	}
	return info;
}
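
/* Callback for nv_wait_cb(): true once the ctxprog is idle, either because
 * no context switch is in progress or because it has stopped on the 0x1c
 * wait flag armed in nva3_pm_clocks_set().
 */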
static bool
nva3_pm_grcp_idle(void *data)
{
	struct drm_device *dev = data;

	if (!(nv_rd32(dev, 0x400304) & 0x00000001))
		return true;
	if (nv_rd32(dev, 0x400308) == 0x0050001c)
		return true;

	return false;
}
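
/* Apply a pre-calculated clock state.  New context switches are blocked
 * and PFIFO is frozen while the PLLs and dividers are reprogrammed; the
 * 0x100210/0x1002dc writes around the memory PLL change presumably stall
 * memory refresh while memory is reclocked.
 */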
void
nva3_pm_clocks_set(struct drm_device *dev, void *pre_state)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nva3_pm_state *info = pre_state;
	unsigned long flags;

	/* prevent any new grctx switches from starting */
	spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
	nv_wr32(dev, 0x400324, 0x00000000);
	nv_wr32(dev, 0x400328, 0x0050001c); /* wait flag 0x1c */

	/* wait for any pending grctx switches to complete */
	if (!nv_wait_cb(dev, nva3_pm_grcp_idle, dev)) {
		NV_ERROR(dev, "pm: ctxprog didn't go idle\n");
		goto cleanup;
	}

	/* freeze PFIFO */
	nv_mask(dev, 0x002504, 0x00000001, 0x00000001);
	if (!nv_wait(dev, 0x002504, 0x00000010, 0x00000010)) {
		NV_ERROR(dev, "pm: fifo didn't go idle\n");
		goto cleanup;
	}

	prog_pll(dev, 0x00, 0x004200, &info->nclk);
	prog_pll(dev, 0x01, 0x004220, &info->sclk);
	prog_clk(dev, 0x20, &info->unka0);
	prog_clk(dev, 0x21, &info->vdec);

	nv_wr32(dev, 0x100210, 0);
	nv_wr32(dev, 0x1002dc, 1);
	nv_wr32(dev, 0x004018, 0x00001000);
	prog_pll(dev, 0x02, 0x004000, &info->mclk);
	if (nv_rd32(dev, 0x4000) & 0x00000008)
		nv_wr32(dev, 0x004018, 0x1000d000);
	else
		nv_wr32(dev, 0x004018, 0x10005000);
	nv_wr32(dev, 0x1002dc, 0);
	nv_wr32(dev, 0x100210, 0x80000000);

cleanup:
	/* unfreeze PFIFO */
	nv_mask(dev, 0x002504, 0x00000001, 0x00000000);
	/* restore ctxprog to normal */
	nv_wr32(dev, 0x400324, 0x00000000);
	nv_wr32(dev, 0x400328, 0x0070009c); /* set flag 0x1c */
	/* unblock it if necessary */
	if (nv_rd32(dev, 0x400308) == 0x0050001c)
		nv_mask(dev, 0x400824, 0x10000000, 0x10000000);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);
	kfree(info);
}