nouveau_mem.c

/*
 * Copyright (C) The Weather Channel, Inc. 2002. All Rights Reserved.
 * Copyright 2005 Stephane Marchesin
 *
 * The Weather Channel (TM) funded Tungsten Graphics to develop the
 * initial release of the Radeon 8500 driver under the XFree86 license.
 * This notice must be preserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Skeggs <bskeggs@redhat.com>
 *    Roy Spliet <r.spliet@student.tudelft.nl>
 */
#include "drmP.h"
#include "drm.h"
#include "drm_sarea.h"

#include "nouveau_drv.h"
#include "nouveau_pm.h"
#include "nouveau_fence.h"

#include <core/mm.h>
#include <subdev/vm.h>
#include <engine/fifo.h>

/*
 * NV10-NV40 tiling helpers
 */
static void
nv10_mem_update_tile_region(struct drm_device *dev,
			    struct nouveau_tile_reg *tilereg, uint32_t addr,
			    uint32_t size, uint32_t pitch, uint32_t flags)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int i = tilereg - dev_priv->tile.reg, j;
	struct nouveau_fb_tile *tile = nvfb_tile(dev, i);
	unsigned long save;

	nouveau_fence_unref(&tilereg->fence);

	if (tile->pitch)
		nvfb_tile_fini(dev, i);

	if (pitch)
		nvfb_tile_init(dev, i, addr, size, pitch, flags);

	spin_lock_irqsave(&dev_priv->context_switch_lock, save);
	nv_wr32(dev, NV03_PFIFO_CACHES, 0);
	nv04_fifo_cache_pull(dev, false);

	nouveau_wait_for_idle(dev);

	nvfb_tile_prog(dev, i);
	for (j = 0; j < NVOBJ_ENGINE_NR; j++) {
		if (dev_priv->eng[j] && dev_priv->eng[j]->set_tile_region)
			dev_priv->eng[j]->set_tile_region(dev, i);
	}

	nv04_fifo_cache_pull(dev, true);
	nv_wr32(dev, NV03_PFIFO_CACHES, 1);
	spin_unlock_irqrestore(&dev_priv->context_switch_lock, save);
}

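/* Claim tile region i for exclusive use; returns NULL if the region is
 * still marked used or its previous user's fence has not signalled yet. */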
static struct nouveau_tile_reg *
nv10_mem_get_tile_region(struct drm_device *dev, int i)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_tile_reg *tile = &dev_priv->tile.reg[i];

	spin_lock(&dev_priv->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&dev_priv->tile.lock);
	return tile;
}

void
nv10_mem_put_tile_region(struct drm_device *dev, struct nouveau_tile_reg *tile,
			 struct nouveau_fence *fence)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (tile) {
		spin_lock(&dev_priv->tile.lock);
		if (fence) {
			/* Mark it as pending. */
			tile->fence = fence;
			nouveau_fence_ref(fence);
		}

		tile->used = false;
		spin_unlock(&dev_priv->tile.lock);
	}
}

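/* Pick a free tile region and program it for the requested surface layout;
 * any other region that is idle but still programmed is torn down along
 * the way. */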
struct nouveau_tile_reg *
nv10_mem_set_tiling(struct drm_device *dev, uint32_t addr, uint32_t size,
		    uint32_t pitch, uint32_t flags)
{
	struct nouveau_tile_reg *tile, *found = NULL;
	int i;

	for (i = 0; i < nvfb_tile_nr(dev); i++) {
		tile = nv10_mem_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && nvfb_tile(dev, i)->pitch) {
			/* Kill an unused tile region. */
			nv10_mem_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_mem_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_mem_update_tile_region(dev, found, addr, size,
					    pitch, flags);
	return found;
}

/*
 * Cleanup everything
 */
void
nouveau_mem_vram_fini(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	ttm_bo_device_release(&dev_priv->ttm.bdev);

	nouveau_ttm_global_release(dev_priv);

	if (dev_priv->fb_mtrr >= 0) {
		drm_mtrr_del(dev_priv->fb_mtrr,
			     pci_resource_start(dev->pdev, 1),
			     pci_resource_len(dev->pdev, 1), DRM_MTRR_WC);
		dev_priv->fb_mtrr = -1;
	}
}

void
nouveau_mem_gart_fini(struct drm_device *dev)
{
	nouveau_sgdma_takedown(dev);
}

int
nouveau_mem_vram_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
	int ret, dma_bits;

	dma_bits = 32;
	if (dev_priv->card_type >= NV_50) {
		if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(40)))
			dma_bits = 40;
	} else
	if (0 && pci_is_pcie(dev->pdev) &&
	    dev_priv->chipset > 0x40 &&
	    dev_priv->chipset != 0x45) {
		if (pci_dma_supported(dev->pdev, DMA_BIT_MASK(39)))
			dma_bits = 39;
	}

	ret = pci_set_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
	if (ret)
		return ret;
	ret = pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(dma_bits));
	if (ret) {
		/* Reset to default value. */
		pci_set_consistent_dma_mask(dev->pdev, DMA_BIT_MASK(32));
	}

	ret = nouveau_ttm_global_init(dev_priv);
	if (ret)
		return ret;

	ret = ttm_bo_device_init(&dev_priv->ttm.bdev,
				 dev_priv->ttm.bo_global_ref.ref.object,
				 &nouveau_bo_driver, DRM_FILE_PAGE_OFFSET,
				 dma_bits <= 32 ? true : false);
	if (ret) {
		NV_ERROR(dev, "Error initialising bo driver: %d\n", ret);
		return ret;
	}

	dev_priv->fb_available_size = nvfb_vram_size(dev);
	dev_priv->fb_mappable_pages = dev_priv->fb_available_size;
	if (dev_priv->fb_mappable_pages > pci_resource_len(dev->pdev, 1))
		dev_priv->fb_mappable_pages = pci_resource_len(dev->pdev, 1);
	dev_priv->fb_mappable_pages >>= PAGE_SHIFT;

	dev_priv->fb_available_size -= dev_priv->ramin_rsvd_vram;
	dev_priv->fb_aper_free = dev_priv->fb_available_size;

	/* mappable vram */
	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
			     dev_priv->fb_available_size >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(dev, "Failed VRAM mm init: %d\n", ret);
		return ret;
	}

	if (dev_priv->card_type < NV_50) {
		ret = nouveau_bo_new(dev, 256*1024, 0, TTM_PL_FLAG_VRAM,
				     0, 0, NULL, &dev_priv->vga_ram);
		if (ret == 0)
			ret = nouveau_bo_pin(dev_priv->vga_ram,
					     TTM_PL_FLAG_VRAM);

		if (ret) {
			NV_WARN(dev, "failed to reserve VGA memory\n");
			nouveau_bo_ref(NULL, &dev_priv->vga_ram);
		}
	}

	dev_priv->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 1),
					 pci_resource_len(dev->pdev, 1),
					 DRM_MTRR_WC);
	return 0;
}

int
nouveau_mem_gart_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct ttm_bo_device *bdev = &dev_priv->ttm.bdev;
	int ret;

	if (!nvdrm_gart_init(dev, &dev_priv->gart_info.aper_base,
			     &dev_priv->gart_info.aper_size))
		dev_priv->gart_info.type = NOUVEAU_GART_AGP;

	if (dev_priv->gart_info.type == NOUVEAU_GART_NONE) {
		ret = nouveau_sgdma_init(dev);
		if (ret) {
			NV_ERROR(dev, "Error initialising PCI(E): %d\n", ret);
			return ret;
		}
	}

	NV_INFO(dev, "%d MiB GART (aperture)\n",
		(int)(dev_priv->gart_info.aper_size >> 20));
	dev_priv->gart_info.aper_free = dev_priv->gart_info.aper_size;

	ret = ttm_bo_init_mm(bdev, TTM_PL_TT,
			     dev_priv->gart_info.aper_size >> PAGE_SHIFT);
	if (ret) {
		NV_ERROR(dev, "Failed TT mm init: %d\n", ret);
		return ret;
	}

	return 0;
}

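/* Memory timing calculation: pack a BIOS timing-table entry into the PFB
 * timing register layout of each card generation (0x100220 on NV40/NV50,
 * 0x10f290 on NVC0 and later). */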
static int
nv40_mem_timing_calc(struct drm_device *dev, u32 freq,
		     struct nouveau_pm_tbl_entry *e, u8 len,
		     struct nouveau_pm_memtiming *boot,
		     struct nouveau_pm_memtiming *t)
{
	t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);

	/* XXX: I don't trust the -1's and +1's... they must come
	 *      from somewhere! */
	t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
		    1 << 16 |
		    (e->tWTR + 2 + (t->tCWL - 1)) << 8 |
		    (e->tCL + 2 - (t->tCWL - 1));

	t->reg[2] = 0x20200000 |
		    ((t->tCWL - 1) << 24 |
		     e->tRRD << 16 |
		     e->tRCDWR << 8 |
		     e->tRCDRD);

	NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x\n", t->id,
		 t->reg[0], t->reg[1], t->reg[2]);
	return 0;
}

static int
nv50_mem_timing_calc(struct drm_device *dev, u32 freq,
		     struct nouveau_pm_tbl_entry *e, u8 len,
		     struct nouveau_pm_memtiming *boot,
		     struct nouveau_pm_memtiming *t)
{
	struct bit_entry P;
	uint8_t unk18 = 1, unk20 = 0, unk21 = 0, tmp7_3;

	if (bit_table(dev, 'P', &P))
		return -EINVAL;

	switch (min(len, (u8) 22)) {
	case 22:
		unk21 = e->tUNK_21;
	case 21:
		unk20 = e->tUNK_20;
	case 20:
		if (e->tCWL > 0)
			t->tCWL = e->tCWL;
	case 19:
		unk18 = e->tUNK_18;
		break;
	}

	t->reg[0] = (e->tRP << 24 | e->tRAS << 16 | e->tRFC << 8 | e->tRC);

	t->reg[1] = (e->tWR + 2 + (t->tCWL - 1)) << 24 |
		    max(unk18, (u8) 1) << 16 |
		    (e->tWTR + 2 + (t->tCWL - 1)) << 8;

	t->reg[2] = ((t->tCWL - 1) << 24 |
		     e->tRRD << 16 |
		     e->tRCDWR << 8 |
		     e->tRCDRD);

	t->reg[4] = e->tUNK_13 << 8 | e->tUNK_13;

	t->reg[5] = (e->tRFC << 24 | max(e->tRCDRD, e->tRCDWR) << 16 | e->tRP);

	t->reg[8] = boot->reg[8] & 0xffffff00;

	if (P.version == 1) {
		t->reg[1] |= (e->tCL + 2 - (t->tCWL - 1));

		t->reg[3] = (0x14 + e->tCL) << 24 |
			    0x16 << 16 |
			    (e->tCL - 1) << 8 |
			    (e->tCL - 1);

		t->reg[4] |= boot->reg[4] & 0xffff0000;

		t->reg[6] = (0x33 - t->tCWL) << 16 |
			    t->tCWL << 8 |
			    (0x2e + e->tCL - t->tCWL);

		t->reg[7] = 0x4000202 | (e->tCL - 1) << 16;

		/* XXX: P.version == 1 only has DDR2 and GDDR3? */
		if (nvfb_vram_type(dev) == NV_MEM_TYPE_DDR2) {
			t->reg[5] |= (e->tCL + 3) << 8;
			t->reg[6] |= (t->tCWL - 2) << 8;
			t->reg[8] |= (e->tCL - 4);
		} else {
			t->reg[5] |= (e->tCL + 2) << 8;
			t->reg[6] |= t->tCWL << 8;
			t->reg[8] |= (e->tCL - 2);
		}
	} else {
		t->reg[1] |= (5 + e->tCL - (t->tCWL));

		/* XXX: 0xb? 0x30? */
		t->reg[3] = (0x30 + e->tCL) << 24 |
			    (boot->reg[3] & 0x00ff0000) |
			    (0xb + e->tCL) << 8 |
			    (e->tCL - 1);

		t->reg[4] |= (unk20 << 24 | unk21 << 16);

		/* XXX: +6? */
		t->reg[5] |= (t->tCWL + 6) << 8;

		t->reg[6] = (0x5a + e->tCL) << 16 |
			    (6 - e->tCL + t->tCWL) << 8 |
			    (0x50 + e->tCL - t->tCWL);

		tmp7_3 = (boot->reg[7] & 0xff000000) >> 24;
		t->reg[7] = (tmp7_3 << 24) |
			    ((tmp7_3 - 6 + e->tCL) << 16) |
			    0x202;
	}

	NV_DEBUG(dev, "Entry %d: 220: %08x %08x %08x %08x\n", t->id,
		 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
	NV_DEBUG(dev, "         230: %08x %08x %08x %08x\n",
		 t->reg[4], t->reg[5], t->reg[6], t->reg[7]);
	NV_DEBUG(dev, "         240: %08x\n", t->reg[8]);
	return 0;
}

static int
nvc0_mem_timing_calc(struct drm_device *dev, u32 freq,
		     struct nouveau_pm_tbl_entry *e, u8 len,
		     struct nouveau_pm_memtiming *boot,
		     struct nouveau_pm_memtiming *t)
{
	if (e->tCWL > 0)
		t->tCWL = e->tCWL;

	t->reg[0] = (e->tRP << 24 | (e->tRAS & 0x7f) << 17 |
		     e->tRFC << 8 | e->tRC);

	t->reg[1] = (boot->reg[1] & 0xff000000) |
		    (e->tRCDWR & 0x0f) << 20 |
		    (e->tRCDRD & 0x0f) << 14 |
		    (t->tCWL << 7) |
		    (e->tCL & 0x0f);

	t->reg[2] = (boot->reg[2] & 0xff0000ff) |
		    e->tWR << 16 | e->tWTR << 8;

	t->reg[3] = (e->tUNK_20 & 0x1f) << 9 |
		    (e->tUNK_21 & 0xf) << 5 |
		    (e->tUNK_13 & 0x1f);

	t->reg[4] = (boot->reg[4] & 0xfff00fff) |
		    (e->tRRD & 0x1f) << 15;

	NV_DEBUG(dev, "Entry %d: 290: %08x %08x %08x %08x\n", t->id,
		 t->reg[0], t->reg[1], t->reg[2], t->reg[3]);
	NV_DEBUG(dev, "         2a0: %08x\n", t->reg[4]);
	return 0;
}

/**
 * MR generation methods
 */
static int
nouveau_mem_ddr2_mr(struct drm_device *dev, u32 freq,
		    struct nouveau_pm_tbl_entry *e, u8 len,
		    struct nouveau_pm_memtiming *boot,
		    struct nouveau_pm_memtiming *t)
{
	t->drive_strength = 0;
	if (len < 15) {
		t->odt = boot->odt;
	} else {
		t->odt = e->RAM_FT1 & 0x07;
	}

	if (e->tCL >= NV_MEM_CL_DDR2_MAX) {
		NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
		return -ERANGE;
	}

	if (e->tWR >= NV_MEM_WR_DDR2_MAX) {
		NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
		return -ERANGE;
	}

	if (t->odt > 3) {
		NV_WARN(dev, "(%u) Invalid odt value, assuming disabled: %x",
			t->id, t->odt);
		t->odt = 0;
	}

	t->mr[0] = (boot->mr[0] & 0x100f) |
		   (e->tCL) << 4 |
		   (e->tWR - 1) << 9;
	t->mr[1] = (boot->mr[1] & 0x101fbb) |
		   (t->odt & 0x1) << 2 |
		   (t->odt & 0x2) << 5;

	NV_DEBUG(dev, "(%u) MR: %08x", t->id, t->mr[0]);
	return 0;
}

uint8_t nv_mem_wr_lut_ddr3[NV_MEM_WR_DDR3_MAX] = {
	0, 0, 0, 0, 0, 1, 2, 3, 4, 5, 5, 6, 6, 7, 7, 0, 0};

static int
nouveau_mem_ddr3_mr(struct drm_device *dev, u32 freq,
		    struct nouveau_pm_tbl_entry *e, u8 len,
		    struct nouveau_pm_memtiming *boot,
		    struct nouveau_pm_memtiming *t)
{
	u8 cl = e->tCL - 4;

	t->drive_strength = 0;
	if (len < 15) {
		t->odt = boot->odt;
	} else {
		t->odt = e->RAM_FT1 & 0x07;
	}

	if (e->tCL >= NV_MEM_CL_DDR3_MAX || e->tCL < 4) {
		NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
		return -ERANGE;
	}

	if (e->tWR >= NV_MEM_WR_DDR3_MAX || e->tWR < 4) {
		NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
		return -ERANGE;
	}

	if (e->tCWL < 5) {
		NV_WARN(dev, "(%u) Invalid tCWL: %u", t->id, e->tCWL);
		return -ERANGE;
	}

	t->mr[0] = (boot->mr[0] & 0x180b) |
		   /* CAS */
		   (cl & 0x7) << 4 |
		   (cl & 0x8) >> 1 |
		   (nv_mem_wr_lut_ddr3[e->tWR]) << 9;
	t->mr[1] = (boot->mr[1] & 0x101dbb) |
		   (t->odt & 0x1) << 2 |
		   (t->odt & 0x2) << 5 |
		   (t->odt & 0x4) << 7;
	t->mr[2] = (boot->mr[2] & 0x20ffb7) | (e->tCWL - 5) << 3;

	NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[2]);
	return 0;
}

uint8_t nv_mem_cl_lut_gddr3[NV_MEM_CL_GDDR3_MAX] = {
	0, 0, 0, 0, 4, 5, 6, 7, 0, 1, 2, 3, 8, 9, 10, 11};
uint8_t nv_mem_wr_lut_gddr3[NV_MEM_WR_GDDR3_MAX] = {
	0, 0, 0, 0, 0, 2, 3, 8, 9, 10, 11, 0, 0, 1, 1, 0, 3};

static int
nouveau_mem_gddr3_mr(struct drm_device *dev, u32 freq,
		     struct nouveau_pm_tbl_entry *e, u8 len,
		     struct nouveau_pm_memtiming *boot,
		     struct nouveau_pm_memtiming *t)
{
	if (len < 15) {
		t->drive_strength = boot->drive_strength;
		t->odt = boot->odt;
	} else {
		t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
		t->odt = e->RAM_FT1 & 0x07;
	}

	if (e->tCL >= NV_MEM_CL_GDDR3_MAX) {
		NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
		return -ERANGE;
	}

	if (e->tWR >= NV_MEM_WR_GDDR3_MAX) {
		NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
		return -ERANGE;
	}

	if (t->odt > 3) {
		NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x",
			t->id, t->odt);
		t->odt = 0;
	}

	t->mr[0] = (boot->mr[0] & 0xe0b) |
		   /* CAS */
		   ((nv_mem_cl_lut_gddr3[e->tCL] & 0x7) << 4) |
		   ((nv_mem_cl_lut_gddr3[e->tCL] & 0x8) >> 2);
	t->mr[1] = (boot->mr[1] & 0x100f40) | t->drive_strength |
		   (t->odt << 2) |
		   (nv_mem_wr_lut_gddr3[e->tWR] & 0xf) << 4;
	t->mr[2] = boot->mr[2];

	NV_DEBUG(dev, "(%u) MR: %08x %08x %08x", t->id,
		 t->mr[0], t->mr[1], t->mr[2]);
	return 0;
}

static int
nouveau_mem_gddr5_mr(struct drm_device *dev, u32 freq,
		     struct nouveau_pm_tbl_entry *e, u8 len,
		     struct nouveau_pm_memtiming *boot,
		     struct nouveau_pm_memtiming *t)
{
	if (len < 15) {
		t->drive_strength = boot->drive_strength;
		t->odt = boot->odt;
	} else {
		t->drive_strength = (e->RAM_FT1 & 0x30) >> 4;
		t->odt = e->RAM_FT1 & 0x03;
	}

	if (e->tCL >= NV_MEM_CL_GDDR5_MAX) {
		NV_WARN(dev, "(%u) Invalid tCL: %u", t->id, e->tCL);
		return -ERANGE;
	}

	if (e->tWR >= NV_MEM_WR_GDDR5_MAX) {
		NV_WARN(dev, "(%u) Invalid tWR: %u", t->id, e->tWR);
		return -ERANGE;
	}

	if (t->odt > 3) {
		NV_WARN(dev, "(%u) Invalid odt value, assuming autocal: %x",
			t->id, t->odt);
		t->odt = 0;
	}

	t->mr[0] = (boot->mr[0] & 0x007) |
		   ((e->tCL - 5) << 3) |
		   ((e->tWR - 4) << 8);
	t->mr[1] = (boot->mr[1] & 0x1007f0) |
		   t->drive_strength |
		   (t->odt << 2);

	NV_DEBUG(dev, "(%u) MR: %08x %08x", t->id, t->mr[0], t->mr[1]);
	return 0;
}

int
nouveau_mem_timing_calc(struct drm_device *dev, u32 freq,
			struct nouveau_pm_memtiming *t)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_pm_engine *pm = &dev_priv->engine.pm;
	struct nouveau_pm_memtiming *boot = &pm->boot.timing;
	struct nouveau_pm_tbl_entry *e;
	u8 ver, len, *ptr, *ramcfg;
	int ret;

	ptr = nouveau_perf_timing(dev, freq, &ver, &len);
	if (!ptr || ptr[0] == 0x00) {
		*t = *boot;
		return 0;
	}
	e = (struct nouveau_pm_tbl_entry *)ptr;

	t->tCWL = boot->tCWL;

	switch (dev_priv->card_type) {
	case NV_40:
		ret = nv40_mem_timing_calc(dev, freq, e, len, boot, t);
		break;
	case NV_50:
		ret = nv50_mem_timing_calc(dev, freq, e, len, boot, t);
		break;
	case NV_C0:
	case NV_D0:
		ret = nvc0_mem_timing_calc(dev, freq, e, len, boot, t);
		break;
	default:
		ret = -ENODEV;
		break;
	}

	switch (nvfb_vram_type(dev) * !ret) {
	case NV_MEM_TYPE_GDDR3:
		ret = nouveau_mem_gddr3_mr(dev, freq, e, len, boot, t);
		break;
	case NV_MEM_TYPE_GDDR5:
		ret = nouveau_mem_gddr5_mr(dev, freq, e, len, boot, t);
		break;
	case NV_MEM_TYPE_DDR2:
		ret = nouveau_mem_ddr2_mr(dev, freq, e, len, boot, t);
		break;
	case NV_MEM_TYPE_DDR3:
		ret = nouveau_mem_ddr3_mr(dev, freq, e, len, boot, t);
		break;
	default:
		ret = -EINVAL;
		break;
	}

	ramcfg = nouveau_perf_ramcfg(dev, freq, &ver, &len);
	if (ramcfg) {
		int dll_off;

		if (ver == 0x00)
			dll_off = !!(ramcfg[3] & 0x04);
		else
			dll_off = !!(ramcfg[2] & 0x40);

		switch (nvfb_vram_type(dev)) {
		case NV_MEM_TYPE_GDDR3:
			t->mr[1] &= ~0x00000040;
			t->mr[1] |= 0x00000040 * dll_off;
			break;
		default:
			t->mr[1] &= ~0x00000001;
			t->mr[1] |= 0x00000001 * dll_off;
			break;
		}
	}

	return ret;
}

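/* Read the memory timings, mode registers, ODT and drive-strength settings
 * currently programmed into PFB back into *t. */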
void
nouveau_mem_timing_read(struct drm_device *dev, struct nouveau_pm_memtiming *t)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u32 timing_base, timing_regs, mr_base;
	int i;

	if (dev_priv->card_type >= 0xC0) {
		timing_base = 0x10f290;
		mr_base = 0x10f300;
	} else {
		timing_base = 0x100220;
		mr_base = 0x1002c0;
	}

	t->id = -1;

	switch (dev_priv->card_type) {
	case NV_50:
		timing_regs = 9;
		break;
	case NV_C0:
	case NV_D0:
		timing_regs = 5;
		break;
	case NV_30:
	case NV_40:
		timing_regs = 3;
		break;
	default:
		timing_regs = 0;
		return;
	}
	for (i = 0; i < timing_regs; i++)
		t->reg[i] = nv_rd32(dev, timing_base + (0x04 * i));

	t->tCWL = 0;
	if (dev_priv->card_type < NV_C0) {
		t->tCWL = ((nv_rd32(dev, 0x100228) & 0x0f000000) >> 24) + 1;
	} else if (dev_priv->card_type <= NV_D0) {
		t->tCWL = ((nv_rd32(dev, 0x10f294) & 0x00000f80) >> 7);
	}

	t->mr[0] = nv_rd32(dev, mr_base);
	t->mr[1] = nv_rd32(dev, mr_base + 0x04);
	t->mr[2] = nv_rd32(dev, mr_base + 0x20);
	t->mr[3] = nv_rd32(dev, mr_base + 0x24);

	t->odt = 0;
	t->drive_strength = 0;

	switch (nvfb_vram_type(dev)) {
	case NV_MEM_TYPE_DDR3:
		t->odt |= (t->mr[1] & 0x200) >> 7;
	case NV_MEM_TYPE_DDR2:
		t->odt |= (t->mr[1] & 0x04) >> 2 |
			  (t->mr[1] & 0x40) >> 5;
		break;
	case NV_MEM_TYPE_GDDR3:
	case NV_MEM_TYPE_GDDR5:
		t->drive_strength = t->mr[1] & 0x03;
		t->odt = (t->mr[1] & 0x0c) >> 2;
		break;
	default:
		break;
	}
}

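/* Perform a DRAM reclock through the chipset callbacks in *exec: drop the
 * DLL if the target state runs with it disabled, enter self-refresh, switch
 * the memory clock, exit self-refresh, rewrite the mode and timing
 * registers, then reset the DLL if it remains enabled. */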
int
nouveau_mem_exec(struct nouveau_mem_exec_func *exec,
		 struct nouveau_pm_level *perflvl)
{
	struct drm_nouveau_private *dev_priv = exec->dev->dev_private;
	struct nouveau_pm_memtiming *info = &perflvl->timing;
	u32 tMRD = 1000, tCKSRE = 0, tCKSRX = 0, tXS = 0, tDLLK = 0;
	u32 mr[3] = { info->mr[0], info->mr[1], info->mr[2] };
	u32 mr1_dlloff;

	switch (nvfb_vram_type(dev_priv->dev)) {
	case NV_MEM_TYPE_DDR2:
		tDLLK = 2000;
		mr1_dlloff = 0x00000001;
		break;
	case NV_MEM_TYPE_DDR3:
		tDLLK = 12000;
		tCKSRE = 2000;
		tXS = 1000;
		mr1_dlloff = 0x00000001;
		break;
	case NV_MEM_TYPE_GDDR3:
		tDLLK = 40000;
		mr1_dlloff = 0x00000040;
		break;
	default:
		NV_ERROR(exec->dev, "cannot reclock unsupported memtype\n");
		return -ENODEV;
	}

	/* fetch current MRs */
	switch (nvfb_vram_type(dev_priv->dev)) {
	case NV_MEM_TYPE_GDDR3:
	case NV_MEM_TYPE_DDR3:
		mr[2] = exec->mrg(exec, 2);
	default:
		mr[1] = exec->mrg(exec, 1);
		mr[0] = exec->mrg(exec, 0);
		break;
	}

	/* DLL 'on' -> DLL 'off' mode, disable before entering self-refresh */
	if (!(mr[1] & mr1_dlloff) && (info->mr[1] & mr1_dlloff)) {
		exec->precharge(exec);
		exec->mrs (exec, 1, mr[1] | mr1_dlloff);
		exec->wait(exec, tMRD);
	}

	/* enter self-refresh mode */
	exec->precharge(exec);
	exec->refresh(exec);
	exec->refresh(exec);
	exec->refresh_auto(exec, false);
	exec->refresh_self(exec, true);
	exec->wait(exec, tCKSRE);

	/* modify input clock frequency */
	exec->clock_set(exec);

	/* exit self-refresh mode */
	exec->wait(exec, tCKSRX);
	exec->precharge(exec);
	exec->refresh_self(exec, false);
	exec->refresh_auto(exec, true);
	exec->wait(exec, tXS);
	exec->wait(exec, tXS);

	/* update MRs */
	if (mr[2] != info->mr[2]) {
		exec->mrs (exec, 2, info->mr[2]);
		exec->wait(exec, tMRD);
	}

	if (mr[1] != info->mr[1]) {
		/* need to keep DLL off until later, at least on GDDR3 */
		exec->mrs (exec, 1, info->mr[1] | (mr[1] & mr1_dlloff));
		exec->wait(exec, tMRD);
	}

	if (mr[0] != info->mr[0]) {
		exec->mrs (exec, 0, info->mr[0]);
		exec->wait(exec, tMRD);
	}

	/* update PFB timing registers */
	exec->timing_set(exec);

	/* DLL (enable + ) reset */
	if (!(info->mr[1] & mr1_dlloff)) {
		if (mr[1] & mr1_dlloff) {
			exec->mrs (exec, 1, info->mr[1]);
			exec->wait(exec, tMRD);
		}
		exec->mrs (exec, 0, info->mr[0] | 0x00000100);
		exec->wait(exec, tMRD);
		exec->mrs (exec, 0, info->mr[0] | 0x00000000);
		exec->wait(exec, tMRD);
		exec->wait(exec, tDLLK);
		if (nvfb_vram_type(dev_priv->dev) == NV_MEM_TYPE_GDDR3)
			exec->precharge(exec);
	}

	return 0;
}

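/* Look up the memory type in the VBIOS 'M' table, indexed by the RAMCFG
 * strap read from 0x101000. */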
int
nouveau_mem_vbios_type(struct drm_device *dev)
{
	struct bit_entry M;
	u8 ramcfg = (nv_rd32(dev, 0x101000) & 0x0000003c) >> 2;
	if (!bit_table(dev, 'M', &M) && M.version == 2 && M.length >= 5) {
		u8 *table = ROMPTR(dev, M.data[3]);
		if (table && table[0] == 0x10 && ramcfg < table[3]) {
			u8 *entry = table + table[1] + (ramcfg * table[2]);
			switch (entry[0] & 0x0f) {
			case 0: return NV_MEM_TYPE_DDR2;
			case 1: return NV_MEM_TYPE_DDR3;
			case 2: return NV_MEM_TYPE_GDDR3;
			case 3: return NV_MEM_TYPE_GDDR5;
			default:
				break;
			}
		}
	}
	return NV_MEM_TYPE_UNKNOWN;
}

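/* TTM memory-type manager backends for the VRAM and GART domains. */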
static int
nouveau_vram_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	/* nothing to do */
	return 0;
}

static int
nouveau_vram_manager_fini(struct ttm_mem_type_manager *man)
{
	/* nothing to do */
	return 0;
}

static inline void
nouveau_mem_node_cleanup(struct nouveau_mem *node)
{
	if (node->vma[0].node) {
		nouveau_vm_unmap(&node->vma[0]);
		nouveau_vm_put(&node->vma[0]);
	}

	if (node->vma[1].node) {
		nouveau_vm_unmap(&node->vma[1]);
		nouveau_vm_put(&node->vma[1]);
	}
}

static void
nouveau_vram_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
	struct drm_device *dev = dev_priv->dev;

	nouveau_mem_node_cleanup(mem->mm_node);
	nvfb_vram_put(dev, (struct nouveau_mem **)&mem->mm_node);
}

static int
nouveau_vram_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(man->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_mem *node;
	u32 size_nc = 0;
	int ret;

	if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG)
		size_nc = 1 << nvbo->page_shift;

	ret = nvfb_vram_get(dev, mem->num_pages << PAGE_SHIFT,
			    mem->page_alignment << PAGE_SHIFT, size_nc,
			    (nvbo->tile_flags >> 8) & 0x3ff, &node);
	if (ret) {
		mem->mm_node = NULL;
		return (ret == -ENOSPC) ? 0 : ret;
	}

	node->page_shift = nvbo->page_shift;

	mem->mm_node = node;
	mem->start = node->offset >> PAGE_SHIFT;
	return 0;
}

void
nouveau_vram_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
	struct nouveau_mm *mm = man->priv;
	struct nouveau_mm_node *r;
	u32 total = 0, free = 0;

	mutex_lock(&mm->mutex);
	list_for_each_entry(r, &mm->nodes, nl_entry) {
		printk(KERN_DEBUG "%s %d: 0x%010llx 0x%010llx\n",
		       prefix, r->type, ((u64)r->offset << 12),
		       (((u64)r->offset + r->length) << 12));

		total += r->length;
		if (!r->type)
			free += r->length;
	}
	mutex_unlock(&mm->mutex);

	printk(KERN_DEBUG "%s total: 0x%010llx free: 0x%010llx\n",
	       prefix, (u64)total << 12, (u64)free << 12);
	printk(KERN_DEBUG "%s block: 0x%08x\n",
	       prefix, mm->block_size << 12);
}

const struct ttm_mem_type_manager_func nouveau_vram_manager = {
	nouveau_vram_manager_init,
	nouveau_vram_manager_fini,
	nouveau_vram_manager_new,
	nouveau_vram_manager_del,
	nouveau_vram_manager_debug
};

static int
nouveau_gart_manager_init(struct ttm_mem_type_manager *man, unsigned long psize)
{
	return 0;
}

static int
nouveau_gart_manager_fini(struct ttm_mem_type_manager *man)
{
	return 0;
}

static void
nouveau_gart_manager_del(struct ttm_mem_type_manager *man,
			 struct ttm_mem_reg *mem)
{
	nouveau_mem_node_cleanup(mem->mm_node);
	kfree(mem->mm_node);
	mem->mm_node = NULL;
}

static int
nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
			 struct ttm_buffer_object *bo,
			 struct ttm_placement *placement,
			 struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_mem *node;

	if (unlikely((mem->num_pages << PAGE_SHIFT) >=
		     dev_priv->gart_info.aper_size))
		return -ENOMEM;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;
	node->page_shift = 12;

	mem->mm_node = node;
	mem->start = 0;
	return 0;
}

void
nouveau_gart_manager_debug(struct ttm_mem_type_manager *man, const char *prefix)
{
}

const struct ttm_mem_type_manager_func nouveau_gart_manager = {
	nouveau_gart_manager_init,
	nouveau_gart_manager_fini,
	nouveau_gart_manager_new,
	nouveau_gart_manager_del,
	nouveau_gart_manager_debug
};