/*
 * Copyright (C) 2007 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "drmP.h"
#include "drm.h"

#include "nouveau_drv.h"

struct nv50_instmem_priv {
	uint32_t save1700[5]; /* 0x1700->0x1710 */

	struct nouveau_gpuobj_ref *pramin_pt;
	struct nouveau_gpuobj_ref *pramin_bar;
	struct nouveau_gpuobj_ref *fb_bar;

	bool last_access_wr;
};

#define NV50_INSTMEM_PAGE_SHIFT 12
#define NV50_INSTMEM_PAGE_SIZE  (1 << NV50_INSTMEM_PAGE_SHIFT)
#define NV50_INSTMEM_PT_SIZE(a) (((a) >> 12) << 3)
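
/* NV50_INSTMEM_PT_SIZE(a): one 8-byte page-table entry is needed for every
 * 4KiB (1 << NV50_INSTMEM_PAGE_SHIFT) page of an aperture of size 'a', so
 * the PRAMIN page table occupies ((a >> 12) << 3) bytes.
 */
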
/* NOTE: assumes 0x1700 already covers the correct MiB of PRAMIN.
 */
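/* BAR0_WI32() writes a 32-bit value into an object's instance memory by
 * poking it through the 1MiB PRAMIN window in BAR0 (NV_RAMIN).  The offset
 * is taken from the object's own VRAM backing ("im_backing_start") or, for
 * objects suballocated from the fake channel, from the channel's backing
 * plus the object's offset within its block.  It is used during init and
 * resume, before the PRAMIN BAR itself is usable.
 */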
#define BAR0_WI32(g, o, v) do {                                   \
	uint32_t offset;                                          \
	if ((g)->im_backing) {                                    \
		offset = (g)->im_backing_start;                   \
	} else {                                                  \
		offset  = chan->ramin->gpuobj->im_backing_start;  \
		offset += (g)->im_pramin->start;                  \
	}                                                         \
	offset += (o);                                            \
	nv_wr32(dev, NV_RAMIN + (offset & 0xfffff), (v));         \
} while (0)

int
nv50_instmem_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan;
	uint32_t c_offset, c_size, c_ramfc, c_vmpd, c_base, pt_size;
	struct nv50_instmem_priv *priv;
	int ret, i;
	uint32_t v, save_nv001700;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	dev_priv->engine.instmem.priv = priv;

	/* Save state, will restore at takedown. */
	for (i = 0x1700; i <= 0x1710; i += 4)
		priv->save1700[(i-0x1700)/4] = nv_rd32(dev, i);
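
	/* On the 0xaa/0xac IGP chipsets the "VRAM" is carved out of system
	 * memory; 0x100e10 appears to hold its physical base in 4KiB units.
	 * It is kept in vram_sys_base and folded into the PTEs written below.
	 */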
	if (dev_priv->chipset == 0xaa || dev_priv->chipset == 0xac)
		dev_priv->vram_sys_base = nv_rd32(dev, 0x100e10) << 12;
	else
		dev_priv->vram_sys_base = 0;

	/* Reserve the last MiB of VRAM, we should probably try to avoid
	 * setting up the below tables over the top of the VBIOS image at
	 * some point.
	 */
	dev_priv->ramin_rsvd_vram = 1 << 20;
	c_offset = nouveau_mem_fb_amount(dev) - dev_priv->ramin_rsvd_vram;
	c_size   = 128 << 10;
	c_vmpd   = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x1400 : 0x200;
	c_ramfc  = ((dev_priv->chipset & 0xf0) == 0x50) ? 0x0 : 0x20;
	c_base   = c_vmpd + 0x4000;
	pt_size  = NV50_INSTMEM_PT_SIZE(dev_priv->ramin_size);

	NV_DEBUG(dev, " Rsvd VRAM base: 0x%08x\n", c_offset);
	NV_DEBUG(dev, "    VBIOS image: 0x%08x\n",
		 (nv_rd32(dev, 0x619f04) & ~0xff) << 8);
	NV_DEBUG(dev, "  Aperture size: %d MiB\n", dev_priv->ramin_size >> 20);
	NV_DEBUG(dev, "        PT size: %d KiB\n", pt_size >> 10);

	/* Determine VM layout, we need to do this first to make sure
	 * we allocate enough memory for all the page tables.
	 */
	dev_priv->vm_gart_base = roundup(NV50_VM_BLOCK, NV50_VM_BLOCK);
	dev_priv->vm_gart_size = NV50_VM_BLOCK;

	dev_priv->vm_vram_base = dev_priv->vm_gart_base + dev_priv->vm_gart_size;
	dev_priv->vm_vram_size = nouveau_mem_fb_amount(dev);
	if (dev_priv->vm_vram_size > NV50_VM_MAX_VRAM)
		dev_priv->vm_vram_size = NV50_VM_MAX_VRAM;
	dev_priv->vm_vram_size = roundup(dev_priv->vm_vram_size, NV50_VM_BLOCK);
	dev_priv->vm_vram_pt_nr = dev_priv->vm_vram_size / NV50_VM_BLOCK;

	dev_priv->vm_end = dev_priv->vm_vram_base + dev_priv->vm_vram_size;

	NV_DEBUG(dev, "NV50VM: GART 0x%016llx-0x%016llx\n",
		 dev_priv->vm_gart_base,
		 dev_priv->vm_gart_base + dev_priv->vm_gart_size - 1);
	NV_DEBUG(dev, "NV50VM: VRAM 0x%016llx-0x%016llx\n",
		 dev_priv->vm_vram_base,
		 dev_priv->vm_vram_base + dev_priv->vm_vram_size - 1);
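
	/* Grow the fake channel's block to make room for the VRAM page
	 * tables allocated from its heap below: one table of
	 * NV50_VM_BLOCK / 65536 * 8 bytes (8 bytes of PTE per 64KiB of VM
	 * space) per NV50_VM_BLOCK of VRAM address space.
	 */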
	c_size += dev_priv->vm_vram_pt_nr * (NV50_VM_BLOCK / 65536 * 8);

	/* Map BAR0 PRAMIN aperture over the memory we want to use */
	save_nv001700 = nv_rd32(dev, NV50_PUNK_BAR0_PRAMIN);
	nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (c_offset >> 16));

	/* Create a fake channel, and use it as our "dummy" channels 0/127.
	 * The main reason for creating a channel is so we can use the gpuobj
	 * code.  However, it's probably worth noting that NVIDIA also set up
	 * their channels 0/127 with the same values they configure here.
	 * So, there may be some other reason for doing this.
	 *
	 * We have to create the entire channel manually, as the real channel
	 * creation code assumes we have PRAMIN access, and we don't until
	 * we're done here.
	 */
	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;
	chan->id = 0;
	chan->dev = dev;
	chan->file_priv = (struct drm_file *)-2;
	dev_priv->fifos[0] = dev_priv->fifos[127] = chan;

	/* Channel's PRAMIN object + heap */
	ret = nouveau_gpuobj_new_fake(dev, 0, c_offset, c_size, 0,
				      NULL, &chan->ramin);
	if (ret)
		return ret;
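
	/* Suballocation heap for the rest of the channel block, above the
	 * fixed RAMFC and page-directory area ([0, c_base)).
	 */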
	if (nouveau_mem_init_heap(&chan->ramin_heap, c_base, c_size - c_base))
		return -ENOMEM;

	/* RAMFC + zero channel's PRAMIN up to start of VM pagedir */
	ret = nouveau_gpuobj_new_fake(dev, c_ramfc, c_offset + c_ramfc,
				      0x4000, 0, NULL, &chan->ramfc);
	if (ret)
		return ret;

	for (i = 0; i < c_vmpd; i += 4)
		BAR0_WI32(chan->ramin->gpuobj, i, 0);

	/* VM page directory */
	ret = nouveau_gpuobj_new_fake(dev, c_vmpd, c_offset + c_vmpd,
				      0x4000, 0, &chan->vm_pd, NULL);
	if (ret)
		return ret;
	for (i = 0; i < 0x4000; i += 8) {
		BAR0_WI32(chan->vm_pd, i + 0x00, 0x00000000);
		BAR0_WI32(chan->vm_pd, i + 0x04, 0x00000000);
	}

	/* PRAMIN page table, cheat and map into VM at 0x0000000000.
	 * We map the entire fake channel into the start of the PRAMIN BAR
	 */
	ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, pt_size, 0x1000,
				     0, &priv->pramin_pt);
	if (ret)
		return ret;
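
	/* Fill the PRAMIN page table: bit 0 of the low PTE word marks the
	 * entry valid, and on the IGP chipsets the stolen-memory base is
	 * added and the extra 0x30 bits (presumably the system-memory
	 * target) are set.  Each 8-byte PTE maps one 4KiB page of the fake
	 * channel's block, starting at VM address 0; the remainder of the
	 * table is cleared.
	 */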
	v = c_offset | 1;
	if (dev_priv->vram_sys_base) {
		v += dev_priv->vram_sys_base;
		v |= 0x30;
	}

	i = 0;
	while (v < dev_priv->vram_sys_base + c_offset + c_size) {
		BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, v);
		BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
		v += 0x1000;
		i += 8;
	}

	while (i < pt_size) {
		BAR0_WI32(priv->pramin_pt->gpuobj, i + 0, 0x00000000);
		BAR0_WI32(priv->pramin_pt->gpuobj, i + 4, 0x00000000);
		i += 8;
	}

	BAR0_WI32(chan->vm_pd, 0x00, priv->pramin_pt->instance | 0x63);
	BAR0_WI32(chan->vm_pd, 0x04, 0x00000000);
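
	/* The first page-directory entry (written above) links the VM block
	 * at address 0 to the PRAMIN page table; the low bits OR'd in
	 * (0x63 here, 0x61 for the VRAM tables below) are PDE flag fields
	 * that this file leaves undocumented.
	 */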

	/* VRAM page table(s), mapped into VM at +1GiB */
	for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
		ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0,
					     NV50_VM_BLOCK/65536*8, 0, 0,
					     &chan->vm_vram_pt[i]);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM page tables: %d\n",
				 ret);
			dev_priv->vm_vram_pt_nr = i;
			return ret;
		}
		dev_priv->vm_vram_pt[i] = chan->vm_vram_pt[i]->gpuobj;

		for (v = 0; v < dev_priv->vm_vram_pt[i]->im_pramin->size;
		     v += 4)
			BAR0_WI32(dev_priv->vm_vram_pt[i], v, 0);

		BAR0_WI32(chan->vm_pd, 0x10 + (i*8),
			  chan->vm_vram_pt[i]->instance | 0x61);
		BAR0_WI32(chan->vm_pd, 0x14 + (i*8), 0);
	}

	/* DMA object for PRAMIN BAR */
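	/* This and the FB DMA object below are six-word NV50 DMA objects
	 * built by hand.  Word 0x04 appears to hold the limit and word 0x08
	 * the base address within the fake channel's VM: the PRAMIN object
	 * covers [0, ramin_size), where the channel block was just mapped,
	 * and the FB object covers BAR1-sized VRAM starting at +1GiB
	 * (0x40000000), matching the VRAM page tables above.
	 */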
	ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0,
				     &priv->pramin_bar);
	if (ret)
		return ret;
	BAR0_WI32(priv->pramin_bar->gpuobj, 0x00, 0x7fc00000);
	BAR0_WI32(priv->pramin_bar->gpuobj, 0x04, dev_priv->ramin_size - 1);
	BAR0_WI32(priv->pramin_bar->gpuobj, 0x08, 0x00000000);
	BAR0_WI32(priv->pramin_bar->gpuobj, 0x0c, 0x00000000);
	BAR0_WI32(priv->pramin_bar->gpuobj, 0x10, 0x00000000);
	BAR0_WI32(priv->pramin_bar->gpuobj, 0x14, 0x00000000);

	/* DMA object for FB BAR */
	ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0, 6*4, 16, 0,
				     &priv->fb_bar);
	if (ret)
		return ret;
	BAR0_WI32(priv->fb_bar->gpuobj, 0x00, 0x7fc00000);
	BAR0_WI32(priv->fb_bar->gpuobj, 0x04, 0x40000000 +
		  drm_get_resource_len(dev, 1) - 1);
	BAR0_WI32(priv->fb_bar->gpuobj, 0x08, 0x40000000);
	BAR0_WI32(priv->fb_bar->gpuobj, 0x0c, 0x00000000);
	BAR0_WI32(priv->fb_bar->gpuobj, 0x10, 0x00000000);
	BAR0_WI32(priv->fb_bar->gpuobj, 0x14, 0x00000000);

	/* Poke the relevant regs, and pray it works :) */
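	/* BAR_CFG_BASE is pointed at the fake channel's instance, then the
	 * FB DMA object is attached to BAR1 and the PRAMIN DMA object to
	 * BAR3, so CPU accesses through those PCI BARs are translated by
	 * the fake channel's VM set up above.
	 */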
	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12));
	nv_wr32(dev, NV50_PUNK_UNK1710, 0);
	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) |
					NV50_PUNK_BAR_CFG_BASE_VALID);
	nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) |
					NV50_PUNK_BAR1_CTXDMA_VALID);
	nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) |
					NV50_PUNK_BAR3_CTXDMA_VALID);

	for (i = 0; i < 8; i++)
		nv_wr32(dev, 0x1900 + (i*4), 0);

	/* Assume that praying isn't enough, check that we can re-read the
	 * entire fake channel back from the PRAMIN BAR */
	dev_priv->engine.instmem.prepare_access(dev, false);
	for (i = 0; i < c_size; i += 4) {
		if (nv_rd32(dev, NV_RAMIN + i) != nv_ri32(dev, i)) {
			NV_ERROR(dev, "Error reading back PRAMIN at 0x%08x\n",
				 i);
			dev_priv->engine.instmem.finish_access(dev);
			return -EINVAL;
		}
	}
	dev_priv->engine.instmem.finish_access(dev);

	nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, save_nv001700);

	/* Global PRAMIN heap */
	if (nouveau_mem_init_heap(&dev_priv->ramin_heap,
				  c_size, dev_priv->ramin_size - c_size)) {
		dev_priv->ramin_heap = NULL;
		NV_ERROR(dev, "Failed to init RAMIN heap\n");
	}

	/*XXX: incorrect, but needed to make hash func "work" */
	dev_priv->ramht_offset = 0x10000;
	dev_priv->ramht_bits   = 9;
	dev_priv->ramht_size   = (1 << dev_priv->ramht_bits);
	return 0;
}

void
nv50_instmem_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
	struct nouveau_channel *chan = dev_priv->fifos[0];
	int i;

	NV_DEBUG(dev, "\n");

	if (!priv)
		return;

	/* Restore state from before init */
	for (i = 0x1700; i <= 0x1710; i += 4)
		nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);

	nouveau_gpuobj_ref_del(dev, &priv->fb_bar);
	nouveau_gpuobj_ref_del(dev, &priv->pramin_bar);
	nouveau_gpuobj_ref_del(dev, &priv->pramin_pt);

	/* Destroy dummy channel */
	if (chan) {
		for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
			nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]);
			dev_priv->vm_vram_pt[i] = NULL;
		}
		dev_priv->vm_vram_pt_nr = 0;

		nouveau_gpuobj_del(dev, &chan->vm_pd);
		nouveau_gpuobj_ref_del(dev, &chan->ramfc);
		nouveau_gpuobj_ref_del(dev, &chan->ramin);
		nouveau_mem_takedown(&chan->ramin_heap);

		dev_priv->fifos[0] = dev_priv->fifos[127] = NULL;
		kfree(chan);
	}

	dev_priv->engine.instmem.priv = NULL;
	kfree(priv);
}

int
nv50_instmem_suspend(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->fifos[0];
	struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
	int i;
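
	/* Copy the fake channel's instance memory into a vmalloc'd shadow;
	 * resume rewrites it through the BAR0 PRAMIN window, as VRAM
	 * contents may not survive suspend.
	 */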
	ramin->im_backing_suspend = vmalloc(ramin->im_pramin->size);
	if (!ramin->im_backing_suspend)
		return -ENOMEM;

	for (i = 0; i < ramin->im_pramin->size; i += 4)
		ramin->im_backing_suspend[i/4] = nv_ri32(dev, i);
	return 0;
}

void
nv50_instmem_resume(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
	struct nouveau_channel *chan = dev_priv->fifos[0];
	struct nouveau_gpuobj *ramin = chan->ramin->gpuobj;
	int i;

	nv_wr32(dev, NV50_PUNK_BAR0_PRAMIN, (ramin->im_backing_start >> 16));
	for (i = 0; i < ramin->im_pramin->size; i += 4)
		BAR0_WI32(ramin, i, ramin->im_backing_suspend[i/4]);
	vfree(ramin->im_backing_suspend);
	ramin->im_backing_suspend = NULL;

	/* Poke the relevant regs, and pray it works :) */
	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12));
	nv_wr32(dev, NV50_PUNK_UNK1710, 0);
	nv_wr32(dev, NV50_PUNK_BAR_CFG_BASE, (chan->ramin->instance >> 12) |
					NV50_PUNK_BAR_CFG_BASE_VALID);
	nv_wr32(dev, NV50_PUNK_BAR1_CTXDMA, (priv->fb_bar->instance >> 4) |
					NV50_PUNK_BAR1_CTXDMA_VALID);
	nv_wr32(dev, NV50_PUNK_BAR3_CTXDMA, (priv->pramin_bar->instance >> 4) |
					NV50_PUNK_BAR3_CTXDMA_VALID);

	for (i = 0; i < 8; i++)
		nv_wr32(dev, 0x1900 + (i*4), 0);
}

int
nv50_instmem_populate(struct drm_device *dev, struct nouveau_gpuobj *gpuobj,
		      uint32_t *sz)
{
	int ret;

	if (gpuobj->im_backing)
		return -EINVAL;

	*sz = ALIGN(*sz, NV50_INSTMEM_PAGE_SIZE);
	if (*sz == 0)
		return -EINVAL;
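
	/* Back the object with a pinned VRAM buffer object; its start
	 * offset (in bytes) is recorded so bind() can point the PRAMIN
	 * page-table entries at it.
	 */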
	ret = nouveau_bo_new(dev, NULL, *sz, 0, TTM_PL_FLAG_VRAM, 0, 0x0000,
			     true, false, &gpuobj->im_backing);
	if (ret) {
		NV_ERROR(dev, "error getting PRAMIN backing pages: %d\n", ret);
		return ret;
	}

	ret = nouveau_bo_pin(gpuobj->im_backing, TTM_PL_FLAG_VRAM);
	if (ret) {
		NV_ERROR(dev, "error pinning PRAMIN backing VRAM: %d\n", ret);
		nouveau_bo_ref(NULL, &gpuobj->im_backing);
		return ret;
	}

	gpuobj->im_backing_start = gpuobj->im_backing->bo.mem.mm_node->start;
	gpuobj->im_backing_start <<= PAGE_SHIFT;
	return 0;
}

void
nv50_instmem_clear(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	if (gpuobj && gpuobj->im_backing) {
		if (gpuobj->im_bound)
			dev_priv->engine.instmem.unbind(dev, gpuobj);
		nouveau_bo_unpin(gpuobj->im_backing);
		nouveau_bo_ref(NULL, &gpuobj->im_backing);
		gpuobj->im_backing = NULL;
	}
}

int
nv50_instmem_bind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
	struct nouveau_gpuobj *pramin_pt = priv->pramin_pt->gpuobj;
	uint32_t pte, pte_end;
	uint64_t vram;

	if (!gpuobj->im_backing || !gpuobj->im_pramin || gpuobj->im_bound)
		return -EINVAL;

	NV_DEBUG(dev, "st=0x%0llx sz=0x%0llx\n",
		 gpuobj->im_pramin->start, gpuobj->im_pramin->size);

	pte     = (gpuobj->im_pramin->start >> 12) << 1;
	pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;
	vram    = gpuobj->im_backing_start;

	NV_DEBUG(dev, "pramin=0x%llx, pte=%d, pte_end=%d\n",
		 gpuobj->im_pramin->start, pte, pte_end);
	NV_DEBUG(dev, "first vram page: 0x%08x\n", gpuobj->im_backing_start);

	vram |= 1;
	if (dev_priv->vram_sys_base) {
		vram += dev_priv->vram_sys_base;
		vram |= 0x30;
	}
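
	/* Write one 8-byte PTE (low word, then high word) per 4KiB page,
	 * mapping the object's position in the PRAMIN aperture onto its
	 * VRAM backing; pte is counted in 32-bit words here, so it advances
	 * by two per page.
	 */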
	dev_priv->engine.instmem.prepare_access(dev, true);
	while (pte < pte_end) {
		nv_wo32(dev, pramin_pt, pte++, lower_32_bits(vram));
		nv_wo32(dev, pramin_pt, pte++, upper_32_bits(vram));
		vram += NV50_INSTMEM_PAGE_SIZE;
	}
	dev_priv->engine.instmem.finish_access(dev);
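
	/* The 0x100c80 writes below appear to kick VM/TLB flushes so the
	 * updated page-table entries take effect; each is polled until the
	 * busy bit (bit 0) clears.
	 */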
	nv_wr32(dev, 0x100c80, 0x00040001);
	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (1)\n");
		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
		return -EBUSY;
	}

	nv_wr32(dev, 0x100c80, 0x00060001);
	if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
		NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
		NV_ERROR(dev, "0x100c80 = 0x%08x\n", nv_rd32(dev, 0x100c80));
		return -EBUSY;
	}

	gpuobj->im_bound = 1;
	return 0;
}

int
nv50_instmem_unbind(struct drm_device *dev, struct nouveau_gpuobj *gpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
	uint32_t pte, pte_end;

	if (gpuobj->im_bound == 0)
		return -EINVAL;

	pte     = (gpuobj->im_pramin->start >> 12) << 1;
	pte_end = ((gpuobj->im_pramin->size >> 12) << 1) + pte;

	dev_priv->engine.instmem.prepare_access(dev, true);
	while (pte < pte_end) {
		nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
		nv_wo32(dev, priv->pramin_pt->gpuobj, pte++, 0x00000000);
	}
	dev_priv->engine.instmem.finish_access(dev);

	gpuobj->im_bound = 0;
	return 0;
}

void
nv50_instmem_prepare_access(struct drm_device *dev, bool write)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;

	priv->last_access_wr = write;
}
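
/* prepare_access() only records whether the access window was opened for
 * writing; if it was, finish_access() pokes 0x070000 and waits for bit 0
 * to clear, which appears to flush pending PRAMIN writes out to memory.
 */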
void
nv50_instmem_finish_access(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;

	if (priv->last_access_wr) {
		nv_wr32(dev, 0x070000, 0x00000001);
		if (!nv_wait(0x070000, 0x00000001, 0x00000000))
			NV_ERROR(dev, "PRAMIN flush timeout\n");
	}
}