nouveau_gpuobj.c

/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include <nouveau_drm.h>
#include <engine/fifo.h>
#include <core/ramht.h>
#include "nouveau_software.h"
#include <subdev/vm.h>
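
/*
 * Per-device lookup tables used to dispatch software methods: each
 * registered object class carries a list of method handlers keyed by
 * method offset.
 */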
struct nouveau_gpuobj_method {
	struct list_head head;
	u32 mthd;
	int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data);
};

struct nouveau_gpuobj_class {
	struct list_head head;
	struct list_head methods;
	u32 id;
	u32 engine;
};
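
/*
 * Register an object class (and the engine that implements it) with the
 * device.  nouveau_gpuobj_mthd_new() attaches a software method handler
 * to a previously registered class, or returns -EINVAL if the class is
 * unknown.
 */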
int
nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_class *oc;

	oc = kzalloc(sizeof(*oc), GFP_KERNEL);
	if (!oc)
		return -ENOMEM;

	INIT_LIST_HEAD(&oc->methods);
	oc->id = class;
	oc->engine = engine;
	list_add(&oc->head, &dev_priv->classes);
	return 0;
}

int
nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd,
			int (*exec)(struct nouveau_channel *, u32, u32, u32))
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_method *om;
	struct nouveau_gpuobj_class *oc;

	list_for_each_entry(oc, &dev_priv->classes, head) {
		if (oc->id == class)
			goto found;
	}

	return -EINVAL;

found:
	om = kzalloc(sizeof(*om), GFP_KERNEL);
	if (!om)
		return -ENOMEM;

	om->mthd = mthd;
	om->exec = exec;
	list_add(&om->head, &oc->methods);
	return 0;
}
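
/*
 * Look up and execute the handler registered for (class, mthd) on the
 * given channel.  The _call2 variant takes a channel id instead and
 * resolves it to a channel under the channels spinlock before
 * dispatching.
 */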
int
nouveau_gpuobj_mthd_call(struct nouveau_channel *chan,
			 u32 class, u32 mthd, u32 data)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_gpuobj_method *om;
	struct nouveau_gpuobj_class *oc;

	list_for_each_entry(oc, &dev_priv->classes, head) {
		if (oc->id != class)
			continue;

		list_for_each_entry(om, &oc->methods, head) {
			if (om->mthd == mthd)
				return om->exec(chan, class, mthd, data);
		}
	}

	return -ENOENT;
}

int
nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
			  u32 class, u32 mthd, u32 data)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_priv *pfifo = nv_engine(dev, NVOBJ_ENGINE_FIFO);
	struct nouveau_channel *chan = NULL;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	if (chid >= 0 && chid < pfifo->channels)
		chan = dev_priv->channels.ptr[chid];

	if (chan)
		ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);

	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	return ret;
}
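
/*
 * Allocate a new GPU object of 'size' bytes.  With a channel, the object
 * is suballocated from that channel's private instance memory (RAMIN)
 * heap; without one, it comes from the global instmem backend and is
 * mapped unless NVOBJ_FLAG_DONT_MAP is set.
 */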
int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
		   uint32_t size, int align, uint32_t flags,
		   struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_gpuobj *gpuobj;
	struct drm_mm_node *ramin = NULL;
	int ret, i;

	NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
		 chan ? chan->id : -1, size, align, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->flags = flags;
	kref_init(&gpuobj->refcount);
	gpuobj->size = size;

	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);

	if (chan) {
		ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
		if (ramin)
			ramin = drm_mm_get_block(ramin, size, align);
		if (!ramin) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return -ENOMEM;
		}

		gpuobj->pinst = chan->ramin->pinst;
		if (gpuobj->pinst != ~0)
			gpuobj->pinst += ramin->start;

		gpuobj->cinst = ramin->start;
		gpuobj->vinst = ramin->start + chan->ramin->vinst;
		gpuobj->node  = ramin;
	} else {
		ret = instmem->get(gpuobj, chan, size, align);
		if (ret) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return ret;
		}

		ret = -ENOSYS;
		if (!(flags & NVOBJ_FLAG_DONT_MAP))
			ret = instmem->map(gpuobj);
		if (ret)
			gpuobj->pinst = ~0;

		gpuobj->cinst = NVOBJ_CINST_GLOBAL;
	}

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		instmem->flush(dev);
	}

	*gpuobj_ret = gpuobj;
	return 0;
}
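
/*
 * One-time setup and teardown of the gpuobj tracking state.  Takedown
 * frees the registered class/method tables and warns if any objects are
 * still outstanding.
 */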
int
nouveau_gpuobj_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_DEBUG(dev, "\n");

	INIT_LIST_HEAD(&dev_priv->gpuobj_list);
	INIT_LIST_HEAD(&dev_priv->classes);
	spin_lock_init(&dev_priv->ramin_lock);
	dev_priv->ramin_base = ~0;

	return 0;
}

void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_method *om, *tm;
	struct nouveau_gpuobj_class *oc, *tc;

	NV_DEBUG(dev, "\n");

	list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) {
		list_for_each_entry_safe(om, tm, &oc->methods, head) {
			list_del(&om->head);
			kfree(om);
		}
		list_del(&oc->head);
		kfree(oc);
	}

	WARN_ON(!list_empty(&dev_priv->gpuobj_list));
}
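
/*
 * kref release callback: optionally zeroes the object's memory, runs any
 * destructor, returns the backing storage to either the global instmem
 * backend or the owning channel's RAMIN heap, and frees the object.
 */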
static void
nouveau_gpuobj_del(struct kref *ref)
{
	struct nouveau_gpuobj *gpuobj =
		container_of(ref, struct nouveau_gpuobj, refcount);
	struct drm_device *dev = gpuobj->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	int i;

	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);

	if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		instmem->flush(dev);
	}

	if (gpuobj->dtor)
		gpuobj->dtor(dev, gpuobj);

	if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
		if (gpuobj->node) {
			instmem->unmap(gpuobj);
			instmem->put(gpuobj);
		}
	} else {
		if (gpuobj->node) {
			spin_lock(&dev_priv->ramin_lock);
			drm_mm_put_block(gpuobj->node);
			spin_unlock(&dev_priv->ramin_lock);
		}
	}

	spin_lock(&dev_priv->ramin_lock);
	list_del(&gpuobj->list);
	spin_unlock(&dev_priv->ramin_lock);

	kfree(gpuobj);
}
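
/*
 * Update a gpuobj pointer, taking a reference on the new object (if any)
 * and dropping the reference held on the old one.  Callers in this file
 * use nouveau_gpuobj_ref(NULL, &obj) to release an object.
 */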
void
nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
{
	if (ref)
		kref_get(&ref->refcount);

	if (*ptr)
		kref_put(&(*ptr)->refcount, nouveau_gpuobj_del);

	*ptr = ref;
}
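
/*
 * Fill in a 24-byte NV50-style DMA object at 'offset' within 'obj',
 * encoding class, access rights, target memory type and the base/limit
 * of the window it describes.  nv50_gpuobj_dma_new() allocates the
 * object and initialises it in one step.
 */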
void
nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
		     u64 base, u64 size, int target, int access,
		     u32 type, u32 comp)
{
	struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	u32 flags0;

	flags0  = (comp << 29) | (type << 22) | class;
	flags0 |= 0x00100000;

	switch (access) {
	case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
	case NV_MEM_ACCESS_RW:
	case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
	default:
		break;
	}

	switch (target) {
	case NV_MEM_TARGET_VRAM:
		flags0 |= 0x00010000;
		break;
	case NV_MEM_TARGET_PCI:
		flags0 |= 0x00020000;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		flags0 |= 0x00030000;
		break;
	case NV_MEM_TARGET_GART:
		base += dev_priv->gart_info.aper_base;
		/* fall through */
	default:
		flags0 &= ~0x00100000;
		break;
	}

	/* convert to base + limit */
	size = (base + size) - 1;

	nv_wo32(obj, offset + 0x00, flags0);
	nv_wo32(obj, offset + 0x04, lower_32_bits(size));
	nv_wo32(obj, offset + 0x08, lower_32_bits(base));
	nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |
				    upper_32_bits(base));
	nv_wo32(obj, offset + 0x10, 0x00000000);
	nv_wo32(obj, offset + 0x14, 0x00000000);

	pinstmem->flush(obj->dev);
}

int
nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size,
		    int target, int access, u32 type, u32 comp,
		    struct nouveau_gpuobj **pobj)
{
	struct drm_device *dev = chan->dev;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj);
	if (ret)
		return ret;

	nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target,
			     access, type, comp);
	return 0;
}
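
/*
 * Create a DMA context object.  NV50 and newer cards delegate to
 * nv50_gpuobj_dma_new(); earlier cards build the legacy 16-byte ctxdma
 * here, translating GART targets into PCI addresses (or, for
 * page-table-backed GART at offset 0, reusing the global ctxdma).
 */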
int
nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
		       u64 size, int access, int target,
		       struct nouveau_gpuobj **pobj)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj *obj;
	u32 flags0, flags2;
	int ret;

	if (dev_priv->card_type >= NV_50) {
		u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
		u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;

		return nv50_gpuobj_dma_new(chan, class, base, size,
					   target, access, type, comp, pobj);
	}

	if (target == NV_MEM_TARGET_GART) {
		struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;

		if (dev_priv->gart_info.type == NOUVEAU_GART_PDMA) {
			if (base == 0) {
				nouveau_gpuobj_ref(gart, pobj);
				return 0;
			}

			base = nouveau_sgdma_get_physical(dev, base);
			target = NV_MEM_TARGET_PCI;
		} else {
			base += dev_priv->gart_info.aper_base;
			if (dev_priv->gart_info.type == NOUVEAU_GART_AGP)
				target = NV_MEM_TARGET_PCI_NOSNOOP;
			else
				target = NV_MEM_TARGET_PCI;
		}
	}

	flags0  = class;
	flags0 |= 0x00003000; /* PT present, PT linear */
	flags2  = 0;

	switch (target) {
	case NV_MEM_TARGET_PCI:
		flags0 |= 0x00020000;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		flags0 |= 0x00030000;
		break;
	default:
		break;
	}

	switch (access) {
	case NV_MEM_ACCESS_RO:
		flags0 |= 0x00004000;
		break;
	case NV_MEM_ACCESS_WO:
		flags0 |= 0x00008000;
		/* fall through */
	default:
		flags2 |= 0x00000002;
		break;
	}

	flags0 |= (base & 0x00000fff) << 20;
	flags2 |= (base & 0xfffff000);

	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
	if (ret)
		return ret;

	nv_wo32(obj, 0x00, flags0);
	nv_wo32(obj, 0x04, size - 1);
	nv_wo32(obj, 0x08, flags2);
	nv_wo32(obj, 0x0c, flags2);

	obj->engine = NVOBJ_ENGINE_SW;
	obj->class  = class;

	*pobj = obj;
	return 0;
}
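
/*
 * Create a graphics (engine) object of the given class on a channel,
 * instantiating the owning engine's channel context first if it does
 * not exist yet.
 */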
int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj_class *oc;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);

	list_for_each_entry(oc, &dev_priv->classes, head) {
		struct nouveau_exec_engine *eng = dev_priv->eng[oc->engine];

		if (oc->id != class)
			continue;

		if (!chan->engctx[oc->engine]) {
			ret = eng->context_new(chan, oc->engine);
			if (ret)
				return ret;
		}

		return eng->object_new(chan, oc->engine, handle, class);
	}

	return -EINVAL;
}
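
/*
 * Per-generation helpers that allocate a channel's private instance
 * memory block (chan->ramin) and, on NV50-family chips, the fixed
 * allocations carved out of it (RAMFC on NV50, plus chan->engptr and
 * chan->vm_pd).
 */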
static int
nv04_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	int ret;

	ret = nouveau_gpuobj_new(dev, NULL, 0x10000, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
	if (ret)
		return ret;

	ret = drm_mm_init(&chan->ramin_heap, 0, chan->ramin->size);
	if (ret)
		return ret;

	return 0;
}

static int
nv50_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	int ret;

	ret = nouveau_gpuobj_new(dev, NULL, 0x10000, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
	if (ret)
		return ret;

	ret = drm_mm_init(&chan->ramin_heap, 0, chan->ramin->size);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(dev, chan, 0x0200, 0, 0, &chan->ramfc);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(dev, chan, 0x1000, 0, 0, &chan->engptr);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(dev, chan, 0x4000, 0, 0, &chan->vm_pd);
	if (ret)
		return ret;

	return 0;
}

static int
nv84_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	int ret;

	ret = nouveau_gpuobj_new(dev, NULL, 0x10000, 0x1000,
				 NVOBJ_FLAG_ZERO_ALLOC, &chan->ramin);
	if (ret)
		return ret;

	ret = drm_mm_init(&chan->ramin_heap, 0, chan->ramin->size);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(dev, chan, 0x0200, 0, 0, &chan->engptr);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_new(dev, chan, 0x4000, 0, 0, &chan->vm_pd);
	if (ret)
		return ret;

	return 0;
}
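
/*
 * NVC0+ channel setup: allocate the channel's instance block, create the
 * VM's page directory if this is its first user, and point the channel
 * at that page directory.
 */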
static int
nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj *pgd = NULL;
	struct nouveau_vm_pgd *vpgd;
	int ret;

	ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin);
	if (ret)
		return ret;

	/* create page directory for this vm if none currently exists,
	 * will be destroyed automagically when last reference to the
	 * vm is removed
	 */
	if (list_empty(&vm->pgd_list)) {
		ret = nouveau_gpuobj_new(dev, NULL, 65536, 0x1000, 0, &pgd);
		if (ret)
			return ret;
	}
	nouveau_vm_ref(vm, &chan->vm, pgd);
	nouveau_gpuobj_ref(NULL, &pgd);

	/* point channel at vm's page directory */
	vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
	nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
	nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
	nv_wo32(chan->ramin, 0x0208, 0xffffffff);
	nv_wo32(chan->ramin, 0x020c, 0x000000ff);

	return 0;
}
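
/*
 * Top-level per-channel object setup: allocate instance memory, hook up
 * the channel VM, create (or share) RAMHT, and insert the VRAM and TT
 * DMA objects under the handles supplied by the caller.
 */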
int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
			    uint32_t vram_h, uint32_t tt_h)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv);
	struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm;
	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
	int ret;

	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);

	if (dev_priv->card_type >= NV_C0)
		return nvc0_gpuobj_channel_init(chan, vm);

	/* Allocate a chunk of memory for per-channel object storage */
	if (dev_priv->chipset >= 0x84)
		ret = nv84_gpuobj_channel_init_pramin(chan);
	else if (dev_priv->chipset == 0x50)
		ret = nv50_gpuobj_channel_init_pramin(chan);
	else
		ret = nv04_gpuobj_channel_init_pramin(chan);
	if (ret) {
		NV_ERROR(dev, "init pramin\n");
		return ret;
	}

	/* NV50 VM
	 *  - Allocate per-channel page-directory
	 *  - Link with shared channel VM
	 */
	if (vm)
		nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);

	/* RAMHT */
	if (dev_priv->card_type < NV_50) {
		nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
	} else {
		struct nouveau_gpuobj *ramht = NULL;

		ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
					 NVOBJ_FLAG_ZERO_ALLOC, &ramht);
		if (ret)
			return ret;

		ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
		nouveau_gpuobj_ref(NULL, &ramht);
		if (ret)
			return ret;
	}

	/* VRAM ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, (1ULL << 40), NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->fb_available_size,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VRAM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	}

	ret = nouveau_ramht_insert(chan, vram_h, vram);
	nouveau_gpuobj_ref(NULL, &vram);
	if (ret) {
		NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	/* TT memory ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, (1ULL << 40), NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VM, &tt);
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->gart_info.aper_size,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_GART, &tt);
	}
	if (ret) {
		NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
		return ret;
	}

	ret = nouveau_ramht_insert(chan, tt_h, tt);
	nouveau_gpuobj_ref(NULL, &tt);
	if (ret) {
		NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	return 0;
}

void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
	NV_DEBUG(chan->dev, "ch%d\n", chan->id);

	nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
	nouveau_gpuobj_ref(NULL, &chan->vm_pd);
	nouveau_gpuobj_ref(NULL, &chan->ramfc);
	nouveau_gpuobj_ref(NULL, &chan->engptr);

	if (drm_mm_initialized(&chan->ramin_heap))
		drm_mm_takedown(&chan->ramin_heap);

	nouveau_gpuobj_ref(NULL, &chan->ramin);
}
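
/*
 * Suspend support: snapshot every globally allocated object into system
 * memory before the card loses it, and write the contents back on
 * resume.
 */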
int
nouveau_gpuobj_suspend(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
			continue;

		gpuobj->suspend = vmalloc(gpuobj->size);
		if (!gpuobj->suspend) {
			nouveau_gpuobj_resume(dev);
			return -ENOMEM;
		}

		for (i = 0; i < gpuobj->size; i += 4)
			gpuobj->suspend[i/4] = nv_ro32(gpuobj, i);
	}

	return 0;
}

void
nouveau_gpuobj_resume(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->suspend)
			continue;

		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, gpuobj->suspend[i/4]);

		vfree(gpuobj->suspend);
		gpuobj->suspend = NULL;
	}

	dev_priv->engine.instmem.flush(dev);
}
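
/*
 * 32-bit instance memory accessors.  Objects without a direct PRAMIN
 * offset (or accessed before PRAMIN is available) are reached through
 * the 0x700000 aperture after programming the window base at 0x001700,
 * serialised by vm_lock; everything else goes through nv_ri32()/nv_wi32().
 */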
u32
nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;
	unsigned long flags;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64  ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;
		u32  val;

		spin_lock_irqsave(&dev_priv->vm_lock, flags);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
		spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
		return val;
	}

	return nv_ri32(dev, gpuobj->pinst + offset);
}

void
nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;
	unsigned long flags;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64  ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;

		spin_lock_irqsave(&dev_priv->vm_lock, flags);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
		spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
		return;
	}

	nv_wi32(dev, gpuobj->pinst + offset, val);
}
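
/*
 * Map the memory backing a gpuobj into a VM (nouveau_gpuobj_map_bar()
 * uses the device's BAR1 VM), and the matching unmap helper.
 */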
int
nouveau_gpuobj_map_vm(struct nouveau_gpuobj *gpuobj, u32 flags,
		      struct nouveau_vm *vm, struct nouveau_vma *vma)
{
	struct nouveau_mem **mem = gpuobj->node;
	struct nouveau_mem *node = *mem;
	int ret;

	ret = nouveau_vm_get(vm, node->size << 12, 12, flags, vma);
	if (ret)
		return ret;

	nouveau_vm_map(vma, node);
	return 0;
}

int
nouveau_gpuobj_map_bar(struct nouveau_gpuobj *gpuobj, u32 flags,
		       struct nouveau_vma *vma)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	return nouveau_gpuobj_map_vm(gpuobj, flags, dev_priv->bar1_vm, vma);
}

void
nouveau_gpuobj_unmap(struct nouveau_vma *vma)
{
	nouveau_vm_unmap(vma);
	nouveau_vm_put(vma);
}