nouveau_object.c

/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_ramht.h"
#include "nouveau_software.h"
#include "nouveau_vm.h"

struct nouveau_gpuobj_method {
	struct list_head head;
	u32 mthd;
	int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data);
};

struct nouveau_gpuobj_class {
	struct list_head head;
	struct list_head methods;
	u32 id;
	u32 engine;
};

int
nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_class *oc;

	oc = kzalloc(sizeof(*oc), GFP_KERNEL);
	if (!oc)
		return -ENOMEM;

	INIT_LIST_HEAD(&oc->methods);
	oc->id = class;
	oc->engine = engine;
	list_add(&oc->head, &dev_priv->classes);
	return 0;
}

int
nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd,
			int (*exec)(struct nouveau_channel *, u32, u32, u32))
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_method *om;
	struct nouveau_gpuobj_class *oc;

	list_for_each_entry(oc, &dev_priv->classes, head) {
		if (oc->id == class)
			goto found;
	}

	return -EINVAL;

found:
	om = kzalloc(sizeof(*om), GFP_KERNEL);
	if (!om)
		return -ENOMEM;

	om->mthd = mthd;
	om->exec = exec;
	list_add(&om->head, &oc->methods);
	return 0;
}

int
nouveau_gpuobj_mthd_call(struct nouveau_channel *chan,
			 u32 class, u32 mthd, u32 data)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct nouveau_gpuobj_method *om;
	struct nouveau_gpuobj_class *oc;

	list_for_each_entry(oc, &dev_priv->classes, head) {
		if (oc->id != class)
			continue;

		list_for_each_entry(om, &oc->methods, head) {
			if (om->mthd == mthd)
				return om->exec(chan, class, mthd, data);
		}
	}

	return -ENOENT;
}

int
nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
			  u32 class, u32 mthd, u32 data)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = NULL;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&dev_priv->channels.lock, flags);
	if (chid >= 0 && chid < dev_priv->engine.fifo.channels)
		chan = dev_priv->channels.ptr[chid];
	if (chan)
		ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
	spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
	return ret;
}
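
/*
 * Illustrative sketch (not part of the original file): how an engine
 * implementation might use the registration helpers above.  A class is
 * created with nouveau_gpuobj_class_new(), per-method handlers are
 * attached with nouveau_gpuobj_mthd_new(), and nouveau_gpuobj_mthd_call()
 * later dispatches a (class, method) pair to the matching handler.  The
 * method offset 0x0500 and example_mthd_handler() below are hypothetical,
 * chosen only for illustration.
 */
#if 0
static int
example_mthd_handler(struct nouveau_channel *chan, u32 class, u32 mthd,
		     u32 data)
{
	/* a real handler would act on 'data' for this channel */
	return 0;
}

static int
example_register_sw_class(struct drm_device *dev)
{
	int ret;

	/* register the class on the software engine ... */
	ret = nouveau_gpuobj_class_new(dev, 0x506e, NVOBJ_ENGINE_SW);
	if (ret)
		return ret;

	/* ... and attach a handler for one of its methods */
	return nouveau_gpuobj_mthd_new(dev, 0x506e, 0x0500,
				       example_mthd_handler);
}
#endif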
int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
		   uint32_t size, int align, uint32_t flags,
		   struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_gpuobj *gpuobj;
	struct drm_mm_node *ramin = NULL;
	int ret, i;

	NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
		 chan ? chan->id : -1, size, align, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->flags = flags;
	kref_init(&gpuobj->refcount);
	gpuobj->size = size;

	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);

	if (!(flags & NVOBJ_FLAG_VM) && chan) {
		ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
		if (ramin)
			ramin = drm_mm_get_block(ramin, size, align);
		if (!ramin) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return -ENOMEM;
		}

		gpuobj->pinst = chan->ramin->pinst;
		if (gpuobj->pinst != ~0)
			gpuobj->pinst += ramin->start;

		gpuobj->cinst = ramin->start;
		gpuobj->vinst = ramin->start + chan->ramin->vinst;
		gpuobj->node  = ramin;
	} else {
		ret = instmem->get(gpuobj, chan, size, align);
		if (ret) {
			nouveau_gpuobj_ref(NULL, &gpuobj);
			return ret;
		}

		ret = -ENOSYS;
		if (!(flags & NVOBJ_FLAG_DONT_MAP))
			ret = instmem->map(gpuobj);
		if (ret)
			gpuobj->pinst = ~0;

		gpuobj->cinst = NVOBJ_CINST_GLOBAL;
	}

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		instmem->flush(dev);
	}

	*gpuobj_ret = gpuobj;
	return 0;
}
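
/*
 * Illustrative sketch (not part of the original file): allocating a small,
 * zero-initialised object in a channel's instance memory and releasing it
 * again via nouveau_gpuobj_ref(NULL, ...).  The size and alignment values
 * are arbitrary examples.
 */
#if 0
static int
example_gpuobj_alloc(struct drm_device *dev, struct nouveau_channel *chan)
{
	struct nouveau_gpuobj *obj = NULL;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 0x100, 16,
				 NVOBJ_FLAG_ZERO_ALLOC, &obj);
	if (ret)
		return ret;

	/* ... fill the object with nv_wo32(obj, offset, value) ... */

	nouveau_gpuobj_ref(NULL, &obj);	/* drop the allocation reference */
	return 0;
}
#endif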
int
nouveau_gpuobj_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_DEBUG(dev, "\n");

	INIT_LIST_HEAD(&dev_priv->gpuobj_list);
	INIT_LIST_HEAD(&dev_priv->classes);
	spin_lock_init(&dev_priv->ramin_lock);
	dev_priv->ramin_base = ~0;

	return 0;
}

void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_method *om, *tm;
	struct nouveau_gpuobj_class *oc, *tc;

	NV_DEBUG(dev, "\n");

	list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) {
		list_for_each_entry_safe(om, tm, &oc->methods, head) {
			list_del(&om->head);
			kfree(om);
		}
		list_del(&oc->head);
		kfree(oc);
	}

	BUG_ON(!list_empty(&dev_priv->gpuobj_list));
}

static void
nouveau_gpuobj_del(struct kref *ref)
{
	struct nouveau_gpuobj *gpuobj =
		container_of(ref, struct nouveau_gpuobj, refcount);
	struct drm_device *dev = gpuobj->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	int i;

	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);

	if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		instmem->flush(dev);
	}

	if (gpuobj->dtor)
		gpuobj->dtor(dev, gpuobj);

	if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
		if (gpuobj->node) {
			instmem->unmap(gpuobj);
			instmem->put(gpuobj);
		}
	} else {
		if (gpuobj->node) {
			spin_lock(&dev_priv->ramin_lock);
			drm_mm_put_block(gpuobj->node);
			spin_unlock(&dev_priv->ramin_lock);
		}
	}

	spin_lock(&dev_priv->ramin_lock);
	list_del(&gpuobj->list);
	spin_unlock(&dev_priv->ramin_lock);

	kfree(gpuobj);
}

void
nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
{
	if (ref)
		kref_get(&ref->refcount);

	if (*ptr)
		kref_put(&(*ptr)->refcount, nouveau_gpuobj_del);

	*ptr = ref;
}
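
/*
 * Illustrative note (not part of the original file): nouveau_gpuobj_ref()
 * acts as both "get" and "put".  Passing an object takes a reference and
 * stores it in *ptr (releasing whatever *ptr held before); passing NULL
 * releases the reference held in *ptr.  The function below is a
 * hypothetical caller showing the pattern.
 */
#if 0
static void
example_ref_pattern(struct nouveau_gpuobj *obj)
{
	struct nouveau_gpuobj *ref = NULL;

	nouveau_gpuobj_ref(obj, &ref);	/* take a reference */
	/* ... use ref ... */
	nouveau_gpuobj_ref(NULL, &ref);	/* drop it again */
}
#endif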
int
nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
			u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	int i;

	NV_DEBUG(dev,
		 "pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n",
		 pinst, vinst, size, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->dev = dev;
	gpuobj->flags = flags;
	kref_init(&gpuobj->refcount);
	gpuobj->size  = size;
	gpuobj->pinst = pinst;
	gpuobj->cinst = NVOBJ_CINST_GLOBAL;
	gpuobj->vinst = vinst;

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, 0);
		dev_priv->engine.instmem.flush(dev);
	}

	spin_lock(&dev_priv->ramin_lock);
	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	spin_unlock(&dev_priv->ramin_lock);

	*pgpuobj = gpuobj;
	return 0;
}
void
nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
		     u64 base, u64 size, int target, int access,
		     u32 type, u32 comp)
{
	struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
	struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
	u32 flags0;

	flags0  = (comp << 29) | (type << 22) | class;
	flags0 |= 0x00100000;

	switch (access) {
	case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
	case NV_MEM_ACCESS_RW:
	case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
	default:
		break;
	}

	switch (target) {
	case NV_MEM_TARGET_VRAM:
		flags0 |= 0x00010000;
		break;
	case NV_MEM_TARGET_PCI:
		flags0 |= 0x00020000;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		flags0 |= 0x00030000;
		break;
	case NV_MEM_TARGET_GART:
		base += dev_priv->gart_info.aper_base;
		/* deliberate fall through to the default (linear) case */
	default:
		flags0 &= ~0x00100000;
		break;
	}

	/* convert to base + limit */
	size = (base + size) - 1;

	nv_wo32(obj, offset + 0x00, flags0);
	nv_wo32(obj, offset + 0x04, lower_32_bits(size));
	nv_wo32(obj, offset + 0x08, lower_32_bits(base));
	nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |
				    upper_32_bits(base));
	nv_wo32(obj, offset + 0x10, 0x00000000);
	nv_wo32(obj, offset + 0x14, 0x00000000);

	pinstmem->flush(obj->dev);
}

int
nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size,
		    int target, int access, u32 type, u32 comp,
		    struct nouveau_gpuobj **pobj)
{
	struct drm_device *dev = chan->dev;
	int ret;

	ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj);
	if (ret)
		return ret;

	nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target,
			     access, type, comp);
	return 0;
}
int
nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
		       u64 size, int access, int target,
		       struct nouveau_gpuobj **pobj)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj *obj;
	u32 flags0, flags2;
	int ret;

	if (dev_priv->card_type >= NV_50) {
		u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
		u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;

		return nv50_gpuobj_dma_new(chan, class, base, size,
					   target, access, type, comp, pobj);
	}

	if (target == NV_MEM_TARGET_GART) {
		struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;

		if (dev_priv->gart_info.type == NOUVEAU_GART_PDMA) {
			if (base == 0) {
				nouveau_gpuobj_ref(gart, pobj);
				return 0;
			}

			base = nouveau_sgdma_get_physical(dev, base);
			target = NV_MEM_TARGET_PCI;
		} else {
			base += dev_priv->gart_info.aper_base;
			if (dev_priv->gart_info.type == NOUVEAU_GART_AGP)
				target = NV_MEM_TARGET_PCI_NOSNOOP;
			else
				target = NV_MEM_TARGET_PCI;
		}
	}

	flags0  = class;
	flags0 |= 0x00003000; /* PT present, PT linear */
	flags2  = 0;

	switch (target) {
	case NV_MEM_TARGET_PCI:
		flags0 |= 0x00020000;
		break;
	case NV_MEM_TARGET_PCI_NOSNOOP:
		flags0 |= 0x00030000;
		break;
	default:
		break;
	}

	switch (access) {
	case NV_MEM_ACCESS_RO:
		flags0 |= 0x00004000;
		break;
	case NV_MEM_ACCESS_WO:
		flags0 |= 0x00008000;
		/* deliberate fall through */
	default:
		flags2 |= 0x00000002;
		break;
	}

	flags0 |= (base & 0x00000fff) << 20;
	flags2 |= (base & 0xfffff000);

	ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
	if (ret)
		return ret;

	nv_wo32(obj, 0x00, flags0);
	nv_wo32(obj, 0x04, size - 1);
	nv_wo32(obj, 0x08, flags2);
	nv_wo32(obj, 0x0c, flags2);

	obj->engine = NVOBJ_ENGINE_SW;
	obj->class  = class;
	*pobj = obj;
	return 0;
}
int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj_class *oc;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);

	list_for_each_entry(oc, &dev_priv->classes, head) {
		struct nouveau_exec_engine *eng = dev_priv->eng[oc->engine];

		if (oc->id != class)
			continue;

		if (!chan->engctx[oc->engine]) {
			ret = eng->context_new(chan, oc->engine);
			if (ret)
				return ret;
		}

		return eng->object_new(chan, oc->engine, handle, class);
	}

	NV_ERROR(dev, "illegal object class: 0x%x\n", class);
	return -EINVAL;
}

static int
nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t size;
	uint32_t base;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* Base amount for object storage (4KiB enough?) */
	size = 0x2000;
	base = 0;

	if (dev_priv->card_type == NV_50) {
		/* Various fixed table thingos */
		size += 0x1400; /* mostly unknown stuff */
		size += 0x4000; /* vm pd */
		base  = 0x6000;
		/* RAMHT, not sure about setting size yet, 32KiB to be safe */
		size += 0x8000;
		/* RAMFC */
		size += 0x1000;
	}

	ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
	if (ret) {
		NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
		return ret;
	}

	ret = drm_mm_init(&chan->ramin_heap, base, size - base);
	if (ret) {
		NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
		nouveau_gpuobj_ref(NULL, &chan->ramin);
		return ret;
	}

	return 0;
}

static int
nvc0_gpuobj_channel_init(struct nouveau_channel *chan, struct nouveau_vm *vm)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj *pgd = NULL;
	struct nouveau_vm_pgd *vpgd;
	int ret;

	ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0, &chan->ramin);
	if (ret)
		return ret;

	/* create page directory for this vm if none currently exists,
	 * will be destroyed automagically when last reference to the
	 * vm is removed
	 */
	if (list_empty(&vm->pgd_list)) {
		ret = nouveau_gpuobj_new(dev, NULL, 65536, 0x1000, 0, &pgd);
		if (ret)
			return ret;
	}
	nouveau_vm_ref(vm, &chan->vm, pgd);
	nouveau_gpuobj_ref(NULL, &pgd);

	/* point channel at vm's page directory */
	vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd, head);
	nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
	nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
	nv_wo32(chan->ramin, 0x0208, 0xffffffff);
	nv_wo32(chan->ramin, 0x020c, 0x000000ff);

	return 0;
}
int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
			    uint32_t vram_h, uint32_t tt_h)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv);
	struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm;
	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
	int ret;

	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);

	if (dev_priv->card_type >= NV_C0)
		return nvc0_gpuobj_channel_init(chan, vm);

	/* Allocate a chunk of memory for per-channel object storage */
	ret = nouveau_gpuobj_channel_init_pramin(chan);
	if (ret) {
		NV_ERROR(dev, "init pramin\n");
		return ret;
	}

	/* NV50 VM
	 *  - Allocate per-channel page-directory
	 *  - Link with shared channel VM
	 */
	if (vm) {
		u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
		u64 vm_vinst = chan->ramin->vinst + pgd_offs;
		u32 vm_pinst = chan->ramin->pinst;

		if (vm_pinst != ~0)
			vm_pinst += pgd_offs;

		ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000,
					      0, &chan->vm_pd);
		if (ret)
			return ret;

		nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
	}

	/* RAMHT */
	if (dev_priv->card_type < NV_50) {
		nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
	} else {
		struct nouveau_gpuobj *ramht = NULL;

		ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
					 NVOBJ_FLAG_ZERO_ALLOC, &ramht);
		if (ret)
			return ret;

		ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
		nouveau_gpuobj_ref(NULL, &ramht);
		if (ret)
			return ret;
	}

	/* VRAM ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, (1ULL << 40), NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->fb_available_size,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VRAM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	}

	ret = nouveau_ramht_insert(chan, vram_h, vram);
	nouveau_gpuobj_ref(NULL, &vram);
	if (ret) {
		NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	/* TT memory ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, (1ULL << 40), NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_VM, &tt);
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->gart_info.aper_size,
					     NV_MEM_ACCESS_RW,
					     NV_MEM_TARGET_GART, &tt);
	}

	if (ret) {
		NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
		return ret;
	}

	ret = nouveau_ramht_insert(chan, tt_h, tt);
	nouveau_gpuobj_ref(NULL, &tt);
	if (ret) {
		NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
		return ret;
	}

	return 0;
}

void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
	NV_DEBUG(chan->dev, "ch%d\n", chan->id);

	nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
	nouveau_gpuobj_ref(NULL, &chan->vm_pd);

	if (drm_mm_initialized(&chan->ramin_heap))
		drm_mm_takedown(&chan->ramin_heap);
	nouveau_gpuobj_ref(NULL, &chan->ramin);
}
int
nouveau_gpuobj_suspend(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
			continue;

		gpuobj->suspend = vmalloc(gpuobj->size);
		if (!gpuobj->suspend) {
			nouveau_gpuobj_resume(dev);
			return -ENOMEM;
		}

		for (i = 0; i < gpuobj->size; i += 4)
			gpuobj->suspend[i/4] = nv_ro32(gpuobj, i);
	}

	return 0;
}

void
nouveau_gpuobj_resume(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->suspend)
			continue;

		for (i = 0; i < gpuobj->size; i += 4)
			nv_wo32(gpuobj, i, gpuobj->suspend[i/4]);

		vfree(gpuobj->suspend);
		gpuobj->suspend = NULL;
	}

	dev_priv->engine.instmem.flush(dev);
}

int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_grobj_alloc *init = data;
	struct nouveau_channel *chan;
	int ret;

	if (init->handle == ~0)
		return -EINVAL;

	/* compatibility with userspace that assumes 506e for all chipsets */
	if (init->class == 0x506e) {
		init->class = nouveau_software_class(dev);
		if (init->class == 0x906e)
			return 0;
	} else
	if (init->class == 0x906e) {
		NV_ERROR(dev, "906e not supported yet\n");
		return -EINVAL;
	}

	chan = nouveau_channel_get(file_priv, init->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	if (nouveau_ramht_find(chan, init->handle)) {
		ret = -EEXIST;
		goto out;
	}

	ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class);
	if (ret) {
		NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
			 ret, init->channel, init->handle);
	}

out:
	nouveau_channel_put(&chan);
	return ret;
}

int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_gpuobj_free *objfree = data;
	struct nouveau_channel *chan;
	int ret;

	chan = nouveau_channel_get(file_priv, objfree->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* Synchronize with the user channel */
	nouveau_channel_idle(chan);

	ret = nouveau_ramht_remove(chan, objfree->handle);
	nouveau_channel_put(&chan);
	return ret;
}
u32
nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;
	unsigned long flags;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64  ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;
		u32  val;

		spin_lock_irqsave(&dev_priv->vm_lock, flags);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
		spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
		return val;
	}

	return nv_ri32(dev, gpuobj->pinst + offset);
}

void
nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
{
	struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
	struct drm_device *dev = gpuobj->dev;
	unsigned long flags;

	if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
		u64  ptr = gpuobj->vinst + offset;
		u32 base = ptr >> 16;

		spin_lock_irqsave(&dev_priv->vm_lock, flags);
		if (dev_priv->ramin_base != base) {
			dev_priv->ramin_base = base;
			nv_wr32(dev, 0x001700, dev_priv->ramin_base);
		}
		nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
		spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
		return;
	}

	nv_wi32(dev, gpuobj->pinst + offset, val);
}