/* nouveau_object.c */

/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"

/* NVidia uses context objects to drive drawing operations.

   Context objects can be selected into 8 subchannels in the FIFO,
   and then used via DMA command buffers.

   A context object is referenced by a user-defined handle (CARD32). The HW
   looks up graphics objects in a hash table in the instance RAM.

   An entry in the hash table consists of 2 CARD32: the first contains
   the handle, the second a bitfield that contains the address of the
   object in instance RAM.

   The format of the second CARD32 seems to be:

   NV4 to NV30:

   15: 0 instance_addr >> 4
   17:16 engine (here uses 1 = graphics)
   28:24 channel id (here uses 0)
   31    valid (use 1)

   NV40:

   15: 0 instance_addr >> 4 (maybe 19-0)
   21:20 engine (here uses 1 = graphics)
   I'm unsure about the other bits, but using 0 seems to work.

   The key into the hash table depends on the object handle and channel id and
   is given as:
*/
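
/* Illustrative hash computation (not from the original source; it assumes
 * ramht_bits == 9, i.e. a 512-entry/4KiB RAMHT, a typical pre-NV50 setup):
 * handle 0x0000beef on channel 1 is folded in 9-bit chunks, giving
 * 0x0ef ^ 0x05f = 0x0b0; on pre-NV50 the channel id is XORed in as
 * (1 << 5), giving 0x090; the final shift by 3 yields byte offset 0x480
 * into the hash table.
 */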
static uint32_t
nouveau_ramht_hash_handle(struct drm_device *dev, int channel, uint32_t handle)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t hash = 0;
	int i;

	NV_DEBUG(dev, "ch%d handle=0x%08x\n", channel, handle);

	for (i = 32; i > 0; i -= dev_priv->ramht_bits) {
		hash ^= (handle & ((1 << dev_priv->ramht_bits) - 1));
		handle >>= dev_priv->ramht_bits;
	}

	if (dev_priv->card_type < NV_50)
		hash ^= channel << (dev_priv->ramht_bits - 4);
	hash <<= 3;

	NV_DEBUG(dev, "hash=0x%08x\n", hash);
	return hash;
}

static int
nouveau_ramht_entry_valid(struct drm_device *dev, struct nouveau_gpuobj *ramht,
			  uint32_t offset)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t ctx = nv_ro32(dev, ramht, (offset + 4)/4);

	if (dev_priv->card_type < NV_40)
		return ((ctx & NV_RAMHT_CONTEXT_VALID) != 0);
	return (ctx != 0);
}

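/* Insert a reference into the channel's RAMHT.  Collisions are resolved by
 * open addressing: starting from the hashed slot, probe forward in 8-byte
 * steps (wrapping at ramht_size) until a free entry is found, or fail with
 * -ENOMEM once the probe returns to the starting offset.
 */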
static int
nouveau_ramht_insert(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_channel *chan = ref->channel;
	struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
	uint32_t ctx, co, ho;

	if (!ramht) {
		NV_ERROR(dev, "No hash table!\n");
		return -EINVAL;
	}

	if (dev_priv->card_type < NV_40) {
		ctx = NV_RAMHT_CONTEXT_VALID | (ref->instance >> 4) |
		      (chan->id << NV_RAMHT_CONTEXT_CHANNEL_SHIFT) |
		      (ref->gpuobj->engine << NV_RAMHT_CONTEXT_ENGINE_SHIFT);
	} else
	if (dev_priv->card_type < NV_50) {
		ctx = (ref->instance >> 4) |
		      (chan->id << NV40_RAMHT_CONTEXT_CHANNEL_SHIFT) |
		      (ref->gpuobj->engine << NV40_RAMHT_CONTEXT_ENGINE_SHIFT);
	} else {
		if (ref->gpuobj->engine == NVOBJ_ENGINE_DISPLAY) {
			ctx = (ref->instance << 10) | 2;
		} else {
			ctx = (ref->instance >> 4) |
			      ((ref->gpuobj->engine <<
				NV40_RAMHT_CONTEXT_ENGINE_SHIFT));
		}
	}

	co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
	do {
		if (!nouveau_ramht_entry_valid(dev, ramht, co)) {
			NV_DEBUG(dev,
				 "insert ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
				 chan->id, co, ref->handle, ctx);
			nv_wo32(dev, ramht, (co + 0)/4, ref->handle);
			nv_wo32(dev, ramht, (co + 4)/4, ctx);

			list_add_tail(&ref->list, &chan->ramht_refs);
			instmem->flush(dev);
			return 0;
		}
		NV_DEBUG(dev, "collision ch%d 0x%08x: h=0x%08x\n",
			 chan->id, co, nv_ro32(dev, ramht, co/4));

		co += 8;
		if (co >= dev_priv->ramht_size)
			co = 0;
	} while (co != ho);

	NV_ERROR(dev, "RAMHT space exhausted. ch=%d\n", chan->id);
	return -ENOMEM;
}

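/* Remove a reference from the channel's RAMHT, probing with the same
 * open-addressing scheme as the insert path and clearing both words of
 * the matching entry.
 */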
static void
nouveau_ramht_remove(struct drm_device *dev, struct nouveau_gpuobj_ref *ref)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_channel *chan = ref->channel;
	struct nouveau_gpuobj *ramht = chan->ramht ? chan->ramht->gpuobj : NULL;
	uint32_t co, ho;

	if (!ramht) {
		NV_ERROR(dev, "No hash table!\n");
		return;
	}

	co = ho = nouveau_ramht_hash_handle(dev, chan->id, ref->handle);
	do {
		if (nouveau_ramht_entry_valid(dev, ramht, co) &&
		    (ref->handle == nv_ro32(dev, ramht, (co/4)))) {
			NV_DEBUG(dev,
				 "remove ch%d 0x%08x: h=0x%08x, c=0x%08x\n",
				 chan->id, co, ref->handle,
				 nv_ro32(dev, ramht, (co + 4)/4));
			nv_wo32(dev, ramht, (co + 0)/4, 0x00000000);
			nv_wo32(dev, ramht, (co + 4)/4, 0x00000000);

			list_del(&ref->list);
			instmem->flush(dev);
			return;
		}

		co += 8;
		if (co >= dev_priv->ramht_size)
			co = 0;
	} while (co != ho);
	list_del(&ref->list);

	NV_ERROR(dev, "RAMHT entry not found. ch=%d, handle=0x%08x\n",
		 chan->id, ref->handle);
}

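/* Allocate a new GPU object in instance memory (PRAMIN).  Channel-owned
 * objects come from the per-channel heap; global objects come from the
 * global heap and additionally go through the instmem engine's
 * populate/bind hooks.  With NVOBJ_FLAG_ZERO_ALLOC the new object is
 * cleared before it is returned.
 */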
int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
		   uint32_t size, int align, uint32_t flags,
		   struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	struct nouveau_gpuobj *gpuobj;
	struct drm_mm *pramin = NULL;
	int ret;

	NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
		 chan ? chan->id : -1, size, align, flags);

	if (!dev_priv || !gpuobj_ret || *gpuobj_ret != NULL)
		return -EINVAL;

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->flags = flags;
	gpuobj->im_channel = chan;

	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);

	/* Choose between global instmem heap, and per-channel private
	 * instmem heap.  On <NV50 allow requests for private instmem
	 * to be satisfied from global heap if no per-channel area
	 * available.
	 */
	if (chan) {
		NV_DEBUG(dev, "channel heap\n");
		pramin = &chan->ramin_heap;
	} else {
		NV_DEBUG(dev, "global heap\n");
		pramin = &dev_priv->ramin_heap;

		ret = engine->instmem.populate(dev, gpuobj, &size);
		if (ret) {
			nouveau_gpuobj_del(dev, &gpuobj);
			return ret;
		}
	}

	/* Allocate a chunk of the PRAMIN aperture */
	gpuobj->im_pramin = drm_mm_search_free(pramin, size, align, 0);
	if (gpuobj->im_pramin)
		gpuobj->im_pramin = drm_mm_get_block(gpuobj->im_pramin, size, align);

	if (!gpuobj->im_pramin) {
		nouveau_gpuobj_del(dev, &gpuobj);
		return -ENOMEM;
	}

	if (!chan) {
		ret = engine->instmem.bind(dev, gpuobj);
		if (ret) {
			nouveau_gpuobj_del(dev, &gpuobj);
			return ret;
		}
	}

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		int i;

		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
			nv_wo32(dev, gpuobj, i/4, 0);
		engine->instmem.flush(dev);
	}

	*gpuobj_ret = gpuobj;
	return 0;
}

int
nouveau_gpuobj_early_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_DEBUG(dev, "\n");

	INIT_LIST_HEAD(&dev_priv->gpuobj_list);

	return 0;
}

int
nouveau_gpuobj_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	NV_DEBUG(dev, "\n");

	if (dev_priv->card_type < NV_50) {
		ret = nouveau_gpuobj_new_fake(dev,
			dev_priv->ramht_offset, ~0, dev_priv->ramht_size,
			NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ALLOW_NO_REFS,
			&dev_priv->ramht, NULL);
		if (ret)
			return ret;
	}

	return 0;
}

void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	NV_DEBUG(dev, "\n");

	nouveau_gpuobj_del(dev, &dev_priv->ramht);
}

void
nouveau_gpuobj_late_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	struct list_head *entry, *tmp;

	NV_DEBUG(dev, "\n");

	list_for_each_safe(entry, tmp, &dev_priv->gpuobj_list) {
		gpuobj = list_entry(entry, struct nouveau_gpuobj, list);

		NV_ERROR(dev, "gpuobj %p still exists at takedown, refs=%d\n",
			 gpuobj, gpuobj->refcount);
		gpuobj->refcount = 0;
		nouveau_gpuobj_del(dev, &gpuobj);
	}
}

int
nouveau_gpuobj_del(struct drm_device *dev, struct nouveau_gpuobj **pgpuobj)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_engine *engine = &dev_priv->engine;
	struct nouveau_gpuobj *gpuobj;
	int i;

	NV_DEBUG(dev, "gpuobj %p\n", pgpuobj ? *pgpuobj : NULL);

	if (!dev_priv || !pgpuobj || !(*pgpuobj))
		return -EINVAL;
	gpuobj = *pgpuobj;

	if (gpuobj->refcount != 0) {
		NV_ERROR(dev, "gpuobj refcount is %d\n", gpuobj->refcount);
		return -EINVAL;
	}

	if (gpuobj->im_pramin && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
			nv_wo32(dev, gpuobj, i/4, 0);
		engine->instmem.flush(dev);
	}

	if (gpuobj->dtor)
		gpuobj->dtor(dev, gpuobj);

	if (gpuobj->im_backing && !(gpuobj->flags & NVOBJ_FLAG_FAKE))
		engine->instmem.clear(dev, gpuobj);

	if (gpuobj->im_pramin) {
		if (gpuobj->flags & NVOBJ_FLAG_FAKE)
			kfree(gpuobj->im_pramin);
		else
			drm_mm_put_block(gpuobj->im_pramin);
	}

	list_del(&gpuobj->list);

	*pgpuobj = NULL;
	kfree(gpuobj);
	return 0;
}

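/* Resolve the "instance" address the hardware should use for a gpuobj.
 * Pre-NV50 this is simply the object's PRAMIN offset; on NV50 it is
 * either an offset relative to the owning channel's PRAMIN block or, for
 * global objects, an address within the VRAM backing.
 */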
static int
nouveau_gpuobj_instance_get(struct drm_device *dev,
			    struct nouveau_channel *chan,
			    struct nouveau_gpuobj *gpuobj, uint32_t *inst)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *cpramin;

	/* <NV50 use PRAMIN address everywhere */
	if (dev_priv->card_type < NV_50) {
		*inst = gpuobj->im_pramin->start;
		return 0;
	}

	if (chan && gpuobj->im_channel != chan) {
		NV_ERROR(dev, "Channel mismatch: obj %d, ref %d\n",
			 gpuobj->im_channel->id, chan->id);
		return -EINVAL;
	}

	/* NV50 channel-local instance */
	if (chan) {
		cpramin = chan->ramin->gpuobj;
		*inst = gpuobj->im_pramin->start - cpramin->im_pramin->start;
		return 0;
	}

	/* NV50 global (VRAM) instance */
	if (!gpuobj->im_channel) {
		/* ...from global heap */
		if (!gpuobj->im_backing) {
			NV_ERROR(dev, "AII, no VRAM backing gpuobj\n");
			return -EINVAL;
		}
		*inst = gpuobj->im_backing_start;
		return 0;
	} else {
		/* ...from local heap */
		cpramin = gpuobj->im_channel->ramin->gpuobj;
		*inst = cpramin->im_backing_start +
			(gpuobj->im_pramin->start - cpramin->im_pramin->start);
		return 0;
	}

	return -EINVAL;
}

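/* Create a nouveau_gpuobj_ref for a gpuobj and bump its refcount.  If a
 * channel is given and no ref pointer is requested, the reference is
 * published to the channel's RAMHT under 'handle' so the hardware can
 * find the object; otherwise the ref is returned to the caller with its
 * handle set to ~0 (not hashed).
 */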
int
nouveau_gpuobj_ref_add(struct drm_device *dev, struct nouveau_channel *chan,
		       uint32_t handle, struct nouveau_gpuobj *gpuobj,
		       struct nouveau_gpuobj_ref **ref_ret)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj_ref *ref;
	uint32_t instance;
	int ret;

	NV_DEBUG(dev, "ch%d h=0x%08x gpuobj=%p\n",
		 chan ? chan->id : -1, handle, gpuobj);

	if (!dev_priv || !gpuobj || (ref_ret && *ref_ret != NULL))
		return -EINVAL;

	if (!chan && !ref_ret)
		return -EINVAL;

	if (gpuobj->engine == NVOBJ_ENGINE_SW && !gpuobj->im_pramin) {
		/* sw object */
		instance = 0x40;
	} else {
		ret = nouveau_gpuobj_instance_get(dev, chan, gpuobj, &instance);
		if (ret)
			return ret;
	}

	ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (!ref)
		return -ENOMEM;
	INIT_LIST_HEAD(&ref->list);
	ref->gpuobj = gpuobj;
	ref->channel = chan;
	ref->instance = instance;

	if (!ref_ret) {
		ref->handle = handle;

		ret = nouveau_ramht_insert(dev, ref);
		if (ret) {
			kfree(ref);
			return ret;
		}
	} else {
		ref->handle = ~0;
		*ref_ret = ref;
	}

	ref->gpuobj->refcount++;
	return 0;
}

int nouveau_gpuobj_ref_del(struct drm_device *dev, struct nouveau_gpuobj_ref **pref)
{
	struct nouveau_gpuobj_ref *ref;

	NV_DEBUG(dev, "ref %p\n", pref ? *pref : NULL);

	if (!dev || !pref || *pref == NULL)
		return -EINVAL;
	ref = *pref;

	if (ref->handle != ~0)
		nouveau_ramht_remove(dev, ref);

	if (ref->gpuobj) {
		ref->gpuobj->refcount--;

		if (ref->gpuobj->refcount == 0) {
			if (!(ref->gpuobj->flags & NVOBJ_FLAG_ALLOW_NO_REFS))
				nouveau_gpuobj_del(dev, &ref->gpuobj);
		}
	}

	*pref = NULL;
	kfree(ref);
	return 0;
}

int
nouveau_gpuobj_new_ref(struct drm_device *dev,
		       struct nouveau_channel *oc, struct nouveau_channel *rc,
		       uint32_t handle, uint32_t size, int align,
		       uint32_t flags, struct nouveau_gpuobj_ref **ref)
{
	struct nouveau_gpuobj *gpuobj = NULL;
	int ret;

	ret = nouveau_gpuobj_new(dev, oc, size, align, flags, &gpuobj);
	if (ret)
		return ret;

	ret = nouveau_gpuobj_ref_add(dev, rc, handle, gpuobj, ref);
	if (ret) {
		nouveau_gpuobj_del(dev, &gpuobj);
		return ret;
	}

	return 0;
}

int
nouveau_gpuobj_ref_find(struct nouveau_channel *chan, uint32_t handle,
			struct nouveau_gpuobj_ref **ref_ret)
{
	struct nouveau_gpuobj_ref *ref;
	struct list_head *entry, *tmp;

	list_for_each_safe(entry, tmp, &chan->ramht_refs) {
		ref = list_entry(entry, struct nouveau_gpuobj_ref, list);

		if (ref->handle == handle) {
			if (ref_ret)
				*ref_ret = ref;
			return 0;
		}
	}

	return -EINVAL;
}

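/* Wrap an already-existing region of PRAMIN and/or VRAM in a "fake" gpuobj
 * without allocating from either heap.  Used for fixed areas such as the
 * pre-NV50 RAMHT and the NV50 per-channel page directory; NVOBJ_FLAG_FAKE
 * ensures the region itself is never handed back to an allocator on free.
 */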
int
nouveau_gpuobj_new_fake(struct drm_device *dev, uint32_t p_offset,
			uint32_t b_offset, uint32_t size,
			uint32_t flags, struct nouveau_gpuobj **pgpuobj,
			struct nouveau_gpuobj_ref **pref)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj = NULL;
	int i;

	NV_DEBUG(dev,
		 "p_offset=0x%08x b_offset=0x%08x size=0x%08x flags=0x%08x\n",
		 p_offset, b_offset, size, flags);

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
	gpuobj->im_channel = NULL;
	gpuobj->flags = flags | NVOBJ_FLAG_FAKE;

	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);

	if (p_offset != ~0) {
		gpuobj->im_pramin = kzalloc(sizeof(struct drm_mm_node),
					    GFP_KERNEL);
		if (!gpuobj->im_pramin) {
			nouveau_gpuobj_del(dev, &gpuobj);
			return -ENOMEM;
		}
		gpuobj->im_pramin->start = p_offset;
		gpuobj->im_pramin->size  = size;
	}

	if (b_offset != ~0) {
		gpuobj->im_backing = (struct nouveau_bo *)-1;
		gpuobj->im_backing_start = b_offset;
	}

	if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
		for (i = 0; i < gpuobj->im_pramin->size; i += 4)
			nv_wo32(dev, gpuobj, i/4, 0);
		dev_priv->engine.instmem.flush(dev);
	}

	if (pref) {
		i = nouveau_gpuobj_ref_add(dev, NULL, 0, gpuobj, pref);
		if (i) {
			nouveau_gpuobj_del(dev, &gpuobj);
			return i;
		}
	}

	if (pgpuobj)
		*pgpuobj = gpuobj;
	return 0;
}

static uint32_t
nouveau_gpuobj_class_instmem_size(struct drm_device *dev, int class)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*XXX: dodgy hack for now */
	if (dev_priv->card_type >= NV_50)
		return 24;
	if (dev_priv->card_type >= NV_40)
		return 32;
	return 16;
}

/*
   DMA objects are used to reference a piece of memory in the
   framebuffer, PCI or AGP address space. Each object is 16 bytes big
   and looks as follows:

   entry[0]
   11:0  class (seems like I can always use 0 here)
   12    page table present?
   13    page entry linear?
   15:14 access: 0 rw, 1 ro, 2 wo
   17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
   31:20 dma adjust (bits 0-11 of the address)
   entry[1]
   dma limit (size of transfer)
   entry[X]
   1     0 readonly, 1 readwrite
   31:12 dma frame address of the page (bits 12-31 of the address)
   entry[N]
   page table terminator; nvidia uses the same value as the first pte,
   rivatv uses 0xffffffff

   Non-linear page tables need a list of frame addresses afterwards;
   the rivatv project has some info on this.

   The method below creates a DMA object in instance RAM and returns a handle
   to it that can be used to set up context objects.
*/
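
/* Illustrative pre-NV50 encoding (not from the original source; values for
 * a hypothetical read/write mapping of offset 0x12345678, size 0x1000):
 * the offset splits into adjust = 0x678 and frame = 0x12345000, word 1
 * becomes size - 1 = 0xfff, and words 2 and 3 become 0x12345002
 * (frame | writable PTE flag), while word 0 packs the class, the adjust
 * field and the access/target/page-table bits.
 */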
int
nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class,
		       uint64_t offset, uint64_t size, int access,
		       int target, struct nouveau_gpuobj **gpuobj)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x offset=0x%llx size=0x%llx\n",
		 chan->id, class, offset, size);
	NV_DEBUG(dev, "access=%d target=%d\n", access, target);

	switch (target) {
	case NV_DMA_TARGET_AGP:
		offset += dev_priv->gart_info.aper_base;
		break;
	default:
		break;
	}

	ret = nouveau_gpuobj_new(dev, chan,
				 nouveau_gpuobj_class_instmem_size(dev, class),
				 16, NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
		return ret;
	}

	if (dev_priv->card_type < NV_50) {
		uint32_t frame, adjust, pte_flags = 0;

		if (access != NV_DMA_ACCESS_RO)
			pte_flags |= (1<<1);
		adjust = offset &  0x00000fff;
		frame  = offset & ~0x00000fff;

		nv_wo32(dev, *gpuobj, 0, ((1<<12) | (1<<13) |
					  (adjust << 20) |
					  (access << 14) |
					  (target << 16) |
					  class));
		nv_wo32(dev, *gpuobj, 1, size - 1);
		nv_wo32(dev, *gpuobj, 2, frame | pte_flags);
		nv_wo32(dev, *gpuobj, 3, frame | pte_flags);
	} else {
		uint64_t limit = offset + size - 1;
		uint32_t flags0, flags5;

		if (target == NV_DMA_TARGET_VIDMEM) {
			flags0 = 0x00190000;
			flags5 = 0x00010000;
		} else {
			flags0 = 0x7fc00000;
			flags5 = 0x00080000;
		}

		nv_wo32(dev, *gpuobj, 0, flags0 | class);
		nv_wo32(dev, *gpuobj, 1, lower_32_bits(limit));
		nv_wo32(dev, *gpuobj, 2, lower_32_bits(offset));
		nv_wo32(dev, *gpuobj, 3, ((upper_32_bits(limit) & 0xff) << 24) |
					  (upper_32_bits(offset) & 0xff));
		nv_wo32(dev, *gpuobj, 5, flags5);
	}

	instmem->flush(dev);

	(*gpuobj)->engine = NVOBJ_ENGINE_SW;
	(*gpuobj)->class  = class;
	return 0;
}

int
nouveau_gpuobj_gart_dma_new(struct nouveau_channel *chan,
			    uint64_t offset, uint64_t size, int access,
			    struct nouveau_gpuobj **gpuobj,
			    uint32_t *o_ret)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	if (dev_priv->gart_info.type == NOUVEAU_GART_AGP ||
	    (dev_priv->card_type >= NV_50 &&
	     dev_priv->gart_info.type == NOUVEAU_GART_SGDMA)) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     offset + dev_priv->vm_gart_base,
					     size, access, NV_DMA_TARGET_AGP,
					     gpuobj);
		if (o_ret)
			*o_ret = 0;
	} else
	if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA) {
		*gpuobj = dev_priv->gart_info.sg_ctxdma;
		if (offset & ~0xffffffffULL) {
			NV_ERROR(dev, "obj offset exceeds 32-bits\n");
			return -EINVAL;
		}
		if (o_ret)
			*o_ret = (uint32_t)offset;
		ret = (*gpuobj != NULL) ? 0 : -EINVAL;
	} else {
		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
		return -EINVAL;
	}

	return ret;
}

/* Context objects in the instance RAM have the following structure.
 * On NV40 they are 32 bytes long, on NV30 and smaller 16 bytes.

   NV4 - NV30:

   entry[0]
   11:0  class
   12    chroma key enable
   13    user clip enable
   14    swizzle enable
   17:15 patch config:
	 scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
   18    synchronize enable
   19    endian: 1 big, 0 little
   21:20 dither mode
   23    single step enable
   24    patch status: 0 invalid, 1 valid
   25    context_surface 0: 1 valid
   26    context surface 1: 1 valid
   27    context pattern: 1 valid
   28    context rop: 1 valid
   29,30 context beta, beta4
   entry[1]
   7:0   mono format
   15:8  color format
   31:16 notify instance address
   entry[2]
   15:0  dma 0 instance address
   31:16 dma 1 instance address
   entry[3]
   dma method traps

   NV40:
   No idea what the exact format is. Here's what can be deduced:

   entry[0]:
   11:0  class (maybe uses more bits here?)
   17    user clip enable
   21:19 patch config
   25    patch status valid ?
   entry[1]:
   15:0  DMA notifier (maybe 20:0)
   entry[2]:
   15:0  DMA 0 instance (maybe 20:0)
   24    big endian
   entry[3]:
   15:0  DMA 1 instance (maybe 20:0)
   entry[4]:
   entry[5]:
   set to 0?
*/
int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, int class,
		      struct nouveau_gpuobj **gpuobj)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int ret;

	NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);

	ret = nouveau_gpuobj_new(dev, chan,
				 nouveau_gpuobj_class_instmem_size(dev, class),
				 16,
				 NVOBJ_FLAG_ZERO_ALLOC | NVOBJ_FLAG_ZERO_FREE,
				 gpuobj);
	if (ret) {
		NV_ERROR(dev, "Error creating gpuobj: %d\n", ret);
		return ret;
	}

	if (dev_priv->card_type >= NV_50) {
		nv_wo32(dev, *gpuobj, 0, class);
		nv_wo32(dev, *gpuobj, 5, 0x00010000);
	} else {
		switch (class) {
		case NV_CLASS_NULL:
			nv_wo32(dev, *gpuobj, 0, 0x00001030);
			nv_wo32(dev, *gpuobj, 1, 0xFFFFFFFF);
			break;
		default:
			if (dev_priv->card_type >= NV_40) {
				nv_wo32(dev, *gpuobj, 0, class);
#ifdef __BIG_ENDIAN
				nv_wo32(dev, *gpuobj, 2, 0x01000000);
#endif
			} else {
#ifdef __BIG_ENDIAN
				nv_wo32(dev, *gpuobj, 0, class | 0x00080000);
#else
				nv_wo32(dev, *gpuobj, 0, class);
#endif
			}
		}
	}
	dev_priv->engine.instmem.flush(dev);

	(*gpuobj)->engine = NVOBJ_ENGINE_GR;
	(*gpuobj)->class  = class;
	return 0;
}

int
nouveau_gpuobj_sw_new(struct nouveau_channel *chan, int class,
		      struct nouveau_gpuobj **gpuobj_ret)
{
	struct drm_nouveau_private *dev_priv;
	struct nouveau_gpuobj *gpuobj;

	if (!chan || !gpuobj_ret || *gpuobj_ret != NULL)
		return -EINVAL;
	dev_priv = chan->dev->dev_private;

	gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
	if (!gpuobj)
		return -ENOMEM;
	gpuobj->engine = NVOBJ_ENGINE_SW;
	gpuobj->class = class;

	list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
	*gpuobj_ret = gpuobj;
	return 0;
}

static int
nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *pramin = NULL;
	uint32_t size;
	uint32_t base;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* Base amount for object storage (4KiB enough?) */
	size = 0x1000;
	base = 0;

	/* PGRAPH context */
	size += dev_priv->engine.graph.grctx_size;

	if (dev_priv->card_type == NV_50) {
		/* Various fixed table thingos */
		size += 0x1400; /* mostly unknown stuff */
		size += 0x4000; /* vm pd */
		base  = 0x6000;
		/* RAMHT, not sure about setting size yet, 32KiB to be safe */
		size += 0x8000;
		/* RAMFC */
		size += 0x1000;
	}

	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, size, 0x1000, 0,
				     &chan->ramin);
	if (ret) {
		NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
		return ret;
	}
	pramin = chan->ramin->gpuobj;

	ret = drm_mm_init(&chan->ramin_heap, pramin->im_pramin->start + base, size);
	if (ret) {
		NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
		nouveau_gpuobj_ref_del(dev, &chan->ramin);
		return ret;
	}

	return 0;
}

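/* Per-channel object setup: allocate the channel's PRAMIN area, build the
 * NV50 page directory and map the GART/VRAM page tables into it, attach
 * (or allocate) a RAMHT, and create the VRAM and TT DMA context objects
 * referenced by vram_h and tt_h.
 */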
int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
			    uint32_t vram_h, uint32_t tt_h)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
	struct nouveau_gpuobj *vram = NULL, *tt = NULL;
	int ret, i;

	INIT_LIST_HEAD(&chan->ramht_refs);

	NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);

	/* Allocate a chunk of memory for per-channel object storage */
	ret = nouveau_gpuobj_channel_init_pramin(chan);
	if (ret) {
		NV_ERROR(dev, "init pramin\n");
		return ret;
	}

	/* NV50 VM
	 *  - Allocate per-channel page-directory
	 *  - Map GART and VRAM into the channel's address space at the
	 *    locations determined during init.
	 */
	if (dev_priv->card_type >= NV_50) {
		uint32_t vm_offset, pde;

		vm_offset = (dev_priv->chipset & 0xf0) == 0x50 ? 0x1400 : 0x200;
		vm_offset += chan->ramin->gpuobj->im_pramin->start;

		ret = nouveau_gpuobj_new_fake(dev, vm_offset, ~0, 0x4000,
					      0, &chan->vm_pd, NULL);
		if (ret)
			return ret;
		for (i = 0; i < 0x4000; i += 8) {
			nv_wo32(dev, chan->vm_pd, (i+0)/4, 0x00000000);
			nv_wo32(dev, chan->vm_pd, (i+4)/4, 0xdeadcafe);
		}

		pde = (dev_priv->vm_gart_base / (512*1024*1024)) * 2;
		ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
					     dev_priv->gart_info.sg_ctxdma,
					     &chan->vm_gart_pt);
		if (ret)
			return ret;
		nv_wo32(dev, chan->vm_pd, pde++,
			chan->vm_gart_pt->instance | 0x03);
		nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);

		pde = (dev_priv->vm_vram_base / (512*1024*1024)) * 2;
		for (i = 0; i < dev_priv->vm_vram_pt_nr; i++) {
			ret = nouveau_gpuobj_ref_add(dev, NULL, 0,
						     dev_priv->vm_vram_pt[i],
						     &chan->vm_vram_pt[i]);
			if (ret)
				return ret;

			nv_wo32(dev, chan->vm_pd, pde++,
				chan->vm_vram_pt[i]->instance | 0x61);
			nv_wo32(dev, chan->vm_pd, pde++, 0x00000000);
		}

		instmem->flush(dev);
	}

	/* RAMHT */
	if (dev_priv->card_type < NV_50) {
		ret = nouveau_gpuobj_ref_add(dev, NULL, 0, dev_priv->ramht,
					     &chan->ramht);
		if (ret)
			return ret;
	} else {
		ret = nouveau_gpuobj_new_ref(dev, chan, chan, 0,
					     0x8000, 16,
					     NVOBJ_FLAG_ZERO_ALLOC,
					     &chan->ramht);
		if (ret)
			return ret;
	}

	/* VRAM ctxdma */
	if (dev_priv->card_type >= NV_50) {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->vm_end,
					     NV_DMA_ACCESS_RW,
					     NV_DMA_TARGET_AGP, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	} else {
		ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
					     0, dev_priv->fb_available_size,
					     NV_DMA_ACCESS_RW,
					     NV_DMA_TARGET_VIDMEM, &vram);
		if (ret) {
			NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
			return ret;
		}
	}

	ret = nouveau_gpuobj_ref_add(dev, chan, vram_h, vram, NULL);
	if (ret) {
		NV_ERROR(dev, "Error referencing VRAM ctxdma: %d\n", ret);
		return ret;
	}

	/* TT memory ctxdma */
	if (dev_priv->card_type >= NV_50) {
		tt = vram;
	} else
	if (dev_priv->gart_info.type != NOUVEAU_GART_NONE) {
		ret = nouveau_gpuobj_gart_dma_new(chan, 0,
						  dev_priv->gart_info.aper_size,
						  NV_DMA_ACCESS_RW, &tt, NULL);
	} else {
		NV_ERROR(dev, "Invalid GART type %d\n", dev_priv->gart_info.type);
		ret = -EINVAL;
	}

	if (ret) {
		NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
		return ret;
	}

	ret = nouveau_gpuobj_ref_add(dev, chan, tt_h, tt, NULL);
	if (ret) {
		NV_ERROR(dev, "Error referencing TT ctxdma: %d\n", ret);
		return ret;
	}

	return 0;
}

void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
	struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
	struct drm_device *dev = chan->dev;
	struct list_head *entry, *tmp;
	struct nouveau_gpuobj_ref *ref;
	int i;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	if (!chan->ramht_refs.next)
		return;

	list_for_each_safe(entry, tmp, &chan->ramht_refs) {
		ref = list_entry(entry, struct nouveau_gpuobj_ref, list);

		nouveau_gpuobj_ref_del(dev, &ref);
	}

	nouveau_gpuobj_ref_del(dev, &chan->ramht);

	nouveau_gpuobj_del(dev, &chan->vm_pd);
	nouveau_gpuobj_ref_del(dev, &chan->vm_gart_pt);
	for (i = 0; i < dev_priv->vm_vram_pt_nr; i++)
		nouveau_gpuobj_ref_del(dev, &chan->vm_vram_pt[i]);

	if (chan->ramin_heap.free_stack.next)
		drm_mm_takedown(&chan->ramin_heap);
	if (chan->ramin)
		nouveau_gpuobj_ref_del(dev, &chan->ramin);
}

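/* Suspend/resume support: on pre-NV50 cards the reserved PRAMIN block is
 * copied wholesale into a vmalloc'd buffer and written back on resume; on
 * NV50 each VRAM-backed (non-fake) gpuobj gets its own snapshot instead.
 */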
int
nouveau_gpuobj_suspend(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	if (dev_priv->card_type < NV_50) {
		dev_priv->susres.ramin_copy = vmalloc(dev_priv->ramin_rsvd_vram);
		if (!dev_priv->susres.ramin_copy)
			return -ENOMEM;

		for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
			dev_priv->susres.ramin_copy[i/4] = nv_ri32(dev, i);
		return 0;
	}

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->im_backing || (gpuobj->flags & NVOBJ_FLAG_FAKE))
			continue;

		gpuobj->im_backing_suspend = vmalloc(gpuobj->im_pramin->size);
		if (!gpuobj->im_backing_suspend) {
			nouveau_gpuobj_resume(dev);
			return -ENOMEM;
		}

		for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
			gpuobj->im_backing_suspend[i] = nv_ro32(dev, gpuobj, i);
	}

	return 0;
}

void
nouveau_gpuobj_suspend_cleanup(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;

	if (dev_priv->card_type < NV_50) {
		vfree(dev_priv->susres.ramin_copy);
		dev_priv->susres.ramin_copy = NULL;
		return;
	}

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->im_backing_suspend)
			continue;

		vfree(gpuobj->im_backing_suspend);
		gpuobj->im_backing_suspend = NULL;
	}
}

void
nouveau_gpuobj_resume(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *gpuobj;
	int i;

	if (dev_priv->card_type < NV_50) {
		for (i = 0; i < dev_priv->ramin_rsvd_vram; i += 4)
			nv_wi32(dev, i, dev_priv->susres.ramin_copy[i/4]);
		nouveau_gpuobj_suspend_cleanup(dev);
		return;
	}

	list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
		if (!gpuobj->im_backing_suspend)
			continue;

		for (i = 0; i < gpuobj->im_pramin->size / 4; i++)
			nv_wo32(dev, gpuobj, i, gpuobj->im_backing_suspend[i]);
		dev_priv->engine.instmem.flush(dev);
	}

	nouveau_gpuobj_suspend_cleanup(dev);
}

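/* Userspace object management: the GROBJ_ALLOC ioctl validates the
 * requested class against PGRAPH's class list, rejects duplicate handles,
 * and creates either a hardware graphics object or a software object; the
 * GPUOBJ_FREE ioctl drops the corresponding reference.
 */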
int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_grobj_alloc *init = data;
	struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
	struct nouveau_pgraph_object_class *grc;
	struct nouveau_gpuobj *gr = NULL;
	struct nouveau_channel *chan;
	int ret;

	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(init->channel, file_priv, chan);

	if (init->handle == ~0)
		return -EINVAL;

	grc = pgraph->grclass;
	while (grc->id) {
		if (grc->id == init->class)
			break;
		grc++;
	}

	if (!grc->id) {
		NV_ERROR(dev, "Illegal object class: 0x%x\n", init->class);
		return -EPERM;
	}

	if (nouveau_gpuobj_ref_find(chan, init->handle, NULL) == 0)
		return -EEXIST;

	if (!grc->software)
		ret = nouveau_gpuobj_gr_new(chan, grc->id, &gr);
	else
		ret = nouveau_gpuobj_sw_new(chan, grc->id, &gr);

	if (ret) {
		NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
			 ret, init->channel, init->handle);
		return ret;
	}

	ret = nouveau_gpuobj_ref_add(dev, chan, init->handle, gr, NULL);
	if (ret) {
		NV_ERROR(dev, "Error referencing object: %d (%d/0x%08x)\n",
			 ret, init->channel, init->handle);
		nouveau_gpuobj_del(dev, &gr);
		return ret;
	}

	return 0;
}

int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_nouveau_gpuobj_free *objfree = data;
	struct nouveau_gpuobj_ref *ref;
	struct nouveau_channel *chan;
	int ret;

	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(objfree->channel, file_priv, chan);

	ret = nouveau_gpuobj_ref_find(chan, objfree->handle, &ref);
	if (ret)
		return ret;
	nouveau_gpuobj_ref_del(dev, &ref);

	return 0;
}