nouveau_object.c

/*
 * Copyright (C) 2006 Ben Skeggs.
 *
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * Authors:
 *   Ben Skeggs <darktama@iinet.net.au>
 */

#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
#include "nouveau_drm.h"
#include "nouveau_ramht.h"
#include "nouveau_vm.h"
#include "nv50_display.h"

struct nouveau_gpuobj_method {
        struct list_head head;
        u32 mthd;
        int (*exec)(struct nouveau_channel *, u32 class, u32 mthd, u32 data);
};

struct nouveau_gpuobj_class {
        struct list_head head;
        struct list_head methods;
        u32 id;
        u32 engine;
};

int
nouveau_gpuobj_class_new(struct drm_device *dev, u32 class, u32 engine)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj_class *oc;

        oc = kzalloc(sizeof(*oc), GFP_KERNEL);
        if (!oc)
                return -ENOMEM;

        INIT_LIST_HEAD(&oc->methods);
        oc->id = class;
        oc->engine = engine;
        list_add(&oc->head, &dev_priv->classes);
        return 0;
}

int
nouveau_gpuobj_mthd_new(struct drm_device *dev, u32 class, u32 mthd,
                        int (*exec)(struct nouveau_channel *, u32, u32, u32))
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj_method *om;
        struct nouveau_gpuobj_class *oc;

        list_for_each_entry(oc, &dev_priv->classes, head) {
                if (oc->id == class)
                        goto found;
        }

        return -EINVAL;

found:
        om = kzalloc(sizeof(*om), GFP_KERNEL);
        if (!om)
                return -ENOMEM;

        om->mthd = mthd;
        om->exec = exec;
        list_add(&om->head, &oc->methods);
        return 0;
}

int
nouveau_gpuobj_mthd_call(struct nouveau_channel *chan,
                         u32 class, u32 mthd, u32 data)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct nouveau_gpuobj_method *om;
        struct nouveau_gpuobj_class *oc;

        list_for_each_entry(oc, &dev_priv->classes, head) {
                if (oc->id != class)
                        continue;

                list_for_each_entry(om, &oc->methods, head) {
                        if (om->mthd == mthd)
                                return om->exec(chan, class, mthd, data);
                }
        }

        return -ENOENT;
}

int
nouveau_gpuobj_mthd_call2(struct drm_device *dev, int chid,
                          u32 class, u32 mthd, u32 data)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_channel *chan = NULL;
        unsigned long flags;
        int ret = -EINVAL;

        spin_lock_irqsave(&dev_priv->channels.lock, flags);
        if (chid > 0 && chid < dev_priv->engine.fifo.channels)
                chan = dev_priv->channels.ptr[chid];
        if (chan)
                ret = nouveau_gpuobj_mthd_call(chan, class, mthd, data);
        spin_unlock_irqrestore(&dev_priv->channels.lock, flags);
        return ret;
}
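
/*
 * Usage sketch (illustrative only, not part of this file): an engine
 * driver registers a class and its software methods at init time, and
 * trapped methods are later dispatched back through the lookup above.
 * The class/method numbers and the handler name below are hypothetical
 * examples, not values this file defines.
 *
 *	nouveau_gpuobj_class_new(dev, 0x506e, NVOBJ_ENGINE_SW);
 *	nouveau_gpuobj_mthd_new(dev, 0x506e, 0x0500, example_sw_mthd);
 *
 *	...and later, e.g. from an interrupt handler that only knows the
 *	channel id:
 *
 *	nouveau_gpuobj_mthd_call2(dev, chid, 0x506e, 0x0500, data);
 */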
/* NVidia uses context objects to drive drawing operations.

   Context objects can be selected into 8 subchannels in the FIFO,
   and then used via DMA command buffers.

   A context object is referenced by a user defined handle (CARD32). The HW
   looks up graphics objects in a hash table in the instance RAM.

   An entry in the hash table consists of 2 CARD32. The first CARD32 contains
   the handle, the second one a bitfield that contains the address of the
   object in instance RAM.

   The format of the second CARD32 seems to be:

   NV4 to NV30:

   15: 0 instance_addr >> 4
   17:16 engine (here uses 1 = graphics)
   28:24 channel id (here uses 0)
   31    valid (use 1)

   NV40:

   15: 0 instance_addr >> 4 (maybe 19-0)
   21:20 engine (here uses 1 = graphics)
   I'm unsure about the other bits, but using 0 seems to work.

   The key into the hash table depends on the object handle and channel id and
   is given as:
*/
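
/*
 * Illustrative sketch of packing the second hash-table CARD32 for an
 * NV4-NV30 part from the bitfield layout above.  The field values and
 * the helper itself are assumptions for the example; nothing in this
 * file builds the entry this way directly.
 */
#if 0
static u32
example_nv04_ramht_context(u32 instance_addr, u32 engine, u32 chid)
{
        u32 ctx;

        ctx  = (instance_addr >> 4) & 0xffff;   /* 15:0  instance_addr >> 4 */
        ctx |= (engine & 0x3) << 16;            /* 17:16 engine */
        ctx |= (chid & 0x1f) << 24;             /* 28:24 channel id */
        ctx |= 0x80000000;                      /* 31    valid */
        return ctx;     /* e.g. (0x1200, 1, 0) -> 0x80010120 */
}
#endif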
int
nouveau_gpuobj_new(struct drm_device *dev, struct nouveau_channel *chan,
                   uint32_t size, int align, uint32_t flags,
                   struct nouveau_gpuobj **gpuobj_ret)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
        struct nouveau_gpuobj *gpuobj;
        struct drm_mm_node *ramin = NULL;
        int ret, i;

        NV_DEBUG(dev, "ch%d size=%u align=%d flags=0x%08x\n",
                 chan ? chan->id : -1, size, align, flags);

        gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
        if (!gpuobj)
                return -ENOMEM;
        NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
        gpuobj->dev = dev;
        gpuobj->flags = flags;
        kref_init(&gpuobj->refcount);
        gpuobj->size = size;

        spin_lock(&dev_priv->ramin_lock);
        list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
        spin_unlock(&dev_priv->ramin_lock);

        if (chan) {
                ramin = drm_mm_search_free(&chan->ramin_heap, size, align, 0);
                if (ramin)
                        ramin = drm_mm_get_block(ramin, size, align);
                if (!ramin) {
                        nouveau_gpuobj_ref(NULL, &gpuobj);
                        return -ENOMEM;
                }

                gpuobj->pinst = chan->ramin->pinst;
                if (gpuobj->pinst != ~0)
                        gpuobj->pinst += ramin->start;

                gpuobj->cinst = ramin->start;
                gpuobj->vinst = ramin->start + chan->ramin->vinst;
                gpuobj->node = ramin;
        } else {
                ret = instmem->get(gpuobj, size, align);
                if (ret) {
                        nouveau_gpuobj_ref(NULL, &gpuobj);
                        return ret;
                }

                ret = -ENOSYS;
                if (!(flags & NVOBJ_FLAG_DONT_MAP))
                        ret = instmem->map(gpuobj);
                if (ret)
                        gpuobj->pinst = ~0;

                gpuobj->cinst = NVOBJ_CINST_GLOBAL;
        }

        if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
                for (i = 0; i < gpuobj->size; i += 4)
                        nv_wo32(gpuobj, i, 0);
                instmem->flush(dev);
        }

        *gpuobj_ret = gpuobj;
        return 0;
}
int
nouveau_gpuobj_init(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        NV_DEBUG(dev, "\n");

        INIT_LIST_HEAD(&dev_priv->gpuobj_list);
        INIT_LIST_HEAD(&dev_priv->classes);
        spin_lock_init(&dev_priv->ramin_lock);
        dev_priv->ramin_base = ~0;

        return 0;
}

void
nouveau_gpuobj_takedown(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj_method *om, *tm;
        struct nouveau_gpuobj_class *oc, *tc;

        NV_DEBUG(dev, "\n");

        list_for_each_entry_safe(oc, tc, &dev_priv->classes, head) {
                list_for_each_entry_safe(om, tm, &oc->methods, head) {
                        list_del(&om->head);
                        kfree(om);
                }
                list_del(&oc->head);
                kfree(oc);
        }

        BUG_ON(!list_empty(&dev_priv->gpuobj_list));
}

static void
nouveau_gpuobj_del(struct kref *ref)
{
        struct nouveau_gpuobj *gpuobj =
                container_of(ref, struct nouveau_gpuobj, refcount);
        struct drm_device *dev = gpuobj->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
        int i;

        NV_DEBUG(dev, "gpuobj %p\n", gpuobj);

        if (gpuobj->node && (gpuobj->flags & NVOBJ_FLAG_ZERO_FREE)) {
                for (i = 0; i < gpuobj->size; i += 4)
                        nv_wo32(gpuobj, i, 0);
                instmem->flush(dev);
        }

        if (gpuobj->dtor)
                gpuobj->dtor(dev, gpuobj);

        if (gpuobj->cinst == NVOBJ_CINST_GLOBAL) {
                if (gpuobj->node) {
                        instmem->unmap(gpuobj);
                        instmem->put(gpuobj);
                }
        } else {
                if (gpuobj->node) {
                        spin_lock(&dev_priv->ramin_lock);
                        drm_mm_put_block(gpuobj->node);
                        spin_unlock(&dev_priv->ramin_lock);
                }
        }

        spin_lock(&dev_priv->ramin_lock);
        list_del(&gpuobj->list);
        spin_unlock(&dev_priv->ramin_lock);

        kfree(gpuobj);
}

void
nouveau_gpuobj_ref(struct nouveau_gpuobj *ref, struct nouveau_gpuobj **ptr)
{
        if (ref)
                kref_get(&ref->refcount);

        if (*ptr)
                kref_put(&(*ptr)->refcount, nouveau_gpuobj_del);

        *ptr = ref;
}
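
/* Note on nouveau_gpuobj_ref() above: it is a combined get/put/assign.
 * nouveau_gpuobj_ref(obj, &ptr) takes a reference on obj and releases
 * whatever ptr previously held; nouveau_gpuobj_ref(NULL, &ptr) just drops
 * the existing reference, which is the idiom used to free objects
 * throughout this file.
 */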
int
nouveau_gpuobj_new_fake(struct drm_device *dev, u32 pinst, u64 vinst,
                        u32 size, u32 flags, struct nouveau_gpuobj **pgpuobj)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = NULL;
        int i;

        NV_DEBUG(dev,
                 "pinst=0x%08x vinst=0x%010llx size=0x%08x flags=0x%08x\n",
                 pinst, vinst, size, flags);

        gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
        if (!gpuobj)
                return -ENOMEM;
        NV_DEBUG(dev, "gpuobj %p\n", gpuobj);
        gpuobj->dev = dev;
        gpuobj->flags = flags;
        kref_init(&gpuobj->refcount);
        gpuobj->size = size;
        gpuobj->pinst = pinst;
        gpuobj->cinst = NVOBJ_CINST_GLOBAL;
        gpuobj->vinst = vinst;

        if (gpuobj->flags & NVOBJ_FLAG_ZERO_ALLOC) {
                for (i = 0; i < gpuobj->size; i += 4)
                        nv_wo32(gpuobj, i, 0);
                dev_priv->engine.instmem.flush(dev);
        }

        spin_lock(&dev_priv->ramin_lock);
        list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
        spin_unlock(&dev_priv->ramin_lock);

        *pgpuobj = gpuobj;
        return 0;
}
/*
   DMA objects are used to reference a piece of memory in the
   framebuffer, PCI or AGP address space. Each object is 16 bytes big
   and looks as follows:

   entry[0]
   11:0  class (seems like I can always use 0 here)
   12    page table present?
   13    page entry linear?
   15:14 access: 0 rw, 1 ro, 2 wo
   17:16 target: 0 NV memory, 1 NV memory tiled, 2 PCI, 3 AGP
   31:20 dma adjust (bits 0-11 of the address)
   entry[1]
   dma limit (size of transfer)
   entry[X]
   1     0 = readonly, 1 = readwrite
   31:12 dma frame address of the page (bits 12-31 of the address)
   entry[N]
   page table terminator; nvidia uses the same value as the first pte,
   rivatv uses 0xffffffff

   Non-linear page tables need a list of frame addresses afterwards;
   the rivatv project has some info on this.

   The method below creates a DMA object in instance RAM and returns a handle
   to it that can be used to set up context objects.
*/
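
/*
 * Worked example of the pre-NV50 layout above, packed the same way
 * nouveau_gpuobj_dma_new() does further down.  The values are assumptions
 * chosen for illustration only (class 0x3d, PCI target, read/write
 * access, base 0x12345678):
 */
#if 0
static void
example_nv04_dma_entries(u32 *flags0, u32 *flags2)
{
        u64 base = 0x12345678;

        *flags0  = 0x3d;                        /* 11:0  class */
        *flags0 |= 0x00003000;                  /* PT present, PT linear */
        *flags0 |= 0x00020000;                  /* 17:16 target: PCI */
        *flags0 |= (base & 0x00000fff) << 20;   /* 31:20 dma adjust */

        *flags2  = 0x00000002;                  /* pte: readwrite */
        *flags2 |= (base & 0xfffff000);         /* 31:12 frame address */

        /* yields flags0 = 0x6782303d, flags2 = 0x12345002; the limit
         * (size - 1) goes in entry[1]. */
}
#endif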
void
nv50_gpuobj_dma_init(struct nouveau_gpuobj *obj, u32 offset, int class,
                     u64 base, u64 size, int target, int access,
                     u32 type, u32 comp)
{
        struct drm_nouveau_private *dev_priv = obj->dev->dev_private;
        struct nouveau_instmem_engine *pinstmem = &dev_priv->engine.instmem;
        u32 flags0;

        flags0  = (comp << 29) | (type << 22) | class;
        flags0 |= 0x00100000;

        switch (access) {
        case NV_MEM_ACCESS_RO: flags0 |= 0x00040000; break;
        case NV_MEM_ACCESS_RW:
        case NV_MEM_ACCESS_WO: flags0 |= 0x00080000; break;
        default:
                break;
        }

        switch (target) {
        case NV_MEM_TARGET_VRAM:
                flags0 |= 0x00010000;
                break;
        case NV_MEM_TARGET_PCI:
                flags0 |= 0x00020000;
                break;
        case NV_MEM_TARGET_PCI_NOSNOOP:
                flags0 |= 0x00030000;
                break;
        case NV_MEM_TARGET_GART:
                base += dev_priv->gart_info.aper_base;
                /* fall through */
        default:
                flags0 &= ~0x00100000;
                break;
        }

        /* convert to base + limit */
        size = (base + size) - 1;

        nv_wo32(obj, offset + 0x00, flags0);
        nv_wo32(obj, offset + 0x04, lower_32_bits(size));
        nv_wo32(obj, offset + 0x08, lower_32_bits(base));
        nv_wo32(obj, offset + 0x0c, upper_32_bits(size) << 24 |
                                    upper_32_bits(base));
        nv_wo32(obj, offset + 0x10, 0x00000000);
        nv_wo32(obj, offset + 0x14, 0x00000000);

        pinstmem->flush(obj->dev);
}

int
nv50_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base, u64 size,
                    int target, int access, u32 type, u32 comp,
                    struct nouveau_gpuobj **pobj)
{
        struct drm_device *dev = chan->dev;
        int ret;

        ret = nouveau_gpuobj_new(dev, chan, 24, 16, NVOBJ_FLAG_ZERO_FREE, pobj);
        if (ret)
                return ret;

        nv50_gpuobj_dma_init(*pobj, 0, class, base, size, target,
                             access, type, comp);
        return 0;
}
int
nouveau_gpuobj_dma_new(struct nouveau_channel *chan, int class, u64 base,
                       u64 size, int access, int target,
                       struct nouveau_gpuobj **pobj)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct drm_device *dev = chan->dev;
        struct nouveau_gpuobj *obj;
        u32 flags0, flags2;
        int ret;

        if (dev_priv->card_type >= NV_50) {
                u32 comp = (target == NV_MEM_TARGET_VM) ? NV_MEM_COMP_VM : 0;
                u32 type = (target == NV_MEM_TARGET_VM) ? NV_MEM_TYPE_VM : 0;

                return nv50_gpuobj_dma_new(chan, class, base, size,
                                           target, access, type, comp, pobj);
        }

        if (target == NV_MEM_TARGET_GART) {
                struct nouveau_gpuobj *gart = dev_priv->gart_info.sg_ctxdma;

                if (dev_priv->gart_info.type == NOUVEAU_GART_PDMA) {
                        if (base == 0) {
                                nouveau_gpuobj_ref(gart, pobj);
                                return 0;
                        }

                        base = nouveau_sgdma_get_physical(dev, base);
                        target = NV_MEM_TARGET_PCI;
                } else {
                        base += dev_priv->gart_info.aper_base;
                        if (dev_priv->gart_info.type == NOUVEAU_GART_AGP)
                                target = NV_MEM_TARGET_PCI_NOSNOOP;
                        else
                                target = NV_MEM_TARGET_PCI;
                }
        }

        flags0  = class;
        flags0 |= 0x00003000; /* PT present, PT linear */
        flags2  = 0;

        switch (target) {
        case NV_MEM_TARGET_PCI:
                flags0 |= 0x00020000;
                break;
        case NV_MEM_TARGET_PCI_NOSNOOP:
                flags0 |= 0x00030000;
                break;
        default:
                break;
        }

        switch (access) {
        case NV_MEM_ACCESS_RO:
                flags0 |= 0x00004000;
                break;
        case NV_MEM_ACCESS_WO:
                flags0 |= 0x00008000;
                /* fall through */
        default:
                flags2 |= 0x00000002;
                break;
        }

        flags0 |= (base & 0x00000fff) << 20;
        flags2 |= (base & 0xfffff000);

        ret = nouveau_gpuobj_new(dev, chan, 16, 16, NVOBJ_FLAG_ZERO_FREE, &obj);
        if (ret)
                return ret;

        nv_wo32(obj, 0x00, flags0);
        nv_wo32(obj, 0x04, size - 1);
        nv_wo32(obj, 0x08, flags2);
        nv_wo32(obj, 0x0c, flags2);

        obj->engine = NVOBJ_ENGINE_SW;
        obj->class  = class;
        *pobj = obj;
        return 0;
}
/* Context objects in the instance RAM have the following structure.
 * On NV40 they are 32 bytes long, on NV30 and smaller 16 bytes.

   NV4 - NV30:

   entry[0]
   11:0 class
   12   chroma key enable
   13   user clip enable
   14   swizzle enable
   17:15 patch config:
       scrcopy_and, rop_and, blend_and, scrcopy, srccopy_pre, blend_pre
   18   synchronize enable
   19   endian: 1 big, 0 little
   21:20 dither mode
   23   single step enable
   24   patch status: 0 invalid, 1 valid
   25   context_surface 0: 1 valid
   26   context surface 1: 1 valid
   27   context pattern: 1 valid
   28   context rop: 1 valid
   29,30 context beta, beta4
   entry[1]
   7:0   mono format
   15:8  color format
   31:16 notify instance address
   entry[2]
   15:0  dma 0 instance address
   31:16 dma 1 instance address
   entry[3]
   dma method traps

   NV40:
   No idea what the exact format is. Here's what can be deduced:

   entry[0]:
   11:0  class (maybe uses more bits here?)
   17    user clip enable
   21:19 patch config
   25    patch status valid ?
   entry[1]:
   15:0  DMA notifier (maybe 20:0)
   entry[2]:
   15:0  DMA 0 instance (maybe 20:0)
   24    big endian
   entry[3]:
   15:0  DMA 1 instance (maybe 20:0)
   entry[4]:
   entry[5]:
   set to 0?
*/
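
/*
 * Illustrative sketch of entry[0] from the NV4-NV30 layout above, with
 * only the class and "patch status: valid" fields set.  This is an
 * assumption-laden example; the per-engine object_new() hooks build the
 * real entries.
 */
#if 0
static u32
example_nv04_ctxobj_entry0(u32 class)
{
        u32 entry0;

        entry0  = class & 0xfff;        /* 11:0 class */
        entry0 |= 1 << 24;              /* 24   patch status: valid */
        return entry0;                  /* e.g. class 0x5f -> 0x0100005f */
}
#endif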
static int
nouveau_gpuobj_sw_new(struct nouveau_channel *chan, u32 handle, u16 class)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct nouveau_gpuobj *gpuobj;
        int ret;

        gpuobj = kzalloc(sizeof(*gpuobj), GFP_KERNEL);
        if (!gpuobj)
                return -ENOMEM;
        gpuobj->dev = chan->dev;
        gpuobj->engine = NVOBJ_ENGINE_SW;
        gpuobj->class = class;
        kref_init(&gpuobj->refcount);
        gpuobj->cinst = 0x40;

        spin_lock(&dev_priv->ramin_lock);
        list_add_tail(&gpuobj->list, &dev_priv->gpuobj_list);
        spin_unlock(&dev_priv->ramin_lock);

        ret = nouveau_ramht_insert(chan, handle, gpuobj);
        nouveau_gpuobj_ref(NULL, &gpuobj);
        return ret;
}

int
nouveau_gpuobj_gr_new(struct nouveau_channel *chan, u32 handle, int class)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct drm_device *dev = chan->dev;
        struct nouveau_gpuobj_class *oc;
        int ret;

        NV_DEBUG(dev, "ch%d class=0x%04x\n", chan->id, class);

        list_for_each_entry(oc, &dev_priv->classes, head) {
                struct nouveau_exec_engine *eng = dev_priv->eng[oc->engine];

                if (oc->id != class)
                        continue;

                if (oc->engine == NVOBJ_ENGINE_SW)
                        return nouveau_gpuobj_sw_new(chan, handle, class);

                if (!chan->engctx[oc->engine]) {
                        ret = eng->context_new(chan, oc->engine);
                        if (ret)
                                return ret;
                }

                return eng->object_new(chan, oc->engine, handle, class);
        }

        NV_ERROR(dev, "illegal object class: 0x%x\n", class);
        return -EINVAL;
}

static int
nouveau_gpuobj_channel_init_pramin(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        uint32_t size;
        uint32_t base;
        int ret;

        NV_DEBUG(dev, "ch%d\n", chan->id);

        /* Base amount for object storage (4KiB enough?) */
        size = 0x2000;
        base = 0;

        if (dev_priv->card_type == NV_50) {
                /* Various fixed table thingos */
                size += 0x1400; /* mostly unknown stuff */
                size += 0x4000; /* vm pd */
                base  = 0x6000;
                /* RAMHT, not sure about setting size yet, 32KiB to be safe */
                size += 0x8000;
                /* RAMFC */
                size += 0x1000;
        }

        ret = nouveau_gpuobj_new(dev, NULL, size, 0x1000, 0, &chan->ramin);
        if (ret) {
                NV_ERROR(dev, "Error allocating channel PRAMIN: %d\n", ret);
                return ret;
        }

        ret = drm_mm_init(&chan->ramin_heap, base, size);
        if (ret) {
                NV_ERROR(dev, "Error creating PRAMIN heap: %d\n", ret);
                nouveau_gpuobj_ref(NULL, &chan->ramin);
                return ret;
        }

        return 0;
}
int
nouveau_gpuobj_channel_init(struct nouveau_channel *chan,
                            uint32_t vram_h, uint32_t tt_h)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fpriv *fpriv = nouveau_fpriv(chan->file_priv);
        struct nouveau_vm *vm = fpriv ? fpriv->vm : dev_priv->chan_vm;
        struct nouveau_gpuobj *vram = NULL, *tt = NULL;
        int ret, i;

        NV_DEBUG(dev, "ch%d vram=0x%08x tt=0x%08x\n", chan->id, vram_h, tt_h);

        if (dev_priv->card_type == NV_C0) {
                struct nouveau_vm_pgd *vpgd;

                ret = nouveau_gpuobj_new(dev, NULL, 4096, 0x1000, 0,
                                         &chan->ramin);
                if (ret)
                        return ret;

                nouveau_vm_ref(vm, &chan->vm, NULL);

                vpgd = list_first_entry(&vm->pgd_list, struct nouveau_vm_pgd,
                                        head);
                nv_wo32(chan->ramin, 0x0200, lower_32_bits(vpgd->obj->vinst));
                nv_wo32(chan->ramin, 0x0204, upper_32_bits(vpgd->obj->vinst));
                nv_wo32(chan->ramin, 0x0208, 0xffffffff);
                nv_wo32(chan->ramin, 0x020c, 0x000000ff);
                return 0;
        }

        /* Allocate a chunk of memory for per-channel object storage */
        ret = nouveau_gpuobj_channel_init_pramin(chan);
        if (ret) {
                NV_ERROR(dev, "init pramin\n");
                return ret;
        }

        /* NV50 VM
         *  - Allocate per-channel page-directory
         *  - Link with shared channel VM
         */
        if (vm) {
                u32 pgd_offs = (dev_priv->chipset == 0x50) ? 0x1400 : 0x0200;
                u64 vm_vinst = chan->ramin->vinst + pgd_offs;
                u32 vm_pinst = chan->ramin->pinst;

                if (vm_pinst != ~0)
                        vm_pinst += pgd_offs;

                ret = nouveau_gpuobj_new_fake(dev, vm_pinst, vm_vinst, 0x4000,
                                              0, &chan->vm_pd);
                if (ret)
                        return ret;

                nouveau_vm_ref(vm, &chan->vm, chan->vm_pd);
        }

        /* RAMHT */
        if (dev_priv->card_type < NV_50) {
                nouveau_ramht_ref(dev_priv->ramht, &chan->ramht, NULL);
        } else {
                struct nouveau_gpuobj *ramht = NULL;

                ret = nouveau_gpuobj_new(dev, chan, 0x8000, 16,
                                         NVOBJ_FLAG_ZERO_ALLOC, &ramht);
                if (ret)
                        return ret;

                ret = nouveau_ramht_new(dev, ramht, &chan->ramht);
                nouveau_gpuobj_ref(NULL, &ramht);
                if (ret)
                        return ret;

                /* dma objects for display sync channel semaphore blocks */
                for (i = 0; i < 2; i++) {
                        struct nouveau_gpuobj *sem = NULL;
                        struct nv50_display_crtc *dispc =
                                &nv50_display(dev)->crtc[i];
                        u64 offset = dispc->sem.bo->bo.mem.start << PAGE_SHIFT;

                        ret = nouveau_gpuobj_dma_new(chan, 0x3d, offset, 0xfff,
                                                     NV_MEM_ACCESS_RW,
                                                     NV_MEM_TARGET_VRAM, &sem);
                        if (ret)
                                return ret;

                        ret = nouveau_ramht_insert(chan, NvEvoSema0 + i, sem);
                        nouveau_gpuobj_ref(NULL, &sem);
                        if (ret)
                                return ret;
                }
        }

        /* VRAM ctxdma */
        if (dev_priv->card_type >= NV_50) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             0, (1ULL << 40), NV_MEM_ACCESS_RW,
                                             NV_MEM_TARGET_VM, &vram);
                if (ret) {
                        NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
                        return ret;
                }
        } else {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             0, dev_priv->fb_available_size,
                                             NV_MEM_ACCESS_RW,
                                             NV_MEM_TARGET_VRAM, &vram);
                if (ret) {
                        NV_ERROR(dev, "Error creating VRAM ctxdma: %d\n", ret);
                        return ret;
                }
        }

        ret = nouveau_ramht_insert(chan, vram_h, vram);
        nouveau_gpuobj_ref(NULL, &vram);
        if (ret) {
                NV_ERROR(dev, "Error adding VRAM ctxdma to RAMHT: %d\n", ret);
                return ret;
        }

        /* TT memory ctxdma */
        if (dev_priv->card_type >= NV_50) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             0, (1ULL << 40), NV_MEM_ACCESS_RW,
                                             NV_MEM_TARGET_VM, &tt);
        } else {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             0, dev_priv->gart_info.aper_size,
                                             NV_MEM_ACCESS_RW,
                                             NV_MEM_TARGET_GART, &tt);
        }

        if (ret) {
                NV_ERROR(dev, "Error creating TT ctxdma: %d\n", ret);
                return ret;
        }

        ret = nouveau_ramht_insert(chan, tt_h, tt);
        nouveau_gpuobj_ref(NULL, &tt);
        if (ret) {
                NV_ERROR(dev, "Error adding TT ctxdma to RAMHT: %d\n", ret);
                return ret;
        }

        return 0;
}
void
nouveau_gpuobj_channel_takedown(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;

        NV_DEBUG(dev, "ch%d\n", chan->id);

        nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
        nouveau_gpuobj_ref(NULL, &chan->vm_pd);

        if (drm_mm_initialized(&chan->ramin_heap))
                drm_mm_takedown(&chan->ramin_heap);
        nouveau_gpuobj_ref(NULL, &chan->ramin);
}

int
nouveau_gpuobj_suspend(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj;
        int i;

        list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
                if (gpuobj->cinst != NVOBJ_CINST_GLOBAL)
                        continue;

                gpuobj->suspend = vmalloc(gpuobj->size);
                if (!gpuobj->suspend) {
                        nouveau_gpuobj_resume(dev);
                        return -ENOMEM;
                }

                for (i = 0; i < gpuobj->size; i += 4)
                        gpuobj->suspend[i/4] = nv_ro32(gpuobj, i);
        }

        return 0;
}

void
nouveau_gpuobj_resume(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj;
        int i;

        list_for_each_entry(gpuobj, &dev_priv->gpuobj_list, list) {
                if (!gpuobj->suspend)
                        continue;

                for (i = 0; i < gpuobj->size; i += 4)
                        nv_wo32(gpuobj, i, gpuobj->suspend[i/4]);

                vfree(gpuobj->suspend);
                gpuobj->suspend = NULL;
        }

        dev_priv->engine.instmem.flush(dev);
}

int nouveau_ioctl_grobj_alloc(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_nouveau_grobj_alloc *init = data;
        struct nouveau_channel *chan;
        int ret;

        if (init->handle == ~0)
                return -EINVAL;

        chan = nouveau_channel_get(file_priv, init->channel);
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        if (nouveau_ramht_find(chan, init->handle)) {
                ret = -EEXIST;
                goto out;
        }

        ret = nouveau_gpuobj_gr_new(chan, init->handle, init->class);
        if (ret) {
                NV_ERROR(dev, "Error creating object: %d (%d/0x%08x)\n",
                         ret, init->channel, init->handle);
        }

out:
        nouveau_channel_put(&chan);
        return ret;
}

int nouveau_ioctl_gpuobj_free(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_nouveau_gpuobj_free *objfree = data;
        struct nouveau_channel *chan;
        int ret;

        chan = nouveau_channel_get(file_priv, objfree->channel);
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        /* Synchronize with the user channel */
        nouveau_channel_idle(chan);

        ret = nouveau_ramht_remove(chan, objfree->handle);
        nouveau_channel_put(&chan);
        return ret;
}
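
/* The accessors below take a slow path when an object's instance memory
 * has no PRAMIN BAR mapping (pinst == ~0) or while PRAMIN is unavailable:
 * register 0x001700 selects which 64KiB page of instance memory appears
 * in the window at 0x700000, so the code re-points the window whenever
 * the target page changes and then performs a plain MMIO access at the
 * offset within that window, all under the vm_lock.
 */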
u32
nv_ro32(struct nouveau_gpuobj *gpuobj, u32 offset)
{
        struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
        struct drm_device *dev = gpuobj->dev;
        unsigned long flags;

        if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
                u64 ptr = gpuobj->vinst + offset;
                u32 base = ptr >> 16;
                u32 val;

                spin_lock_irqsave(&dev_priv->vm_lock, flags);
                if (dev_priv->ramin_base != base) {
                        dev_priv->ramin_base = base;
                        nv_wr32(dev, 0x001700, dev_priv->ramin_base);
                }
                val = nv_rd32(dev, 0x700000 + (ptr & 0xffff));
                spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
                return val;
        }

        return nv_ri32(dev, gpuobj->pinst + offset);
}

void
nv_wo32(struct nouveau_gpuobj *gpuobj, u32 offset, u32 val)
{
        struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private;
        struct drm_device *dev = gpuobj->dev;
        unsigned long flags;

        if (gpuobj->pinst == ~0 || !dev_priv->ramin_available) {
                u64 ptr = gpuobj->vinst + offset;
                u32 base = ptr >> 16;

                spin_lock_irqsave(&dev_priv->vm_lock, flags);
                if (dev_priv->ramin_base != base) {
                        dev_priv->ramin_base = base;
                        nv_wr32(dev, 0x001700, dev_priv->ramin_base);
                }
                nv_wr32(dev, 0x700000 + (ptr & 0xffff), val);
                spin_unlock_irqrestore(&dev_priv->vm_lock, flags);
                return;
        }

        nv_wi32(dev, gpuobj->pinst + offset, val);
}