nouveau_bo.c

/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *          Ben Skeggs   <darktama@iinet.net.au>
 *          Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include <core/engine.h>

#include <subdev/fb.h>
#include <subdev/vm.h>
#include <subdev/bar.h>

#include "nouveau_drm.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

#include "nouveau_bo.h"
#include "nouveau_ttm.h"
#include "nouveau_gem.h"

/*
 * NV10-NV40 tiling helpers
 */

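/*
 * Background (a summary of the helpers below): pre-NV50 hardware exposes a
 * small fixed pool of tiling regions in the framebuffer unit
 * (pfb->tile.region[]).  A region is borrowed from the pool, programmed
 * with a buffer's address/size/pitch, and only recycled once the fence
 * guarding its last user has signalled.
 */
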
static void
nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
			   u32 addr, u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	int i = reg - drm->tile.reg;
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct nouveau_fb_tile *tile = &pfb->tile.region[i];
	struct nouveau_engine *engine;

	nouveau_fence_unref(&reg->fence);

	if (tile->pitch)
		pfb->tile.fini(pfb, i, tile);

	if (pitch)
		pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);

	pfb->tile.prog(pfb, i, tile);

	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
		engine->tile_prog(engine, i);
	if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
		engine->tile_prog(engine, i);
}

static struct nouveau_drm_tile *
nv10_bo_get_tile_region(struct drm_device *dev, int i)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_drm_tile *tile = &drm->tile.reg[i];

	spin_lock(&drm->tile.lock);

	if (!tile->used &&
	    (!tile->fence || nouveau_fence_done(tile->fence)))
		tile->used = true;
	else
		tile = NULL;

	spin_unlock(&drm->tile.lock);
	return tile;
}

static void
nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
			struct nouveau_fence *fence)
{
	struct nouveau_drm *drm = nouveau_drm(dev);

	if (tile) {
		spin_lock(&drm->tile.lock);
		if (fence) {
			/* Mark it as pending. */
			tile->fence = fence;
			nouveau_fence_ref(fence);
		}

		tile->used = false;
		spin_unlock(&drm->tile.lock);
	}
}

static struct nouveau_drm_tile *
nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
		   u32 size, u32 pitch, u32 flags)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	struct nouveau_drm_tile *tile, *found = NULL;
	int i;

	for (i = 0; i < pfb->tile.regions; i++) {
		tile = nv10_bo_get_tile_region(dev, i);

		if (pitch && !found) {
			found = tile;
			continue;

		} else if (tile && pfb->tile.region[i].pitch) {
			/* Kill an unused tile region. */
			nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
		}

		nv10_bo_put_tile_region(dev, tile, NULL);
	}

	if (found)
		nv10_bo_update_tile_region(dev, found, addr, size,
					   pitch, flags);
	return found;
}

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);
	nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}

static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
		       int *align, int *size)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nouveau_device *device = nv_device(drm->device);

	if (device->card_type < NV_50) {
		if (nvbo->tile_mode) {
			if (device->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * nvbo->tile_mode);
			} else if (device->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * nvbo->tile_mode);
			} else if (device->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * nvbo->tile_mode);
			} else if (device->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * nvbo->tile_mode);
			}
		}
	} else {
		*size = roundup(*size, (1 << nvbo->page_shift));
		*align = max((1 << nvbo->page_shift), *align);
	}

	*size = roundup(*size, PAGE_SIZE);
}

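/*
 * Worked example (values assumed for illustration): on a chipset >= 0x40
 * with tile_mode = 4, a 100 KiB request comes out as *align = 65536 and
 * *size = roundup(102400, 64 * 4) = 102400, which is already a multiple
 * of PAGE_SIZE.  On NV_50+ the large-page path applies instead: size and
 * alignment both snap to 1 << page_shift.
 */
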
int
nouveau_bo_new(struct drm_device *dev, int size, int align,
	       uint32_t flags, uint32_t tile_mode, uint32_t tile_flags,
	       struct sg_table *sg,
	       struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	size_t acc_size;
	int ret;
	int type = ttm_bo_type_device;

	if (sg)
		type = ttm_bo_type_sg;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	INIT_LIST_HEAD(&nvbo->vma_list);
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &drm->ttm.bdev;

	nvbo->page_shift = 12;
	if (drm->client.base.vm) {
		if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
			nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift;
	}

	nouveau_bo_fixup_align(nvbo, flags, &align, &size);
	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
				       sizeof(struct nouveau_bo));

	ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
			  type, &nvbo->placement,
			  align >> PAGE_SHIFT, false, NULL, acc_size, sg,
			  nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	*pnvbo = nvbo;
	return 0;
}

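/*
 * Minimal usage sketch (assumed caller, illustration only):
 *
 *	struct nouveau_bo *nvbo = NULL;
 *	int ret = nouveau_bo_new(dev, 64 * 1024, 0, TTM_PL_FLAG_VRAM,
 *				 0, 0, NULL, &nvbo);
 *
 * Note that on failure ttm_bo_init() has already invoked
 * nouveau_bo_del_ttm(), so the caller must not free nvbo again.
 */
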
static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct nouveau_fb *pfb = nouveau_fb(drm->device);
	u32 vram_pages = pfb->ram.size >> PAGE_SHIFT;

	if (nv_device(drm->device)->card_type == NV_10 &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
	    nvbo->bo.mem.num_pages < vram_pages / 4) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}

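/*
 * Example (illustrative): nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM,
 * TTM_PL_FLAG_TT) produces placements = { VRAM } and busy_placements =
 * { VRAM, TT }, i.e. under memory pressure TTM may satisfy the buffer
 * from GART rather than stall waiting for VRAM.
 */
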
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available -= bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (unlikely(ret))
		nvbo->pin_refcnt--;
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = nouveau_bo_validate(nvbo, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			drm->gem.vram_available += bo->mem.size;
			break;
		case TTM_PL_TT:
			drm->gem.gart_available += bo->mem.size;
			break;
		default:
			break;
		}
	}

	ttm_bo_unreserve(bo);
	return ret;
}

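/*
 * Pinning is refcounted: only the first nouveau_bo_pin() validates the
 * buffer into the requested memory type (with TTM_PL_FLAG_NO_EVICT set
 * via placement_set), and only the final nouveau_bo_unpin() makes it
 * evictable again.  Callers are expected to pair the two (sketch,
 * assumed caller):
 *
 *	ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
 *	...
 *	nouveau_bo_unpin(nvbo);
 */
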
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}

int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
		    bool no_wait_reserve, bool no_wait_gpu)
{
	return ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
			       no_wait_reserve, no_wait_gpu);
}

u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

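/*
 * The rd/wr helpers above require a prior nouveau_bo_map().  Illustrative
 * sketch (assumed caller): zero-fill a mapped buffer one word at a time.
 *
 *	for (i = 0; i < nvbo->bo.mem.num_pages * PAGE_SIZE / 4; i++)
 *		nouveau_bo_wr32(nvbo, i, 0x00000000);
 */
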
static struct ttm_tt *
nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
		      uint32_t page_flags, struct page *dummy_read)
{
#if __OS_HAS_AGP
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct drm_device *dev = drm->dev;

	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
					 page_flags, dummy_read);
	}
#endif

	return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		if (nv_device(drm->device)->card_type >= NV_50) {
			man->func = &nouveau_vram_manager;
			man->io_reserve_fastpath = false;
			man->use_io_reserve_lru = true;
		} else {
			man->func = &ttm_bo_manager_func;
		}
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		break;
	case TTM_PL_TT:
		if (nv_device(drm->device)->card_type >= NV_50)
			man->func = &nouveau_gart_manager;
		else if (drm->agp.stat != ENABLED)
			man->func = &nv04_gart_manager;
		else
			man->func = &ttm_bo_manager_func;

		if (drm->agp.stat == ENABLED) {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
		} else {
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_reserve, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, evict,
					no_wait_reserve, no_wait_gpu, new_mem);
	nouveau_fence_unref(&fence);
	return ret;
}

static int
nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
		FIRE_RING (chan);
	}
	return ret;
}

static int
nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 10);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, new_mem->num_pages);
		BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386);
	}
	return ret;
}

static int
nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 2);
	if (ret == 0) {
		BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
	}
	return ret;
}

static int
nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 12);
		if (ret)
			return ret;

		BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00100110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

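/*
 * Both nvc0 paths above stream the buffer as "lines" of one page each:
 * pitch and line length are PAGE_SIZE, so line_count pages move per
 * submission, capped by what the class's line-count method accepts
 * (8191 for the copy-engine classes, 2047 for M2MF).  E.g. a 4000-page
 * buffer is one burst on the copy engines but two M2MF submissions.
 */
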
static int
nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	u32 page_count = new_mem->num_pages;
	int ret;

	while (page_count) {
		int line_count = (page_count > 8191) ? 8191 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, PAGE_SIZE);
		OUT_RING  (chan, line_count);
		BEGIN_NV04(chan, NvSubCopy, 0x0300, 1);
		OUT_RING  (chan, 0x00000110);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0320, 6);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, 0x00000000 /* COPY */);
		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
	}
	return ret;
}

static int
nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	int ret = RING_SPACE(chan, 7);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0304, 6);
		OUT_RING  (chan, new_mem->num_pages << PAGE_SHIFT);
		OUT_RING  (chan, upper_32_bits(node->vma[0].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[0].offset));
		OUT_RING  (chan, upper_32_bits(node->vma[1].offset));
		OUT_RING  (chan, lower_32_bits(node->vma[1].offset));
		OUT_RING  (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */);
	}
	return ret;
}

static int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 6);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
		OUT_RING  (chan, NvNotify0);
		OUT_RING  (chan, NvDmaFB);
		OUT_RING  (chan, NvDmaFB);
	}
	return ret;
}

static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct nouveau_mem *node = old_mem->mm_node;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset = node->vma[0].offset;
	u64 dst_offset = node->vma[1].offset;
	int ret;

	while (length) {
		u32 amount, stride, height;

		amount = min(length, (u64)(4 * 1024 * 1024));
		stride = 16 * 4;
		height = amount / stride;

		if (new_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
			OUT_RING  (chan, 1);
		}
		if (old_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, stride);
			OUT_RING  (chan, height);
			OUT_RING  (chan, 1);
			OUT_RING  (chan, 0);
			OUT_RING  (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
			OUT_RING  (chan, 1);
		}

		ret = RING_SPACE(chan, 14);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
		OUT_RING  (chan, upper_32_bits(src_offset));
		OUT_RING  (chan, upper_32_bits(dst_offset));
		BEGIN_NV04(chan, NvSubCopy, 0x030c, 8);
		OUT_RING  (chan, lower_32_bits(src_offset));
		OUT_RING  (chan, lower_32_bits(dst_offset));
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, stride);
		OUT_RING  (chan, height);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

static int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
	int ret = RING_SPACE(chan, 4);
	if (ret == 0) {
		BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
		OUT_RING  (chan, handle);
		BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
		OUT_RING  (chan, NvNotify0);
	}
	return ret;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	if (mem->mem_type == TTM_PL_TT)
		return NvDmaTT;
	return NvDmaFB;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubCopy,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING  (chan, src_offset);
		OUT_RING  (chan, dst_offset);
		OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING  (chan, PAGE_SIZE); /* line_length */
		OUT_RING  (chan, line_count);
		OUT_RING  (chan, 0x00000101);
		OUT_RING  (chan, 0x00000000);
		BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING  (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
		   struct ttm_mem_reg *mem, struct nouveau_vma *vma)
{
	struct nouveau_mem *node = mem->mm_node;
	int ret;

	ret = nouveau_vm_get(nv_client(chan->cli)->vm, mem->num_pages <<
			     PAGE_SHIFT, node->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (mem->mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, node);
	else
		nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);

	return 0;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_reserve, bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_channel *chan = drm->channel;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	mutex_lock(&chan->cli->mutex);

	/* create temporary vmas for the transfer and attach them to the
	 * old nouveau_mem node, these will get cleaned up after ttm has
	 * destroyed the ttm_mem_reg
	 */
	if (nv_device(drm->device)->card_type >= NV_50) {
		struct nouveau_mem *node = old_mem->mm_node;

		ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
		if (ret)
			goto out;

		ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
		if (ret)
			goto out;
	}

	ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
	if (ret == 0) {
		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
						    no_wait_reserve,
						    no_wait_gpu, new_mem);
	}

out:
	mutex_unlock(&chan->cli->mutex);
	return ret;
}

void
nouveau_bo_move_init(struct nouveau_drm *drm)
{
	static const struct {
		const char *name;
		int engine;
		u32 oclass;
		int (*exec)(struct nouveau_channel *,
			    struct ttm_buffer_object *,
			    struct ttm_mem_reg *, struct ttm_mem_reg *);
		int (*init)(struct nouveau_channel *, u32 handle);
	} _methods[] = {
		{  "COPY", 0, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
		{  "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
		{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
		{  "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
		{ "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init },
		{  "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init },
		{  "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init },
		{  "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init },
		/* empty entry terminates the probe loop below; the entry
		 * after it is never reached, leaving the nv98 CRYPT copy
		 * path disabled */
		{},
		{ "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init },
	}, *mthd = _methods;
	const char *name = "CPU";
	int ret;

	do {
		struct nouveau_object *object;
		struct nouveau_channel *chan;
		u32 handle = (mthd->engine << 16) | mthd->oclass;

		if (mthd->init == nve0_bo_move_init)
			chan = drm->cechan;
		else
			chan = drm->channel;
		if (chan == NULL)
			continue;

		ret = nouveau_object_new(nv_object(drm), chan->handle, handle,
					 mthd->oclass, NULL, 0, &object);
		if (ret == 0) {
			ret = mthd->init(chan, handle);
			if (ret) {
				nouveau_object_del(nv_object(drm),
						   chan->handle, handle);
				continue;
			}

			drm->ttm.move = mthd->exec;
			name = mthd->name;
			break;
		}
	} while ((++mthd)->exec);

	NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}

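/*
 * The method table is ordered newest to oldest and the probe loop keeps
 * the first class the channel can instantiate: a Kepler board lands on
 * 0xa0b5, while an NV04 falls through to plain M2MF (0x0039).  If no
 * suitable channel exists, drm->ttm.move stays NULL and nouveau_bo_move()
 * falls back to ttm_bo_move_memcpy(), reported as "CPU" above.
 */
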
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr,
			       no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve,
				   no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr,
			       no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve,
				   no_wait_gpu, new_mem);

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

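/*
 * The copy engines can only address VRAM and TT, so moves involving
 * system memory bounce through a temporary TT placement: "flipd" handles
 * VRAM -> TT -> SYSTEM (engine copy first, then flip the ttm_tt binding),
 * while "flips" handles SYSTEM -> TT -> VRAM in the reverse order.
 */
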
static void
nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_vma *vma;

	/* ttm can now (stupidly) pass the driver bos it didn't create... */
	if (bo->destroy != nouveau_bo_del_ttm)
		return;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
			nouveau_vm_map(vma, new_mem->mm_node);
		} else if (new_mem && new_mem->mem_type == TTM_PL_TT &&
			   nvbo->page_shift == vma->vm->vmm->spg_shift) {
			if (((struct nouveau_mem *)new_mem->mm_node)->sg)
				nouveau_vm_map_sg_table(vma, 0,
					new_mem->num_pages << PAGE_SHIFT,
					new_mem->mm_node);
			else
				nouveau_vm_map_sg(vma, 0,
					new_mem->num_pages << PAGE_SHIFT,
					new_mem->mm_node);
		} else {
			nouveau_vm_unmap(vma);
		}
	}
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_drm_tile **new_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 offset = new_mem->start << PAGE_SHIFT;

	*new_tile = NULL;
	if (new_mem->mem_type != TTM_PL_VRAM)
		return 0;

	if (nv_device(drm->device)->card_type >= NV_10) {
		*new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
					       nvbo->tile_mode,
					       nvbo->tile_flags);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_drm_tile *new_tile,
		      struct nouveau_drm_tile **old_tile)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct drm_device *dev = drm->dev;

	nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
	*old_tile = new_tile;
}

static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_reserve, bool no_wait_gpu,
		struct ttm_mem_reg *new_mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_drm_tile *new_tile = NULL;
	int ret = 0;

	if (nv_device(drm->device)->card_type < NV_50) {
		ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
		if (ret)
			return ret;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* CPU copy if we have no accelerated method available */
	if (!drm->ttm.move) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve,
					 no_wait_gpu, new_mem);
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve,
					    no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve,
					    no_wait_gpu, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve,
					   no_wait_gpu, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu,
				 new_mem);

out:
	if (nv_device(drm->device)->card_type < NV_50) {
		if (ret)
			nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
		else
			nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);
	}

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct drm_device *dev = drm->dev;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (drm->agp.stat == ENABLED) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = drm->agp.base;
			mem->bus.is_iomem = true;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
		if (nv_device(drm->device)->card_type >= NV_50) {
			struct nouveau_bar *bar = nouveau_bar(drm->device);
			struct nouveau_mem *node = mem->mm_node;

			ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
					&node->bar_vma);
			if (ret)
				return ret;

			mem->bus.offset = node->bar_vma.offset;
		}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

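/*
 * On NV50+ VRAM is not CPU-visible at its linear offset; the object is
 * mapped through BAR1 (PCI resource 1) with bar->umap() and the bar_vma
 * offset replaces the linear one.  nouveau_ttm_io_mem_free() below
 * undoes that mapping.
 */
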
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct nouveau_drm *drm = nouveau_bdev(bdev);
	struct nouveau_bar *bar = nouveau_bar(drm->device);
	struct nouveau_mem *node = mem->mm_node;

	if (!node->bar_vma.node)
		return;

	bar->unmap(bar, &node->bar_vma);
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_device *device = nv_device(drm->device);
	u32 mappable = pci_resource_len(device->pdev, 1) >> PAGE_SHIFT;

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (nv_device(drm->device)->card_type < NV_50 ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;
	}

	/* make sure bo is in mappable vram */
	if (bo->mem.start + bo->mem.num_pages < mappable)
		return 0;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = mappable;
	nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
	return nouveau_bo_validate(nvbo, false, true, false);
}

static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct drm_device *dev;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (slave && ttm->sg) {
		/* make userspace faulting work */
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 ttm_dma->dma_address,
						 ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	drm = nouveau_bdev(ttm->bdev);
	dev = drm->dev;

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate((void *)ttm, dev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
			/* unwind every mapping made so far; "while (i--)"
			 * rather than "while (--i)" so page 0 is unmapped
			 * too and i == 0 cannot underflow
			 */
			while (i--) {
				pci_unmap_page(dev->pdev,
					       ttm_dma->dma_address[i],
					       PAGE_SIZE,
					       PCI_DMA_BIDIRECTIONAL);
				ttm_dma->dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
	}
	return 0;
}

static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct ttm_dma_tt *ttm_dma = (void *)ttm;
	struct nouveau_drm *drm;
	struct drm_device *dev;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (slave)
		return;

	drm = nouveau_bdev(ttm->bdev);
	dev = drm->dev;

#if __OS_HAS_AGP
	if (drm->agp.stat == ENABLED) {
		ttm_agp_tt_unpopulate(ttm);
		return;
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		ttm_dma_unpopulate((void *)ttm, dev->dev);
		return;
	}
#endif

	for (i = 0; i < ttm->num_pages; i++) {
		if (ttm_dma->dma_address[i]) {
			pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
		}
	}

	ttm_pool_unpopulate(ttm);
}

void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
	struct nouveau_fence *old_fence = NULL;

	if (likely(fence))
		nouveau_fence_ref(fence);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);
}

static void
nouveau_bo_fence_unref(void **sync_obj)
{
	nouveau_fence_unref((struct nouveau_fence **)sync_obj);
}

static void *
nouveau_bo_fence_ref(void *sync_obj)
{
	return nouveau_fence_ref(sync_obj);
}

static bool
nouveau_bo_fence_signalled(void *sync_obj)
{
	return nouveau_fence_done(sync_obj);
}

static int
nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr)
{
	return nouveau_fence_wait(sync_obj, lazy, intr);
}

static int
nouveau_bo_fence_flush(void *sync_obj)
{
	return 0;
}

struct ttm_bo_driver nouveau_bo_driver = {
	.ttm_tt_create = &nouveau_ttm_tt_create,
	.ttm_tt_populate = &nouveau_ttm_tt_populate,
	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move_notify = nouveau_bo_move_ntfy,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = nouveau_bo_fence_signalled,
	.sync_obj_wait = nouveau_bo_fence_wait,
	.sync_obj_flush = nouveau_bo_fence_flush,
	.sync_obj_unref = nouveau_bo_fence_unref,
	.sync_obj_ref = nouveau_bo_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};

struct nouveau_vma *
nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
{
	struct nouveau_vma *vma;

	list_for_each_entry(vma, &nvbo->vma_list, head) {
		if (vma->vm == vm)
			return vma;
	}

	return NULL;
}

int
nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
		   struct nouveau_vma *vma)
{
	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
	struct nouveau_mem *node = nvbo->bo.mem.mm_node;
	int ret;

	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
			     NV_MEM_ACCESS_RW, vma);
	if (ret)
		return ret;

	if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
	else if (nvbo->bo.mem.mem_type == TTM_PL_TT) {
		if (node->sg)
			nouveau_vm_map_sg_table(vma, 0, size, node);
		else
			nouveau_vm_map_sg(vma, 0, size, node);
	}

	list_add_tail(&vma->head, &nvbo->vma_list);
	vma->refcount = 1;
	return 0;
}

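/*
 * Sketch of per-client use (assumed caller context, names hypothetical):
 * each client wanting the bo in its address space allocates a
 * nouveau_vma and pairs nouveau_bo_vma_add() with nouveau_bo_vma_del():
 *
 *	struct nouveau_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);
 *	if (vma && !nouveau_bo_vma_add(nvbo, client_vm, vma))
 *		... use vma->offset ...
 */
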
void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	if (vma->node) {
		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
			spin_lock(&nvbo->bo.bdev->fence_lock);
			ttm_bo_wait(&nvbo->bo, false, false, false);
			spin_unlock(&nvbo->bo.bdev->fence_lock);
			nouveau_vm_unmap(vma);
		}

		nouveau_vm_put(vma);
		list_del(&vma->head);
	}
}