nouveau_bo.c

/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs <darktama@iinet.net.au>
 *	    Jeremy Kolb <jkolb@brandeis.edu>
 */

#include "drmP.h"
#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"

#include <linux/log2.h>
#include <linux/slab.h>

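/*
 * TTM "destroy" callback: final teardown of a buffer object.  Unmaps any
 * kernel mapping, warns if a GEM object is still attached, expires any
 * tile region, drops the BO from the device-wide list and frees it.
 */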
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	ttm_bo_kunmap(&nvbo->kmap);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	if (nvbo->tile)
		nv10_mem_expire_tiling(dev, nvbo->tile, NULL);

	spin_lock(&dev_priv->ttm.bo_list_lock);
	list_del(&nvbo->head);
	spin_unlock(&dev_priv->ttm.bo_list_lock);
	kfree(nvbo);
}

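/*
 * Adjust the requested alignment and size for the chipset's tiling
 * constraints, so tiled buffers start and end on boundaries the
 * hardware can handle.
 */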
static void
nouveau_bo_fixup_align(struct drm_device *dev,
		       uint32_t tile_mode, uint32_t tile_flags,
		       int *align, int *size)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*
	 * Some of the tile_flags have a periodic structure of N*4096 bytes,
	 * align to that as well as the page size.  Align the size to the
	 * appropriate boundaries.  This does imply that sizes are rounded up
	 * 3-7 pages, so be aware of this and do not waste memory by
	 * allocating many small buffers.
	 */
	if (dev_priv->card_type == NV_50) {
		uint32_t block_size = dev_priv->vram_size >> 15;
		int i;

		switch (tile_flags) {
		case 0x1800:
		case 0x2800:
		case 0x4800:
		case 0x7a00:
			if (is_power_of_2(block_size)) {
				for (i = 1; i < 10; i++) {
					*align = 12 * i * block_size;
					if (!(*align % 65536))
						break;
				}
			} else {
				for (i = 1; i < 10; i++) {
					*align = 8 * i * block_size;
					if (!(*align % 65536))
						break;
				}
			}
			*size = roundup(*size, *align);
			break;
		default:
			break;
		}
	} else {
		if (tile_mode) {
			if (dev_priv->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * tile_mode);
			} else if (dev_priv->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * tile_mode);
			} else if (dev_priv->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * tile_mode);
			} else if (dev_priv->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * tile_mode);
			}
		}
	}

	/* ALIGN works only on powers of two. */
	*size = roundup(*size, PAGE_SIZE);

	if (dev_priv->card_type == NV_50) {
		*size = roundup(*size, 65536);
		*align = max(65536, *align);
	}
}

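/*
 * Allocate and initialise a new nouveau_bo.  Alignment and size are
 * fixed up for tiling first; ttm_bo_init() takes over from there and
 * calls nouveau_bo_del_ttm() itself on failure.
 */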
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
	       int size, int align, uint32_t flags, uint32_t tile_mode,
	       uint32_t tile_flags, bool no_vm, bool mappable,
	       struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	int ret = 0;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	nvbo->mappable = mappable;
	nvbo->no_vm = no_vm;
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;

	nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
	align >>= PAGE_SHIFT;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
	nouveau_bo_placement_set(nvbo, flags, 0);

	nvbo->channel = chan;
	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement, align, 0,
			  false, NULL, size, nouveau_bo_del_ttm);
	nvbo->channel = NULL;
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}

	spin_lock(&dev_priv->ttm.bo_list_lock);
	list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list);
	spin_unlock(&dev_priv->ttm.bo_list_lock);
	*pnvbo = nvbo;
	return 0;
}

static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

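/*
 * Fill in the TTM placement lists: "placement" holds the preferred
 * domains, "busy_placement" additionally allows the domains in "busy"
 * as fallbacks.  Pinned BOs also get TTM_PL_FLAG_NO_EVICT.
 */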
void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);
}

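/*
 * Pin a BO into the memory type given by "memtype".  Pinning is
 * refcounted; only the first pin actually validates the BO into place
 * and adjusts the free-aperture accounting.  Pinning an already-pinned
 * BO into a different memory type is rejected.
 */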
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free -= bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (unlikely(ret))
		nvbo->pin_refcnt--;
	return ret;
}

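/*
 * Drop a pin reference.  When the last reference goes away the BO is
 * re-validated without NO_EVICT and the aperture accounting is undone.
 */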
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = ttm_bo_validate(bo, &nvbo->placement, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free += bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free += bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
	return ret;
}

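/*
 * Map the whole BO into kernel virtual address space.  The mapping is
 * cached in nvbo->kmap and torn down by nouveau_bo_unmap().
 */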
int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	ttm_bo_kunmap(&nvbo->kmap);
}

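/*
 * 16/32-bit accessors for a kmapped BO.  The mapping may be ordinary
 * memory or an ioremapped region, so MMIO accessors are used whenever
 * ttm_kmap_obj_virtual() reports an iomem mapping.
 */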
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
	case NOUVEAU_GART_AGP:
		return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
	case NOUVEAU_GART_SGDMA:
		return nouveau_sgdma_init_ttm(dev);
	default:
		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
		break;
	}

	return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

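/*
 * Describe each TTM memory type (SYSTEM, VRAM, GART) to the core:
 * caching options, aperture offset/size and the GPU virtual base used
 * when binding buffers of that type.
 */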
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE |
			     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;

		man->io_addr = NULL;
		man->io_offset = drm_get_resource_start(dev, 1);
		man->io_size = drm_get_resource_len(dev, 1);
		if (man->io_size > dev_priv->vram_size)
			man->io_size = dev_priv->vram_size;

		man->gpu_offset = dev_priv->vm_vram_base;
		break;
	case TTM_PL_TT:
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_NEEDS_IOREMAP;
			man->available_caching = TTM_PL_FLAG_UNCACHED;
			man->default_caching = TTM_PL_FLAG_UNCACHED;
			break;
		case NOUVEAU_GART_SGDMA:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			break;
		default:
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
			return -EINVAL;
		}

		man->io_offset = dev_priv->gart_info.aper_base;
		man->io_size = dev_priv->gart_info.aper_size;
		man->io_addr = NULL;
		man->gpu_offset = dev_priv->vm_gart_base;
		break;
	default:
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict, bool no_wait,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
					evict, no_wait, new_mem);
	if (nvbo->channel && nvbo->channel != chan)
		ret = nouveau_fence_wait(fence, NULL, false, false);
	nouveau_fence_unref((void *)&fence);
	return ret;
}

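/*
 * Pick the DMA object handle the M2MF copy should use for "mem": the
 * device's own channel uses the global NvDmaVRAM/NvDmaGART objects,
 * other channels use their per-channel vram/gart handles.
 */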
static inline uint32_t
nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
		      struct ttm_mem_reg *mem)
{
	if (chan == nouveau_bdev(nvbo->bo.bdev)->channel) {
		if (mem->mem_type == TTM_PL_TT)
			return NvDmaGART;
		return NvDmaVRAM;
	}

	if (mem->mem_type == TTM_PL_TT)
		return chan->gart_handle;
	return chan->vram_handle;
}

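/*
 * Copy a BO between VRAM and GART with the M2MF engine, one page-sized
 * line at a time and at most 2047 lines per transfer, then fence the
 * copy via nouveau_bo_move_accel_cleanup().
 */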
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     int no_wait, struct ttm_mem_reg *new_mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_channel *chan;
	uint64_t src_offset, dst_offset;
	uint32_t page_count;
	int ret;

	chan = nvbo->channel;
	if (!chan || nvbo->tile_flags || nvbo->no_vm)
		chan = dev_priv->channel;

	src_offset = old_mem->mm_node->start << PAGE_SHIFT;
	dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
	if (chan != dev_priv->channel) {
		if (old_mem->mem_type == TTM_PL_TT)
			src_offset += dev_priv->vm_gart_base;
		else
			src_offset += dev_priv->vm_vram_base;

		if (new_mem->mem_type == TTM_PL_TT)
			dst_offset += dev_priv->vm_gart_base;
		else
			dst_offset += dev_priv->vm_vram_base;
	}

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;
	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, old_mem));
	OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, new_mem));

	if (dev_priv->card_type >= NV_50) {
		ret = RING_SPACE(chan, 4);
		if (ret)
			return ret;
		BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
		OUT_RING(chan, 1);
		BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
		OUT_RING(chan, 1);
	}

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		if (dev_priv->card_type >= NV_50) {
			ret = RING_SPACE(chan, 3);
			if (ret)
				return ret;
			BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
			OUT_RING(chan, upper_32_bits(src_offset));
			OUT_RING(chan, upper_32_bits(dst_offset));
		}
		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;
		BEGIN_RING(chan, NvSubM2MF,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING(chan, lower_32_bits(src_offset));
		OUT_RING(chan, lower_32_bits(dst_offset));
		OUT_RING(chan, PAGE_SIZE); /* src_pitch */
		OUT_RING(chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING(chan, PAGE_SIZE); /* line_length */
		OUT_RING(chan, line_count);
		OUT_RING(chan, (1 << 8) | (1 << 0));
		OUT_RING(chan, 0);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING(chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait, new_mem);
}

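/*
 * Move into SYSTEM memory in two hops: M2MF-copy into a temporary GART
 * placement first, then let TTM finish the move to the final system
 * placement.
 */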
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait, struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
out:
	if (tmp_mem.mm_node) {
		spin_lock(&bo->bdev->glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&bo->bdev->glob->lru_lock);
	}
	return ret;
}

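/*
 * Move out of SYSTEM memory, the mirror of the above: TTM-move into a
 * temporary GART placement first, then M2MF-copy into the final
 * placement.
 */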
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait, struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, evict, no_wait, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);
	if (ret)
		goto out;

out:
	if (tmp_mem.mm_node) {
		spin_lock(&bo->bdev->glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&bo->bdev->glob->lru_lock);
	}
	return ret;
}

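/*
 * Set up VM/tiling state for a BO about to land in VRAM: NV50 binds
 * the range into the VRAM page tables, NV1x-NV4x allocate a tile
 * region matching the BO's tile_mode.
 */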
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_tile_reg **new_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	uint64_t offset;
	int ret;

	if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
		/* Nothing to do. */
		*new_tile = NULL;
		return 0;
	}

	offset = new_mem->mm_node->start << PAGE_SHIFT;

	if (dev_priv->card_type == NV_50) {
		ret = nv50_mem_vm_bind_linear(dev,
					      offset + dev_priv->vm_vram_base,
					      new_mem->size, nvbo->tile_flags,
					      offset);
		if (ret)
			return ret;
	} else if (dev_priv->card_type >= NV_10) {
		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_tile_reg *new_tile,
		      struct nouveau_tile_reg **old_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;

	if (dev_priv->card_type >= NV_10 &&
	    dev_priv->card_type < NV_50) {
		if (*old_tile)
			nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);

		*old_tile = new_tile;
	}
}

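/*
 * Top-level TTM move callback.  Prefers a hardware M2MF copy (with the
 * flipd/flips detours for system memory), falling back to memcpy when
 * the card isn't initialised yet or the accelerated path fails.
 */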
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait, struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_tile_reg *new_tile = NULL;
	int ret = 0;

	ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
	if (ret)
		return ret;

	/* Software copy if the card isn't up and running yet. */
	if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
	    !dev_priv->channel) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);
		goto out;
	}

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait, new_mem);

out:
	if (ret)
		nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
	else
		nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

struct ttm_bo_driver nouveau_bo_driver = {
	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = nouveau_fence_signalled,
	.sync_obj_wait = nouveau_fence_wait,
	.sync_obj_flush = nouveau_fence_flush,
	.sync_obj_unref = nouveau_fence_unref,
	.sync_obj_ref = nouveau_fence_ref,
};