/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *          Ben Skeggs   <darktama@iinet.net.au>
 *          Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include "drmP.h"
#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"

#include <linux/log2.h>

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        ttm_bo_kunmap(&nvbo->kmap);

        if (unlikely(nvbo->gem))
                DRM_ERROR("bo %p still attached to GEM object\n", bo);

        if (nvbo->tile)
                nv10_mem_expire_tiling(dev, nvbo->tile, NULL);

        spin_lock(&dev_priv->ttm.bo_list_lock);
        list_del(&nvbo->head);
        spin_unlock(&dev_priv->ttm.bo_list_lock);
        kfree(nvbo);
}

static void
nouveau_bo_fixup_align(struct drm_device *dev,
                       uint32_t tile_mode, uint32_t tile_flags,
                       int *align, int *size)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        /*
         * Some of the tile_flags have a periodic structure of N*4096 bytes,
         * align to that as well as the page size.  Align the size to the
         * appropriate boundaries.  This does imply that sizes are rounded up
         * 3-7 pages, so be aware of this and do not waste memory by
         * allocating many small buffers.
         */
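        /*
         * A worked example (assuming nouveau_mem_fb_amount() reports the
         * board's VRAM in bytes): 256 MiB gives block_size = 8192 below,
         * a power of two, so the loop settles on *align = 12 * 2 * 8192
         * = 196608, the first multiple that is also 64 KiB aligned.
         */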
        if (dev_priv->card_type == NV_50) {
                uint32_t block_size = nouveau_mem_fb_amount(dev) >> 15;
                int i;

                switch (tile_flags) {
                case 0x1800:
                case 0x2800:
                case 0x4800:
                case 0x7a00:
                        if (is_power_of_2(block_size)) {
                                for (i = 1; i < 10; i++) {
                                        *align = 12 * i * block_size;
                                        if (!(*align % 65536))
                                                break;
                                }
                        } else {
                                for (i = 1; i < 10; i++) {
                                        *align = 8 * i * block_size;
                                        if (!(*align % 65536))
                                                break;
                                }
                        }
                        *size = roundup(*size, *align);
                        break;
                default:
                        break;
                }
        } else {
                if (tile_mode) {
                        if (dev_priv->chipset >= 0x40) {
                                *align = 65536;
                                *size = roundup(*size, 64 * tile_mode);
                        } else if (dev_priv->chipset >= 0x30) {
                                *align = 32768;
                                *size = roundup(*size, 64 * tile_mode);
                        } else if (dev_priv->chipset >= 0x20) {
                                *align = 16384;
                                *size = roundup(*size, 64 * tile_mode);
                        } else if (dev_priv->chipset >= 0x10) {
                                *align = 16384;
                                *size = roundup(*size, 32 * tile_mode);
                        }
                }
        }

        /* ALIGN works only on powers of two. */
        *size = roundup(*size, PAGE_SIZE);

        if (dev_priv->card_type == NV_50) {
                *size = roundup(*size, 65536);
                *align = max(65536, *align);
        }
}
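
/*
 * Allocate and initialise a new buffer object.  The requested size and
 * alignment are first fixed up for the tiling mode/flags, the object is
 * then handed to TTM for backing storage, and on success it is linked
 * into the device-wide bo_list and returned through *pnvbo.
 */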
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
               int size, int align, uint32_t flags, uint32_t tile_mode,
               uint32_t tile_flags, bool no_vm, bool mappable,
               struct nouveau_bo **pnvbo)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_bo *nvbo;
        int ret = 0;

        nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
        if (!nvbo)
                return -ENOMEM;
        INIT_LIST_HEAD(&nvbo->head);
        INIT_LIST_HEAD(&nvbo->entry);
        nvbo->mappable = mappable;
        nvbo->no_vm = no_vm;
        nvbo->tile_mode = tile_mode;
        nvbo->tile_flags = tile_flags;

        nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
        align >>= PAGE_SHIFT;

        nvbo->placement.fpfn = 0;
        nvbo->placement.lpfn = mappable ? dev_priv->fb_mappable_pages : 0;
        nouveau_bo_placement_set(nvbo, flags);

        nvbo->channel = chan;
        ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
                          ttm_bo_type_device, &nvbo->placement, align, 0,
                          false, NULL, size, nouveau_bo_del_ttm);
        nvbo->channel = NULL;
        if (ret) {
                /* ttm will call nouveau_bo_del_ttm if it fails. */
                return ret;
        }

        spin_lock(&dev_priv->ttm.bo_list_lock);
        list_add_tail(&nvbo->head, &dev_priv->ttm.bo_list);
        spin_unlock(&dev_priv->ttm.bo_list_lock);
        *pnvbo = nvbo;
        return 0;
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t memtype)
{
        int n = 0;

        if (memtype & TTM_PL_FLAG_VRAM)
                nvbo->placements[n++] = TTM_PL_FLAG_VRAM | TTM_PL_MASK_CACHING;
        if (memtype & TTM_PL_FLAG_TT)
                nvbo->placements[n++] = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        if (memtype & TTM_PL_FLAG_SYSTEM)
                nvbo->placements[n++] = TTM_PL_FLAG_SYSTEM | TTM_PL_MASK_CACHING;

        nvbo->placement.placement = nvbo->placements;
        nvbo->placement.busy_placement = nvbo->placements;
        nvbo->placement.num_placement = n;
        nvbo->placement.num_busy_placement = n;

        if (nvbo->pin_refcnt) {
                while (n--)
                        nvbo->placements[n] |= TTM_PL_FLAG_NO_EVICT;
        }
}
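
/*
 * Pin a buffer object into the memory type given by memtype.  Pinning
 * is reference counted: only the first pin marks every placement
 * NO_EVICT and revalidates the object (also adjusting the free-aperture
 * counters); later calls just bump the refcount.  Asking to pin a
 * buffer somewhere other than where it is already pinned fails with
 * -EINVAL.
 */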
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret, i;

        if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
                NV_ERROR(nouveau_bdev(bo->bdev)->dev,
                         "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
                         1 << bo->mem.mem_type, memtype);
                return -EINVAL;
        }

        if (nvbo->pin_refcnt++)
                return 0;

        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (ret)
                goto out;

        nouveau_bo_placement_set(nvbo, memtype);
        for (i = 0; i < nvbo->placement.num_placement; i++)
                nvbo->placements[i] |= TTM_PL_FLAG_NO_EVICT;

        ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        dev_priv->fb_aper_free -= bo->mem.size;
                        break;
                case TTM_PL_TT:
                        dev_priv->gart_info.aper_free -= bo->mem.size;
                        break;
                default:
                        break;
                }
        }
        ttm_bo_unreserve(bo);
out:
        if (unlikely(ret))
                nvbo->pin_refcnt--;
        return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret, i;

        if (--nvbo->pin_refcnt)
                return 0;

        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (ret)
                return ret;

        for (i = 0; i < nvbo->placement.num_placement; i++)
                nvbo->placements[i] &= ~TTM_PL_FLAG_NO_EVICT;

        ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        dev_priv->fb_aper_free += bo->mem.size;
                        break;
                case TTM_PL_TT:
                        dev_priv->gart_info.aper_free += bo->mem.size;
                        break;
                default:
                        break;
                }
        }
        ttm_bo_unreserve(bo);
        return ret;
}

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
        int ret;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
        if (ret)
                return ret;

        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
        ttm_bo_unreserve(&nvbo->bo);
        return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
        ttm_bo_kunmap(&nvbo->kmap);
}
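
/*
 * The accessors below work on a mapping set up by nouveau_bo_map() and
 * handle both cases ttm_bo_kmap() can produce: an I/O mapping of VRAM,
 * which must be accessed through ioreadNN/iowriteNN, and an ordinary
 * kernel mapping of system pages.
 */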
u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                return ioread16_native((void __force __iomem *)mem);
        else
                return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                iowrite16_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                return ioread32_native((void __force __iomem *)mem);
        else
                return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                iowrite32_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

        switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
        case NOUVEAU_GART_AGP:
                return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
        case NOUVEAU_GART_SGDMA:
                return nouveau_sgdma_init_ttm(dev);
        default:
                NV_ERROR(dev, "Unknown GART type %d\n",
                         dev_priv->gart_info.type);
                break;
        }

        return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        /* We'll do this from user space. */
        return 0;
}
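
/*
 * Describe each TTM memory type to the core: system RAM is always
 * cacheable, VRAM is a fixed, mappable aperture clamped to the amount
 * of memory actually on the board, and the TT type depends on whether
 * the GART is AGP or SGDMA.
 */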
static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                         struct ttm_mem_type_manager *man)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

        switch (type) {
        case TTM_PL_SYSTEM:
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED |
                                         TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                man->io_addr = NULL;
                man->io_offset = drm_get_resource_start(dev, 1);
                man->io_size = drm_get_resource_len(dev, 1);
                if (man->io_size > nouveau_mem_fb_amount(dev))
                        man->io_size = nouveau_mem_fb_amount(dev);
                man->gpu_offset = dev_priv->vm_vram_base;
                break;
        case TTM_PL_TT:
                switch (dev_priv->gart_info.type) {
                case NOUVEAU_GART_AGP:
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                        man->available_caching = TTM_PL_FLAG_UNCACHED;
                        man->default_caching = TTM_PL_FLAG_UNCACHED;
                        break;
                case NOUVEAU_GART_SGDMA:
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
                                     TTM_MEMTYPE_FLAG_CMA;
                        man->available_caching = TTM_PL_MASK_CACHING;
                        man->default_caching = TTM_PL_FLAG_CACHED;
                        break;
                default:
                        NV_ERROR(dev, "Unknown GART type: %d\n",
                                 dev_priv->gart_info.type);
                        return -EINVAL;
                }
                man->io_offset = dev_priv->gart_info.aper_base;
                man->io_size = dev_priv->gart_info.aper_size;
                man->io_addr = NULL;
                man->gpu_offset = dev_priv->vm_gart_base;
                break;
        default:
                NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT);
                break;
        default:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM);
                break;
        }

        *pl = nvbo->placement;
}

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
                              struct nouveau_bo *nvbo, bool evict,
                              bool no_wait_reserve, bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
{
        struct nouveau_fence *fence = NULL;
        int ret;

        ret = nouveau_fence_new(chan, &fence, true);
        if (ret)
                return ret;

        ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
                                        no_wait_reserve, no_wait_gpu, new_mem);
        if (nvbo->channel && nvbo->channel != chan)
                ret = nouveau_fence_wait(fence, NULL, false, false);
        nouveau_fence_unref((void *)&fence);
        return ret;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct nouveau_bo *nvbo, struct nouveau_channel *chan,
                      struct ttm_mem_reg *mem)
{
        if (chan == nouveau_bdev(nvbo->bo.bdev)->channel) {
                if (mem->mem_type == TTM_PL_TT)
                        return NvDmaGART;
                return NvDmaVRAM;
        }

        if (mem->mem_type == TTM_PL_TT)
                return chan->gart_handle;
        return chan->vram_handle;
}
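
/*
 * Copy a buffer with the M2MF engine.  When the copy runs on a client
 * channel rather than the kernel's, the offsets are rebased into the
 * channel's virtual address space first.  The copy itself is issued as
 * PAGE_SIZE-wide lines, at most 2047 lines per transfer.
 */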
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
                     bool no_wait_reserve, bool no_wait_gpu,
                     struct ttm_mem_reg *new_mem)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct nouveau_channel *chan;
        uint64_t src_offset, dst_offset;
        uint32_t page_count;
        int ret;

        chan = nvbo->channel;
        if (!chan || nvbo->tile_flags || nvbo->no_vm)
                chan = dev_priv->channel;

        src_offset = old_mem->mm_node->start << PAGE_SHIFT;
        dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
        if (chan != dev_priv->channel) {
                if (old_mem->mem_type == TTM_PL_TT)
                        src_offset += dev_priv->vm_gart_base;
                else
                        src_offset += dev_priv->vm_vram_base;

                if (new_mem->mem_type == TTM_PL_TT)
                        dst_offset += dev_priv->vm_gart_base;
                else
                        dst_offset += dev_priv->vm_vram_base;
        }

        ret = RING_SPACE(chan, 3);
        if (ret)
                return ret;
        BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
        OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, old_mem));
        OUT_RING(chan, nouveau_bo_mem_ctxdma(nvbo, chan, new_mem));

        if (dev_priv->card_type >= NV_50) {
                ret = RING_SPACE(chan, 4);
                if (ret)
                        return ret;
                BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
                OUT_RING(chan, 1);
                BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
                OUT_RING(chan, 1);
        }

        page_count = new_mem->num_pages;
        while (page_count) {
                int line_count = (page_count > 2047) ? 2047 : page_count;

                if (dev_priv->card_type >= NV_50) {
                        ret = RING_SPACE(chan, 3);
                        if (ret)
                                return ret;
                        BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
                        OUT_RING(chan, upper_32_bits(src_offset));
                        OUT_RING(chan, upper_32_bits(dst_offset));
                }
                ret = RING_SPACE(chan, 11);
                if (ret)
                        return ret;
                BEGIN_RING(chan, NvSubM2MF,
                           NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
                OUT_RING(chan, lower_32_bits(src_offset));
                OUT_RING(chan, lower_32_bits(dst_offset));
                OUT_RING(chan, PAGE_SIZE); /* src_pitch */
                OUT_RING(chan, PAGE_SIZE); /* dst_pitch */
                OUT_RING(chan, PAGE_SIZE); /* line_length */
                OUT_RING(chan, line_count);
                OUT_RING(chan, (1 << 8) | (1 << 0));
                OUT_RING(chan, 0);
                BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                OUT_RING(chan, 0);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
                                             no_wait_reserve, no_wait_gpu,
                                             new_mem);
}
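
/*
 * M2MF can only see VRAM and GART, so a move to system memory is done
 * in two hops: copy into a temporary TT placement with the engine,
 * then let TTM move that into its final destination.
 */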
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait_reserve, bool no_wait_gpu,
                      struct ttm_mem_reg *new_mem)
{
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;
        int ret;

        placement.fpfn = placement.lpfn = 0;
        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = placement.busy_placement = &placement_memtype;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr,
                               no_wait_reserve, no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_tt_bind(bo->ttm, &tmp_mem);
        if (ret)
                goto out;

        ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve,
                                   no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;

        ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
        if (tmp_mem.mm_node) {
                spin_lock(&bo->bdev->glob->lru_lock);
                drm_mm_put_block(tmp_mem.mm_node);
                spin_unlock(&bo->bdev->glob->lru_lock);
        }

        return ret;
}
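
/*
 * The reverse of nouveau_bo_move_flipd(): bind the data into a
 * temporary TT placement first, then copy it to its destination with
 * the M2MF engine.
 */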
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait_reserve, bool no_wait_gpu,
                      struct ttm_mem_reg *new_mem)
{
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;
        int ret;

        placement.fpfn = placement.lpfn = 0;
        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = placement.busy_placement = &placement_memtype;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr,
                               no_wait_reserve, no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;

        ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve,
                                   no_wait_gpu, new_mem);
        if (ret)
                goto out;

out:
        if (tmp_mem.mm_node) {
                spin_lock(&bo->bdev->glob->lru_lock);
                drm_mm_put_block(tmp_mem.mm_node);
                spin_unlock(&bo->bdev->glob->lru_lock);
        }

        return ret;
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
                   struct nouveau_tile_reg **new_tile)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        uint64_t offset;
        int ret;

        if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
                /* Nothing to do. */
                *new_tile = NULL;
                return 0;
        }

        offset = new_mem->mm_node->start << PAGE_SHIFT;

        if (dev_priv->card_type == NV_50) {
                ret = nv50_mem_vm_bind_linear(dev,
                                              offset + dev_priv->vm_vram_base,
                                              new_mem->size, nvbo->tile_flags,
                                              offset);
                if (ret)
                        return ret;
        } else if (dev_priv->card_type >= NV_10) {
                *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
                                                nvbo->tile_mode);
        }

        return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
                      struct nouveau_tile_reg *new_tile,
                      struct nouveau_tile_reg **old_tile)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;

        if (dev_priv->card_type >= NV_10 &&
            dev_priv->card_type < NV_50) {
                if (*old_tile)
                        nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);

                *old_tile = new_tile;
        }
}
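
/*
 * Main move entry point.  Tiling state for the new placement is set up
 * first; the copy itself uses memcpy while the card is still coming up,
 * a plain struct assignment for unpopulated system buffers, and the
 * M2MF paths above otherwise, falling back to memcpy if the hardware
 * path fails.
 */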
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
                bool no_wait_reserve, bool no_wait_gpu,
                struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct nouveau_tile_reg *new_tile = NULL;
        int ret = 0;

        ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
        if (ret)
                return ret;

        /* Software copy if the card isn't up and running yet. */
        if (dev_priv->init_state != NOUVEAU_CARD_INIT_DONE ||
            !dev_priv->channel) {
                ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve,
                                         no_wait_gpu, new_mem);
                goto out;
        }

        /* Fake bo copy. */
        if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
                BUG_ON(bo->mem.mm_node != NULL);
                bo->mem = *new_mem;
                new_mem->mm_node = NULL;
                goto out;
        }

        /* Hardware assisted copy. */
        if (new_mem->mem_type == TTM_PL_SYSTEM)
                ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve,
                                            no_wait_gpu, new_mem);
        else if (old_mem->mem_type == TTM_PL_SYSTEM)
                ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve,
                                            no_wait_gpu, new_mem);
        else
                ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve,
                                           no_wait_gpu, new_mem);

        if (!ret)
                goto out;

        /* Fallback to software copy. */
        ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu,
                                 new_mem);

out:
        if (ret)
                nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
        else
                nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);

        return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        return 0;
}

static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* System memory */
                return 0;
        case TTM_PL_TT:
#if __OS_HAS_AGP
                if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
                        mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
                        mem->bus.base = dev_priv->gart_info.aper_base;
                        mem->bus.is_iomem = true;
                }
#endif
                break;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
                mem->bus.base = drm_get_resource_start(dev, 1);
                mem->bus.is_iomem = true;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        return 0;
}

struct ttm_bo_driver nouveau_bo_driver = {
        .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
        .invalidate_caches = nouveau_bo_invalidate_caches,
        .init_mem_type = nouveau_bo_init_mem_type,
        .evict_flags = nouveau_bo_evict_flags,
        .move = nouveau_bo_move,
        .verify_access = nouveau_bo_verify_access,
        .sync_obj_signaled = nouveau_fence_signalled,
        .sync_obj_wait = nouveau_fence_wait,
        .sync_obj_flush = nouveau_fence_flush,
        .sync_obj_unref = nouveau_fence_unref,
        .sync_obj_ref = nouveau_fence_ref,
        .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
        .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
        .io_mem_free = &nouveau_ttm_io_mem_free,
};