nouveau_bo.c

/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *	    Ben Skeggs <darktama@iinet.net.au>
 *	    Jeremy Kolb <jkolb@brandeis.edu>
 */

#include "drmP.h"
#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"

#include <linux/log2.h>
#include <linux/slab.h>

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
	kfree(nvbo);
}

static void
nouveau_bo_fixup_align(struct drm_device *dev,
		       uint32_t tile_mode, uint32_t tile_flags,
		       int *align, int *size)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	/*
	 * Some of the tile_flags have a periodic structure of N*4096 bytes,
	 * align to that as well as the page size. Align the size to the
	 * appropriate boundaries. This does imply that sizes are rounded up
	 * 3-7 pages, so be aware of this and do not waste memory by allocating
	 * many small buffers.
	 */
	if (dev_priv->card_type == NV_50) {
		uint32_t block_size = dev_priv->vram_size >> 15;
		int i;

		switch (tile_flags) {
		case 0x1800:
		case 0x2800:
		case 0x4800:
		case 0x7a00:
			if (is_power_of_2(block_size)) {
				for (i = 1; i < 10; i++) {
					*align = 12 * i * block_size;
					if (!(*align % 65536))
						break;
				}
			} else {
				for (i = 1; i < 10; i++) {
					*align = 8 * i * block_size;
					if (!(*align % 65536))
						break;
				}
			}
			*size = roundup(*size, *align);
			break;
		default:
			break;
		}
	} else {
		if (tile_mode) {
			if (dev_priv->chipset >= 0x40) {
				*align = 65536;
				*size = roundup(*size, 64 * tile_mode);
			} else if (dev_priv->chipset >= 0x30) {
				*align = 32768;
				*size = roundup(*size, 64 * tile_mode);
			} else if (dev_priv->chipset >= 0x20) {
				*align = 16384;
				*size = roundup(*size, 64 * tile_mode);
			} else if (dev_priv->chipset >= 0x10) {
				*align = 16384;
				*size = roundup(*size, 32 * tile_mode);
			}
		}
	}

	/* ALIGN works only on powers of two. */
	*size = roundup(*size, PAGE_SIZE);

	if (dev_priv->card_type == NV_50) {
		*size = roundup(*size, 65536);
		*align = max(65536, *align);
	}
}
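
/*
 * Allocate and initialise a nouveau buffer object.  Alignment and size are
 * fixed up for the card's tiling requirements before the object is handed
 * to ttm_bo_init(), which takes ownership of the allocation (and calls
 * nouveau_bo_del_ttm() itself on failure).
 */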

int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
	       int size, int align, uint32_t flags, uint32_t tile_mode,
	       uint32_t tile_flags, bool no_vm, bool mappable,
	       struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	int ret = 0;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	nvbo->mappable = mappable;
	nvbo->no_vm = no_vm;
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &dev_priv->ttm.bdev;

	nouveau_bo_fixup_align(dev, tile_mode, nouveau_bo_tile_layout(nvbo),
			       &align, &size);
	align >>= PAGE_SHIFT;

	nouveau_bo_placement_set(nvbo, flags, 0);

	nvbo->channel = chan;
	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement, align, 0,
			  false, NULL, size, nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}
	nvbo->channel = NULL;

	*pnvbo = nvbo;
	return 0;
}
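
/*
 * Placement helpers: set_placement_list() expands a domain mask into
 * per-domain placement flags, and set_placement_range() restricts tiled
 * NV1x buffers to one half of VRAM so colour and depth end up on
 * different memory controller units.
 */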

static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
	*n = 0;

	if (type & TTM_PL_FLAG_VRAM)
		pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
	if (type & TTM_PL_FLAG_TT)
		pl[(*n)++] = TTM_PL_FLAG_TT | flags;
	if (type & TTM_PL_FLAG_SYSTEM)
		pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);

	if (dev_priv->card_type == NV_10 &&
	    nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) {
		/*
		 * Make sure that the color and depth buffers are handled
		 * by independent memory controller units. Up to a 9x
		 * speed up when alpha-blending and depth-test are enabled
		 * at the same time.
		 */
		int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;

		if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) {
			nvbo->placement.fpfn = vram_pages / 2;
			nvbo->placement.lpfn = ~0;
		} else {
			nvbo->placement.fpfn = 0;
			nvbo->placement.lpfn = vram_pages / 2;
		}
	}
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
	struct ttm_placement *pl = &nvbo->placement;
	uint32_t flags = TTM_PL_MASK_CACHING |
		(nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

	pl->placement = nvbo->placements;
	set_placement_list(nvbo->placements, &pl->num_placement,
			   type, flags);

	pl->busy_placement = nvbo->busy_placements;
	set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
			   type | busy, flags);

	set_placement_range(nvbo, type);
}
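
/*
 * nouveau_bo_pin()/nouveau_bo_unpin() reference-count the pinned state of a
 * buffer.  Only the first pin and the last unpin actually revalidate the
 * object, and the VRAM/GART aperture accounting is updated accordingly.
 */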

int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
		NV_ERROR(nouveau_bdev(bo->bdev)->dev,
			 "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
			 1 << bo->mem.mem_type, memtype);
		return -EINVAL;
	}

	if (nvbo->pin_refcnt++)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		goto out;

	nouveau_bo_placement_set(nvbo, memtype, 0);

	ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free -= bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free -= bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
out:
	if (unlikely(ret))
		nvbo->pin_refcnt--;
	return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
	struct ttm_buffer_object *bo = &nvbo->bo;
	int ret;

	if (--nvbo->pin_refcnt)
		return 0;

	ret = ttm_bo_reserve(bo, false, false, false, 0);
	if (ret)
		return ret;

	nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

	ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
	if (ret == 0) {
		switch (bo->mem.mem_type) {
		case TTM_PL_VRAM:
			dev_priv->fb_aper_free += bo->mem.size;
			break;
		case TTM_PL_TT:
			dev_priv->gart_info.aper_free += bo->mem.size;
			break;
		default:
			break;
		}
	}
	ttm_bo_unreserve(bo);
	return ret;
}
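
/*
 * Kernel mapping helpers.  nouveau_bo_map() kmaps the whole object, and the
 * rd/wr accessors below transparently use ioread*/iowrite* when the mapping
 * is backed by I/O memory rather than system pages.
 */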

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
	int ret;

	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
	if (ret)
		return ret;

	ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
	ttm_bo_unreserve(&nvbo->bo);
	return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
	if (nvbo)
		ttm_bo_kunmap(&nvbo->kmap);
}

u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread16_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
	bool is_iomem;
	u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite16_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		return ioread32_native((void __force __iomem *)mem);
	else
		return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
	mem = &mem[index];
	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)mem);
	else
		*mem = val;
}
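
/*
 * TTM driver callbacks: backend creation, memory-type setup and eviction
 * placement for the nouveau_bo_driver registered at the end of this file.
 */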

static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
	case NOUVEAU_GART_AGP:
		return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
	case NOUVEAU_GART_SGDMA:
		return nouveau_sgdma_init_ttm(dev);
	default:
		NV_ERROR(dev, "Unknown GART type %d\n",
			 dev_priv->gart_info.type);
		break;
	}

	return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
	/* We'll do this from user space. */
	return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
			 struct ttm_mem_type_manager *man)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	switch (type) {
	case TTM_PL_SYSTEM:
		man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_MASK_CACHING;
		man->default_caching = TTM_PL_FLAG_CACHED;
		break;
	case TTM_PL_VRAM:
		man->func = &ttm_bo_manager_func;
		man->flags = TTM_MEMTYPE_FLAG_FIXED |
			     TTM_MEMTYPE_FLAG_MAPPABLE;
		man->available_caching = TTM_PL_FLAG_UNCACHED |
					 TTM_PL_FLAG_WC;
		man->default_caching = TTM_PL_FLAG_WC;
		if (dev_priv->card_type == NV_50)
			man->gpu_offset = 0x40000000;
		else
			man->gpu_offset = 0;
		break;
	case TTM_PL_TT:
		man->func = &ttm_bo_manager_func;
		switch (dev_priv->gart_info.type) {
		case NOUVEAU_GART_AGP:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
			man->available_caching = TTM_PL_FLAG_UNCACHED |
						 TTM_PL_FLAG_WC;
			man->default_caching = TTM_PL_FLAG_WC;
			break;
		case NOUVEAU_GART_SGDMA:
			man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
				     TTM_MEMTYPE_FLAG_CMA;
			man->available_caching = TTM_PL_MASK_CACHING;
			man->default_caching = TTM_PL_FLAG_CACHED;
			break;
		default:
			NV_ERROR(dev, "Unknown GART type: %d\n",
				 dev_priv->gart_info.type);
			return -EINVAL;
		}
		man->gpu_offset = dev_priv->vm_gart_base;
		break;
	default:
		NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	switch (bo->mem.mem_type) {
	case TTM_PL_VRAM:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
					 TTM_PL_FLAG_SYSTEM);
		break;
	default:
		nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
		break;
	}

	*pl = nvbo->placement;
}

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */

static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
			      struct nouveau_bo *nvbo, bool evict,
			      bool no_wait_reserve, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct nouveau_fence *fence = NULL;
	int ret;

	ret = nouveau_fence_new(chan, &fence, true);
	if (ret)
		return ret;

	ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
					no_wait_reserve, no_wait_gpu, new_mem);
	nouveau_fence_unref(&fence);
	return ret;
}

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
		      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (nvbo->no_vm) {
		if (mem->mem_type == TTM_PL_TT)
			return NvDmaGART;
		return NvDmaVRAM;
	}

	if (mem->mem_type == TTM_PL_TT)
		return chan->gart_handle;
	return chan->vram_handle;
}
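
/*
 * NV50 M2MF copy: the transfer is split into chunks of at most 4 MiB, each
 * pushed as a 64-byte-pitch 2D transfer.  The 0x0200/0x021c methods select
 * linear or tiled addressing for the destination and source depending on
 * whether that side is tiled VRAM.
 */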

static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	u64 length = (new_mem->num_pages << PAGE_SHIFT);
	u64 src_offset, dst_offset;
	int ret;

	src_offset = old_mem->start << PAGE_SHIFT;
	dst_offset = new_mem->start << PAGE_SHIFT;
	if (!nvbo->no_vm) {
		if (old_mem->mem_type == TTM_PL_VRAM)
			src_offset += dev_priv->vm_vram_base;
		else
			src_offset += dev_priv->vm_gart_base;

		if (new_mem->mem_type == TTM_PL_VRAM)
			dst_offset += dev_priv->vm_vram_base;
		else
			dst_offset += dev_priv->vm_gart_base;
	}

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubM2MF, 0x0184, 2);
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	while (length) {
		u32 amount, stride, height;

		amount = min(length, (u64)(4 * 1024 * 1024));
		stride = 16 * 4;
		height = amount / stride;

		if (new_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			OUT_RING (chan, 1);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
			OUT_RING (chan, 1);
		}
		if (old_mem->mem_type == TTM_PL_VRAM &&
		    nouveau_bo_tile_layout(nvbo)) {
			ret = RING_SPACE(chan, 8);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
			OUT_RING (chan, stride);
			OUT_RING (chan, height);
			OUT_RING (chan, 1);
			OUT_RING (chan, 0);
			OUT_RING (chan, 0);
		} else {
			ret = RING_SPACE(chan, 2);
			if (ret)
				return ret;

			BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
			OUT_RING (chan, 1);
		}

		ret = RING_SPACE(chan, 14);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
		OUT_RING (chan, upper_32_bits(src_offset));
		OUT_RING (chan, upper_32_bits(dst_offset));
		BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
		OUT_RING (chan, lower_32_bits(src_offset));
		OUT_RING (chan, lower_32_bits(dst_offset));
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, stride);
		OUT_RING (chan, height);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING (chan, 0);

		length -= amount;
		src_offset += amount;
		dst_offset += amount;
	}

	return 0;
}

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
		  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
	u32 src_offset = old_mem->start << PAGE_SHIFT;
	u32 dst_offset = new_mem->start << PAGE_SHIFT;
	u32 page_count = new_mem->num_pages;
	int ret;

	ret = RING_SPACE(chan, 3);
	if (ret)
		return ret;

	BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
	OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

	page_count = new_mem->num_pages;
	while (page_count) {
		int line_count = (page_count > 2047) ? 2047 : page_count;

		ret = RING_SPACE(chan, 11);
		if (ret)
			return ret;

		BEGIN_RING(chan, NvSubM2MF,
			   NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
		OUT_RING (chan, src_offset);
		OUT_RING (chan, dst_offset);
		OUT_RING (chan, PAGE_SIZE); /* src_pitch */
		OUT_RING (chan, PAGE_SIZE); /* dst_pitch */
		OUT_RING (chan, PAGE_SIZE); /* line_length */
		OUT_RING (chan, line_count);
		OUT_RING (chan, 0x00000101);
		OUT_RING (chan, 0x00000000);
		BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
		OUT_RING (chan, 0);

		page_count -= line_count;
		src_offset += (PAGE_SIZE * line_count);
		dst_offset += (PAGE_SIZE * line_count);
	}

	return 0;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
		     bool no_wait_reserve, bool no_wait_gpu,
		     struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct nouveau_channel *chan;
	int ret;

	chan = nvbo->channel;
	if (!chan || nvbo->no_vm) {
		chan = dev_priv->channel;
		mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
	}

	if (dev_priv->card_type < NV_50)
		ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	else
		ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
	if (ret == 0) {
		ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
						    no_wait_reserve,
						    no_wait_gpu, new_mem);
	}

	if (chan == dev_priv->channel)
		mutex_unlock(&chan->mutex);
	return ret;
}
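
/*
 * The M2MF engine cannot reach TTM_PL_SYSTEM, so moves involving system
 * memory are done in two hops via a temporary GART placement: "flipd"
 * copies VRAM -> TT -> SYSTEM on the way out, "flips" binds a TT buffer
 * first and copies SYSTEM -> TT -> VRAM on the way in.
 */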

static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
		      bool no_wait_reserve, bool no_wait_gpu,
		      struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	if (ret)
		goto out;

out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
		   struct nouveau_tile_reg **new_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	uint64_t offset;
	int ret;

	if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
		/* Nothing to do. */
		*new_tile = NULL;
		return 0;
	}

	offset = new_mem->start << PAGE_SHIFT;

	if (dev_priv->card_type == NV_50) {
		ret = nv50_mem_vm_bind_linear(dev,
					      offset + dev_priv->vm_vram_base,
					      new_mem->size,
					      nouveau_bo_tile_layout(nvbo),
					      offset);
		if (ret)
			return ret;

	} else if (dev_priv->card_type >= NV_10) {
		*new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
						nvbo->tile_mode,
						nvbo->tile_flags);
	}

	return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
		      struct nouveau_tile_reg *new_tile,
		      struct nouveau_tile_reg **old_tile)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;

	if (dev_priv->card_type >= NV_10 &&
	    dev_priv->card_type < NV_50) {
		nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
		*old_tile = new_tile;
	}
}
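
/*
 * Top-level TTM move callback.  Picks the cheapest strategy in order: a
 * "fake" move for unpopulated system buffers, a CPU memcpy while the card
 * is not yet up, then a hardware M2MF copy (using the flipd/flips double
 * hops when system memory is involved), falling back to memcpy on failure.
 */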

static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
		bool no_wait_reserve, bool no_wait_gpu,
		struct ttm_mem_reg *new_mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct nouveau_tile_reg *new_tile = NULL;
	int ret = 0;

	ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
	if (ret)
		return ret;

	/* Fake bo copy. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
		BUG_ON(bo->mem.mm_node != NULL);
		bo->mem = *new_mem;
		new_mem->mm_node = NULL;
		goto out;
	}

	/* Software copy if the card isn't up and running yet. */
	if (!dev_priv->channel) {
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
		goto out;
	}

	/* Hardware assisted copy. */
	if (new_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM)
		ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
	else
		ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);

	if (!ret)
		goto out;

	/* Fallback to software copy. */
	ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
	if (ret)
		nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
	else
		nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);

	return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
	return 0;
}

static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;
	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;
	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = dev_priv->gart_info.aper_base;
			mem->bus.is_iomem = true;
		}
#endif
		break;
	case TTM_PL_VRAM:
		mem->bus.offset = mem->start << PAGE_SHIFT;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	/* as long as the bo isn't in vram, and isn't tiled, we've got
	 * nothing to do here.
	 */
	if (bo->mem.mem_type != TTM_PL_VRAM) {
		if (dev_priv->card_type < NV_50 ||
		    !nouveau_bo_tile_layout(nvbo))
			return 0;
	}

	/* make sure bo is in mappable vram */
	if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
		return 0;

	nvbo->placement.fpfn = 0;
	nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
	nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
	return ttm_bo_validate(bo, &nvbo->placement, false, true, false);
}
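
/*
 * Attach a fence to the buffer as its TTM sync object, taking a reference
 * on the new fence and dropping the reference on the one it replaces.
 */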

void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
	struct nouveau_fence *old_fence;

	if (likely(fence))
		nouveau_fence_ref(fence);

	spin_lock(&nvbo->bo.bdev->fence_lock);
	old_fence = nvbo->bo.sync_obj;
	nvbo->bo.sync_obj = fence;
	spin_unlock(&nvbo->bo.bdev->fence_lock);

	nouveau_fence_unref(&old_fence);
}

struct ttm_bo_driver nouveau_bo_driver = {
	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
	.invalidate_caches = nouveau_bo_invalidate_caches,
	.init_mem_type = nouveau_bo_init_mem_type,
	.evict_flags = nouveau_bo_evict_flags,
	.move = nouveau_bo_move,
	.verify_access = nouveau_bo_verify_access,
	.sync_obj_signaled = __nouveau_fence_signalled,
	.sync_obj_wait = __nouveau_fence_wait,
	.sync_obj_flush = __nouveau_fence_flush,
	.sync_obj_unref = __nouveau_fence_unref,
	.sync_obj_ref = __nouveau_fence_ref,
	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
	.io_mem_free = &nouveau_ttm_io_mem_free,
};