/*
 * Copyright 2007 Dave Airlied
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
/*
 * Authors: Dave Airlied <airlied@linux.ie>
 *          Ben Skeggs   <darktama@iinet.net.au>
 *          Jeremy Kolb  <jkolb@brandeis.edu>
 */

#include "drmP.h"
#include "nouveau_drm.h"
#include "nouveau_drv.h"
#include "nouveau_dma.h"

#include <linux/log2.h>
#include <linux/slab.h>

static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        if (unlikely(nvbo->gem))
                DRM_ERROR("bo %p still attached to GEM object\n", bo);

        if (nvbo->tile)
                nv10_mem_expire_tiling(dev, nvbo->tile, NULL);

        kfree(nvbo);
}

static void
nouveau_bo_fixup_align(struct drm_device *dev,
                       uint32_t tile_mode, uint32_t tile_flags,
                       int *align, int *size)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;

        /*
         * Some of the tile_flags have a periodic structure of N*4096 bytes,
         * align to that as well as the page size. Align the size to the
         * appropriate boundaries. This does imply that sizes are rounded up
         * by 3-7 pages, so be aware of this and do not waste memory by
         * allocating many small buffers.
         */
        if (dev_priv->card_type == NV_50) {
                uint32_t block_size = dev_priv->vram_size >> 15;
                int i;

                switch (tile_flags) {
                case 0x1800:
                case 0x2800:
                case 0x4800:
                case 0x7a00:
                        if (is_power_of_2(block_size)) {
                                for (i = 1; i < 10; i++) {
                                        *align = 12 * i * block_size;
                                        if (!(*align % 65536))
                                                break;
                                }
                        } else {
                                for (i = 1; i < 10; i++) {
                                        *align = 8 * i * block_size;
                                        if (!(*align % 65536))
                                                break;
                                }
                        }
                        *size = roundup(*size, *align);
                        break;
                default:
                        break;
                }
        } else {
                if (tile_mode) {
                        if (dev_priv->chipset >= 0x40) {
                                *align = 65536;
                                *size = roundup(*size, 64 * tile_mode);
                        } else if (dev_priv->chipset >= 0x30) {
                                *align = 32768;
                                *size = roundup(*size, 64 * tile_mode);
                        } else if (dev_priv->chipset >= 0x20) {
                                *align = 16384;
                                *size = roundup(*size, 64 * tile_mode);
                        } else if (dev_priv->chipset >= 0x10) {
                                *align = 16384;
                                *size = roundup(*size, 32 * tile_mode);
                        }
                }
        }

        /* ALIGN works only on powers of two. */
        *size = roundup(*size, PAGE_SIZE);

        if (dev_priv->card_type == NV_50) {
                *size = roundup(*size, 65536);
                *align = max(65536, *align);
        }
}

int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
               int size, int align, uint32_t flags, uint32_t tile_mode,
               uint32_t tile_flags, bool no_vm, bool mappable,
               struct nouveau_bo **pnvbo)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_bo *nvbo;
        int ret = 0;

        nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
        if (!nvbo)
                return -ENOMEM;
        INIT_LIST_HEAD(&nvbo->head);
        INIT_LIST_HEAD(&nvbo->entry);
        nvbo->mappable = mappable;
        nvbo->no_vm = no_vm;
        nvbo->tile_mode = tile_mode;
        nvbo->tile_flags = tile_flags;

        nouveau_bo_fixup_align(dev, tile_mode, tile_flags, &align, &size);
        align >>= PAGE_SHIFT;

        nouveau_bo_placement_set(nvbo, flags, 0);

        nvbo->channel = chan;
        ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
                          ttm_bo_type_device, &nvbo->placement, align, 0,
                          false, NULL, size, nouveau_bo_del_ttm);
        if (ret) {
                /* ttm will call nouveau_bo_del_ttm if it fails.. */
                return ret;
        }
        nvbo->channel = NULL;

        *pnvbo = nvbo;
        return 0;
}
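
/*
 * Example (not part of the driver): a rough sketch of how a caller might
 * allocate, pin and CPU-map a scratch buffer with the helpers in this file.
 * Error handling is abbreviated and the 64 KiB size is arbitrary.
 *
 *      struct nouveau_bo *nvbo = NULL;
 *      int ret;
 *
 *      ret = nouveau_bo_new(dev, NULL, 64 * 1024, 0, TTM_PL_FLAG_VRAM,
 *                           0, 0x0000, false, true, &nvbo);
 *      if (ret == 0)
 *              ret = nouveau_bo_pin(nvbo, TTM_PL_FLAG_VRAM);
 *      if (ret == 0)
 *              ret = nouveau_bo_map(nvbo);
 *      if (ret == 0) {
 *              nouveau_bo_wr32(nvbo, 0, 0xdeadbeef);
 *              nouveau_bo_unmap(nvbo);
 *      }
 */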

static void
set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
{
        *n = 0;

        if (type & TTM_PL_FLAG_VRAM)
                pl[(*n)++] = TTM_PL_FLAG_VRAM | flags;
        if (type & TTM_PL_FLAG_TT)
                pl[(*n)++] = TTM_PL_FLAG_TT | flags;
        if (type & TTM_PL_FLAG_SYSTEM)
                pl[(*n)++] = TTM_PL_FLAG_SYSTEM | flags;
}

void
nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
{
        struct ttm_placement *pl = &nvbo->placement;
        uint32_t flags = TTM_PL_MASK_CACHING |
                (nvbo->pin_refcnt ? TTM_PL_FLAG_NO_EVICT : 0);

        pl->placement = nvbo->placements;
        set_placement_list(nvbo->placements, &pl->num_placement,
                           type, flags);

        pl->busy_placement = nvbo->busy_placements;
        set_placement_list(nvbo->busy_placements, &pl->num_busy_placement,
                           type | busy, flags);
}
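
/*
 * Pinning is reference counted: only the first nouveau_bo_pin() actually
 * validates the buffer into the requested memory type (and marks it
 * NO_EVICT via the pin_refcnt check in nouveau_bo_placement_set()), and
 * only the last nouveau_bo_unpin() makes it evictable again.  Pinning a
 * buffer that is already pinned into a different memory type fails with
 * -EINVAL rather than migrating it.
 */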

int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret;

        if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
                NV_ERROR(nouveau_bdev(bo->bdev)->dev,
                         "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
                         1 << bo->mem.mem_type, memtype);
                return -EINVAL;
        }

        if (nvbo->pin_refcnt++)
                return 0;

        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (ret)
                goto out;

        nouveau_bo_placement_set(nvbo, memtype, 0);

        ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        dev_priv->fb_aper_free -= bo->mem.size;
                        break;
                case TTM_PL_TT:
                        dev_priv->gart_info.aper_free -= bo->mem.size;
                        break;
                default:
                        break;
                }
        }
        ttm_bo_unreserve(bo);
out:
        if (unlikely(ret))
                nvbo->pin_refcnt--;
        return ret;
}

int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
        struct ttm_buffer_object *bo = &nvbo->bo;
        int ret;

        if (--nvbo->pin_refcnt)
                return 0;

        ret = ttm_bo_reserve(bo, false, false, false, 0);
        if (ret)
                return ret;

        nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);

        ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false);
        if (ret == 0) {
                switch (bo->mem.mem_type) {
                case TTM_PL_VRAM:
                        dev_priv->fb_aper_free += bo->mem.size;
                        break;
                case TTM_PL_TT:
                        dev_priv->gart_info.aper_free += bo->mem.size;
                        break;
                default:
                        break;
                }
        }
        ttm_bo_unreserve(bo);

        return ret;
}
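
/*
 * CPU access helpers.  nouveau_bo_map() kmaps the whole object through TTM;
 * the rd16/wr16/rd32/wr32 accessors below index into that mapping and use
 * ioread/iowrite when the mapping is I/O memory (e.g. VRAM) and plain
 * loads/stores when it is system memory.
 */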

int
nouveau_bo_map(struct nouveau_bo *nvbo)
{
        int ret;

        ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
        if (ret)
                return ret;

        ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages, &nvbo->kmap);
        ttm_bo_unreserve(&nvbo->bo);
        return ret;
}

void
nouveau_bo_unmap(struct nouveau_bo *nvbo)
{
        if (nvbo)
                ttm_bo_kunmap(&nvbo->kmap);
}

u16
nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                return ioread16_native((void __force __iomem *)mem);
        else
                return *mem;
}

void
nouveau_bo_wr16(struct nouveau_bo *nvbo, unsigned index, u16 val)
{
        bool is_iomem;
        u16 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                iowrite16_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

u32
nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                return ioread32_native((void __force __iomem *)mem);
        else
                return *mem;
}

void
nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
        bool is_iomem;
        u32 *mem = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);
        mem = &mem[index];
        if (is_iomem)
                iowrite32_native(val, (void __force __iomem *)mem);
        else
                *mem = val;
}

static struct ttm_backend *
nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

        switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
        case NOUVEAU_GART_AGP:
                return ttm_agp_backend_init(bdev, dev->agp->bridge);
#endif
        case NOUVEAU_GART_SGDMA:
                return nouveau_sgdma_init_ttm(dev);
        default:
                NV_ERROR(dev, "Unknown GART type %d\n",
                         dev_priv->gart_info.type);
                break;
        }

        return NULL;
}

static int
nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
{
        /* We'll do this from user space. */
        return 0;
}

static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
                         struct ttm_mem_type_manager *man)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

        switch (type) {
        case TTM_PL_SYSTEM:
                man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_MASK_CACHING;
                man->default_caching = TTM_PL_FLAG_CACHED;
                break;
        case TTM_PL_VRAM:
                man->flags = TTM_MEMTYPE_FLAG_FIXED |
                             TTM_MEMTYPE_FLAG_MAPPABLE;
                man->available_caching = TTM_PL_FLAG_UNCACHED |
                                         TTM_PL_FLAG_WC;
                man->default_caching = TTM_PL_FLAG_WC;
                if (dev_priv->card_type == NV_50)
                        man->gpu_offset = 0x40000000;
                else
                        man->gpu_offset = 0;
                break;
        case TTM_PL_TT:
                switch (dev_priv->gart_info.type) {
                case NOUVEAU_GART_AGP:
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
                        man->available_caching = TTM_PL_FLAG_UNCACHED;
                        man->default_caching = TTM_PL_FLAG_UNCACHED;
                        break;
                case NOUVEAU_GART_SGDMA:
                        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
                                     TTM_MEMTYPE_FLAG_CMA;
                        man->available_caching = TTM_PL_MASK_CACHING;
                        man->default_caching = TTM_PL_FLAG_CACHED;
                        break;
                default:
                        NV_ERROR(dev, "Unknown GART type: %d\n",
                                 dev_priv->gart_info.type);
                        return -EINVAL;
                }
                man->gpu_offset = dev_priv->vm_gart_base;
                break;
        default:
                NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
                return -EINVAL;
        }
        return 0;
}

static void
nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        switch (bo->mem.mem_type) {
        case TTM_PL_VRAM:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT,
                                         TTM_PL_FLAG_SYSTEM);
                break;
        default:
                nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM, 0);
                break;
        }

        *pl = nvbo->placement;
}

/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
 * TTM_PL_{VRAM,TT} directly.
 */
static int
nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
                              struct nouveau_bo *nvbo, bool evict,
                              bool no_wait_reserve, bool no_wait_gpu,
                              struct ttm_mem_reg *new_mem)
{
        struct nouveau_fence *fence = NULL;
        int ret;

        ret = nouveau_fence_new(chan, &fence, true);
        if (ret)
                return ret;

        ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL,
                                        evict || (nvbo->channel &&
                                                  nvbo->channel != chan),
                                        no_wait_reserve, no_wait_gpu, new_mem);
        nouveau_fence_unref((void *)&fence);
        return ret;
}
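
/*
 * Note on the accel path: nouveau_bo_move_accel_cleanup() emits a fence on
 * the copying channel and hands it to TTM, so TTM can defer releasing the
 * buffer's old backing store until the M2MF copy has actually completed on
 * the GPU.
 */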

static inline uint32_t
nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
                      struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        if (nvbo->no_vm) {
                if (mem->mem_type == TTM_PL_TT)
                        return NvDmaGART;
                return NvDmaVRAM;
        }

        if (mem->mem_type == TTM_PL_TT)
                return chan->gart_handle;
        return chan->vram_handle;
}
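
/*
 * nv50_bo_move_m2mf() below copies the buffer in chunks of at most 4 MiB,
 * programming each chunk as a 64-byte-wide (16 * 4) 2D blit whose line
 * count covers the chunk.  Before each chunk it selects linear or tiled
 * addressing for the two surfaces (methods 0x0200 and 0x021c) depending on
 * whether the new and old placements are tiled VRAM.
 */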

static int
nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        u64 length = (new_mem->num_pages << PAGE_SHIFT);
        u64 src_offset, dst_offset;
        int ret;

        src_offset = old_mem->mm_node->start << PAGE_SHIFT;
        dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
        if (!nvbo->no_vm) {
                if (old_mem->mem_type == TTM_PL_VRAM)
                        src_offset += dev_priv->vm_vram_base;
                else
                        src_offset += dev_priv->vm_gart_base;

                if (new_mem->mem_type == TTM_PL_VRAM)
                        dst_offset += dev_priv->vm_vram_base;
                else
                        dst_offset += dev_priv->vm_gart_base;
        }

        ret = RING_SPACE(chan, 3);
        if (ret)
                return ret;

        BEGIN_RING(chan, NvSubM2MF, 0x0184, 2);
        OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
        OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

        while (length) {
                u32 amount, stride, height;

                amount = min(length, (u64)(4 * 1024 * 1024));
                stride = 16 * 4;
                height = amount / stride;

                if (new_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) {
                        ret = RING_SPACE(chan, 8);
                        if (ret)
                                return ret;

                        BEGIN_RING(chan, NvSubM2MF, 0x0200, 7);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, stride);
                        OUT_RING  (chan, height);
                        OUT_RING  (chan, 1);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                } else {
                        ret = RING_SPACE(chan, 2);
                        if (ret)
                                return ret;

                        BEGIN_RING(chan, NvSubM2MF, 0x0200, 1);
                        OUT_RING  (chan, 1);
                }
                if (old_mem->mem_type == TTM_PL_VRAM && nvbo->tile_flags) {
                        ret = RING_SPACE(chan, 8);
                        if (ret)
                                return ret;

                        BEGIN_RING(chan, NvSubM2MF, 0x021c, 7);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, stride);
                        OUT_RING  (chan, height);
                        OUT_RING  (chan, 1);
                        OUT_RING  (chan, 0);
                        OUT_RING  (chan, 0);
                } else {
                        ret = RING_SPACE(chan, 2);
                        if (ret)
                                return ret;

                        BEGIN_RING(chan, NvSubM2MF, 0x021c, 1);
                        OUT_RING  (chan, 1);
                }

                ret = RING_SPACE(chan, 14);
                if (ret)
                        return ret;

                BEGIN_RING(chan, NvSubM2MF, 0x0238, 2);
                OUT_RING  (chan, upper_32_bits(src_offset));
                OUT_RING  (chan, upper_32_bits(dst_offset));
                BEGIN_RING(chan, NvSubM2MF, 0x030c, 8);
                OUT_RING  (chan, lower_32_bits(src_offset));
                OUT_RING  (chan, lower_32_bits(dst_offset));
                OUT_RING  (chan, stride);
                OUT_RING  (chan, stride);
                OUT_RING  (chan, stride);
                OUT_RING  (chan, height);
                OUT_RING  (chan, 0x00000101);
                OUT_RING  (chan, 0x00000000);
                BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                OUT_RING  (chan, 0);

                length -= amount;
                src_offset += amount;
                dst_offset += amount;
        }

        return 0;
}
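
/*
 * nv04_bo_move_m2mf() below does the same job for pre-NV50 chips using
 * ctxdma-relative offsets: each iteration copies up to 2047 lines of
 * PAGE_SIZE bytes (i.e. up to 2047 pages) and then advances the offsets.
 */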

static int
nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
                  struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
        u32 src_offset = old_mem->mm_node->start << PAGE_SHIFT;
        u32 dst_offset = new_mem->mm_node->start << PAGE_SHIFT;
        u32 page_count = new_mem->num_pages;
        int ret;

        ret = RING_SPACE(chan, 3);
        if (ret)
                return ret;

        BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2);
        OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem));
        OUT_RING  (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem));

        page_count = new_mem->num_pages;
        while (page_count) {
                int line_count = (page_count > 2047) ? 2047 : page_count;

                ret = RING_SPACE(chan, 11);
                if (ret)
                        return ret;

                BEGIN_RING(chan, NvSubM2MF,
                           NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8);
                OUT_RING  (chan, src_offset);
                OUT_RING  (chan, dst_offset);
                OUT_RING  (chan, PAGE_SIZE); /* src_pitch */
                OUT_RING  (chan, PAGE_SIZE); /* dst_pitch */
                OUT_RING  (chan, PAGE_SIZE); /* line_length */
                OUT_RING  (chan, line_count);
                OUT_RING  (chan, 0x00000101);
                OUT_RING  (chan, 0x00000000);
                BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1);
                OUT_RING  (chan, 0);

                page_count -= line_count;
                src_offset += (PAGE_SIZE * line_count);
                dst_offset += (PAGE_SIZE * line_count);
        }

        return 0;
}

static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
                     bool no_wait_reserve, bool no_wait_gpu,
                     struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct nouveau_channel *chan;
        int ret;

        chan = nvbo->channel;
        if (!chan || nvbo->no_vm)
                chan = dev_priv->channel;

        if (dev_priv->card_type < NV_50)
                ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
        else
                ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem);
        if (ret)
                return ret;

        return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve,
                                             no_wait_gpu, new_mem);
}
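
/*
 * The GPU copy above can only address TTM_PL_VRAM and TTM_PL_TT, so moves
 * to or from plain system memory bounce through a temporary TT placement:
 * "flipd" (VRAM -> system) copies into the temporary TT space with the
 * M2MF path and lets TTM finish the move, while "flips" (system -> VRAM)
 * lets TTM populate the TT space first and then copies out of it with the
 * M2MF path.
 */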

static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait_reserve, bool no_wait_gpu,
                      struct ttm_mem_reg *new_mem)
{
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;
        int ret;

        placement.fpfn = placement.lpfn = 0;
        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = placement.busy_placement = &placement_memtype;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr,
                               no_wait_reserve, no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_tt_bind(bo->ttm, &tmp_mem);
        if (ret)
                goto out;

        ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve,
                                   no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;

        ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
out:
        if (tmp_mem.mm_node) {
                spin_lock(&bo->bdev->glob->lru_lock);
                drm_mm_put_block(tmp_mem.mm_node);
                spin_unlock(&bo->bdev->glob->lru_lock);
        }

        return ret;
}

static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait_reserve, bool no_wait_gpu,
                      struct ttm_mem_reg *new_mem)
{
        u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
        struct ttm_placement placement;
        struct ttm_mem_reg tmp_mem;
        int ret;

        placement.fpfn = placement.lpfn = 0;
        placement.num_placement = placement.num_busy_placement = 1;
        placement.placement = placement.busy_placement = &placement_memtype;

        tmp_mem = *new_mem;
        tmp_mem.mm_node = NULL;
        ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr,
                               no_wait_reserve, no_wait_gpu);
        if (ret)
                return ret;

        ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem);
        if (ret)
                goto out;

        ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve,
                                   no_wait_gpu, new_mem);
        if (ret)
                goto out;

out:
        if (tmp_mem.mm_node) {
                spin_lock(&bo->bdev->glob->lru_lock);
                drm_mm_put_block(tmp_mem.mm_node);
                spin_unlock(&bo->bdev->glob->lru_lock);
        }

        return ret;
}

static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
                   struct nouveau_tile_reg **new_tile)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        uint64_t offset;
        int ret;

        if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) {
                /* Nothing to do. */
                *new_tile = NULL;
                return 0;
        }

        offset = new_mem->mm_node->start << PAGE_SHIFT;

        if (dev_priv->card_type == NV_50) {
                ret = nv50_mem_vm_bind_linear(dev,
                                              offset + dev_priv->vm_vram_base,
                                              new_mem->size, nvbo->tile_flags,
                                              offset);
                if (ret)
                        return ret;
        } else if (dev_priv->card_type >= NV_10) {
                *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
                                                nvbo->tile_mode);
        }

        return 0;
}

static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
                      struct nouveau_tile_reg *new_tile,
                      struct nouveau_tile_reg **old_tile)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct drm_device *dev = dev_priv->dev;

        if (dev_priv->card_type >= NV_10 &&
            dev_priv->card_type < NV_50) {
                if (*old_tile)
                        nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj);

                *old_tile = new_tile;
        }
}
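
/*
 * nouveau_bo_move() is the top-level TTM move hook.  It sets up tiling for
 * the destination first, short-circuits moves of unpopulated system
 * buffers, falls back to memcpy while no channel is available (early in
 * init), and otherwise picks one of the accelerated paths above, with
 * memcpy as the final fallback if the GPU copy fails.
 */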

static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
                bool no_wait_reserve, bool no_wait_gpu,
                struct ttm_mem_reg *new_mem)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);
        struct ttm_mem_reg *old_mem = &bo->mem;
        struct nouveau_tile_reg *new_tile = NULL;
        int ret = 0;

        ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
        if (ret)
                return ret;

        /* Fake bo copy. */
        if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) {
                BUG_ON(bo->mem.mm_node != NULL);
                bo->mem = *new_mem;
                new_mem->mm_node = NULL;
                goto out;
        }

        /* Software copy if the card isn't up and running yet. */
        if (!dev_priv->channel) {
                ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve,
                                         no_wait_gpu, new_mem);
                goto out;
        }

        /* Hardware assisted copy. */
        if (new_mem->mem_type == TTM_PL_SYSTEM)
                ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve,
                                            no_wait_gpu, new_mem);
        else if (old_mem->mem_type == TTM_PL_SYSTEM)
                ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve,
                                            no_wait_gpu, new_mem);
        else
                ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve,
                                           no_wait_gpu, new_mem);
        if (!ret)
                goto out;

        /* Fallback to software copy. */
        ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);

out:
        if (ret)
                nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
        else
                nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile);

        return ret;
}

static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
        return 0;
}

static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
        struct drm_device *dev = dev_priv->dev;

        mem->bus.addr = NULL;
        mem->bus.offset = 0;
        mem->bus.size = mem->num_pages << PAGE_SHIFT;
        mem->bus.base = 0;
        mem->bus.is_iomem = false;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;
        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
                /* System memory */
                return 0;
        case TTM_PL_TT:
#if __OS_HAS_AGP
                if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
                        mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
                        mem->bus.base = dev_priv->gart_info.aper_base;
                        mem->bus.is_iomem = true;
                }
#endif
                break;
        case TTM_PL_VRAM:
                mem->bus.offset = mem->mm_node->start << PAGE_SHIFT;
                mem->bus.base = pci_resource_start(dev->pdev, 1);
                mem->bus.is_iomem = true;
                break;
        default:
                return -EINVAL;
        }
        return 0;
}

static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}

static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
        struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
        struct nouveau_bo *nvbo = nouveau_bo(bo);

        /* as long as the bo isn't in vram, and isn't tiled, we've got
         * nothing to do here.
         */
        if (bo->mem.mem_type != TTM_PL_VRAM) {
                if (dev_priv->card_type < NV_50 || !nvbo->tile_flags)
                        return 0;
        }

        /* make sure bo is in mappable vram */
        if (bo->mem.mm_node->start + bo->mem.num_pages <
            dev_priv->fb_mappable_pages)
                return 0;

        nvbo->placement.fpfn = 0;
        nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
        nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0);
        return ttm_bo_validate(bo, &nvbo->placement, false, true, false);
}

struct ttm_bo_driver nouveau_bo_driver = {
        .create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
        .invalidate_caches = nouveau_bo_invalidate_caches,
        .init_mem_type = nouveau_bo_init_mem_type,
        .evict_flags = nouveau_bo_evict_flags,
        .move = nouveau_bo_move,
        .verify_access = nouveau_bo_verify_access,
        .sync_obj_signaled = nouveau_fence_signalled,
        .sync_obj_wait = nouveau_fence_wait,
        .sync_obj_flush = nouveau_fence_flush,
        .sync_obj_unref = nouveau_fence_unref,
        .sync_obj_ref = nouveau_fence_ref,
        .fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
        .io_mem_reserve = &nouveau_ttm_io_mem_reserve,
        .io_mem_free = &nouveau_ttm_io_mem_free,
};