ttm_bo_util.c

/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
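
/*
 * Release the drm_mm node backing the buffer object's current placement.
 * The global LRU lock protects the drm_mm manager.
 */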
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (old_mem->mm_node) {
		spin_lock(&bo->glob->lru_lock);
		drm_mm_put_block(old_mem->mm_node);
		spin_unlock(&bo->glob->lru_lock);
	}
	old_mem->mm_node = NULL;
}
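
/*
 * Move a buffer whose data is held by its TTM page array: unbind from the
 * old (non-system) placement, switch the caching state to match the new
 * placement, and bind to the new one. No copy is needed since the backing
 * pages themselves are unchanged.
 */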
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict, bool no_wait_reserve,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
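
/*
 * Ask the driver to set up I/O access to @mem (e.g. map a PCI aperture).
 * The bus.io_reserved flag guards against reserving the same region twice.
 */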
int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	int ret;

	if (!mem->bus.io_reserved) {
		mem->bus.io_reserved = true;
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (unlikely(ret != 0)) {
			/*
			 * Clear the flag again on failure, so a later
			 * ttm_mem_io_free() doesn't release a region
			 * that was never reserved.
			 */
			mem->bus.io_reserved = false;
			return ret;
		}
	}
	return 0;
}
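
/*
 * Undo ttm_mem_io_reserve(): release the driver's I/O mapping if one is
 * currently held for @mem.
 */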
void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	if (bdev->driver->io_mem_reserve && mem->bus.io_reserved) {
		mem->bus.io_reserved = false;
		bdev->driver->io_mem_free(bdev, mem);
	}
}
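
/*
 * Return a kernel virtual address for @mem: either the driver-provided
 * mapping (bus.addr) or a fresh ioremap of the region, write-combined
 * when the placement allows it.
 */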
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	int ret;
	void *addr;

	*virtual = NULL;
	ret = ttm_mem_io_reserve(bdev, mem);
	if (ret)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset,
					  mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset,
					       mem->bus.size);
		if (!addr) {
			ttm_mem_io_free(bdev, mem);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}
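
/*
 * Tear down a mapping set up by ttm_mem_reg_ioremap(). Only iounmap()
 * if we created the mapping ourselves (i.e. bus.addr was NULL).
 */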
void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	ttm_mem_io_free(bdev, mem);
}
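
/*
 * Copy one page between two I/O mappings, 32 bits at a time, using the
 * io accessors so the copy is safe for memory behind a bus.
 */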
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
	int i;

	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}
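
/*
 * Copy one page from an I/O mapping into a TTM page, mapping the
 * destination with the protection that matches its caching state.
 * On x86 an atomic kmap is enough; elsewhere fall back to vmap()/kmap().
 */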
static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm_tt_get_page(ttm, page);
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, KM_USER0, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst, KM_USER0);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}
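
/*
 * Mirror image of ttm_copy_io_ttm_page(): copy one page from a TTM page
 * out to an I/O mapping.
 */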
static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm_tt_get_page(ttm, page);
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, KM_USER0, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src, KM_USER0);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}
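
/*
 * Fallback move path: map both placements and copy page by page with the
 * CPU, walking backwards when the regions share a memory type and
 * overlap. If the destination is fixed (non-system) memory, the TTM is
 * unbound and destroyed, since its pages no longer back the buffer.
 */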
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_reserve, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->mm_node->start <
	     old_mem->mm_node->start + old_mem->mm_node->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	ttm_bo_free_old_node(bo);

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
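
/* Destroy callback for the temporary "ghost" object created below. */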
static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 *
 * Returns:
 * !0: Failure.
 */
static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/**
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	spin_lock_init(&fbo->lock);
	init_waitqueue_head(&fbo->event_queue);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	fbo->vm_node = NULL;

	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	if (fbo->mem.mm_node)
		fbo->mem.mm_node->private = (void *)fbo;
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;

	*new_obj = fbo;
	return 0;
}
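
/*
 * Derive the page protection for mapping memory with the given caching
 * flags, starting from @tmp (typically PAGE_KERNEL).
 */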
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);
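
/*
 * kmap helper for buffers that live in I/O memory: use the premapped
 * bus address when the driver provides one, else ioremap the range.
 */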
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base +
						  bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base +
						       bo->mem.bus.offset +
						       offset, size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}
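
/*
 * kmap helper for buffers backed by TTM pages: a single cached page can
 * use kmap() directly; anything else goes through vmap() so the mapping
 * is contiguous and carries the right page protection.
 */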
static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	struct page *d;
	int i;

	BUG_ON(!ttm);
	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */
		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm_tt_get_page(ttm, start_page);
		map->virtual = kmap(map->page);
	} else {
		/*
		 * Populate the part we're mapping.
		 */
		for (i = start_page; i < start_page + num_pages; ++i) {
			d = ttm_tt_get_page(ttm, i);
			if (!d)
				return -ENOMEM;
		}

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}
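
/**
 * ttm_bo_kmap
 *
 * @bo: The buffer object to map.
 * @start_page: First page of the buffer to map.
 * @num_pages: Number of pages to map.
 * @map: Mapping state, filled in on success and consumed by
 * ttm_bo_kunmap().
 *
 * Map part of a buffer object into kernel address space, dispatching to
 * ttm_bo_ioremap() or ttm_bo_kmap_ttm() depending on where the buffer
 * currently resides.
 */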
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);
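
/*
 * Undo a ttm_bo_kmap() mapping, picking the unmap primitive that matches
 * how the mapping was established.
 */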
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
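
/*
 * Finish an accelerated (GPU) move: attach the driver's sync object to
 * the buffer, then either wait for it (evictions) or hand the old
 * placement to a ghost object so the move can complete asynchronously.
 */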
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      void *sync_obj_arg,
			      bool evict, bool no_wait_reserve,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	spin_lock(&bo->lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	bo->sync_obj_arg = sync_obj_arg;
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bo->lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		ttm_bo_free_old_node(bo);
		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
	} else {
		/**
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		spin_unlock(&bo->lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/**
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);