ttm_bo_util.c

/**************************************************************************
 *
 * Copyright (c) 2007-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/module.h>

void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	ttm_bo_mem_put(bo, &bo->mem);
}
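
/*
 * ttm_bo_move_ttm - move a buffer through its TTM page table only.
 *
 * If the old placement was a non-system aperture, the ttm is unbound
 * and the old node freed; the pages then have their caching state
 * switched to match @new_mem, and are rebound if the new placement is
 * again non-system memory.
 */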
int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
		    bool evict, bool no_wait_reserve,
		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	if (old_mem->mem_type != TTM_PL_SYSTEM) {
		ttm_tt_unbind(ttm);
		ttm_bo_free_old_node(bo);
		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
				TTM_PL_MASK_MEM);
		old_mem->mem_type = TTM_PL_SYSTEM;
	}

	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
	if (unlikely(ret != 0))
		return ret;

	if (new_mem->mem_type != TTM_PL_SYSTEM) {
		ret = ttm_tt_bind(ttm, new_mem);
		if (unlikely(ret != 0))
			return ret;
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_ttm);
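
/*
 * ttm_mem_io_reserve / ttm_mem_io_free call into the driver to reserve
 * and release the I/O aperture region backing @mem. The io_reserved
 * flag keeps the pair balanced against double reserve or double free.
 */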
int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	int ret;

	if (!mem->bus.io_reserved) {
		mem->bus.io_reserved = true;
		ret = bdev->driver->io_mem_reserve(bdev, mem);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}

void ttm_mem_io_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	if (bdev->driver->io_mem_reserve) {
		if (mem->bus.io_reserved) {
			mem->bus.io_reserved = false;
			bdev->driver->io_mem_free(bdev, mem);
		}
	}
}
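
/*
 * ttm_mem_reg_ioremap - map the whole aperture region backing @mem.
 *
 * For placements that are not iomem, *virtual stays NULL and 0 is
 * returned; ttm_bo_move_memcpy() takes that as the cue to copy through
 * the ttm pages instead. A pre-mapped aperture (mem->bus.addr) is used
 * as-is; otherwise the region is ioremapped write-combined or uncached
 * according to the placement flags, and torn down again by
 * ttm_mem_reg_iounmap().
 */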
int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			void **virtual)
{
	int ret;
	void *addr;

	*virtual = NULL;
	ret = ttm_mem_io_reserve(bdev, mem);
	if (ret || !mem->bus.is_iomem)
		return ret;

	if (mem->bus.addr) {
		addr = mem->bus.addr;
	} else {
		if (mem->placement & TTM_PL_FLAG_WC)
			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
		else
			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
		if (!addr) {
			ttm_mem_io_free(bdev, mem);
			return -ENOMEM;
		}
	}
	*virtual = addr;
	return 0;
}

void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
			 void *virtual)
{
	struct ttm_mem_type_manager *man;

	man = &bdev->man[mem->mem_type];

	if (virtual && mem->bus.addr == NULL)
		iounmap(virtual);
	ttm_mem_io_free(bdev, mem);
}
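
/*
 * Per-page copy helpers for ttm_bo_move_memcpy(). io->io copies go
 * through ioread32()/iowrite32(); when one side is a ttm page it is
 * mapped with kmap_atomic_prot() on x86, and with vmap() (or plain
 * kmap() when PAGE_KERNEL protection suffices) elsewhere.
 */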
static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
{
	uint32_t *dstP =
	    (uint32_t *) ((unsigned long)dst + (page << PAGE_SHIFT));
	uint32_t *srcP =
	    (uint32_t *) ((unsigned long)src + (page << PAGE_SHIFT));
	int i;

	for (i = 0; i < PAGE_SIZE / sizeof(uint32_t); ++i)
		iowrite32(ioread32(srcP++), dstP++);
	return 0;
}

static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
				unsigned long page,
				pgprot_t prot)
{
	struct page *d = ttm_tt_get_page(ttm, page);
	void *dst;

	if (!d)
		return -ENOMEM;

	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));

#ifdef CONFIG_X86
	dst = kmap_atomic_prot(d, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		dst = vmap(&d, 1, 0, prot);
	else
		dst = kmap(d);
#endif
	if (!dst)
		return -ENOMEM;

	memcpy_fromio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(dst);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(dst);
	else
		kunmap(d);
#endif

	return 0;
}

static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
				unsigned long page,
				pgprot_t prot)
{
	struct page *s = ttm_tt_get_page(ttm, page);
	void *src;

	if (!s)
		return -ENOMEM;

	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
#ifdef CONFIG_X86
	src = kmap_atomic_prot(s, prot);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		src = vmap(&s, 1, 0, prot);
	else
		src = kmap(s);
#endif
	if (!src)
		return -ENOMEM;

	memcpy_toio(dst, src, PAGE_SIZE);

#ifdef CONFIG_X86
	kunmap_atomic(src);
#else
	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
		vunmap(src);
	else
		kunmap(s);
#endif

	return 0;
}
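
/*
 * ttm_bo_move_memcpy - fallback move performed entirely by the CPU.
 *
 * Both placements are mapped as needed and copied page by page. When
 * source and destination are in the same memory type and overlap, the
 * copy runs backwards (dir == -1) so pages are read before they are
 * overwritten, giving memmove() semantics.
 */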
int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
		       bool evict, bool no_wait_reserve, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_tt *ttm = bo->ttm;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg old_copy = *old_mem;
	void *old_iomap;
	void *new_iomap;
	int ret;
	unsigned long i;
	unsigned long page;
	unsigned long add = 0;
	int dir;

	ret = ttm_mem_reg_ioremap(bdev, old_mem, &old_iomap);
	if (ret)
		return ret;
	ret = ttm_mem_reg_ioremap(bdev, new_mem, &new_iomap);
	if (ret)
		goto out;

	if (old_iomap == NULL && new_iomap == NULL)
		goto out2;
	if (old_iomap == NULL && ttm == NULL)
		goto out2;

	add = 0;
	dir = 1;

	if ((old_mem->mem_type == new_mem->mem_type) &&
	    (new_mem->start < old_mem->start + old_mem->size)) {
		dir = -1;
		add = new_mem->num_pages - 1;
	}

	for (i = 0; i < new_mem->num_pages; ++i) {
		page = i * dir + add;
		if (old_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(old_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_ttm_io_page(ttm, new_iomap, page,
						   prot);
		} else if (new_iomap == NULL) {
			pgprot_t prot = ttm_io_prot(new_mem->placement,
						    PAGE_KERNEL);
			ret = ttm_copy_io_ttm_page(ttm, old_iomap, page,
						   prot);
		} else
			ret = ttm_copy_io_page(new_iomap, old_iomap, page);
		if (ret)
			goto out1;
	}
	mb();
out2:
	ttm_bo_free_old_node(bo);

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
		ttm_tt_unbind(ttm);
		ttm_tt_destroy(ttm);
		bo->ttm = NULL;
	}

out1:
	ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
out:
	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);

static void ttm_transfered_destroy(struct ttm_buffer_object *bo)
{
	kfree(bo);
}

/**
 * ttm_buffer_object_transfer
 *
 * @bo: A pointer to a struct ttm_buffer_object.
 * @new_obj: A pointer to a pointer to a newly created ttm_buffer_object,
 * holding the data of @bo with the old placement.
 *
 * This is a utility function that may be called after an accelerated move
 * has been scheduled. A new buffer object is created as a placeholder for
 * the old data while it's being copied. When that buffer object is idle,
 * it can be destroyed, releasing the space of the old placement.
 * Returns:
 * !0: Failure.
 */

static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
				      struct ttm_buffer_object **new_obj)
{
	struct ttm_buffer_object *fbo;
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;

	fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
	if (!fbo)
		return -ENOMEM;

	*fbo = *bo;

	/*
	 * Fix up members that we shouldn't copy directly:
	 * TODO: Explicit member copy would probably be better here.
	 */

	spin_lock_init(&fbo->lock);
	init_waitqueue_head(&fbo->event_queue);
	INIT_LIST_HEAD(&fbo->ddestroy);
	INIT_LIST_HEAD(&fbo->lru);
	INIT_LIST_HEAD(&fbo->swap);
	fbo->vm_node = NULL;
	atomic_set(&fbo->cpu_writers, 0);
	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
	kref_init(&fbo->list_kref);
	kref_init(&fbo->kref);
	fbo->destroy = &ttm_transfered_destroy;

	*new_obj = fbo;
	return 0;
}
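
/*
 * ttm_io_prot - fold TTM caching flags into a page protection value,
 * using each architecture's notion of write-combined or uncached
 * mappings on top of @tmp.
 */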
pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
{
#if defined(__i386__) || defined(__x86_64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else if (boot_cpu_data.x86 > 3)
		tmp = pgprot_noncached(tmp);
#elif defined(__powerpc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED)) {
		pgprot_val(tmp) |= _PAGE_NO_CACHE;
		if (caching_flags & TTM_PL_FLAG_UNCACHED)
			pgprot_val(tmp) |= _PAGE_GUARDED;
	}
#endif
#if defined(__ia64__)
	if (caching_flags & TTM_PL_FLAG_WC)
		tmp = pgprot_writecombine(tmp);
	else
		tmp = pgprot_noncached(tmp);
#endif
#if defined(__sparc__)
	if (!(caching_flags & TTM_PL_FLAG_CACHED))
		tmp = pgprot_noncached(tmp);
#endif
	return tmp;
}
EXPORT_SYMBOL(ttm_io_prot);
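
/*
 * Helpers for ttm_bo_kmap(): ttm_bo_ioremap() maps iomem placements,
 * while ttm_bo_kmap_ttm() maps system pages, using a single kmap() for
 * the one-page cached case and vmap() whenever a contiguous virtual
 * range or a non-default page protection is needed.
 */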
static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
			  unsigned long offset,
			  unsigned long size,
			  struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;

	if (bo->mem.bus.addr) {
		map->bo_kmap_type = ttm_bo_map_premapped;
		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
	} else {
		map->bo_kmap_type = ttm_bo_map_iomap;
		if (mem->placement & TTM_PL_FLAG_WC)
			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
						  size);
		else
			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
						       size);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}

static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
			   unsigned long start_page,
			   unsigned long num_pages,
			   struct ttm_bo_kmap_obj *map)
{
	struct ttm_mem_reg *mem = &bo->mem;
	pgprot_t prot;
	struct ttm_tt *ttm = bo->ttm;
	struct page *d;
	int i;

	BUG_ON(!ttm);
	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
		/*
		 * We're mapping a single page, and the desired
		 * page protection is consistent with the bo.
		 */

		map->bo_kmap_type = ttm_bo_map_kmap;
		map->page = ttm_tt_get_page(ttm, start_page);
		map->virtual = kmap(map->page);
	} else {
		/*
		 * Populate the part we're mapping.
		 */
		for (i = start_page; i < start_page + num_pages; ++i) {
			d = ttm_tt_get_page(ttm, i);
			if (!d)
				return -ENOMEM;
		}

		/*
		 * We need to use vmap to get the desired page protection
		 * or to make the buffer object look contiguous.
		 */
		prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
			PAGE_KERNEL :
			ttm_io_prot(mem->placement, PAGE_KERNEL);
		map->bo_kmap_type = ttm_bo_map_vmap;
		map->virtual = vmap(ttm->pages + start_page, num_pages,
				    0, prot);
	}
	return (!map->virtual) ? -ENOMEM : 0;
}
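
/*
 * ttm_bo_kmap - map part of a buffer object into kernel address space.
 *
 * Dispatches to ttm_bo_kmap_ttm() or ttm_bo_ioremap() depending on
 * whether the current placement is iomem; the bo_kmap_type recorded in
 * @map tells ttm_bo_kunmap() how to undo the mapping.
 */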
int ttm_bo_kmap(struct ttm_buffer_object *bo,
		unsigned long start_page, unsigned long num_pages,
		struct ttm_bo_kmap_obj *map)
{
	unsigned long offset, size;
	int ret;

	BUG_ON(!list_empty(&bo->swap));
	map->virtual = NULL;
	map->bo = bo;
	if (num_pages > bo->num_pages)
		return -EINVAL;
	if (start_page > bo->num_pages)
		return -EINVAL;
#if 0
	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
		return -EPERM;
#endif
	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
	if (ret)
		return ret;
	if (!bo->mem.bus.is_iomem) {
		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
	} else {
		offset = start_page << PAGE_SHIFT;
		size = num_pages << PAGE_SHIFT;
		return ttm_bo_ioremap(bo, offset, size, map);
	}
}
EXPORT_SYMBOL(ttm_bo_kmap);
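
/*
 * A minimal usage sketch (not part of this file): a driver wanting
 * temporary CPU access to the first page of a reserved, idle bo could
 * pair ttm_bo_kmap() with ttm_kmap_obj_virtual() from ttm_bo_api.h:
 *
 *	struct ttm_bo_kmap_obj map;
 *	bool is_iomem;
 *	int ret = ttm_bo_kmap(bo, 0, 1, &map);
 *
 *	if (likely(ret == 0)) {
 *		void *virtual = ttm_kmap_obj_virtual(&map, &is_iomem);
 *		// ... access the memory, using io accessors if is_iomem
 *		ttm_bo_kunmap(&map);
 *	}
 */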
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
{
	if (!map->virtual)
		return;
	switch (map->bo_kmap_type) {
	case ttm_bo_map_iomap:
		iounmap(map->virtual);
		ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
		break;
	case ttm_bo_map_vmap:
		vunmap(map->virtual);
		break;
	case ttm_bo_map_kmap:
		kunmap(map->page);
		break;
	case ttm_bo_map_premapped:
		break;
	default:
		BUG();
	}
	map->virtual = NULL;
	map->page = NULL;
}
EXPORT_SYMBOL(ttm_bo_kunmap);
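
/*
 * ttm_bo_move_accel_cleanup - finish up after an accelerated (GPU) move.
 *
 * Attaches @sync_obj to the bo as its new fence. On eviction the move
 * is simply waited for; otherwise the old placement is hung on a ghost
 * buffer object created by ttm_buffer_object_transfer(), so it can be
 * released asynchronously once the GPU copy signals.
 */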
int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
			      void *sync_obj,
			      void *sync_obj_arg,
			      bool evict, bool no_wait_reserve,
			      bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_driver *driver = bdev->driver;
	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;
	struct ttm_buffer_object *ghost_obj;
	void *tmp_obj = NULL;

	spin_lock(&bo->lock);
	if (bo->sync_obj) {
		tmp_obj = bo->sync_obj;
		bo->sync_obj = NULL;
	}
	bo->sync_obj = driver->sync_obj_ref(sync_obj);
	bo->sync_obj_arg = sync_obj_arg;
	if (evict) {
		ret = ttm_bo_wait(bo, false, false, false);
		spin_unlock(&bo->lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);
		if (ret)
			return ret;

		ttm_bo_free_old_node(bo);

		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
		    (bo->ttm != NULL)) {
			ttm_tt_unbind(bo->ttm);
			ttm_tt_destroy(bo->ttm);
			bo->ttm = NULL;
		}
	} else {
		/*
		 * This should help pipeline ordinary buffer moves.
		 *
		 * Hang old buffer memory on a new buffer object,
		 * and leave it to be released when the GPU
		 * operation has completed.
		 */

		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
		spin_unlock(&bo->lock);
		if (tmp_obj)
			driver->sync_obj_unref(&tmp_obj);

		ret = ttm_buffer_object_transfer(bo, &ghost_obj);
		if (ret)
			return ret;

		/*
		 * If we're not moving to fixed memory, the TTM object
		 * needs to stay alive. Otherwise hang it on the ghost
		 * bo to be unbound and destroyed.
		 */

		if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED))
			ghost_obj->ttm = NULL;
		else
			bo->ttm = NULL;

		ttm_bo_unreserve(ghost_obj);
		ttm_bo_unref(&ghost_obj);
	}

	*old_mem = *new_mem;
	new_mem->mm_node = NULL;

	return 0;
}
EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);