ttm_bo.c
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
#define pr_fmt(fmt) "[TTM] " fmt

#include <drm/ttm/ttm_module.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};
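/*
 * Translate the lowest set memory-type bit of a placement flag mask
 * into its memory type index; e.g. TTM_PL_FLAG_VRAM (1 << TTM_PL_VRAM)
 * yields TTM_PL_VRAM.
 */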
static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
{
	int i;

	for (i = 0; i <= TTM_PL_PRIV5; i++)
		if (flags & (1 << i)) {
			*mem_type = i;
			return 0;
		}
	return -EINVAL;
}

static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	pr_err("    has_type: %d\n", man->has_type);
	pr_err("    use_type: %d\n", man->use_type);
	pr_err("    flags: 0x%08X\n", man->flags);
	pr_err("    gpu_offset: 0x%08lX\n", man->gpu_offset);
	pr_err("    size: %llu\n", man->size);
	pr_err("    available_caching: 0x%08X\n", man->available_caching);
	pr_err("    default_caching: 0x%08X\n", man->default_caching);
	if (mem_type != TTM_PL_SYSTEM)
		(*man->func->debug)(man, TTM_PFX);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
					struct ttm_placement *placement)
{
	int i, ret, mem_type;

	pr_err("No space for %p (%lu pages, %luK, %luM)\n",
	       bo, bo->mem.num_pages, bo->mem.size >> 10,
	       bo->mem.size >> 20);
	for (i = 0; i < placement->num_placement; i++) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
						&mem_type);
		if (ret)
			return;
		pr_err("  placement[%d]=0x%08X (%d)\n",
		       i, placement->placement[i], mem_type);
		ttm_mem_type_debug(bo->bdev, mem_type);
	}
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
				  struct attribute *attr,
				  char *buffer)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	return snprintf(buffer, PAGE_SIZE, "%lu\n",
			(unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
	&ttm_bo_count,
	NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
	.show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type = {
	.release = &ttm_bo_global_kobj_release,
	.sysfs_ops = &ttm_bo_global_ops,
	.default_attrs = ttm_bo_global_attrs
};

static inline uint32_t ttm_bo_type_flags(unsigned type)
{
	return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
	struct ttm_buffer_object *bo =
	    container_of(list_kref, struct ttm_buffer_object, list_kref);
	struct ttm_bo_device *bdev = bo->bdev;
	size_t acc_size = bo->acc_size;

	BUG_ON(atomic_read(&bo->list_kref.refcount));
	BUG_ON(atomic_read(&bo->kref.refcount));
	BUG_ON(atomic_read(&bo->cpu_writers));
	BUG_ON(bo->sync_obj != NULL);
	BUG_ON(bo->mem.mm_node != NULL);
	BUG_ON(!list_empty(&bo->lru));
	BUG_ON(!list_empty(&bo->ddestroy));

	if (bo->ttm)
		ttm_tt_destroy(bo->ttm);
	atomic_dec(&bo->glob->bo_count);
	if (bo->destroy)
		bo->destroy(bo);
	else {
		kfree(bo);
	}
	ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}

int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
{
	if (interruptible) {
		return wait_event_interruptible(bo->event_queue,
						!ttm_bo_is_reserved(bo));
	} else {
		wait_event(bo->event_queue, !ttm_bo_is_reserved(bo));
		return 0;
	}
}
EXPORT_SYMBOL(ttm_bo_wait_unreserved);
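/*
 * A buffer can sit on up to two LRU lists: the per-memory-type
 * eviction list (man->lru) and, when it has a ttm backing it, the
 * global swap list (glob->swap_lru). Each list membership pins a
 * list_kref reference, which ttm_bo_del_from_lru() reports back as a
 * put_count for the caller to drop via ttm_bo_list_ref_sub().
 */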
void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;

	BUG_ON(!ttm_bo_is_reserved(bo));

	if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

		BUG_ON(!list_empty(&bo->lru));

		man = &bdev->man[bo->mem.mem_type];
		list_add_tail(&bo->lru, &man->lru);
		kref_get(&bo->list_kref);

		if (bo->ttm != NULL) {
			list_add_tail(&bo->swap, &bo->glob->swap_lru);
			kref_get(&bo->list_kref);
		}
	}
}

int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	int put_count = 0;

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		++put_count;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		++put_count;
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */

	return put_count;
}

int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
			  bool interruptible,
			  bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_global *glob = bo->glob;
	int ret;

	while (unlikely(atomic_read(&bo->reserved) != 0)) {
		/**
		 * Deadlock avoidance for multi-bo reserving.
		 */
		if (use_sequence && bo->seq_valid) {
			/**
			 * We've already reserved this one.
			 */
			if (unlikely(sequence == bo->val_seq))
				return -EDEADLK;
			/**
			 * Already reserved by a thread that will not back
			 * off for us. We need to back off.
			 */
			if (unlikely(sequence - bo->val_seq < (1 << 31)))
				return -EAGAIN;
		}

		if (no_wait)
			return -EBUSY;

		spin_unlock(&glob->lru_lock);
		ret = ttm_bo_wait_unreserved(bo, interruptible);
		spin_lock(&glob->lru_lock);

		if (unlikely(ret))
			return ret;
	}

	atomic_set(&bo->reserved, 1);
	if (use_sequence) {
		/**
		 * Wake up waiters that may need to recheck for deadlock,
		 * if we decreased the sequence number.
		 */
		if (unlikely((bo->val_seq - sequence < (1 << 31))
			     || !bo->seq_valid))
			wake_up_all(&bo->event_queue);

		bo->val_seq = sequence;
		bo->seq_valid = true;
	} else {
		bo->seq_valid = false;
	}

	return 0;
}
EXPORT_SYMBOL(ttm_bo_reserve);

static void ttm_bo_ref_bug(struct kref *list_kref)
{
	BUG();
}

void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
			 bool never_free)
{
	kref_sub(&bo->list_kref, count,
		 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
}

int ttm_bo_reserve(struct ttm_buffer_object *bo,
		   bool interruptible,
		   bool no_wait, bool use_sequence, uint32_t sequence)
{
	struct ttm_bo_global *glob = bo->glob;
	int put_count = 0;
	int ret;

	spin_lock(&glob->lru_lock);
	ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
				    sequence);
	if (likely(ret == 0))
		put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	ttm_bo_list_ref_sub(bo, put_count, true);

	return ret;
}

void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
{
	ttm_bo_add_to_lru(bo);
	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
}

void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
	struct ttm_bo_global *glob = bo->glob;

	spin_lock(&glob->lru_lock);
	ttm_bo_unreserve_locked(bo);
	spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unreserve);
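/*
 * Typical usage (illustrative sketch only, not code from this file):
 * a driver brackets buffer manipulation with reserve/unreserve, where
 * "my_bo" is a hypothetical buffer pointer:
 *
 *	ret = ttm_bo_reserve(my_bo, true, false, false, 0);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... validate or otherwise touch the buffer ...
 *	ttm_bo_unreserve(my_bo);
 */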
/*
 * Call bo->mutex locked.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	uint32_t page_flags = 0;

	TTM_ASSERT_LOCKED(&bo->mutex);
	bo->ttm = NULL;

	if (bdev->need_dma32)
		page_flags |= TTM_PAGE_FLAG_DMA32;

	switch (bo->type) {
	case ttm_bo_type_device:
		if (zero_alloc)
			page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
		/* fall through */
	case ttm_bo_type_kernel:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags, glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL))
			ret = -ENOMEM;
		break;
	case ttm_bo_type_sg:
		bo->ttm = bdev->driver->ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
						      page_flags | TTM_PAGE_FLAG_SG,
						      glob->dummy_read_page);
		if (unlikely(bo->ttm == NULL)) {
			ret = -ENOMEM;
			break;
		}
		bo->ttm->sg = bo->sg;
		break;
	default:
		pr_err("Illegal buffer object type\n");
		ret = -EINVAL;
		break;
	}

	return ret;
}
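/*
 * Core of every buffer move: tear down CPU mappings when the aperture
 * or caching attributes change, make sure a bound ttm exists for
 * non-fixed destinations, then move the data via ttm_bo_move_ttm(),
 * the driver's move hook, or the ttm_bo_move_memcpy() fallback, in
 * that order of preference.
 */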
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
				  bool no_wait_reserve, bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
			goto out_err;
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);
	}

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (bo->ttm == NULL) {
			bool zero = !(old_man->flags & TTM_MEMTYPE_FLAG_FIXED);
			ret = ttm_bo_add_ttm(bo, zero);
			if (ret)
				goto out_err;
		}

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_reserve, no_wait_gpu, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);

	if (ret) {
		if (bdev->driver->move_notify) {
			struct ttm_mem_reg tmp_mem = *mem;
			*mem = bo->mem;
			bo->mem = tmp_mem;
			bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
		}

		goto out_err;
	}

moved:
	if (bo->evicted) {
		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
		if (ret)
			pr_err("Can not flush read caches\n");
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
	} else
		bo->offset = 0;

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}

/**
 * Called with bo::reserved held.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */
static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
	if (bo->bdev->driver->move_notify)
		bo->bdev->driver->move_notify(bo, NULL);

	if (bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}
	ttm_bo_mem_put(bo, &bo->mem);

	atomic_set(&bo->reserved, 0);

	/*
	 * Make processes trying to reserve really pick it up.
	 */
	smp_mb__after_atomic_dec();
	wake_up_all(&bo->event_queue);
}
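/*
 * Destruction is two-phase: a buffer that is idle and can be reserved
 * without blocking is torn down immediately; anything still busy is
 * queued on bdev->ddestroy and retried from the delayed workqueue.
 */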
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_bo_driver *driver;
	void *sync_obj = NULL;
	int put_count;
	int ret;

	spin_lock(&bdev->fence_lock);
	(void) ttm_bo_wait(bo, false, false, true);
	if (!bo->sync_obj) {

		spin_lock(&glob->lru_lock);

		/**
		 * Lock inversion between bo::reserve and bdev::fence_lock here,
		 * but that's OK, since we're only trylocking.
		 */

		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);

		if (unlikely(ret == -EBUSY))
			goto queue;

		spin_unlock(&bdev->fence_lock);
		put_count = ttm_bo_del_from_lru(bo);

		spin_unlock(&glob->lru_lock);
		ttm_bo_cleanup_memtype_use(bo);

		ttm_bo_list_ref_sub(bo, put_count, true);

		return;
	} else {
		spin_lock(&glob->lru_lock);
	}
queue:
	driver = bdev->driver;
	if (bo->sync_obj)
		sync_obj = driver->sync_obj_ref(bo->sync_obj);

	kref_get(&bo->list_kref);
	list_add_tail(&bo->ddestroy, &bdev->ddestroy);
	spin_unlock(&glob->lru_lock);
	spin_unlock(&bdev->fence_lock);

	if (sync_obj) {
		driver->sync_obj_flush(sync_obj);
		driver->sync_obj_unref(&sync_obj);
	}
	schedule_delayed_work(&bdev->wq,
			      ((HZ / 100) < 1) ? 1 : HZ / 100);
}

/**
 * function ttm_bo_cleanup_refs
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, do nothing.
 *
 * @interruptible		Any sleeps should occur interruptibly.
 * @no_wait_reserve		Never wait for reserve. Return -EBUSY instead.
 * @no_wait_gpu			Never wait for gpu. Return -EBUSY instead.
 */
static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
			       bool interruptible,
			       bool no_wait_reserve,
			       bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	int put_count;
	int ret = 0;

retry:
	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bdev->fence_lock);

	if (unlikely(ret != 0))
		return ret;

retry_reserve:
	spin_lock(&glob->lru_lock);

	if (unlikely(list_empty(&bo->ddestroy))) {
		spin_unlock(&glob->lru_lock);
		return 0;
	}

	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);

	if (unlikely(ret == -EBUSY)) {
		spin_unlock(&glob->lru_lock);
		if (likely(!no_wait_reserve))
			ret = ttm_bo_wait_unreserved(bo, interruptible);
		if (unlikely(ret != 0))
			return ret;

		goto retry_reserve;
	}

	BUG_ON(ret != 0);

	/**
	 * We can re-check for sync object without taking
	 * the bo::lock since setting the sync object requires
	 * also bo::reserved. A busy object at this point may
	 * be caused by another thread recently starting an accelerated
	 * eviction.
	 */

	if (unlikely(bo->sync_obj)) {
		atomic_set(&bo->reserved, 0);
		wake_up_all(&bo->event_queue);
		spin_unlock(&glob->lru_lock);
		goto retry;
	}

	put_count = ttm_bo_del_from_lru(bo);
	list_del_init(&bo->ddestroy);
	++put_count;

	spin_unlock(&glob->lru_lock);
	ttm_bo_cleanup_memtype_use(bo);

	ttm_bo_list_ref_sub(bo, put_count, true);

	return 0;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_buffer_object *entry = NULL;
	int ret = 0;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		goto out_unlock;

	entry = list_first_entry(&bdev->ddestroy,
		struct ttm_buffer_object, ddestroy);
	kref_get(&entry->list_kref);

	for (;;) {
		struct ttm_buffer_object *nentry = NULL;

		if (entry->ddestroy.next != &bdev->ddestroy) {
			nentry = list_first_entry(&entry->ddestroy,
				struct ttm_buffer_object, ddestroy);
			kref_get(&nentry->list_kref);
		}

		spin_unlock(&glob->lru_lock);
		ret = ttm_bo_cleanup_refs(entry, false, !remove_all,
					  !remove_all);
		kref_put(&entry->list_kref, ttm_bo_release_list);
		entry = nentry;

		if (ret || !entry)
			goto out;

		spin_lock(&glob->lru_lock);
		if (list_empty(&entry->ddestroy))
			break;
	}

out_unlock:
	spin_unlock(&glob->lru_lock);
out:
	if (entry)
		kref_put(&entry->list_kref, ttm_bo_release_list);
	return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
	struct ttm_bo_device *bdev =
	    container_of(work, struct ttm_bo_device, wq.work);

	if (ttm_bo_delayed_delete(bdev, false)) {
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
	}
}

static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	write_lock(&bdev->vm_lock);
	if (likely(bo->vm_node != NULL)) {
		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
		drm_mm_put_block(bo->vm_node);
		bo->vm_node = NULL;
	}
	write_unlock(&bdev->vm_lock);
	ttm_mem_io_lock(man, false);
	ttm_mem_io_free_vm(bo);
	ttm_mem_io_unlock(man);
	ttm_bo_cleanup_refs_or_queue(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = *p_bo;

	*p_bo = NULL;
	kref_put(&bo->kref, ttm_bo_release);
}
EXPORT_SYMBOL(ttm_bo_unref);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
	return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
	if (resched)
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);
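/*
 * Evict a single buffer: wait for the GPU, ask the driver for
 * acceptable fallback placements via evict_flags(), find space there
 * and move the buffer out.
 */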
static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
			bool no_wait_reserve, bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bdev->fence_lock);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to expire sync object before buffer eviction\n");
		}
		goto out;
	}

	BUG_ON(!ttm_bo_is_reserved(bo));

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved_vm = false;
	evict_mem.bus.io_reserved_count = 0;

	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
				no_wait_reserve, no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			pr_err("Failed to find memory space for buffer 0x%p eviction\n",
			       bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait_reserve, no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS)
			pr_err("Buffer eviction failed\n");
		ttm_bo_mem_put(bo, &evict_mem);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}

static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
				uint32_t mem_type,
				bool interruptible, bool no_wait_reserve,
				bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bdev->glob;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_buffer_object *bo;
	int ret, put_count = 0;

retry:
	spin_lock(&glob->lru_lock);
	if (list_empty(&man->lru)) {
		spin_unlock(&glob->lru_lock);
		return -EBUSY;
	}

	bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
	kref_get(&bo->list_kref);

	if (!list_empty(&bo->ddestroy)) {
		spin_unlock(&glob->lru_lock);
		ret = ttm_bo_cleanup_refs(bo, interruptible,
					  no_wait_reserve, no_wait_gpu);
		kref_put(&bo->list_kref, ttm_bo_release_list);

		return ret;
	}

	ret = ttm_bo_reserve_locked(bo, false, true, false, 0);

	if (unlikely(ret == -EBUSY)) {
		spin_unlock(&glob->lru_lock);
		if (likely(!no_wait_reserve))
			ret = ttm_bo_wait_unreserved(bo, interruptible);

		kref_put(&bo->list_kref, ttm_bo_release_list);

		/**
		 * We *need* to retry after releasing the lru lock.
		 */

		if (unlikely(ret != 0))
			return ret;
		goto retry;
	}

	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	BUG_ON(ret != 0);

	ttm_bo_list_ref_sub(bo, put_count, true);

	ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
	ttm_bo_unreserve(bo);

	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

	if (mem->mm_node)
		(*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);
/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
					uint32_t mem_type,
					struct ttm_placement *placement,
					struct ttm_mem_reg *mem,
					bool interruptible,
					bool no_wait_reserve,
					bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	int ret;

	do {
		ret = (*man->func->get_node)(man, bo, placement, mem);
		if (unlikely(ret != 0))
			return ret;
		if (mem->mm_node)
			break;
		ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
						no_wait_reserve, no_wait_gpu);
		if (unlikely(ret != 0))
			return ret;
	} while (1);
	if (mem->mm_node == NULL)
		return -ENOMEM;
	mem->mem_type = mem_type;
	return 0;
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
				      uint32_t cur_placement,
				      uint32_t proposed_placement)
{
	uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
	uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

	/**
	 * Keep current caching if possible.
	 */

	if ((cur_placement & caching) != 0)
		result |= (cur_placement & caching);
	else if ((man->default_caching & caching) != 0)
		result |= man->default_caching;
	else if ((TTM_PL_FLAG_CACHED & caching) != 0)
		result |= TTM_PL_FLAG_CACHED;
	else if ((TTM_PL_FLAG_WC & caching) != 0)
		result |= TTM_PL_FLAG_WC;
	else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
		result |= TTM_PL_FLAG_UNCACHED;

	return result;
}

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
				 uint32_t mem_type,
				 uint32_t proposed_placement,
				 uint32_t *masked_placement)
{
	uint32_t cur_flags = ttm_bo_type_flags(mem_type);

	if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
		return false;

	if ((proposed_placement & man->available_caching) == 0)
		return false;

	cur_flags |= (proposed_placement & man->available_caching);

	*masked_placement = cur_flags;
	return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			struct ttm_mem_reg *mem,
			bool interruptible, bool no_wait_reserve,
			bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man;
	uint32_t mem_type = TTM_PL_SYSTEM;
	uint32_t cur_flags = 0;
	bool type_found = false;
	bool type_ok = false;
	bool has_erestartsys = false;
	int i, ret;

	mem->mm_node = NULL;
	for (i = 0; i < placement->num_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->placement[i],
						&mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];

		type_ok = ttm_bo_mt_compatible(man,
						mem_type,
						placement->placement[i],
						&cur_flags);

		if (!type_ok)
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, placement->placement[i],
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM)
			break;

		if (man->has_type && man->use_type) {
			type_found = true;
			ret = (*man->func->get_node)(man, bo, placement, mem);
			if (unlikely(ret))
				return ret;
		}
		if (mem->mm_node)
			break;
	}

	if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
		mem->mem_type = mem_type;
		mem->placement = cur_flags;
		return 0;
	}

	if (!type_found)
		return -EINVAL;

	for (i = 0; i < placement->num_busy_placement; ++i) {
		ret = ttm_mem_type_from_flags(placement->busy_placement[i],
						&mem_type);
		if (ret)
			return ret;
		man = &bdev->man[mem_type];
		if (!man->has_type)
			continue;
		if (!ttm_bo_mt_compatible(man,
						mem_type,
						placement->busy_placement[i],
						&cur_flags))
			continue;

		cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
						  cur_flags);
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the memory placement flags to the current flags
		 */
		ttm_flag_masked(&cur_flags, placement->busy_placement[i],
				~TTM_PL_MASK_MEMTYPE);

		if (mem_type == TTM_PL_SYSTEM) {
			mem->mem_type = mem_type;
			mem->placement = cur_flags;
			mem->mm_node = NULL;
			return 0;
		}

		ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
						interruptible, no_wait_reserve, no_wait_gpu);
		if (ret == 0 && mem->mm_node) {
			mem->placement = cur_flags;
			return 0;
		}
		if (ret == -ERESTARTSYS)
			has_erestartsys = true;
	}
	ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
	return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible, bool no_wait_reserve,
			bool no_wait_gpu)
{
	int ret = 0;
	struct ttm_mem_reg mem;
	struct ttm_bo_device *bdev = bo->bdev;

	BUG_ON(!ttm_bo_is_reserved(bo));

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */
	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bdev->fence_lock);
	if (ret)
		return ret;
	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved_vm = false;
	mem.bus.io_reserved_count = 0;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
out_unlock:
	if (ret && mem.mm_node)
		ttm_bo_mem_put(bo, &mem);
	return ret;
}

static int ttm_bo_mem_compat(struct ttm_placement *placement,
			     struct ttm_mem_reg *mem)
{
	int i;

	if (mem->mm_node && placement->lpfn != 0 &&
	    (mem->start < placement->fpfn ||
	     mem->start + mem->num_pages > placement->lpfn))
		return -1;

	for (i = 0; i < placement->num_placement; i++) {
		if ((placement->placement[i] & mem->placement &
			TTM_PL_MASK_CACHING) &&
			(placement->placement[i] & mem->placement &
			TTM_PL_MASK_MEM))
			return i;
	}
	return -1;
}

int ttm_bo_validate(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible, bool no_wait_reserve,
			bool no_wait_gpu)
{
	int ret;

	BUG_ON(!ttm_bo_is_reserved(bo));
	/* Check that range is valid */
	if (placement->lpfn || placement->fpfn)
		if (placement->fpfn > placement->lpfn ||
			(placement->lpfn - placement->fpfn) < bo->num_pages)
			return -EINVAL;
	/*
	 * Check whether we need to move buffer.
	 */
	ret = ttm_bo_mem_compat(placement, &bo->mem);
	if (ret < 0) {
		ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
		if (ret)
			return ret;
	} else {
		/*
		 * Use the access and other non-mapping-related flag bits from
		 * the compatible memory placement flags to the active flags
		 */
		ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
				~TTM_PL_MASK_MEMTYPE);
	}
	/*
	 * We might need to add a TTM.
	 */
	if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		ret = ttm_bo_add_ttm(bo, true);
		if (ret)
			return ret;
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);
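/*
 * Example (illustrative sketch only, not code from this file):
 * validating an already-reserved buffer "bo" into cached system
 * memory; error handling is elided:
 *
 *	uint32_t flags = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
 *	struct ttm_placement placement = {
 *		.num_placement = 1,
 *		.placement = &flags,
 *		.num_busy_placement = 1,
 *		.busy_placement = &flags,
 *	};
 *
 *	ret = ttm_bo_validate(bo, &placement, true, false, false);
 */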
int ttm_bo_check_placement(struct ttm_buffer_object *bo,
				struct ttm_placement *placement)
{
	BUG_ON((placement->fpfn || placement->lpfn) &&
	       (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));

	return 0;
}

int ttm_bo_init(struct ttm_bo_device *bdev,
		struct ttm_buffer_object *bo,
		unsigned long size,
		enum ttm_bo_type type,
		struct ttm_placement *placement,
		uint32_t page_alignment,
		bool interruptible,
		struct file *persistent_swap_storage,
		size_t acc_size,
		struct sg_table *sg,
		void (*destroy) (struct ttm_buffer_object *))
{
	int ret = 0;
	unsigned long num_pages;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (ret) {
		pr_err("Out of kernel memory\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		return -ENOMEM;
	}

	num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	if (num_pages == 0) {
		pr_err("Illegal buffer object size\n");
		if (destroy)
			(*destroy)(bo);
		else
			kfree(bo);
		ttm_mem_global_free(mem_glob, acc_size);
		return -EINVAL;
	}
	bo->destroy = destroy;

	kref_init(&bo->kref);
	kref_init(&bo->list_kref);
	atomic_set(&bo->cpu_writers, 0);
	atomic_set(&bo->reserved, 1);
	init_waitqueue_head(&bo->event_queue);
	INIT_LIST_HEAD(&bo->lru);
	INIT_LIST_HEAD(&bo->ddestroy);
	INIT_LIST_HEAD(&bo->swap);
	INIT_LIST_HEAD(&bo->io_reserve_lru);
	bo->bdev = bdev;
	bo->glob = bdev->glob;
	bo->type = type;
	bo->num_pages = num_pages;
	bo->mem.size = num_pages << PAGE_SHIFT;
	bo->mem.mem_type = TTM_PL_SYSTEM;
	bo->mem.num_pages = bo->num_pages;
	bo->mem.mm_node = NULL;
	bo->mem.page_alignment = page_alignment;
	bo->mem.bus.io_reserved_vm = false;
	bo->mem.bus.io_reserved_count = 0;
	bo->priv_flags = 0;
	bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
	bo->seq_valid = false;
	bo->persistent_swap_storage = persistent_swap_storage;
	bo->acc_size = acc_size;
	bo->sg = sg;
	atomic_inc(&bo->glob->bo_count);

	ret = ttm_bo_check_placement(bo, placement);
	if (unlikely(ret != 0))
		goto out_err;

	/*
	 * For ttm_bo_type_device buffers, allocate
	 * address space from the device.
	 */
	if (bo->type == ttm_bo_type_device ||
	    bo->type == ttm_bo_type_sg) {
		ret = ttm_bo_setup_vm(bo);
		if (ret)
			goto out_err;
	}

	ret = ttm_bo_validate(bo, placement, interruptible, false, false);
	if (ret)
		goto out_err;

	ttm_bo_unreserve(bo);
	return 0;

out_err:
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_init);
size_t ttm_bo_acc_size(struct ttm_bo_device *bdev,
		       unsigned long bo_size,
		       unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += ttm_round_pot(sizeof(struct ttm_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_acc_size);

size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
			   unsigned long bo_size,
			   unsigned struct_size)
{
	unsigned npages = (PAGE_ALIGN(bo_size)) >> PAGE_SHIFT;
	size_t size = 0;

	size += ttm_round_pot(struct_size);
	size += PAGE_ALIGN(npages * sizeof(void *));
	size += PAGE_ALIGN(npages * sizeof(dma_addr_t));
	size += ttm_round_pot(sizeof(struct ttm_dma_tt));
	return size;
}
EXPORT_SYMBOL(ttm_bo_dma_acc_size);

int ttm_bo_create(struct ttm_bo_device *bdev,
			unsigned long size,
			enum ttm_bo_type type,
			struct ttm_placement *placement,
			uint32_t page_alignment,
			bool interruptible,
			struct file *persistent_swap_storage,
			struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(bo == NULL))
		return -ENOMEM;

	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
			  interruptible, persistent_swap_storage, acc_size,
			  NULL, NULL);
	if (likely(ret == 0))
		*p_bo = bo;

	return ret;
}
EXPORT_SYMBOL(ttm_bo_create);
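/*
 * Example (illustrative sketch only): creating a 64 KiB kernel buffer
 * with a placement like the one in the ttm_bo_validate() example
 * above; "bdev" is assumed to be the driver's initialized device:
 *
 *	struct ttm_buffer_object *bo;
 *
 *	ret = ttm_bo_create(bdev, 65536, ttm_bo_type_kernel, &placement,
 *			    0, false, NULL, &bo);
 */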
static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
					unsigned mem_type, bool allow_errors)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];
	struct ttm_bo_global *glob = bdev->glob;
	int ret;

	/*
	 * Can't use standard list traversal since we're unlocking.
	 */

	spin_lock(&glob->lru_lock);
	while (!list_empty(&man->lru)) {
		spin_unlock(&glob->lru_lock);
		ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
		if (ret) {
			if (allow_errors) {
				return ret;
			} else {
				pr_err("Cleanup eviction failed\n");
			}
		}
		spin_lock(&glob->lru_lock);
	}
	spin_unlock(&glob->lru_lock);
	return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man;
	int ret = -EINVAL;

	if (mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory type %d\n", mem_type);
		return ret;
	}
	man = &bdev->man[mem_type];

	if (!man->has_type) {
		pr_err("Trying to take down uninitialized memory manager type %u\n",
		       mem_type);
		return ret;
	}

	man->use_type = false;
	man->has_type = false;

	ret = 0;
	if (mem_type > 0) {
		ttm_bo_force_list_clean(bdev, mem_type, false);

		ret = (*man->func->takedown)(man);
	}

	return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem_type];

	if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
		pr_err("Illegal memory manager memory type %u\n", mem_type);
		return -EINVAL;
	}

	if (!man->has_type) {
		pr_err("Memory type %u has not been initialized\n", mem_type);
		return 0;
	}

	return ttm_bo_force_list_clean(bdev, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
			unsigned long p_size)
{
	int ret = -EINVAL;
	struct ttm_mem_type_manager *man;

	BUG_ON(type >= TTM_NUM_MEM_TYPES);
	man = &bdev->man[type];
	BUG_ON(man->has_type);
	man->io_reserve_fastpath = true;
	man->use_io_reserve_lru = false;
	mutex_init(&man->io_reserve_mutex);
	INIT_LIST_HEAD(&man->io_reserve_lru);

	ret = bdev->driver->init_mem_type(bdev, type, man);
	if (ret)
		return ret;
	man->bdev = bdev;

	ret = 0;
	if (type != TTM_PL_SYSTEM) {
		ret = (*man->func->init)(man, p_size);
		if (ret)
			return ret;
	}
	man->has_type = true;
	man->use_type = true;
	man->size = p_size;

	INIT_LIST_HEAD(&man->lru);

	return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);
static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
	struct ttm_bo_global *glob =
		container_of(kobj, struct ttm_bo_global, kobj);

	ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
	__free_page(glob->dummy_read_page);
	kfree(glob);
}

void ttm_bo_global_release(struct drm_global_reference *ref)
{
	struct ttm_bo_global *glob = ref->object;

	kobject_del(&glob->kobj);
	kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct drm_global_reference *ref)
{
	struct ttm_bo_global_ref *bo_ref =
		container_of(ref, struct ttm_bo_global_ref, ref);
	struct ttm_bo_global *glob = ref->object;
	int ret;

	mutex_init(&glob->device_list_mutex);
	spin_lock_init(&glob->lru_lock);
	glob->mem_glob = bo_ref->mem_glob;
	glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

	if (unlikely(glob->dummy_read_page == NULL)) {
		ret = -ENOMEM;
		goto out_no_drp;
	}

	INIT_LIST_HEAD(&glob->swap_lru);
	INIT_LIST_HEAD(&glob->device_list);

	ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
	ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
	if (unlikely(ret != 0)) {
		pr_err("Could not register buffer object swapout\n");
		goto out_no_shrink;
	}

	atomic_set(&glob->bo_count, 0);

	ret = kobject_init_and_add(
		&glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
	if (unlikely(ret != 0))
		kobject_put(&glob->kobj);
	return ret;
out_no_shrink:
	__free_page(glob->dummy_read_page);
out_no_drp:
	kfree(glob);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);

int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
	int ret = 0;
	unsigned i = TTM_NUM_MEM_TYPES;
	struct ttm_mem_type_manager *man;
	struct ttm_bo_global *glob = bdev->glob;

	while (i--) {
		man = &bdev->man[i];
		if (man->has_type) {
			man->use_type = false;
			if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
				ret = -EBUSY;
				pr_err("DRM memory manager type %d is not clean\n",
				       i);
			}
			man->has_type = false;
		}
	}

	mutex_lock(&glob->device_list_mutex);
	list_del(&bdev->device_list);
	mutex_unlock(&glob->device_list_mutex);

	cancel_delayed_work_sync(&bdev->wq);

	while (ttm_bo_delayed_delete(bdev, true))
		;

	spin_lock(&glob->lru_lock);
	if (list_empty(&bdev->ddestroy))
		TTM_DEBUG("Delayed destroy list was clean\n");

	if (list_empty(&bdev->man[0].lru))
		TTM_DEBUG("Swap list was clean\n");
	spin_unlock(&glob->lru_lock);

	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
	write_lock(&bdev->vm_lock);
	drm_mm_takedown(&bdev->addr_space_mm);
	write_unlock(&bdev->vm_lock);

	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
		       struct ttm_bo_global *glob,
		       struct ttm_bo_driver *driver,
		       uint64_t file_page_offset,
		       bool need_dma32)
{
	int ret = -EINVAL;

	rwlock_init(&bdev->vm_lock);
	bdev->driver = driver;

	memset(bdev->man, 0, sizeof(bdev->man));

	/*
	 * Initialize the system memory buffer type.
	 * Other types need to be driver / IOCTL initialized.
	 */
	ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
	if (unlikely(ret != 0))
		goto out_no_sys;

	bdev->addr_space_rb = RB_ROOT;
	ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
	if (unlikely(ret != 0))
		goto out_no_addr_mm;

	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
	INIT_LIST_HEAD(&bdev->ddestroy);
	bdev->dev_mapping = NULL;
	bdev->glob = glob;
	bdev->need_dma32 = need_dma32;
	bdev->val_seq = 0;
	spin_lock_init(&bdev->fence_lock);
	mutex_lock(&glob->device_list_mutex);
	list_add_tail(&bdev->device_list, &glob->device_list);
	mutex_unlock(&glob->device_list_mutex);

	return 0;
out_no_addr_mm:
	ttm_bo_clean_mm(bdev, 0);
out_no_sys:
	return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);
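/*
 * Typical bring-up order (illustrative sketch; the names below are
 * driver-specific assumptions, not part of this file): register the
 * global state, initialize the device, then add the driver's extra
 * memory types, e.g.
 *
 *	ret = ttm_bo_device_init(&dev_priv->bdev, glob, &my_bo_driver,
 *				 MY_FILE_PAGE_OFFSET, need_dma32);
 *	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, vram_pages);
 */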
/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

	if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
		if (mem->mem_type == TTM_PL_SYSTEM)
			return false;

		if (man->flags & TTM_MEMTYPE_FLAG_CMA)
			return false;

		if (mem->placement & TTM_PL_FLAG_CACHED)
			return false;
	}
	return true;
}

void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	loff_t offset = (loff_t) bo->addr_space_offset;
	loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;

	if (!bdev->dev_mapping)
		return;
	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
	ttm_mem_io_free_vm(bo);
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	ttm_mem_io_lock(man, false);
	ttm_bo_unmap_virtual_locked(bo);
	ttm_mem_io_unlock(man);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct rb_node **cur = &bdev->addr_space_rb.rb_node;
	struct rb_node *parent = NULL;
	struct ttm_buffer_object *cur_bo;
	unsigned long offset = bo->vm_node->start;
	unsigned long cur_offset;

	while (*cur) {
		parent = *cur;
		cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
		cur_offset = cur_bo->vm_node->start;
		if (offset < cur_offset)
			cur = &parent->rb_left;
		else if (offset > cur_offset)
			cur = &parent->rb_right;
		else
			BUG();
	}

	rb_link_node(&bo->vm_rb, parent, cur);
	rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
}

/**
 * ttm_bo_setup_vm:
 *
 * @bo: the buffer to allocate address space for
 *
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to ttm_bo_type_device objects as others are not
 * placed in the drm device address space.
 */
static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

retry_pre_get:
	ret = drm_mm_pre_get(&bdev->addr_space_mm);
	if (unlikely(ret != 0))
		return ret;

	write_lock(&bdev->vm_lock);
	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
					 bo->mem.num_pages, 0, 0);

	if (unlikely(bo->vm_node == NULL)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
					      bo->mem.num_pages, 0);

	if (unlikely(bo->vm_node == NULL)) {
		write_unlock(&bdev->vm_lock);
		goto retry_pre_get;
	}

	ttm_bo_vm_insert_rb(bo);
	write_unlock(&bdev->vm_lock);
	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;

	return 0;
out_unlock:
	write_unlock(&bdev->vm_lock);
	return ret;
}
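/*
 * Note: ttm_bo_wait() is called with bdev->fence_lock held; the lock
 * is dropped and reacquired around the driver sync-object waits, so
 * bo->sync_obj is re-checked after every sleep.
 */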
int ttm_bo_wait(struct ttm_buffer_object *bo,
		bool lazy, bool interruptible, bool no_wait)
{
	struct ttm_bo_driver *driver = bo->bdev->driver;
	struct ttm_bo_device *bdev = bo->bdev;
	void *sync_obj;
	int ret = 0;

	if (likely(bo->sync_obj == NULL))
		return 0;

	while (bo->sync_obj) {

		if (driver->sync_obj_signaled(bo->sync_obj)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
			spin_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bdev->fence_lock);
			continue;
		}

		if (no_wait)
			return -EBUSY;

		sync_obj = driver->sync_obj_ref(bo->sync_obj);
		spin_unlock(&bdev->fence_lock);
		ret = driver->sync_obj_wait(sync_obj,
					    lazy, interruptible);
		if (unlikely(ret != 0)) {
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bdev->fence_lock);
			return ret;
		}
		spin_lock(&bdev->fence_lock);
		if (likely(bo->sync_obj == sync_obj)) {
			void *tmp_obj = bo->sync_obj;
			bo->sync_obj = NULL;
			clear_bit(TTM_BO_PRIV_FLAG_MOVING,
				  &bo->priv_flags);
			spin_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&sync_obj);
			driver->sync_obj_unref(&tmp_obj);
			spin_lock(&bdev->fence_lock);
		} else {
			spin_unlock(&bdev->fence_lock);
			driver->sync_obj_unref(&sync_obj);
			spin_lock(&bdev->fence_lock);
		}
	}
	return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret = 0;

	/*
	 * Using ttm_bo_reserve makes sure the lru lists are updated.
	 */

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	if (unlikely(ret != 0))
		return ret;
	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, true, no_wait);
	spin_unlock(&bdev->fence_lock);
	if (likely(ret == 0))
		atomic_inc(&bo->cpu_writers);
	ttm_bo_unreserve(bo);
	return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
	atomic_dec(&bo->cpu_writers);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);

/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
	struct ttm_bo_global *glob =
	    container_of(shrink, struct ttm_bo_global, shrink);
	struct ttm_buffer_object *bo;
	int ret = -EBUSY;
	int put_count;
	uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

	spin_lock(&glob->lru_lock);
	while (ret == -EBUSY) {
		if (unlikely(list_empty(&glob->swap_lru))) {
			spin_unlock(&glob->lru_lock);
			return -EBUSY;
		}

		bo = list_first_entry(&glob->swap_lru,
				      struct ttm_buffer_object, swap);
		kref_get(&bo->list_kref);

		if (!list_empty(&bo->ddestroy)) {
			spin_unlock(&glob->lru_lock);
			(void) ttm_bo_cleanup_refs(bo, false, false, false);
			kref_put(&bo->list_kref, ttm_bo_release_list);
			spin_lock(&glob->lru_lock);
			continue;
		}

		/**
		 * Reserve buffer. Since we unlock while sleeping, we need
		 * to re-check that nobody removed us from the swap-list while
		 * we slept.
		 */

		ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
		if (unlikely(ret == -EBUSY)) {
			spin_unlock(&glob->lru_lock);
			ttm_bo_wait_unreserved(bo, false);
			kref_put(&bo->list_kref, ttm_bo_release_list);
			spin_lock(&glob->lru_lock);
		}
	}

	BUG_ON(ret != 0);
	put_count = ttm_bo_del_from_lru(bo);
	spin_unlock(&glob->lru_lock);

	ttm_bo_list_ref_sub(bo, put_count, true);

	/**
	 * Wait for GPU, then move to system cached.
	 */

	spin_lock(&bo->bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, false, false);
	spin_unlock(&bo->bdev->fence_lock);

	if (unlikely(ret != 0))
		goto out;

	if ((bo->mem.placement & swap_placement) != swap_placement) {
		struct ttm_mem_reg evict_mem;

		evict_mem = bo->mem;
		evict_mem.mm_node = NULL;
		evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
		evict_mem.mem_type = TTM_PL_SYSTEM;

		ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
					     false, false, false);
		if (unlikely(ret != 0))
			goto out;
	}

	ttm_bo_unmap_virtual(bo);

	/**
	 * Swap out. Buffer will be swapped in again as soon as
	 * anyone tries to access a ttm page.
	 */

	if (bo->bdev->driver->swap_notify)
		bo->bdev->driver->swap_notify(bo);

	ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:

	/**
	 * Unreserve without putting on LRU to avoid swapping out an
	 * already swapped buffer.
	 */

	atomic_set(&bo->reserved, 0);
	wake_up_all(&bo->event_queue);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	return ret;
}

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
		;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);