/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/atomic.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13
static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
        .name = "bo_count",
        .mode = S_IRUGO
};

static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
{
        int i;

        for (i = 0; i <= TTM_PL_PRIV5; i++)
                if (flags & (1 << i)) {
                        *mem_type = i;
                        return 0;
                }
        return -EINVAL;
}

static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];

        printk(KERN_ERR TTM_PFX "    has_type: %d\n", man->has_type);
        printk(KERN_ERR TTM_PFX "    use_type: %d\n", man->use_type);
        printk(KERN_ERR TTM_PFX "    flags: 0x%08X\n", man->flags);
        printk(KERN_ERR TTM_PFX "    gpu_offset: 0x%08lX\n", man->gpu_offset);
        printk(KERN_ERR TTM_PFX "    size: %llu\n", man->size);
        printk(KERN_ERR TTM_PFX "    available_caching: 0x%08X\n",
               man->available_caching);
        printk(KERN_ERR TTM_PFX "    default_caching: 0x%08X\n",
               man->default_caching);
        if (mem_type != TTM_PL_SYSTEM)
                (*man->func->debug)(man, TTM_PFX);
}

static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
                                   struct ttm_placement *placement)
{
        int i, ret, mem_type;

        printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n",
               bo, bo->mem.num_pages, bo->mem.size >> 10,
               bo->mem.size >> 20);
        for (i = 0; i < placement->num_placement; i++) {
                ret = ttm_mem_type_from_flags(placement->placement[i],
                                              &mem_type);
                if (ret)
                        return;
                printk(KERN_ERR TTM_PFX "  placement[%d]=0x%08X (%d)\n",
                       i, placement->placement[i], mem_type);
                ttm_mem_type_debug(bo->bdev, mem_type);
        }
}

static ssize_t ttm_bo_global_show(struct kobject *kobj,
                                  struct attribute *attr,
                                  char *buffer)
{
        struct ttm_bo_global *glob =
                container_of(kobj, struct ttm_bo_global, kobj);

        return snprintf(buffer, PAGE_SIZE, "%lu\n",
                        (unsigned long) atomic_read(&glob->bo_count));
}

static struct attribute *ttm_bo_global_attrs[] = {
        &ttm_bo_count,
        NULL
};

static const struct sysfs_ops ttm_bo_global_ops = {
        .show = &ttm_bo_global_show
};

static struct kobj_type ttm_bo_glob_kobj_type = {
        .release = &ttm_bo_global_kobj_release,
        .sysfs_ops = &ttm_bo_global_ops,
        .default_attrs = ttm_bo_global_attrs
};

static inline uint32_t ttm_bo_type_flags(unsigned type)
{
        return 1 << (type);
}
static void ttm_bo_release_list(struct kref *list_kref)
{
        struct ttm_buffer_object *bo =
            container_of(list_kref, struct ttm_buffer_object, list_kref);
        struct ttm_bo_device *bdev = bo->bdev;

        BUG_ON(atomic_read(&bo->list_kref.refcount));
        BUG_ON(atomic_read(&bo->kref.refcount));
        BUG_ON(atomic_read(&bo->cpu_writers));
        BUG_ON(bo->sync_obj != NULL);
        BUG_ON(bo->mem.mm_node != NULL);
        BUG_ON(!list_empty(&bo->lru));
        BUG_ON(!list_empty(&bo->ddestroy));

        if (bo->ttm)
                ttm_tt_destroy(bo->ttm);
        atomic_dec(&bo->glob->bo_count);
        if (bo->destroy)
                bo->destroy(bo);
        else {
                ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
                kfree(bo);
        }
}

int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
{
        if (interruptible) {
                return wait_event_interruptible(bo->event_queue,
                                                atomic_read(&bo->reserved) == 0);
        } else {
                wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
                return 0;
        }
}
EXPORT_SYMBOL(ttm_bo_wait_unreserved);

void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man;

        BUG_ON(!atomic_read(&bo->reserved));

        if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

                BUG_ON(!list_empty(&bo->lru));

                man = &bdev->man[bo->mem.mem_type];
                list_add_tail(&bo->lru, &man->lru);
                kref_get(&bo->list_kref);

                if (bo->ttm != NULL) {
                        list_add_tail(&bo->swap, &bo->glob->swap_lru);
                        kref_get(&bo->list_kref);
                }
        }
}

int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
        int put_count = 0;

        if (!list_empty(&bo->swap)) {
                list_del_init(&bo->swap);
                ++put_count;
        }
        if (!list_empty(&bo->lru)) {
                list_del_init(&bo->lru);
                ++put_count;
        }

        /*
         * TODO: Add a driver hook to delete from
         * driver-specific LRU's here.
         */

        return put_count;
}
int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
                          bool interruptible,
                          bool no_wait, bool use_sequence, uint32_t sequence)
{
        struct ttm_bo_global *glob = bo->glob;
        int ret;

        while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
                /**
                 * Deadlock avoidance for multi-bo reserving.
                 */
                if (use_sequence && bo->seq_valid) {
                        /**
                         * We've already reserved this one.
                         */
                        if (unlikely(sequence == bo->val_seq))
                                return -EDEADLK;
                        /**
                         * Already reserved by a thread that will not back
                         * off for us. We need to back off.
                         */
                        if (unlikely(sequence - bo->val_seq < (1 << 31)))
                                return -EAGAIN;
                }

                if (no_wait)
                        return -EBUSY;

                spin_unlock(&glob->lru_lock);
                ret = ttm_bo_wait_unreserved(bo, interruptible);
                spin_lock(&glob->lru_lock);

                if (unlikely(ret))
                        return ret;
        }

        if (use_sequence) {
                /**
                 * Wake up waiters that may need to recheck for deadlock,
                 * if we decreased the sequence number.
                 */
                if (unlikely((bo->val_seq - sequence < (1 << 31))
                             || !bo->seq_valid))
                        wake_up_all(&bo->event_queue);

                bo->val_seq = sequence;
                bo->seq_valid = true;
        } else {
                bo->seq_valid = false;
        }

        return 0;
}
static void ttm_bo_ref_bug(struct kref *list_kref)
{
        BUG();
}

void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
                         bool never_free)
{
        kref_sub(&bo->list_kref, count,
                 (never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
}

int ttm_bo_reserve(struct ttm_buffer_object *bo,
                   bool interruptible,
                   bool no_wait, bool use_sequence, uint32_t sequence)
{
        struct ttm_bo_global *glob = bo->glob;
        int put_count = 0;
        int ret;

        spin_lock(&glob->lru_lock);
        ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
                                    sequence);
        if (likely(ret == 0))
                put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);

        ttm_bo_list_ref_sub(bo, put_count, true);

        return ret;
}
EXPORT_SYMBOL(ttm_bo_reserve);
void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
{
        ttm_bo_add_to_lru(bo);
        atomic_set(&bo->reserved, 0);
        wake_up_all(&bo->event_queue);
}

void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
        struct ttm_bo_global *glob = bo->glob;

        spin_lock(&glob->lru_lock);
        ttm_bo_unreserve_locked(bo);
        spin_unlock(&glob->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unreserve);
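
/*
 * Typical reservation pattern as seen from a driver (illustrative sketch
 * only, error handling shortened):
 *
 *      ret = ttm_bo_reserve(bo, true, false, false, 0);
 *      if (unlikely(ret != 0))
 *              return ret;
 *      ret = ttm_bo_validate(bo, &placement, true, false, false);
 *      ttm_bo_unreserve(bo);
 *
 * Reserving takes the bo off the LRU lists, so a reserved bo can never be
 * picked for eviction or swapout; unreserving puts it back. When several
 * bos are reserved as part of one operation (execbuf), passing use_sequence
 * with a per-operation sequence number lets ttm_bo_reserve_locked() above
 * resolve cross-operation deadlocks by backing off the younger reserver
 * with -EAGAIN.
 */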

/*
 * Call bo->mutex locked.
 */
static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        int ret = 0;
        uint32_t page_flags = 0;

        TTM_ASSERT_LOCKED(&bo->mutex);
        bo->ttm = NULL;

        if (bdev->need_dma32)
                page_flags |= TTM_PAGE_FLAG_DMA32;

        switch (bo->type) {
        case ttm_bo_type_device:
                if (zero_alloc)
                        page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
                /* fall through: device bos get a ttm like kernel bos */
        case ttm_bo_type_kernel:
                bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
                                        page_flags, glob->dummy_read_page);
                if (unlikely(bo->ttm == NULL))
                        ret = -ENOMEM;
                break;
        case ttm_bo_type_user:
                bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
                                        page_flags | TTM_PAGE_FLAG_USER,
                                        glob->dummy_read_page);
                if (unlikely(bo->ttm == NULL)) {
                        ret = -ENOMEM;
                        break;
                }

                ret = ttm_tt_set_user(bo->ttm, current,
                                      bo->buffer_start, bo->num_pages);
                if (unlikely(ret != 0)) {
                        ttm_tt_destroy(bo->ttm);
                        bo->ttm = NULL;
                }
                break;
        default:
                printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
                ret = -EINVAL;
                break;
        }

        return ret;
}
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                                  struct ttm_mem_reg *mem,
                                  bool evict, bool interruptible,
                                  bool no_wait_reserve, bool no_wait_gpu)
{
        struct ttm_bo_device *bdev = bo->bdev;
        bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
        bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
        struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
        struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
        int ret = 0;

        if (old_is_pci || new_is_pci ||
            ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
                ret = ttm_mem_io_lock(old_man, true);
                if (unlikely(ret != 0))
                        goto out_err;
                ttm_bo_unmap_virtual_locked(bo);
                ttm_mem_io_unlock(old_man);
        }

        /*
         * Create and bind a ttm if required.
         */

        if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
                if (bo->ttm == NULL) {
                        ret = ttm_bo_add_ttm(bo, false);
                        if (ret)
                                goto out_err;
                }

                ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
                if (ret)
                        goto out_err;

                if (mem->mem_type != TTM_PL_SYSTEM) {
                        ret = ttm_tt_bind(bo->ttm, mem);
                        if (ret)
                                goto out_err;
                }

                if (bo->mem.mem_type == TTM_PL_SYSTEM) {
                        if (bdev->driver->move_notify)
                                bdev->driver->move_notify(bo, mem);
                        bo->mem = *mem;
                        mem->mm_node = NULL;
                        goto moved;
                }
        }

        if (bdev->driver->move_notify)
                bdev->driver->move_notify(bo, mem);

        if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
            !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
                ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
        else if (bdev->driver->move)
                ret = bdev->driver->move(bo, evict, interruptible,
                                         no_wait_reserve, no_wait_gpu, mem);
        else
                ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);

        if (ret)
                goto out_err;

moved:
        if (bo->evicted) {
                ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
                if (ret)
                        printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
                bo->evicted = false;
        }

        if (bo->mem.mm_node) {
                bo->offset = (bo->mem.start << PAGE_SHIFT) +
                    bdev->man[bo->mem.mem_type].gpu_offset;
                bo->cur_placement = bo->mem.placement;
        } else
                bo->offset = 0;

        return 0;

out_err:
        new_man = &bdev->man[bo->mem.mem_type];
        if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
                ttm_tt_unbind(bo->ttm);
                ttm_tt_destroy(bo->ttm);
                bo->ttm = NULL;
        }

        return ret;
}
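
/*
 * Summary of the move paths ttm_bo_handle_move_mem() chooses between:
 *
 *   - both the old and new memory types are TTM-backed (not FIXED):
 *     ttm_bo_move_ttm(), essentially a rebind of the same pages;
 *   - the driver supplies a move() hook: use it, typically a GPU blit;
 *   - otherwise: ttm_bo_move_memcpy(), a CPU copy through mappings.
 *
 * A move out of TTM_PL_SYSTEM into another TTM-backed type short-circuits
 * before any of these: binding the ttm to the new location is enough, so
 * the plain struct assignment above completes the move without a copy.
 */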

/**
 * Call with the bo::reserved lock held.
 * Will release GPU memory type usage on destruction.
 * This is the place to put in driver specific hooks to release
 * driver private resources.
 * Will release the bo::reserved lock.
 */
static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
{
        if (bo->ttm) {
                ttm_tt_unbind(bo->ttm);
                ttm_tt_destroy(bo->ttm);
                bo->ttm = NULL;
        }
        ttm_bo_mem_put(bo, &bo->mem);

        atomic_set(&bo->reserved, 0);

        /*
         * Make processes trying to reserve really pick it up.
         */
        smp_mb__after_atomic_dec();
        wake_up_all(&bo->event_queue);
}

static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        struct ttm_bo_driver *driver;
        void *sync_obj = NULL;
        void *sync_obj_arg;
        int put_count;
        int ret;

        spin_lock(&bdev->fence_lock);
        (void) ttm_bo_wait(bo, false, false, true, TTM_USAGE_READWRITE);
        if (!bo->sync_obj) {

                spin_lock(&glob->lru_lock);

                /**
                 * Lock inversion between bo:reserve and bdev::fence_lock here,
                 * but that's OK, since we're only trylocking.
                 */

                ret = ttm_bo_reserve_locked(bo, false, true, false, 0);

                if (unlikely(ret == -EBUSY))
                        goto queue;

                spin_unlock(&bdev->fence_lock);
                put_count = ttm_bo_del_from_lru(bo);

                spin_unlock(&glob->lru_lock);
                ttm_bo_cleanup_memtype_use(bo);

                ttm_bo_list_ref_sub(bo, put_count, true);

                return;
        } else {
                spin_lock(&glob->lru_lock);
        }
queue:
        driver = bdev->driver;
        if (bo->sync_obj)
                sync_obj = driver->sync_obj_ref(bo->sync_obj);
        sync_obj_arg = bo->sync_obj_arg;

        kref_get(&bo->list_kref);
        list_add_tail(&bo->ddestroy, &bdev->ddestroy);
        spin_unlock(&glob->lru_lock);
        spin_unlock(&bdev->fence_lock);

        if (sync_obj) {
                driver->sync_obj_flush(sync_obj, sync_obj_arg);
                driver->sync_obj_unref(&sync_obj);
        }
        schedule_delayed_work(&bdev->wq,
                              ((HZ / 100) < 1) ? 1 : HZ / 100);
}
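
/*
 * Destruction of a still-fenced bo is thus a two-step affair: the bo is
 * queued on bdev->ddestroy with a list reference held, and the delayed
 * work scheduled above retries ttm_bo_cleanup_refs() (via
 * ttm_bo_delayed_delete()) roughly every HZ/100 jiffies until the GPU is
 * done with the buffer, at which point the final list_kref put frees it
 * through ttm_bo_release_list().
 */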

/**
 * function ttm_bo_cleanup_refs
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, do nothing.
 *
 * @interruptible         Any sleeps should occur interruptibly.
 * @no_wait_reserve       Never wait for reserve. Return -EBUSY instead.
 * @no_wait_gpu           Never wait for gpu. Return -EBUSY instead.
 */
static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
                               bool interruptible,
                               bool no_wait_reserve,
                               bool no_wait_gpu)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_global *glob = bo->glob;
        int put_count;
        int ret = 0;

retry:
        spin_lock(&bdev->fence_lock);
        ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu,
                          TTM_USAGE_READWRITE);
        spin_unlock(&bdev->fence_lock);

        if (unlikely(ret != 0))
                return ret;

        spin_lock(&glob->lru_lock);
        ret = ttm_bo_reserve_locked(bo, interruptible,
                                    no_wait_reserve, false, 0);

        if (unlikely(ret != 0) || list_empty(&bo->ddestroy)) {
                spin_unlock(&glob->lru_lock);
                return ret;
        }

        /**
         * We can re-check for sync object without taking
         * the bo::lock since setting the sync object requires
         * also bo::reserved. A busy object at this point may
         * be caused by another thread recently starting an accelerated
         * eviction.
         */

        if (unlikely(bo->sync_obj)) {
                atomic_set(&bo->reserved, 0);
                wake_up_all(&bo->event_queue);
                spin_unlock(&glob->lru_lock);
                goto retry;
        }

        put_count = ttm_bo_del_from_lru(bo);
        list_del_init(&bo->ddestroy);
        ++put_count;

        spin_unlock(&glob->lru_lock);
        ttm_bo_cleanup_memtype_use(bo);

        ttm_bo_list_ref_sub(bo, put_count, true);

        return 0;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */
static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_buffer_object *entry = NULL;
        int ret = 0;

        spin_lock(&glob->lru_lock);
        if (list_empty(&bdev->ddestroy))
                goto out_unlock;

        entry = list_first_entry(&bdev->ddestroy,
                struct ttm_buffer_object, ddestroy);
        kref_get(&entry->list_kref);

        for (;;) {
                struct ttm_buffer_object *nentry = NULL;

                if (entry->ddestroy.next != &bdev->ddestroy) {
                        nentry = list_first_entry(&entry->ddestroy,
                                struct ttm_buffer_object, ddestroy);
                        kref_get(&nentry->list_kref);
                }

                spin_unlock(&glob->lru_lock);
                ret = ttm_bo_cleanup_refs(entry, false, !remove_all,
                                          !remove_all);
                kref_put(&entry->list_kref, ttm_bo_release_list);
                entry = nentry;

                if (ret || !entry)
                        goto out;

                spin_lock(&glob->lru_lock);
                if (list_empty(&entry->ddestroy))
                        break;
        }

out_unlock:
        spin_unlock(&glob->lru_lock);
out:
        if (entry)
                kref_put(&entry->list_kref, ttm_bo_release_list);
        return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
        struct ttm_bo_device *bdev =
            container_of(work, struct ttm_bo_device, wq.work);

        if (ttm_bo_delayed_delete(bdev, false)) {
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
        }
}

static void ttm_bo_release(struct kref *kref)
{
        struct ttm_buffer_object *bo =
            container_of(kref, struct ttm_buffer_object, kref);
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

        if (likely(bo->vm_node != NULL)) {
                rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
                drm_mm_put_block(bo->vm_node);
                bo->vm_node = NULL;
        }
        write_unlock(&bdev->vm_lock);
        ttm_mem_io_lock(man, false);
        ttm_mem_io_free_vm(bo);
        ttm_mem_io_unlock(man);
        ttm_bo_cleanup_refs_or_queue(bo);
        kref_put(&bo->list_kref, ttm_bo_release_list);
        write_lock(&bdev->vm_lock);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
        struct ttm_buffer_object *bo = *p_bo;
        struct ttm_bo_device *bdev = bo->bdev;

        *p_bo = NULL;
        write_lock(&bdev->vm_lock);
        kref_put(&bo->kref, ttm_bo_release);
        write_unlock(&bdev->vm_lock);
}
EXPORT_SYMBOL(ttm_bo_unref);

int ttm_bo_lock_delayed_workqueue(struct ttm_bo_device *bdev)
{
        return cancel_delayed_work_sync(&bdev->wq);
}
EXPORT_SYMBOL(ttm_bo_lock_delayed_workqueue);

void ttm_bo_unlock_delayed_workqueue(struct ttm_bo_device *bdev, int resched)
{
        if (resched)
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
}
EXPORT_SYMBOL(ttm_bo_unlock_delayed_workqueue);

static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
                        bool no_wait_reserve, bool no_wait_gpu)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_reg evict_mem;
        struct ttm_placement placement;
        int ret = 0;

        spin_lock(&bdev->fence_lock);
        ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu,
                          TTM_USAGE_READWRITE);
        spin_unlock(&bdev->fence_lock);

        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS) {
                        printk(KERN_ERR TTM_PFX
                               "Failed to expire sync object before "
                               "buffer eviction.\n");
                }
                goto out;
        }

        BUG_ON(!atomic_read(&bo->reserved));

        evict_mem = bo->mem;
        evict_mem.mm_node = NULL;
        evict_mem.bus.io_reserved_vm = false;
        evict_mem.bus.io_reserved_count = 0;

        placement.fpfn = 0;
        placement.lpfn = 0;
        placement.num_placement = 0;
        placement.num_busy_placement = 0;
        bdev->driver->evict_flags(bo, &placement);
        ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
                               no_wait_reserve, no_wait_gpu);
        if (ret) {
                if (ret != -ERESTARTSYS) {
                        printk(KERN_ERR TTM_PFX
                               "Failed to find memory space for "
                               "buffer 0x%p eviction.\n", bo);
                        ttm_bo_mem_space_debug(bo, &placement);
                }
                goto out;
        }

        ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
                                     no_wait_reserve, no_wait_gpu);
        if (ret) {
                if (ret != -ERESTARTSYS)
                        printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
                ttm_bo_mem_put(bo, &evict_mem);
                goto out;
        }
        bo->evicted = true;
out:
        return ret;
}

static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
                               uint32_t mem_type,
                               bool interruptible, bool no_wait_reserve,
                               bool no_wait_gpu)
{
        struct ttm_bo_global *glob = bdev->glob;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        struct ttm_buffer_object *bo;
        int ret, put_count = 0;

retry:
        spin_lock(&glob->lru_lock);
        if (list_empty(&man->lru)) {
                spin_unlock(&glob->lru_lock);
                return -EBUSY;
        }

        bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
        kref_get(&bo->list_kref);

        if (!list_empty(&bo->ddestroy)) {
                spin_unlock(&glob->lru_lock);
                ret = ttm_bo_cleanup_refs(bo, interruptible,
                                          no_wait_reserve, no_wait_gpu);
                kref_put(&bo->list_kref, ttm_bo_release_list);

                if (likely(ret == 0 || ret == -ERESTARTSYS))
                        return ret;

                goto retry;
        }

        ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);

        if (unlikely(ret == -EBUSY)) {
                spin_unlock(&glob->lru_lock);
                if (likely(!no_wait_gpu))
                        ret = ttm_bo_wait_unreserved(bo, interruptible);

                kref_put(&bo->list_kref, ttm_bo_release_list);

                /**
                 * We *need* to retry after releasing the lru lock.
                 */

                if (unlikely(ret != 0))
                        return ret;
                goto retry;
        }

        put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);

        BUG_ON(ret != 0);

        ttm_bo_list_ref_sub(bo, put_count, true);

        ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
        ttm_bo_unreserve(bo);

        kref_put(&bo->list_kref, ttm_bo_release_list);
        return ret;
}
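
/*
 * Note the retry discipline in ttm_mem_evict_first(): whenever the LRU
 * lock has been dropped (to clean up a dying bo or to wait for an
 * unreserve), the LRU may have changed under us, so the scan must restart
 * from the list head rather than trust a stale iterator.
 */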

void ttm_bo_mem_put(struct ttm_buffer_object *bo, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bo->bdev->man[mem->mem_type];

        if (mem->mm_node)
                (*man->func->put_node)(man, mem);
}
EXPORT_SYMBOL(ttm_bo_mem_put);

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
                                  uint32_t mem_type,
                                  struct ttm_placement *placement,
                                  struct ttm_mem_reg *mem,
                                  bool interruptible,
                                  bool no_wait_reserve,
                                  bool no_wait_gpu)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        int ret;

        do {
                ret = (*man->func->get_node)(man, bo, placement, mem);
                if (unlikely(ret != 0))
                        return ret;
                if (mem->mm_node)
                        break;
                ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
                                          no_wait_reserve, no_wait_gpu);
                if (unlikely(ret != 0))
                        return ret;
        } while (1);
        if (mem->mm_node == NULL)
                return -ENOMEM;
        mem->mem_type = mem_type;
        return 0;
}

static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
                                      uint32_t cur_placement,
                                      uint32_t proposed_placement)
{
        uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
        uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;

        /**
         * Keep current caching if possible.
         */

        if ((cur_placement & caching) != 0)
                result |= (cur_placement & caching);
        else if ((man->default_caching & caching) != 0)
                result |= man->default_caching;
        else if ((TTM_PL_FLAG_CACHED & caching) != 0)
                result |= TTM_PL_FLAG_CACHED;
        else if ((TTM_PL_FLAG_WC & caching) != 0)
                result |= TTM_PL_FLAG_WC;
        else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
                result |= TTM_PL_FLAG_UNCACHED;

        return result;
}
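
/*
 * Example of the fallback order above: if a bo is currently write-combined
 * and the proposed placement allows WC | UNCACHED, the WC bit is kept and
 * no cache-mode transition is needed. Only when the current mode is not
 * allowed do we fall back to the manager's default, then CACHED, then WC,
 * then UNCACHED, in that order of preference.
 */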

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
                                 bool disallow_fixed,
                                 uint32_t mem_type,
                                 uint32_t proposed_placement,
                                 uint32_t *masked_placement)
{
        uint32_t cur_flags = ttm_bo_type_flags(mem_type);

        if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
                return false;

        if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
                return false;

        if ((proposed_placement & man->available_caching) == 0)
                return false;

        cur_flags |= (proposed_placement & man->available_caching);

        *masked_placement = cur_flags;
        return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                     struct ttm_placement *placement,
                     struct ttm_mem_reg *mem,
                     bool interruptible, bool no_wait_reserve,
                     bool no_wait_gpu)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man;
        uint32_t mem_type = TTM_PL_SYSTEM;
        uint32_t cur_flags = 0;
        bool type_found = false;
        bool type_ok = false;
        bool has_erestartsys = false;
        int i, ret;

        mem->mm_node = NULL;
        for (i = 0; i < placement->num_placement; ++i) {
                ret = ttm_mem_type_from_flags(placement->placement[i],
                                              &mem_type);
                if (ret)
                        return ret;
                man = &bdev->man[mem_type];

                type_ok = ttm_bo_mt_compatible(man,
                                               bo->type == ttm_bo_type_user,
                                               mem_type,
                                               placement->placement[i],
                                               &cur_flags);

                if (!type_ok)
                        continue;

                cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
                                                  cur_flags);
                /*
                 * Use the access and other non-mapping-related flag bits from
                 * the memory placement flags to the current flags
                 */
                ttm_flag_masked(&cur_flags, placement->placement[i],
                                ~TTM_PL_MASK_MEMTYPE);

                if (mem_type == TTM_PL_SYSTEM)
                        break;

                if (man->has_type && man->use_type) {
                        type_found = true;
                        ret = (*man->func->get_node)(man, bo, placement, mem);
                        if (unlikely(ret))
                                return ret;
                }
                if (mem->mm_node)
                        break;
        }

        if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || mem->mm_node) {
                mem->mem_type = mem_type;
                mem->placement = cur_flags;
                return 0;
        }

        if (!type_found)
                return -EINVAL;

        for (i = 0; i < placement->num_busy_placement; ++i) {
                ret = ttm_mem_type_from_flags(placement->busy_placement[i],
                                              &mem_type);
                if (ret)
                        return ret;
                man = &bdev->man[mem_type];
                if (!man->has_type)
                        continue;
                if (!ttm_bo_mt_compatible(man,
                                          bo->type == ttm_bo_type_user,
                                          mem_type,
                                          placement->busy_placement[i],
                                          &cur_flags))
                        continue;

                cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
                                                  cur_flags);
                /*
                 * Use the access and other non-mapping-related flag bits from
                 * the memory placement flags to the current flags
                 */
                ttm_flag_masked(&cur_flags, placement->busy_placement[i],
                                ~TTM_PL_MASK_MEMTYPE);

                if (mem_type == TTM_PL_SYSTEM) {
                        mem->mem_type = mem_type;
                        mem->placement = cur_flags;
                        mem->mm_node = NULL;
                        return 0;
                }

                ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
                                             interruptible, no_wait_reserve,
                                             no_wait_gpu);
                if (ret == 0 && mem->mm_node) {
                        mem->placement = cur_flags;
                        return 0;
                }
                if (ret == -ERESTARTSYS)
                        has_erestartsys = true;
        }
        ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
        return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
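
/*
 * ttm_bo_mem_space() is thus a two-pass allocator: pass one walks
 * placement->placement[] looking for free space without evicting anything;
 * pass two walks placement->busy_placement[] and calls
 * ttm_bo_mem_force_space(), evicting LRU buffers until the node fits.
 * A driver typically asks for its preferred domain first but accepts a
 * wider set under memory pressure, e.g. (illustrative values only):
 *
 *      static const uint32_t place[] = {
 *              TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
 *      };
 *      static const uint32_t busy_place[] = {
 *              TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC,
 *              TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED,
 *      };
 *      placement.placement = place;
 *      placement.num_placement = ARRAY_SIZE(place);
 *      placement.busy_placement = busy_place;
 *      placement.num_busy_placement = ARRAY_SIZE(busy_place);
 */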

int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
{
        if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
                return -EBUSY;

        return wait_event_interruptible(bo->event_queue,
                                        atomic_read(&bo->cpu_writers) == 0);
}
EXPORT_SYMBOL(ttm_bo_wait_cpu);

int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
                       struct ttm_placement *placement,
                       bool interruptible, bool no_wait_reserve,
                       bool no_wait_gpu)
{
        int ret = 0;
        struct ttm_mem_reg mem;
        struct ttm_bo_device *bdev = bo->bdev;

        BUG_ON(!atomic_read(&bo->reserved));

        /*
         * FIXME: It's possible to pipeline buffer moves.
         * Have the driver move function wait for idle when necessary,
         * instead of doing it here.
         */
        spin_lock(&bdev->fence_lock);
        ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu,
                          TTM_USAGE_READWRITE);
        spin_unlock(&bdev->fence_lock);
        if (ret)
                return ret;
        mem.num_pages = bo->num_pages;
        mem.size = mem.num_pages << PAGE_SHIFT;
        mem.page_alignment = bo->mem.page_alignment;
        mem.bus.io_reserved_vm = false;
        mem.bus.io_reserved_count = 0;
        /*
         * Determine where to move the buffer.
         */
        ret = ttm_bo_mem_space(bo, placement, &mem, interruptible,
                               no_wait_reserve, no_wait_gpu);
        if (ret)
                goto out_unlock;
        ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible,
                                     no_wait_reserve, no_wait_gpu);
out_unlock:
        if (ret && mem.mm_node)
                ttm_bo_mem_put(bo, &mem);
        return ret;
}

static int ttm_bo_mem_compat(struct ttm_placement *placement,
                             struct ttm_mem_reg *mem)
{
        int i;

        if (mem->mm_node && placement->lpfn != 0 &&
            (mem->start < placement->fpfn ||
             mem->start + mem->num_pages > placement->lpfn))
                return -1;

        for (i = 0; i < placement->num_placement; i++) {
                if ((placement->placement[i] & mem->placement &
                     TTM_PL_MASK_CACHING) &&
                    (placement->placement[i] & mem->placement &
                     TTM_PL_MASK_MEM))
                        return i;
        }
        return -1;
}

int ttm_bo_validate(struct ttm_buffer_object *bo,
                    struct ttm_placement *placement,
                    bool interruptible, bool no_wait_reserve,
                    bool no_wait_gpu)
{
        int ret;

        BUG_ON(!atomic_read(&bo->reserved));
        /* Check that range is valid */
        if (placement->lpfn || placement->fpfn)
                if (placement->fpfn > placement->lpfn ||
                    (placement->lpfn - placement->fpfn) < bo->num_pages)
                        return -EINVAL;
        /*
         * Check whether we need to move buffer.
         */
        ret = ttm_bo_mem_compat(placement, &bo->mem);
        if (ret < 0) {
                ret = ttm_bo_move_buffer(bo, placement, interruptible,
                                         no_wait_reserve, no_wait_gpu);
                if (ret)
                        return ret;
        } else {
                /*
                 * Use the access and other non-mapping-related flag bits from
                 * the compatible memory placement flags to the active flags
                 */
                ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
                                ~TTM_PL_MASK_MEMTYPE);
        }
        /*
         * We might need to add a TTM.
         */
        if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                ret = ttm_bo_add_ttm(bo, true);
                if (ret)
                        return ret;
        }
        return 0;
}
EXPORT_SYMBOL(ttm_bo_validate);
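
/*
 * After a successful ttm_bo_validate() the bo is backed by memory matching
 * one of the requested placements, bo->mem.placement carries the negotiated
 * flags, and a TTM has been allocated if the bo ended up in system memory.
 * The caller must hold the reservation across the call and is typically
 * responsible for fencing the bo afterwards if the move was accelerated.
 */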

int ttm_bo_check_placement(struct ttm_buffer_object *bo,
                           struct ttm_placement *placement)
{
        BUG_ON((placement->fpfn || placement->lpfn) &&
               (bo->mem.num_pages > (placement->lpfn - placement->fpfn)));

        return 0;
}

int ttm_bo_init(struct ttm_bo_device *bdev,
                struct ttm_buffer_object *bo,
                unsigned long size,
                enum ttm_bo_type type,
                struct ttm_placement *placement,
                uint32_t page_alignment,
                unsigned long buffer_start,
                bool interruptible,
                struct file *persistent_swap_storage,
                size_t acc_size,
                void (*destroy) (struct ttm_buffer_object *))
{
        int ret = 0;
        unsigned long num_pages;

        size += buffer_start & ~PAGE_MASK;
        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (num_pages == 0) {
                printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
                if (destroy)
                        (*destroy)(bo);
                else
                        kfree(bo);
                return -EINVAL;
        }
        bo->destroy = destroy;

        kref_init(&bo->kref);
        kref_init(&bo->list_kref);
        atomic_set(&bo->cpu_writers, 0);
        atomic_set(&bo->reserved, 1);
        init_waitqueue_head(&bo->event_queue);
        INIT_LIST_HEAD(&bo->lru);
        INIT_LIST_HEAD(&bo->ddestroy);
        INIT_LIST_HEAD(&bo->swap);
        INIT_LIST_HEAD(&bo->io_reserve_lru);
        bo->bdev = bdev;
        bo->glob = bdev->glob;
        bo->type = type;
        bo->num_pages = num_pages;
        bo->mem.size = num_pages << PAGE_SHIFT;
        bo->mem.mem_type = TTM_PL_SYSTEM;
        bo->mem.num_pages = bo->num_pages;
        bo->mem.mm_node = NULL;
        bo->mem.page_alignment = page_alignment;
        bo->mem.bus.io_reserved_vm = false;
        bo->mem.bus.io_reserved_count = 0;
        bo->buffer_start = buffer_start & PAGE_MASK;
        bo->priv_flags = 0;
        bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
        bo->seq_valid = false;
        bo->persistent_swap_storage = persistent_swap_storage;
        bo->acc_size = acc_size;
        atomic_inc(&bo->glob->bo_count);

        ret = ttm_bo_check_placement(bo, placement);
        if (unlikely(ret != 0))
                goto out_err;

        /*
         * For ttm_bo_type_device buffers, allocate
         * address space from the device.
         */
        if (bo->type == ttm_bo_type_device) {
                ret = ttm_bo_setup_vm(bo);
                if (ret)
                        goto out_err;
        }

        ret = ttm_bo_validate(bo, placement, interruptible, false, false);
        if (ret)
                goto out_err;

        ttm_bo_unreserve(bo);
        return 0;

out_err:
        ttm_bo_unreserve(bo);
        ttm_bo_unref(&bo);

        return ret;
}
EXPORT_SYMBOL(ttm_bo_init);
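
/*
 * Minimal creation sketch for a driver-private bo type (hypothetical
 * my_bo / my_bo_destroy names; error paths omitted). ttm_bo_init()
 * consumes the initial reservation and either unreserves on success or
 * unrefs the bo on failure (invoking the destroy callback), so the
 * embedded object must be kref-managed from the start:
 *
 *      struct my_bo *mbo = kzalloc(sizeof(*mbo), GFP_KERNEL);
 *
 *      ret = ttm_bo_init(bdev, &mbo->base, size, ttm_bo_type_device,
 *                        &placement, 0, 0, true, NULL, acc_size,
 *                        my_bo_destroy);
 *
 * where acc_size is the driver's accounting estimate, e.g. from
 * ttm_bo_size(). ttm_bo_create() below is the plain-vanilla variant that
 * allocates the ttm_buffer_object itself.
 */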

static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
                                 unsigned long num_pages)
{
        size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
            PAGE_MASK;

        return glob->ttm_bo_size + 2 * page_array_size;
}

int ttm_bo_create(struct ttm_bo_device *bdev,
                  unsigned long size,
                  enum ttm_bo_type type,
                  struct ttm_placement *placement,
                  uint32_t page_alignment,
                  unsigned long buffer_start,
                  bool interruptible,
                  struct file *persistent_swap_storage,
                  struct ttm_buffer_object **p_bo)
{
        struct ttm_buffer_object *bo;
        struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
        int ret;

        size_t acc_size =
            ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
        ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
        if (unlikely(ret != 0))
                return ret;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);

        if (unlikely(bo == NULL)) {
                ttm_mem_global_free(mem_glob, acc_size);
                return -ENOMEM;
        }

        ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
                          buffer_start, interruptible,
                          persistent_swap_storage, acc_size, NULL);
        if (likely(ret == 0))
                *p_bo = bo;

        return ret;
}
EXPORT_SYMBOL(ttm_bo_create);

static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
                                   unsigned mem_type, bool allow_errors)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        struct ttm_bo_global *glob = bdev->glob;
        int ret;

        /*
         * Can't use standard list traversal since we're unlocking.
         */

        spin_lock(&glob->lru_lock);
        while (!list_empty(&man->lru)) {
                spin_unlock(&glob->lru_lock);
                ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
                if (ret) {
                        if (allow_errors) {
                                return ret;
                        } else {
                                printk(KERN_ERR TTM_PFX
                                       "Cleanup eviction failed\n");
                        }
                }
                spin_lock(&glob->lru_lock);
        }
        spin_unlock(&glob->lru_lock);
        return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
        struct ttm_mem_type_manager *man;
        int ret = -EINVAL;

        if (mem_type >= TTM_NUM_MEM_TYPES) {
                printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
                return ret;
        }
        man = &bdev->man[mem_type];

        if (!man->has_type) {
                printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
                       "memory manager type %u\n", mem_type);
                return ret;
        }

        man->use_type = false;
        man->has_type = false;

        ret = 0;
        if (mem_type > 0) {
                ttm_bo_force_list_clean(bdev, mem_type, false);

                ret = (*man->func->takedown)(man);
        }

        return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];

        if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
                printk(KERN_ERR TTM_PFX
                       "Illegal memory manager memory type %u.\n",
                       mem_type);
                return -EINVAL;
        }

        if (!man->has_type) {
                printk(KERN_ERR TTM_PFX
                       "Memory type %u has not been initialized.\n",
                       mem_type);
                return 0;
        }

        return ttm_bo_force_list_clean(bdev, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
                   unsigned long p_size)
{
        int ret = -EINVAL;
        struct ttm_mem_type_manager *man;

        BUG_ON(type >= TTM_NUM_MEM_TYPES);
        man = &bdev->man[type];
        BUG_ON(man->has_type);
        man->io_reserve_fastpath = true;
        man->use_io_reserve_lru = false;
        mutex_init(&man->io_reserve_mutex);
        INIT_LIST_HEAD(&man->io_reserve_lru);

        ret = bdev->driver->init_mem_type(bdev, type, man);
        if (ret)
                return ret;
        man->bdev = bdev;

        ret = 0;
        if (type != TTM_PL_SYSTEM) {
                ret = (*man->func->init)(man, p_size);
                if (ret)
                        return ret;
        }
        man->has_type = true;
        man->use_type = true;
        man->size = p_size;

        INIT_LIST_HEAD(&man->lru);

        return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);

static void ttm_bo_global_kobj_release(struct kobject *kobj)
{
        struct ttm_bo_global *glob =
                container_of(kobj, struct ttm_bo_global, kobj);

        ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
        __free_page(glob->dummy_read_page);
        kfree(glob);
}

void ttm_bo_global_release(struct drm_global_reference *ref)
{
        struct ttm_bo_global *glob = ref->object;

        kobject_del(&glob->kobj);
        kobject_put(&glob->kobj);
}
EXPORT_SYMBOL(ttm_bo_global_release);

int ttm_bo_global_init(struct drm_global_reference *ref)
{
        struct ttm_bo_global_ref *bo_ref =
                container_of(ref, struct ttm_bo_global_ref, ref);
        struct ttm_bo_global *glob = ref->object;
        int ret;

        mutex_init(&glob->device_list_mutex);
        spin_lock_init(&glob->lru_lock);
        glob->mem_glob = bo_ref->mem_glob;
        glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);

        if (unlikely(glob->dummy_read_page == NULL)) {
                ret = -ENOMEM;
                goto out_no_drp;
        }

        INIT_LIST_HEAD(&glob->swap_lru);
        INIT_LIST_HEAD(&glob->device_list);

        ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
        ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
        if (unlikely(ret != 0)) {
                printk(KERN_ERR TTM_PFX
                       "Could not register buffer object swapout.\n");
                goto out_no_shrink;
        }

        glob->ttm_bo_extra_size =
                ttm_round_pot(sizeof(struct ttm_tt)) +
                ttm_round_pot(sizeof(struct ttm_backend));

        glob->ttm_bo_size = glob->ttm_bo_extra_size +
                ttm_round_pot(sizeof(struct ttm_buffer_object));

        atomic_set(&glob->bo_count, 0);

        ret = kobject_init_and_add(
                &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(),
                "buffer_objects");
        if (unlikely(ret != 0))
                kobject_put(&glob->kobj);
        return ret;
out_no_shrink:
        __free_page(glob->dummy_read_page);
out_no_drp:
        kfree(glob);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_global_init);

int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
        int ret = 0;
        unsigned i = TTM_NUM_MEM_TYPES;
        struct ttm_mem_type_manager *man;
        struct ttm_bo_global *glob = bdev->glob;

        while (i--) {
                man = &bdev->man[i];
                if (man->has_type) {
                        man->use_type = false;
                        if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
                                ret = -EBUSY;
                                printk(KERN_ERR TTM_PFX
                                       "DRM memory manager type %d "
                                       "is not clean.\n", i);
                        }
                        man->has_type = false;
                }
        }

        mutex_lock(&glob->device_list_mutex);
        list_del(&bdev->device_list);
        mutex_unlock(&glob->device_list_mutex);

        cancel_delayed_work_sync(&bdev->wq);

        while (ttm_bo_delayed_delete(bdev, true))
                ;

        spin_lock(&glob->lru_lock);
        if (list_empty(&bdev->ddestroy))
                TTM_DEBUG("Delayed destroy list was clean\n");

        if (list_empty(&bdev->man[0].lru))
                TTM_DEBUG("Swap list was clean\n");
        spin_unlock(&glob->lru_lock);

        BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
        write_lock(&bdev->vm_lock);
        drm_mm_takedown(&bdev->addr_space_mm);
        write_unlock(&bdev->vm_lock);

        return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

int ttm_bo_device_init(struct ttm_bo_device *bdev,
                       struct ttm_bo_global *glob,
                       struct ttm_bo_driver *driver,
                       uint64_t file_page_offset,
                       bool need_dma32)
{
        int ret = -EINVAL;

        rwlock_init(&bdev->vm_lock);
        bdev->driver = driver;

        memset(bdev->man, 0, sizeof(bdev->man));

        /*
         * Initialize the system memory buffer type.
         * Other types need to be driver / IOCTL initialized.
         */
        ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
        if (unlikely(ret != 0))
                goto out_no_sys;

        bdev->addr_space_rb = RB_ROOT;
        ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
        if (unlikely(ret != 0))
                goto out_no_addr_mm;

        INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
        bdev->nice_mode = true;
        INIT_LIST_HEAD(&bdev->ddestroy);
        bdev->dev_mapping = NULL;
        bdev->glob = glob;
        bdev->need_dma32 = need_dma32;
        bdev->val_seq = 0;
        spin_lock_init(&bdev->fence_lock);
        mutex_lock(&glob->device_list_mutex);
        list_add_tail(&bdev->device_list, &glob->device_list);
        mutex_unlock(&glob->device_list_mutex);

        return 0;
out_no_addr_mm:
        ttm_bo_clean_mm(bdev, 0);
out_no_sys:
        return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);
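
/*
 * A driver brings its device online roughly as follows (sketch only; the
 * glob setup via drm_global_item_ref and the my_bo_driver struct are
 * assumed to exist elsewhere, and the mmap offset macro is whatever the
 * driver defines, e.g. radeon's DRM_FILE_PAGE_OFFSET):
 *
 *      ret = ttm_bo_device_init(&ddev->bdev, glob, &my_bo_driver,
 *                               DRM_FILE_PAGE_OFFSET, need_dma32);
 *
 * followed by ttm_bo_init_mm() calls for each GPU-visible memory type
 * (TTM_PL_VRAM, TTM_PL_TT, ...); TTM_PL_SYSTEM is already set up above.
 */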

/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
                if (mem->mem_type == TTM_PL_SYSTEM)
                        return false;

                if (man->flags & TTM_MEMTYPE_FLAG_CMA)
                        return false;

                if (mem->placement & TTM_PL_FLAG_CACHED)
                        return false;
        }
        return true;
}

void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        loff_t offset = (loff_t) bo->addr_space_offset;
        loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;

        if (!bdev->dev_mapping)
                return;
        unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
        ttm_mem_io_free_vm(bo);
}

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

        ttm_mem_io_lock(man, false);
        ttm_bo_unmap_virtual_locked(bo);
        ttm_mem_io_unlock(man);
}
EXPORT_SYMBOL(ttm_bo_unmap_virtual);

static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct rb_node **cur = &bdev->addr_space_rb.rb_node;
        struct rb_node *parent = NULL;
        struct ttm_buffer_object *cur_bo;
        unsigned long offset = bo->vm_node->start;
        unsigned long cur_offset;

        while (*cur) {
                parent = *cur;
                cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
                cur_offset = cur_bo->vm_node->start;
                if (offset < cur_offset)
                        cur = &parent->rb_left;
                else if (offset > cur_offset)
                        cur = &parent->rb_right;
                else
                        BUG();
        }

        rb_link_node(&bo->vm_rb, parent, cur);
        rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
}

/**
 * ttm_bo_setup_vm:
 *
 * @bo: the buffer to allocate address space for
 *
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to ttm_bo_type_device objects as others are not
 * placed in the drm device address space.
 */
static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        int ret;

retry_pre_get:
        ret = drm_mm_pre_get(&bdev->addr_space_mm);
        if (unlikely(ret != 0))
                return ret;

        write_lock(&bdev->vm_lock);
        bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
                                         bo->mem.num_pages, 0, 0);

        if (unlikely(bo->vm_node == NULL)) {
                ret = -ENOMEM;
                goto out_unlock;
        }

        bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
                                              bo->mem.num_pages, 0);

        if (unlikely(bo->vm_node == NULL)) {
                write_unlock(&bdev->vm_lock);
                goto retry_pre_get;
        }

        ttm_bo_vm_insert_rb(bo);
        write_unlock(&bdev->vm_lock);
        bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;

        return 0;
out_unlock:
        write_unlock(&bdev->vm_lock);
        return ret;
}

static void ttm_bo_unref_sync_obj_locked(struct ttm_buffer_object *bo,
                                         void *sync_obj,
                                         void **extra_sync_obj)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
        void *tmp_obj = NULL, *tmp_obj_read = NULL, *tmp_obj_write = NULL;

        /* We must unref the sync obj wherever it's ref'd.
         * Note that if we unref bo->sync_obj, we can unref both the read
         * and write sync objs too, because they can't be newer than
         * bo->sync_obj, so they are no longer relevant. */
        if (sync_obj == bo->sync_obj ||
            sync_obj == bo->sync_obj_read) {
                tmp_obj_read = bo->sync_obj_read;
                bo->sync_obj_read = NULL;
        }
        if (sync_obj == bo->sync_obj ||
            sync_obj == bo->sync_obj_write) {
                tmp_obj_write = bo->sync_obj_write;
                bo->sync_obj_write = NULL;
        }
        if (sync_obj == bo->sync_obj) {
                tmp_obj = bo->sync_obj;
                bo->sync_obj = NULL;
        }

        clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
        spin_unlock(&bdev->fence_lock);
        if (tmp_obj)
                driver->sync_obj_unref(&tmp_obj);
        if (tmp_obj_read)
                driver->sync_obj_unref(&tmp_obj_read);
        if (tmp_obj_write)
                driver->sync_obj_unref(&tmp_obj_write);
        if (extra_sync_obj)
                driver->sync_obj_unref(extra_sync_obj);
        spin_lock(&bdev->fence_lock);
}

int ttm_bo_wait(struct ttm_buffer_object *bo,
                bool lazy, bool interruptible, bool no_wait,
                enum ttm_buffer_usage usage)
{
        struct ttm_bo_driver *driver = bo->bdev->driver;
        struct ttm_bo_device *bdev = bo->bdev;
        void *sync_obj;
        void *sync_obj_arg;
        int ret = 0;
        void **bo_sync_obj;

        switch (usage) {
        case TTM_USAGE_READ:
                bo_sync_obj = &bo->sync_obj_read;
                break;
        case TTM_USAGE_WRITE:
                bo_sync_obj = &bo->sync_obj_write;
                break;
        case TTM_USAGE_READWRITE:
        default:
                bo_sync_obj = &bo->sync_obj;
        }

        if (likely(*bo_sync_obj == NULL))
                return 0;

        while (*bo_sync_obj) {

                if (driver->sync_obj_signaled(*bo_sync_obj, bo->sync_obj_arg)) {
                        ttm_bo_unref_sync_obj_locked(bo, *bo_sync_obj, NULL);
                        continue;
                }

                if (no_wait)
                        return -EBUSY;

                sync_obj = driver->sync_obj_ref(*bo_sync_obj);
                sync_obj_arg = bo->sync_obj_arg;
                spin_unlock(&bdev->fence_lock);
                ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
                                            lazy, interruptible);
                if (unlikely(ret != 0)) {
                        driver->sync_obj_unref(&sync_obj);
                        spin_lock(&bdev->fence_lock);
                        return ret;
                }
                spin_lock(&bdev->fence_lock);
                if (likely(*bo_sync_obj == sync_obj &&
                           bo->sync_obj_arg == sync_obj_arg)) {
                        ttm_bo_unref_sync_obj_locked(bo, *bo_sync_obj,
                                                     &sync_obj);
                } else {
                        spin_unlock(&bdev->fence_lock);
                        driver->sync_obj_unref(&sync_obj);
                        spin_lock(&bdev->fence_lock);
                }
        }
        return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
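
/*
 * ttm_bo_wait() must be entered with bdev->fence_lock held; it drops and
 * retakes the lock around the driver's sync_obj_wait() so the actual wait
 * runs unlocked, then re-checks that the fence it waited on is still the
 * bo's current one before unreffing it. The usage argument selects which
 * of the bo's fences to wait on: the read fence, the write fence, or
 * (TTM_USAGE_READWRITE) the combined one, letting callers that only
 * conflict with one kind of GPU access wait for less.
 */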

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
        struct ttm_bo_device *bdev = bo->bdev;
        int ret = 0;

        /*
         * Using ttm_bo_reserve makes sure the lru lists are updated.
         */

        ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
        if (unlikely(ret != 0))
                return ret;
        spin_lock(&bdev->fence_lock);
        ret = ttm_bo_wait(bo, false, true, no_wait, TTM_USAGE_READWRITE);
        spin_unlock(&bdev->fence_lock);
        if (likely(ret == 0))
                atomic_inc(&bo->cpu_writers);
        ttm_bo_unreserve(bo);
        return ret;
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
        if (atomic_dec_and_test(&bo->cpu_writers))
                wake_up_all(&bo->event_queue);
}
EXPORT_SYMBOL(ttm_bo_synccpu_write_release);

/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
        struct ttm_bo_global *glob =
            container_of(shrink, struct ttm_bo_global, shrink);
        struct ttm_buffer_object *bo;
        int ret = -EBUSY;
        int put_count;
        uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

        spin_lock(&glob->lru_lock);
        while (ret == -EBUSY) {
                if (unlikely(list_empty(&glob->swap_lru))) {
                        spin_unlock(&glob->lru_lock);
                        return -EBUSY;
                }

                bo = list_first_entry(&glob->swap_lru,
                                      struct ttm_buffer_object, swap);
                kref_get(&bo->list_kref);

                if (!list_empty(&bo->ddestroy)) {
                        spin_unlock(&glob->lru_lock);
                        (void) ttm_bo_cleanup_refs(bo, false, false, false);
                        kref_put(&bo->list_kref, ttm_bo_release_list);
                        continue;
                }

                /**
                 * Reserve buffer. Since we unlock while sleeping, we need
                 * to re-check that nobody removed us from the swap-list while
                 * we slept.
                 */

                ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
                if (unlikely(ret == -EBUSY)) {
                        spin_unlock(&glob->lru_lock);
                        ttm_bo_wait_unreserved(bo, false);
                        kref_put(&bo->list_kref, ttm_bo_release_list);
                        spin_lock(&glob->lru_lock);
                }
        }

        BUG_ON(ret != 0);
        put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&glob->lru_lock);

        ttm_bo_list_ref_sub(bo, put_count, true);

        /**
         * Wait for GPU, then move to system cached.
         */

        spin_lock(&bo->bdev->fence_lock);
        ret = ttm_bo_wait(bo, false, false, false, TTM_USAGE_READWRITE);
        spin_unlock(&bo->bdev->fence_lock);

        if (unlikely(ret != 0))
                goto out;

        if ((bo->mem.placement & swap_placement) != swap_placement) {
                struct ttm_mem_reg evict_mem;

                evict_mem = bo->mem;
                evict_mem.mm_node = NULL;
                evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
                evict_mem.mem_type = TTM_PL_SYSTEM;

                ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
                                             false, false, false);
                if (unlikely(ret != 0))
                        goto out;
        }

        ttm_bo_unmap_virtual(bo);

        /**
         * Swap out. Buffer will be swapped in again as soon as
         * anyone tries to access a ttm page.
         */

        if (bo->bdev->driver->swap_notify)
                bo->bdev->driver->swap_notify(bo);

        ret = ttm_tt_swapout(bo->ttm, bo->persistent_swap_storage);
out:

        /**
         * Unreserve without putting on LRU to avoid swapping out an
         * already swapped buffer.
         */

        atomic_set(&bo->reserved, 0);
        wake_up_all(&bo->event_queue);
        kref_put(&bo->list_kref, ttm_bo_release_list);
        return ret;
}
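
/*
 * The shrink callback above therefore proceeds in four stages: pick a
 * victim from glob->swap_lru, reserve it (retrying if contended), wait
 * for the GPU and move the bo to cached system memory if it is not there
 * already, then push its pages out via ttm_tt_swapout(). The buffer is
 * transparently swapped back in by the first fault on a ttm page.
 */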

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
        while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
                ;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);