ttm_bo.c

  1. /**************************************************************************
  2. *
  3. * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
  4. * All Rights Reserved.
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a
  7. * copy of this software and associated documentation files (the
  8. * "Software"), to deal in the Software without restriction, including
  9. * without limitation the rights to use, copy, modify, merge, publish,
  10. * distribute, sub license, and/or sell copies of the Software, and to
  11. * permit persons to whom the Software is furnished to do so, subject to
  12. * the following conditions:
  13. *
  14. * The above copyright notice and this permission notice (including the
  15. * next paragraph) shall be included in all copies or substantial portions
  16. * of the Software.
  17. *
  18. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20. * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21. * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22. * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23. * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24. * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25. *
  26. **************************************************************************/
  27. /*
  28. * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  29. */
  30. /* Notes:
  31. *
  32. * We store the bo pointer in the drm_mm_node struct so we know which bo
  33. * owns a specific node. There is no protection on the pointer, so to keep
  34. * things from going berserk you must only access this pointer while
  35. * holding the global LRU lock, and you must reset the pointer to NULL
  36. * whenever you free a node.
  37. */
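/*
 * A minimal sketch of that convention (illustrative only; "glob", "node"
 * and "bo" stand for the names used later in this file):
 *
 *	spin_lock(&glob->lru_lock);
 *	bo = node->private;		only dereference while holding lru_lock
 *	...
 *	node->private = NULL;		always reset before the node is freed
 *	drm_mm_put_block(node);
 *	spin_unlock(&glob->lru_lock);
 */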
  38. #include "ttm/ttm_module.h"
  39. #include "ttm/ttm_bo_driver.h"
  40. #include "ttm/ttm_placement.h"
  41. #include <linux/jiffies.h>
  42. #include <linux/slab.h>
  43. #include <linux/sched.h>
  44. #include <linux/mm.h>
  45. #include <linux/file.h>
  46. #include <linux/module.h>
  47. #define TTM_ASSERT_LOCKED(param)
  48. #define TTM_DEBUG(fmt, arg...)
  49. #define TTM_BO_HASH_ORDER 13
  50. static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
  51. static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
  52. static void ttm_bo_global_kobj_release(struct kobject *kobj);
  53. static struct attribute ttm_bo_count = {
  54. .name = "bo_count",
  55. .mode = S_IRUGO
  56. };
  57. static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
  58. {
  59. int i;
  60. for (i = 0; i <= TTM_PL_PRIV5; i++)
  61. if (flags & (1 << i)) {
  62. *mem_type = i;
  63. return 0;
  64. }
  65. return -EINVAL;
  66. }
  67. static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
  68. {
  69. struct ttm_mem_type_manager *man = &bdev->man[mem_type];
  70. printk(KERN_ERR TTM_PFX " has_type: %d\n", man->has_type);
  71. printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type);
  72. printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags);
  73. printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset);
  74. printk(KERN_ERR TTM_PFX " size: %llu\n", man->size);
  75. printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n",
  76. man->available_caching);
  77. printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n",
  78. man->default_caching);
  79. if (mem_type != TTM_PL_SYSTEM) {
  80. spin_lock(&bdev->glob->lru_lock);
  81. drm_mm_debug_table(&man->manager, TTM_PFX);
  82. spin_unlock(&bdev->glob->lru_lock);
  83. }
  84. }
  85. static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
  86. struct ttm_placement *placement)
  87. {
  88. int i, ret, mem_type;
  89. printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n",
  90. bo, bo->mem.num_pages, bo->mem.size >> 10,
  91. bo->mem.size >> 20);
  92. for (i = 0; i < placement->num_placement; i++) {
  93. ret = ttm_mem_type_from_flags(placement->placement[i],
  94. &mem_type);
  95. if (ret)
  96. return;
  97. printk(KERN_ERR TTM_PFX " placement[%d]=0x%08X (%d)\n",
  98. i, placement->placement[i], mem_type);
  99. ttm_mem_type_debug(bo->bdev, mem_type);
  100. }
  101. }
  102. static ssize_t ttm_bo_global_show(struct kobject *kobj,
  103. struct attribute *attr,
  104. char *buffer)
  105. {
  106. struct ttm_bo_global *glob =
  107. container_of(kobj, struct ttm_bo_global, kobj);
  108. return snprintf(buffer, PAGE_SIZE, "%lu\n",
  109. (unsigned long) atomic_read(&glob->bo_count));
  110. }
  111. static struct attribute *ttm_bo_global_attrs[] = {
  112. &ttm_bo_count,
  113. NULL
  114. };
  115. static const struct sysfs_ops ttm_bo_global_ops = {
  116. .show = &ttm_bo_global_show
  117. };
  118. static struct kobj_type ttm_bo_glob_kobj_type = {
  119. .release = &ttm_bo_global_kobj_release,
  120. .sysfs_ops = &ttm_bo_global_ops,
  121. .default_attrs = ttm_bo_global_attrs
  122. };
  123. static inline uint32_t ttm_bo_type_flags(unsigned type)
  124. {
  125. return 1 << (type);
  126. }
  127. static void ttm_bo_release_list(struct kref *list_kref)
  128. {
  129. struct ttm_buffer_object *bo =
  130. container_of(list_kref, struct ttm_buffer_object, list_kref);
  131. struct ttm_bo_device *bdev = bo->bdev;
  132. BUG_ON(atomic_read(&bo->list_kref.refcount));
  133. BUG_ON(atomic_read(&bo->kref.refcount));
  134. BUG_ON(atomic_read(&bo->cpu_writers));
  135. BUG_ON(bo->sync_obj != NULL);
  136. BUG_ON(bo->mem.mm_node != NULL);
  137. BUG_ON(!list_empty(&bo->lru));
  138. BUG_ON(!list_empty(&bo->ddestroy));
  139. if (bo->ttm)
  140. ttm_tt_destroy(bo->ttm);
  141. atomic_dec(&bo->glob->bo_count);
  142. if (bo->destroy)
  143. bo->destroy(bo);
  144. else {
  145. ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
  146. kfree(bo);
  147. }
  148. }
  149. int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
  150. {
  151. if (interruptible) {
  152. int ret = 0;
  153. ret = wait_event_interruptible(bo->event_queue,
  154. atomic_read(&bo->reserved) == 0);
  155. if (unlikely(ret != 0))
  156. return ret;
  157. } else {
  158. wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
  159. }
  160. return 0;
  161. }
  162. EXPORT_SYMBOL(ttm_bo_wait_unreserved);
  163. static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
  164. {
  165. struct ttm_bo_device *bdev = bo->bdev;
  166. struct ttm_mem_type_manager *man;
  167. BUG_ON(!atomic_read(&bo->reserved));
  168. if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
  169. BUG_ON(!list_empty(&bo->lru));
  170. man = &bdev->man[bo->mem.mem_type];
  171. list_add_tail(&bo->lru, &man->lru);
  172. kref_get(&bo->list_kref);
  173. if (bo->ttm != NULL) {
  174. list_add_tail(&bo->swap, &bo->glob->swap_lru);
  175. kref_get(&bo->list_kref);
  176. }
  177. }
  178. }
  179. /**
  180. * Call with the lru_lock held.
  181. */
  182. static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
  183. {
  184. int put_count = 0;
  185. if (!list_empty(&bo->swap)) {
  186. list_del_init(&bo->swap);
  187. ++put_count;
  188. }
  189. if (!list_empty(&bo->lru)) {
  190. list_del_init(&bo->lru);
  191. ++put_count;
  192. }
  193. /*
  194. * TODO: Add a driver hook to delete from
  195. * driver-specific LRUs here.
  196. */
  197. return put_count;
  198. }
  199. int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
  200. bool interruptible,
  201. bool no_wait, bool use_sequence, uint32_t sequence)
  202. {
  203. struct ttm_bo_global *glob = bo->glob;
  204. int ret;
  205. while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
  206. if (use_sequence && bo->seq_valid &&
  207. (sequence - bo->val_seq < (1 << 31))) {
  208. return -EAGAIN;
  209. }
  210. if (no_wait)
  211. return -EBUSY;
  212. spin_unlock(&glob->lru_lock);
  213. ret = ttm_bo_wait_unreserved(bo, interruptible);
  214. spin_lock(&glob->lru_lock);
  215. if (unlikely(ret))
  216. return ret;
  217. }
  218. if (use_sequence) {
  219. bo->val_seq = sequence;
  220. bo->seq_valid = true;
  221. } else {
  222. bo->seq_valid = false;
  223. }
  224. return 0;
  225. }
  226. EXPORT_SYMBOL(ttm_bo_reserve);
  227. static void ttm_bo_ref_bug(struct kref *list_kref)
  228. {
  229. BUG();
  230. }
  231. int ttm_bo_reserve(struct ttm_buffer_object *bo,
  232. bool interruptible,
  233. bool no_wait, bool use_sequence, uint32_t sequence)
  234. {
  235. struct ttm_bo_global *glob = bo->glob;
  236. int put_count = 0;
  237. int ret;
  238. spin_lock(&glob->lru_lock);
  239. ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
  240. sequence);
  241. if (likely(ret == 0))
  242. put_count = ttm_bo_del_from_lru(bo);
  243. spin_unlock(&glob->lru_lock);
  244. while (put_count--)
  245. kref_put(&bo->list_kref, ttm_bo_ref_bug);
  246. return ret;
  247. }
  248. void ttm_bo_unreserve(struct ttm_buffer_object *bo)
  249. {
  250. struct ttm_bo_global *glob = bo->glob;
  251. spin_lock(&glob->lru_lock);
  252. ttm_bo_add_to_lru(bo);
  253. atomic_set(&bo->reserved, 0);
  254. wake_up_all(&bo->event_queue);
  255. spin_unlock(&glob->lru_lock);
  256. }
  257. EXPORT_SYMBOL(ttm_bo_unreserve);
  258. /*
  259. * Call bo->mutex locked.
  260. */
  261. static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
  262. {
  263. struct ttm_bo_device *bdev = bo->bdev;
  264. struct ttm_bo_global *glob = bo->glob;
  265. int ret = 0;
  266. uint32_t page_flags = 0;
  267. TTM_ASSERT_LOCKED(&bo->mutex);
  268. bo->ttm = NULL;
  269. if (bdev->need_dma32)
  270. page_flags |= TTM_PAGE_FLAG_DMA32;
  271. switch (bo->type) {
  272. case ttm_bo_type_device:
  273. if (zero_alloc)
  274. page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;	/* fall through */
  275. case ttm_bo_type_kernel:
  276. bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
  277. page_flags, glob->dummy_read_page);
  278. if (unlikely(bo->ttm == NULL))
  279. ret = -ENOMEM;
  280. break;
  281. case ttm_bo_type_user:
  282. bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
  283. page_flags | TTM_PAGE_FLAG_USER,
  284. glob->dummy_read_page);
  285. if (unlikely(bo->ttm == NULL)) {
  286. ret = -ENOMEM;
  287. break;
  288. }
  289. ret = ttm_tt_set_user(bo->ttm, current,
  290. bo->buffer_start, bo->num_pages);
  291. if (unlikely(ret != 0))
  292. ttm_tt_destroy(bo->ttm);
  293. break;
  294. default:
  295. printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
  296. ret = -EINVAL;
  297. break;
  298. }
  299. return ret;
  300. }
  301. static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
  302. struct ttm_mem_reg *mem,
  303. bool evict, bool interruptible,
  304. bool no_wait_reserve, bool no_wait_gpu)
  305. {
  306. struct ttm_bo_device *bdev = bo->bdev;
  307. bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
  308. bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
  309. struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
  310. struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
  311. int ret = 0;
  312. if (old_is_pci || new_is_pci ||
  313. ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
  314. ttm_bo_unmap_virtual(bo);
  315. /*
  316. * Create and bind a ttm if required.
  317. */
  318. if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
  319. ret = ttm_bo_add_ttm(bo, false);
  320. if (ret)
  321. goto out_err;
  322. ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
  323. if (ret)
  324. goto out_err;
  325. if (mem->mem_type != TTM_PL_SYSTEM) {
  326. ret = ttm_tt_bind(bo->ttm, mem);
  327. if (ret)
  328. goto out_err;
  329. }
  330. if (bo->mem.mem_type == TTM_PL_SYSTEM) {
  331. bo->mem = *mem;
  332. mem->mm_node = NULL;
  333. goto moved;
  334. }
  335. }
  336. if (bdev->driver->move_notify)
  337. bdev->driver->move_notify(bo, mem);
  338. if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
  339. !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
  340. ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
  341. else if (bdev->driver->move)
  342. ret = bdev->driver->move(bo, evict, interruptible,
  343. no_wait_reserve, no_wait_gpu, mem);
  344. else
  345. ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);
  346. if (ret)
  347. goto out_err;
  348. moved:
  349. if (bo->evicted) {
  350. ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
  351. if (ret)
  352. printk(KERN_ERR TTM_PFX "Cannot flush read caches\n");
  353. bo->evicted = false;
  354. }
  355. if (bo->mem.mm_node) {
  356. spin_lock(&bo->lock);
  357. bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
  358. bdev->man[bo->mem.mem_type].gpu_offset;
  359. bo->cur_placement = bo->mem.placement;
  360. spin_unlock(&bo->lock);
  361. } else
  362. bo->offset = 0;
  363. return 0;
  364. out_err:
  365. new_man = &bdev->man[bo->mem.mem_type];
  366. if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
  367. ttm_tt_unbind(bo->ttm);
  368. ttm_tt_destroy(bo->ttm);
  369. bo->ttm = NULL;
  370. }
  371. return ret;
  372. }
  373. /**
  374. * If the bo is idle, remove it from the delayed and LRU lists and unref it.
  375. * If it is not idle and already on the delayed list, do nothing.
  376. * If it is not idle and not on the delayed list, put it on the delayed list,
  377. * take a list_kref and schedule a delayed list check.
  378. */
  379. static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
  380. {
  381. struct ttm_bo_device *bdev = bo->bdev;
  382. struct ttm_bo_global *glob = bo->glob;
  383. struct ttm_bo_driver *driver = bdev->driver;
  384. int ret;
  385. spin_lock(&bo->lock);
  386. (void) ttm_bo_wait(bo, false, false, !remove_all);
  387. if (!bo->sync_obj) {
  388. int put_count;
  389. spin_unlock(&bo->lock);
  390. spin_lock(&glob->lru_lock);
  391. put_count = ttm_bo_del_from_lru(bo);
  392. ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
  393. BUG_ON(ret);
  394. if (bo->ttm)
  395. ttm_tt_unbind(bo->ttm);
  396. if (!list_empty(&bo->ddestroy)) {
  397. list_del_init(&bo->ddestroy);
  398. ++put_count;
  399. }
  400. if (bo->mem.mm_node) {
  401. bo->mem.mm_node->private = NULL;
  402. drm_mm_put_block(bo->mem.mm_node);
  403. bo->mem.mm_node = NULL;
  404. }
  405. spin_unlock(&glob->lru_lock);
  406. atomic_set(&bo->reserved, 0);
  407. while (put_count--)
  408. kref_put(&bo->list_kref, ttm_bo_ref_bug);
  409. return 0;
  410. }
  411. spin_lock(&glob->lru_lock);
  412. if (list_empty(&bo->ddestroy)) {
  413. void *sync_obj = bo->sync_obj;
  414. void *sync_obj_arg = bo->sync_obj_arg;
  415. kref_get(&bo->list_kref);
  416. list_add_tail(&bo->ddestroy, &bdev->ddestroy);
  417. spin_unlock(&glob->lru_lock);
  418. spin_unlock(&bo->lock);
  419. if (sync_obj)
  420. driver->sync_obj_flush(sync_obj, sync_obj_arg);
  421. schedule_delayed_work(&bdev->wq,
  422. ((HZ / 100) < 1) ? 1 : HZ / 100);
  423. ret = 0;
  424. } else {
  425. spin_unlock(&glob->lru_lock);
  426. spin_unlock(&bo->lock);
  427. ret = -EBUSY;
  428. }
  429. return ret;
  430. }
  431. /**
  432. * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
  433. * encountered buffers.
  434. */
  435. static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
  436. {
  437. struct ttm_bo_global *glob = bdev->glob;
  438. struct ttm_buffer_object *entry = NULL;
  439. int ret = 0;
  440. spin_lock(&glob->lru_lock);
  441. if (list_empty(&bdev->ddestroy))
  442. goto out_unlock;
  443. entry = list_first_entry(&bdev->ddestroy,
  444. struct ttm_buffer_object, ddestroy);
  445. kref_get(&entry->list_kref);
  446. for (;;) {
  447. struct ttm_buffer_object *nentry = NULL;
  448. if (entry->ddestroy.next != &bdev->ddestroy) {
  449. nentry = list_first_entry(&entry->ddestroy,
  450. struct ttm_buffer_object, ddestroy);
  451. kref_get(&nentry->list_kref);
  452. }
  453. spin_unlock(&glob->lru_lock);
  454. ret = ttm_bo_cleanup_refs(entry, remove_all);
  455. kref_put(&entry->list_kref, ttm_bo_release_list);
  456. entry = nentry;
  457. if (ret || !entry)
  458. goto out;
  459. spin_lock(&glob->lru_lock);
  460. if (list_empty(&entry->ddestroy))
  461. break;
  462. }
  463. out_unlock:
  464. spin_unlock(&glob->lru_lock);
  465. out:
  466. if (entry)
  467. kref_put(&entry->list_kref, ttm_bo_release_list);
  468. return ret;
  469. }
  470. static void ttm_bo_delayed_workqueue(struct work_struct *work)
  471. {
  472. struct ttm_bo_device *bdev =
  473. container_of(work, struct ttm_bo_device, wq.work);
  474. if (ttm_bo_delayed_delete(bdev, false)) {
  475. schedule_delayed_work(&bdev->wq,
  476. ((HZ / 100) < 1) ? 1 : HZ / 100);
  477. }
  478. }
  479. static void ttm_bo_release(struct kref *kref)
  480. {
  481. struct ttm_buffer_object *bo =
  482. container_of(kref, struct ttm_buffer_object, kref);
  483. struct ttm_bo_device *bdev = bo->bdev;
  484. if (likely(bo->vm_node != NULL)) {
  485. rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
  486. drm_mm_put_block(bo->vm_node);
  487. bo->vm_node = NULL;
  488. }
  489. write_unlock(&bdev->vm_lock);
  490. ttm_bo_cleanup_refs(bo, false);
  491. kref_put(&bo->list_kref, ttm_bo_release_list);
  492. write_lock(&bdev->vm_lock);
  493. }
  494. void ttm_bo_unref(struct ttm_buffer_object **p_bo)
  495. {
  496. struct ttm_buffer_object *bo = *p_bo;
  497. struct ttm_bo_device *bdev = bo->bdev;
  498. *p_bo = NULL;
  499. write_lock(&bdev->vm_lock);
  500. kref_put(&bo->kref, ttm_bo_release);
  501. write_unlock(&bdev->vm_lock);
  502. }
  503. EXPORT_SYMBOL(ttm_bo_unref);
  504. static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
  505. bool no_wait_reserve, bool no_wait_gpu)
  506. {
  507. struct ttm_bo_device *bdev = bo->bdev;
  508. struct ttm_bo_global *glob = bo->glob;
  509. struct ttm_mem_reg evict_mem;
  510. struct ttm_placement placement;
  511. int ret = 0;
  512. spin_lock(&bo->lock);
  513. ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
  514. spin_unlock(&bo->lock);
  515. if (unlikely(ret != 0)) {
  516. if (ret != -ERESTARTSYS) {
  517. printk(KERN_ERR TTM_PFX
  518. "Failed to expire sync object before "
  519. "buffer eviction.\n");
  520. }
  521. goto out;
  522. }
  523. BUG_ON(!atomic_read(&bo->reserved));
  524. evict_mem = bo->mem;
  525. evict_mem.mm_node = NULL;
  526. evict_mem.bus.io_reserved = false;
  527. placement.fpfn = 0;
  528. placement.lpfn = 0;
  529. placement.num_placement = 0;
  530. placement.num_busy_placement = 0;
  531. bdev->driver->evict_flags(bo, &placement);
  532. ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
  533. no_wait_reserve, no_wait_gpu);
  534. if (ret) {
  535. if (ret != -ERESTARTSYS) {
  536. printk(KERN_ERR TTM_PFX
  537. "Failed to find memory space for "
  538. "buffer 0x%p eviction.\n", bo);
  539. ttm_bo_mem_space_debug(bo, &placement);
  540. }
  541. goto out;
  542. }
  543. ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
  544. no_wait_reserve, no_wait_gpu);
  545. if (ret) {
  546. if (ret != -ERESTARTSYS)
  547. printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
  548. spin_lock(&glob->lru_lock);
  549. if (evict_mem.mm_node) {
  550. evict_mem.mm_node->private = NULL;
  551. drm_mm_put_block(evict_mem.mm_node);
  552. evict_mem.mm_node = NULL;
  553. }
  554. spin_unlock(&glob->lru_lock);
  555. goto out;
  556. }
  557. bo->evicted = true;
  558. out:
  559. return ret;
  560. }
  561. static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
  562. uint32_t mem_type,
  563. bool interruptible, bool no_wait_reserve,
  564. bool no_wait_gpu)
  565. {
  566. struct ttm_bo_global *glob = bdev->glob;
  567. struct ttm_mem_type_manager *man = &bdev->man[mem_type];
  568. struct ttm_buffer_object *bo;
  569. int ret, put_count = 0;
  570. retry:
  571. spin_lock(&glob->lru_lock);
  572. if (list_empty(&man->lru)) {
  573. spin_unlock(&glob->lru_lock);
  574. return -EBUSY;
  575. }
  576. bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
  577. kref_get(&bo->list_kref);
  578. ret = ttm_bo_reserve_locked(bo, false, no_wait_reserve, false, 0);
  579. if (unlikely(ret == -EBUSY)) {
  580. spin_unlock(&glob->lru_lock);
  581. if (likely(!no_wait_gpu))
  582. ret = ttm_bo_wait_unreserved(bo, interruptible);
  583. kref_put(&bo->list_kref, ttm_bo_release_list);
  584. /**
  585. * We *need* to retry after releasing the lru lock.
  586. */
  587. if (unlikely(ret != 0))
  588. return ret;
  589. goto retry;
  590. }
  591. put_count = ttm_bo_del_from_lru(bo);
  592. spin_unlock(&glob->lru_lock);
  593. BUG_ON(ret != 0);
  594. while (put_count--)
  595. kref_put(&bo->list_kref, ttm_bo_ref_bug);
  596. ret = ttm_bo_evict(bo, interruptible, no_wait_reserve, no_wait_gpu);
  597. ttm_bo_unreserve(bo);
  598. kref_put(&bo->list_kref, ttm_bo_release_list);
  599. return ret;
  600. }
  601. static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
  602. struct ttm_mem_type_manager *man,
  603. struct ttm_placement *placement,
  604. struct ttm_mem_reg *mem,
  605. struct drm_mm_node **node)
  606. {
  607. struct ttm_bo_global *glob = bo->glob;
  608. unsigned long lpfn;
  609. int ret;
  610. lpfn = placement->lpfn;
  611. if (!lpfn)
  612. lpfn = man->size;
  613. *node = NULL;
  614. do {
  615. ret = drm_mm_pre_get(&man->manager);
  616. if (unlikely(ret))
  617. return ret;
  618. spin_lock(&glob->lru_lock);
  619. *node = drm_mm_search_free_in_range(&man->manager,
  620. mem->num_pages, mem->page_alignment,
  621. placement->fpfn, lpfn, 1);
  622. if (unlikely(*node == NULL)) {
  623. spin_unlock(&glob->lru_lock);
  624. return 0;
  625. }
  626. *node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
  627. mem->page_alignment,
  628. placement->fpfn,
  629. lpfn);
  630. spin_unlock(&glob->lru_lock);
  631. } while (*node == NULL);
  632. return 0;
  633. }
  634. /**
  635. * Repeatedly evict memory from the LRU for @mem_type until we create enough
  636. * space, or we've evicted everything and there isn't enough space.
  637. */
  638. static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
  639. uint32_t mem_type,
  640. struct ttm_placement *placement,
  641. struct ttm_mem_reg *mem,
  642. bool interruptible,
  643. bool no_wait_reserve,
  644. bool no_wait_gpu)
  645. {
  646. struct ttm_bo_device *bdev = bo->bdev;
  647. struct ttm_bo_global *glob = bdev->glob;
  648. struct ttm_mem_type_manager *man = &bdev->man[mem_type];
  649. struct drm_mm_node *node;
  650. int ret;
  651. do {
  652. ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
  653. if (unlikely(ret != 0))
  654. return ret;
  655. if (node)
  656. break;
  657. spin_lock(&glob->lru_lock);
  658. if (list_empty(&man->lru)) {
  659. spin_unlock(&glob->lru_lock);
  660. break;
  661. }
  662. spin_unlock(&glob->lru_lock);
  663. ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
  664. no_wait_reserve, no_wait_gpu);
  665. if (unlikely(ret != 0))
  666. return ret;
  667. } while (1);
  668. if (node == NULL)
  669. return -ENOMEM;
  670. mem->mm_node = node;
  671. mem->mem_type = mem_type;
  672. return 0;
  673. }
  674. static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
  675. uint32_t cur_placement,
  676. uint32_t proposed_placement)
  677. {
  678. uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
  679. uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
  680. /**
  681. * Keep current caching if possible.
  682. */
  683. if ((cur_placement & caching) != 0)
  684. result |= (cur_placement & caching);
  685. else if ((man->default_caching & caching) != 0)
  686. result |= man->default_caching;
  687. else if ((TTM_PL_FLAG_CACHED & caching) != 0)
  688. result |= TTM_PL_FLAG_CACHED;
  689. else if ((TTM_PL_FLAG_WC & caching) != 0)
  690. result |= TTM_PL_FLAG_WC;
  691. else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
  692. result |= TTM_PL_FLAG_UNCACHED;
  693. return result;
  694. }
  695. static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
  696. bool disallow_fixed,
  697. uint32_t mem_type,
  698. uint32_t proposed_placement,
  699. uint32_t *masked_placement)
  700. {
  701. uint32_t cur_flags = ttm_bo_type_flags(mem_type);
  702. if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
  703. return false;
  704. if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
  705. return false;
  706. if ((proposed_placement & man->available_caching) == 0)
  707. return false;
  708. cur_flags |= (proposed_placement & man->available_caching);
  709. *masked_placement = cur_flags;
  710. return true;
  711. }
  712. /**
  713. * Creates space for memory region @mem according to its type.
  714. *
  715. * This function first searches for free space in compatible memory types in
  716. * the priority order defined by the driver. If free space isn't found, then
  717. * ttm_bo_mem_force_space is attempted in priority order to evict and find
  718. * space.
  719. */
  720. int ttm_bo_mem_space(struct ttm_buffer_object *bo,
  721. struct ttm_placement *placement,
  722. struct ttm_mem_reg *mem,
  723. bool interruptible, bool no_wait_reserve,
  724. bool no_wait_gpu)
  725. {
  726. struct ttm_bo_device *bdev = bo->bdev;
  727. struct ttm_mem_type_manager *man;
  728. uint32_t mem_type = TTM_PL_SYSTEM;
  729. uint32_t cur_flags = 0;
  730. bool type_found = false;
  731. bool type_ok = false;
  732. bool has_erestartsys = false;
  733. struct drm_mm_node *node = NULL;
  734. int i, ret;
  735. mem->mm_node = NULL;
  736. for (i = 0; i < placement->num_placement; ++i) {
  737. ret = ttm_mem_type_from_flags(placement->placement[i],
  738. &mem_type);
  739. if (ret)
  740. return ret;
  741. man = &bdev->man[mem_type];
  742. type_ok = ttm_bo_mt_compatible(man,
  743. bo->type == ttm_bo_type_user,
  744. mem_type,
  745. placement->placement[i],
  746. &cur_flags);
  747. if (!type_ok)
  748. continue;
  749. cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
  750. cur_flags);
  751. /*
  752. * Copy the access and other non-mapping-related flag bits from
  753. * the memory placement flags into the current flags.
  754. */
  755. ttm_flag_masked(&cur_flags, placement->placement[i],
  756. ~TTM_PL_MASK_MEMTYPE);
  757. if (mem_type == TTM_PL_SYSTEM)
  758. break;
  759. if (man->has_type && man->use_type) {
  760. type_found = true;
  761. ret = ttm_bo_man_get_node(bo, man, placement, mem,
  762. &node);
  763. if (unlikely(ret))
  764. return ret;
  765. }
  766. if (node)
  767. break;
  768. }
  769. if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
  770. mem->mm_node = node;
  771. mem->mem_type = mem_type;
  772. mem->placement = cur_flags;
  773. if (node)
  774. node->private = bo;
  775. return 0;
  776. }
  777. if (!type_found)
  778. return -EINVAL;
  779. for (i = 0; i < placement->num_busy_placement; ++i) {
  780. ret = ttm_mem_type_from_flags(placement->busy_placement[i],
  781. &mem_type);
  782. if (ret)
  783. return ret;
  784. man = &bdev->man[mem_type];
  785. if (!man->has_type)
  786. continue;
  787. if (!ttm_bo_mt_compatible(man,
  788. bo->type == ttm_bo_type_user,
  789. mem_type,
  790. placement->busy_placement[i],
  791. &cur_flags))
  792. continue;
  793. cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
  794. cur_flags);
  795. /*
  796. * Copy the access and other non-mapping-related flag bits from
  797. * the memory placement flags into the current flags.
  798. */
  799. ttm_flag_masked(&cur_flags, placement->busy_placement[i],
  800. ~TTM_PL_MASK_MEMTYPE);
  801. if (mem_type == TTM_PL_SYSTEM) {
  802. mem->mem_type = mem_type;
  803. mem->placement = cur_flags;
  804. mem->mm_node = NULL;
  805. return 0;
  806. }
  807. ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
  808. interruptible, no_wait_reserve, no_wait_gpu);
  809. if (ret == 0 && mem->mm_node) {
  810. mem->placement = cur_flags;
  811. mem->mm_node->private = bo;
  812. return 0;
  813. }
  814. if (ret == -ERESTARTSYS)
  815. has_erestartsys = true;
  816. }
  817. ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
  818. return ret;
  819. }
  820. EXPORT_SYMBOL(ttm_bo_mem_space);
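/*
 * A hedged driver-side usage sketch (not part of this file): the caller
 * lists its preferred placements first and the fallback "busy" placements
 * second, and ttm_bo_mem_space() walks them in that order as described
 * above. TTM_PL_FLAG_VRAM is assumed to be a placement the driver has
 * initialized with ttm_bo_init_mm().
 *
 *	uint32_t prefer[] = { TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC };
 *	uint32_t busy[] = { TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED };
 *	struct ttm_placement placement = {
 *		.fpfn = 0, .lpfn = 0,
 *		.num_placement = 1, .placement = prefer,
 *		.num_busy_placement = 1, .busy_placement = busy,
 *	};
 *	struct ttm_mem_reg mem = {
 *		.num_pages = bo->num_pages,
 *		.size = bo->num_pages << PAGE_SHIFT,
 *		.page_alignment = bo->mem.page_alignment,
 *	};
 *
 *	ret = ttm_bo_mem_space(bo, &placement, &mem, true, false, false);
 */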
  821. int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
  822. {
  823. if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
  824. return -EBUSY;
  825. return wait_event_interruptible(bo->event_queue,
  826. atomic_read(&bo->cpu_writers) == 0);
  827. }
  828. EXPORT_SYMBOL(ttm_bo_wait_cpu);
  829. int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
  830. struct ttm_placement *placement,
  831. bool interruptible, bool no_wait_reserve,
  832. bool no_wait_gpu)
  833. {
  834. struct ttm_bo_global *glob = bo->glob;
  835. int ret = 0;
  836. struct ttm_mem_reg mem;
  837. BUG_ON(!atomic_read(&bo->reserved));
  838. /*
  839. * FIXME: It's possible to pipeline buffer moves.
  840. * Have the driver move function wait for idle when necessary,
  841. * instead of doing it here.
  842. */
  843. spin_lock(&bo->lock);
  844. ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
  845. spin_unlock(&bo->lock);
  846. if (ret)
  847. return ret;
  848. mem.num_pages = bo->num_pages;
  849. mem.size = mem.num_pages << PAGE_SHIFT;
  850. mem.page_alignment = bo->mem.page_alignment;
  851. mem.bus.io_reserved = false;
  852. /*
  853. * Determine where to move the buffer.
  854. */
  855. ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
  856. if (ret)
  857. goto out_unlock;
  858. ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
  859. out_unlock:
  860. if (ret && mem.mm_node) {
  861. spin_lock(&glob->lru_lock);
  862. mem.mm_node->private = NULL;
  863. drm_mm_put_block(mem.mm_node);
  864. spin_unlock(&glob->lru_lock);
  865. }
  866. return ret;
  867. }
  868. static int ttm_bo_mem_compat(struct ttm_placement *placement,
  869. struct ttm_mem_reg *mem)
  870. {
  871. int i;
  872. struct drm_mm_node *node = mem->mm_node;
  873. if (node && placement->lpfn != 0 &&
  874. (node->start < placement->fpfn ||
  875. node->start + node->size > placement->lpfn))
  876. return -1;
  877. for (i = 0; i < placement->num_placement; i++) {
  878. if ((placement->placement[i] & mem->placement &
  879. TTM_PL_MASK_CACHING) &&
  880. (placement->placement[i] & mem->placement &
  881. TTM_PL_MASK_MEM))
  882. return i;
  883. }
  884. return -1;
  885. }
  886. int ttm_bo_validate(struct ttm_buffer_object *bo,
  887. struct ttm_placement *placement,
  888. bool interruptible, bool no_wait_reserve,
  889. bool no_wait_gpu)
  890. {
  891. int ret;
  892. BUG_ON(!atomic_read(&bo->reserved));
  893. /* Check that range is valid */
  894. if (placement->lpfn || placement->fpfn)
  895. if (placement->fpfn > placement->lpfn ||
  896. (placement->lpfn - placement->fpfn) < bo->num_pages)
  897. return -EINVAL;
  898. /*
  899. * Check whether we need to move buffer.
  900. */
  901. ret = ttm_bo_mem_compat(placement, &bo->mem);
  902. if (ret < 0) {
  903. ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait_reserve, no_wait_gpu);
  904. if (ret)
  905. return ret;
  906. } else {
  907. /*
  908. * Copy the access and other non-mapping-related flag bits from
  909. * the compatible memory placement flags into the active flags.
  910. */
  911. ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
  912. ~TTM_PL_MASK_MEMTYPE);
  913. }
  914. /*
  915. * We might need to add a TTM.
  916. */
  917. if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
  918. ret = ttm_bo_add_ttm(bo, true);
  919. if (ret)
  920. return ret;
  921. }
  922. return 0;
  923. }
  924. EXPORT_SYMBOL(ttm_bo_validate);
  925. int ttm_bo_check_placement(struct ttm_buffer_object *bo,
  926. struct ttm_placement *placement)
  927. {
  928. int i;
  929. if (placement->fpfn || placement->lpfn) {
  930. if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
  931. printk(KERN_ERR TTM_PFX "Page number range too small. "
  932. "Need %lu pages, range is [%u, %u]\n",
  933. bo->mem.num_pages, placement->fpfn,
  934. placement->lpfn);
  935. return -EINVAL;
  936. }
  937. }
  938. for (i = 0; i < placement->num_placement; i++) {
  939. if (!capable(CAP_SYS_ADMIN)) {
  940. if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
  941. printk(KERN_ERR TTM_PFX "Need to be root to "
  942. "modify NO_EVICT status.\n");
  943. return -EINVAL;
  944. }
  945. }
  946. }
  947. for (i = 0; i < placement->num_busy_placement; i++) {
  948. if (!capable(CAP_SYS_ADMIN)) {
  949. if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
  950. printk(KERN_ERR TTM_PFX "Need to be root to "
  951. "modify NO_EVICT status.\n");
  952. return -EINVAL;
  953. }
  954. }
  955. }
  956. return 0;
  957. }
  958. int ttm_bo_init(struct ttm_bo_device *bdev,
  959. struct ttm_buffer_object *bo,
  960. unsigned long size,
  961. enum ttm_bo_type type,
  962. struct ttm_placement *placement,
  963. uint32_t page_alignment,
  964. unsigned long buffer_start,
  965. bool interruptible,
  966. struct file *persistant_swap_storage,
  967. size_t acc_size,
  968. void (*destroy) (struct ttm_buffer_object *))
  969. {
  970. int ret = 0;
  971. unsigned long num_pages;
  972. size += buffer_start & ~PAGE_MASK;
  973. num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
  974. if (num_pages == 0) {
  975. printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
  976. return -EINVAL;
  977. }
  978. bo->destroy = destroy;
  979. spin_lock_init(&bo->lock);
  980. kref_init(&bo->kref);
  981. kref_init(&bo->list_kref);
  982. atomic_set(&bo->cpu_writers, 0);
  983. atomic_set(&bo->reserved, 1);
  984. init_waitqueue_head(&bo->event_queue);
  985. INIT_LIST_HEAD(&bo->lru);
  986. INIT_LIST_HEAD(&bo->ddestroy);
  987. INIT_LIST_HEAD(&bo->swap);
  988. bo->bdev = bdev;
  989. bo->glob = bdev->glob;
  990. bo->type = type;
  991. bo->num_pages = num_pages;
  992. bo->mem.size = num_pages << PAGE_SHIFT;
  993. bo->mem.mem_type = TTM_PL_SYSTEM;
  994. bo->mem.num_pages = bo->num_pages;
  995. bo->mem.mm_node = NULL;
  996. bo->mem.page_alignment = page_alignment;
  997. bo->mem.bus.io_reserved = false;
  998. bo->buffer_start = buffer_start & PAGE_MASK;
  999. bo->priv_flags = 0;
  1000. bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
  1001. bo->seq_valid = false;
  1002. bo->persistant_swap_storage = persistant_swap_storage;
  1003. bo->acc_size = acc_size;
  1004. atomic_inc(&bo->glob->bo_count);
  1005. ret = ttm_bo_check_placement(bo, placement);
  1006. if (unlikely(ret != 0))
  1007. goto out_err;
  1008. /*
  1009. * For ttm_bo_type_device buffers, allocate
  1010. * address space from the device.
  1011. */
  1012. if (bo->type == ttm_bo_type_device) {
  1013. ret = ttm_bo_setup_vm(bo);
  1014. if (ret)
  1015. goto out_err;
  1016. }
  1017. ret = ttm_bo_validate(bo, placement, interruptible, false, false);
  1018. if (ret)
  1019. goto out_err;
  1020. ttm_bo_unreserve(bo);
  1021. return 0;
  1022. out_err:
  1023. ttm_bo_unreserve(bo);
  1024. ttm_bo_unref(&bo);
  1025. return ret;
  1026. }
  1027. EXPORT_SYMBOL(ttm_bo_init);
  1028. static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
  1029. unsigned long num_pages)
  1030. {
  1031. size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
  1032. PAGE_MASK;
  1033. return glob->ttm_bo_size + 2 * page_array_size;
  1034. }
  1035. int ttm_bo_create(struct ttm_bo_device *bdev,
  1036. unsigned long size,
  1037. enum ttm_bo_type type,
  1038. struct ttm_placement *placement,
  1039. uint32_t page_alignment,
  1040. unsigned long buffer_start,
  1041. bool interruptible,
  1042. struct file *persistant_swap_storage,
  1043. struct ttm_buffer_object **p_bo)
  1044. {
  1045. struct ttm_buffer_object *bo;
  1046. struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
  1047. int ret;
  1048. size_t acc_size =
  1049. ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
  1050. ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
  1051. if (unlikely(ret != 0))
  1052. return ret;
  1053. bo = kzalloc(sizeof(*bo), GFP_KERNEL);
  1054. if (unlikely(bo == NULL)) {
  1055. ttm_mem_global_free(mem_glob, acc_size);
  1056. return -ENOMEM;
  1057. }
  1058. ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
  1059. buffer_start, interruptible,
  1060. persistant_swap_storage, acc_size, NULL);
  1061. if (likely(ret == 0))
  1062. *p_bo = bo;
  1063. return ret;
  1064. }
  1065. static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
  1066. unsigned mem_type, bool allow_errors)
  1067. {
  1068. struct ttm_mem_type_manager *man = &bdev->man[mem_type];
  1069. struct ttm_bo_global *glob = bdev->glob;
  1070. int ret;
  1071. /*
  1072. * Can't use standard list traversal since we're unlocking.
  1073. */
  1074. spin_lock(&glob->lru_lock);
  1075. while (!list_empty(&man->lru)) {
  1076. spin_unlock(&glob->lru_lock);
  1077. ret = ttm_mem_evict_first(bdev, mem_type, false, false, false);
  1078. if (ret) {
  1079. if (allow_errors) {
  1080. return ret;
  1081. } else {
  1082. printk(KERN_ERR TTM_PFX
  1083. "Cleanup eviction failed\n");
  1084. }
  1085. }
  1086. spin_lock(&glob->lru_lock);
  1087. }
  1088. spin_unlock(&glob->lru_lock);
  1089. return 0;
  1090. }
  1091. int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
  1092. {
  1093. struct ttm_bo_global *glob = bdev->glob;
  1094. struct ttm_mem_type_manager *man;
  1095. int ret = -EINVAL;
  1096. if (mem_type >= TTM_NUM_MEM_TYPES) {
  1097. printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
  1098. return ret;
  1099. }
  1100. man = &bdev->man[mem_type];
  1101. if (!man->has_type) {
  1102. printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
  1103. "memory manager type %u\n", mem_type);
  1104. return ret;
  1105. }
  1106. man->use_type = false;
  1107. man->has_type = false;
  1108. ret = 0;
  1109. if (mem_type > 0) {
  1110. ttm_bo_force_list_clean(bdev, mem_type, false);
  1111. spin_lock(&glob->lru_lock);
  1112. if (drm_mm_clean(&man->manager))
  1113. drm_mm_takedown(&man->manager);
  1114. else
  1115. ret = -EBUSY;
  1116. spin_unlock(&glob->lru_lock);
  1117. }
  1118. return ret;
  1119. }
  1120. EXPORT_SYMBOL(ttm_bo_clean_mm);
  1121. int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
  1122. {
  1123. struct ttm_mem_type_manager *man = &bdev->man[mem_type];
  1124. if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
  1125. printk(KERN_ERR TTM_PFX
  1126. "Illegal memory manager memory type %u.\n",
  1127. mem_type);
  1128. return -EINVAL;
  1129. }
  1130. if (!man->has_type) {
  1131. printk(KERN_ERR TTM_PFX
  1132. "Memory type %u has not been initialized.\n",
  1133. mem_type);
  1134. return 0;
  1135. }
  1136. return ttm_bo_force_list_clean(bdev, mem_type, true);
  1137. }
  1138. EXPORT_SYMBOL(ttm_bo_evict_mm);
  1139. int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
  1140. unsigned long p_size)
  1141. {
  1142. int ret = -EINVAL;
  1143. struct ttm_mem_type_manager *man;
  1144. if (type >= TTM_NUM_MEM_TYPES) {
  1145. printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
  1146. return ret;
  1147. }
  1148. man = &bdev->man[type];
  1149. if (man->has_type) {
  1150. printk(KERN_ERR TTM_PFX
  1151. "Memory manager already initialized for type %d\n",
  1152. type);
  1153. return ret;
  1154. }
  1155. ret = bdev->driver->init_mem_type(bdev, type, man);
  1156. if (ret)
  1157. return ret;
  1158. ret = 0;
  1159. if (type != TTM_PL_SYSTEM) {
  1160. if (!p_size) {
  1161. printk(KERN_ERR TTM_PFX
  1162. "Zero size memory manager type %d\n",
  1163. type);
  1164. return ret;
  1165. }
  1166. ret = drm_mm_init(&man->manager, 0, p_size);
  1167. if (ret)
  1168. return ret;
  1169. }
  1170. man->has_type = true;
  1171. man->use_type = true;
  1172. man->size = p_size;
  1173. INIT_LIST_HEAD(&man->lru);
  1174. return 0;
  1175. }
  1176. EXPORT_SYMBOL(ttm_bo_init_mm);
  1177. static void ttm_bo_global_kobj_release(struct kobject *kobj)
  1178. {
  1179. struct ttm_bo_global *glob =
  1180. container_of(kobj, struct ttm_bo_global, kobj);
  1181. ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
  1182. __free_page(glob->dummy_read_page);
  1183. kfree(glob);
  1184. }
  1185. void ttm_bo_global_release(struct ttm_global_reference *ref)
  1186. {
  1187. struct ttm_bo_global *glob = ref->object;
  1188. kobject_del(&glob->kobj);
  1189. kobject_put(&glob->kobj);
  1190. }
  1191. EXPORT_SYMBOL(ttm_bo_global_release);
  1192. int ttm_bo_global_init(struct ttm_global_reference *ref)
  1193. {
  1194. struct ttm_bo_global_ref *bo_ref =
  1195. container_of(ref, struct ttm_bo_global_ref, ref);
  1196. struct ttm_bo_global *glob = ref->object;
  1197. int ret;
  1198. mutex_init(&glob->device_list_mutex);
  1199. spin_lock_init(&glob->lru_lock);
  1200. glob->mem_glob = bo_ref->mem_glob;
  1201. glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
  1202. if (unlikely(glob->dummy_read_page == NULL)) {
  1203. ret = -ENOMEM;
  1204. goto out_no_drp;
  1205. }
  1206. INIT_LIST_HEAD(&glob->swap_lru);
  1207. INIT_LIST_HEAD(&glob->device_list);
  1208. ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
  1209. ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
  1210. if (unlikely(ret != 0)) {
  1211. printk(KERN_ERR TTM_PFX
  1212. "Could not register buffer object swapout.\n");
  1213. goto out_no_shrink;
  1214. }
  1215. glob->ttm_bo_extra_size =
  1216. ttm_round_pot(sizeof(struct ttm_tt)) +
  1217. ttm_round_pot(sizeof(struct ttm_backend));
  1218. glob->ttm_bo_size = glob->ttm_bo_extra_size +
  1219. ttm_round_pot(sizeof(struct ttm_buffer_object));
  1220. atomic_set(&glob->bo_count, 0);
  1221. ret = kobject_init_and_add(
  1222. &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
  1223. if (unlikely(ret != 0))
  1224. kobject_put(&glob->kobj);
  1225. return ret;
  1226. out_no_shrink:
  1227. __free_page(glob->dummy_read_page);
  1228. out_no_drp:
  1229. kfree(glob);
  1230. return ret;
  1231. }
  1232. EXPORT_SYMBOL(ttm_bo_global_init);
  1233. int ttm_bo_device_release(struct ttm_bo_device *bdev)
  1234. {
  1235. int ret = 0;
  1236. unsigned i = TTM_NUM_MEM_TYPES;
  1237. struct ttm_mem_type_manager *man;
  1238. struct ttm_bo_global *glob = bdev->glob;
  1239. while (i--) {
  1240. man = &bdev->man[i];
  1241. if (man->has_type) {
  1242. man->use_type = false;
  1243. if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
  1244. ret = -EBUSY;
  1245. printk(KERN_ERR TTM_PFX
  1246. "DRM memory manager type %d "
  1247. "is not clean.\n", i);
  1248. }
  1249. man->has_type = false;
  1250. }
  1251. }
  1252. mutex_lock(&glob->device_list_mutex);
  1253. list_del(&bdev->device_list);
  1254. mutex_unlock(&glob->device_list_mutex);
  1255. if (!cancel_delayed_work(&bdev->wq))
  1256. flush_scheduled_work();
  1257. while (ttm_bo_delayed_delete(bdev, true))
  1258. ;
  1259. spin_lock(&glob->lru_lock);
  1260. if (list_empty(&bdev->ddestroy))
  1261. TTM_DEBUG("Delayed destroy list was clean\n");
  1262. if (list_empty(&bdev->man[0].lru))
  1263. TTM_DEBUG("Swap list was clean\n");
  1264. spin_unlock(&glob->lru_lock);
  1265. BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
  1266. write_lock(&bdev->vm_lock);
  1267. drm_mm_takedown(&bdev->addr_space_mm);
  1268. write_unlock(&bdev->vm_lock);
  1269. return ret;
  1270. }
  1271. EXPORT_SYMBOL(ttm_bo_device_release);
  1272. int ttm_bo_device_init(struct ttm_bo_device *bdev,
  1273. struct ttm_bo_global *glob,
  1274. struct ttm_bo_driver *driver,
  1275. uint64_t file_page_offset,
  1276. bool need_dma32)
  1277. {
  1278. int ret = -EINVAL;
  1279. rwlock_init(&bdev->vm_lock);
  1280. bdev->driver = driver;
  1281. memset(bdev->man, 0, sizeof(bdev->man));
  1282. /*
  1283. * Initialize the system memory buffer type.
  1284. * Other types need to be driver / IOCTL initialized.
  1285. */
  1286. ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
  1287. if (unlikely(ret != 0))
  1288. goto out_no_sys;
  1289. bdev->addr_space_rb = RB_ROOT;
  1290. ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
  1291. if (unlikely(ret != 0))
  1292. goto out_no_addr_mm;
  1293. INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
  1294. bdev->nice_mode = true;
  1295. INIT_LIST_HEAD(&bdev->ddestroy);
  1296. bdev->dev_mapping = NULL;
  1297. bdev->glob = glob;
  1298. bdev->need_dma32 = need_dma32;
  1299. mutex_lock(&glob->device_list_mutex);
  1300. list_add_tail(&bdev->device_list, &glob->device_list);
  1301. mutex_unlock(&glob->device_list_mutex);
  1302. return 0;
  1303. out_no_addr_mm:
  1304. ttm_bo_clean_mm(bdev, 0);
  1305. out_no_sys:
  1306. return ret;
  1307. }
  1308. EXPORT_SYMBOL(ttm_bo_device_init);
  1309. /*
  1310. * buffer object vm functions.
  1311. */
  1312. bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
  1313. {
  1314. struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
  1315. if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
  1316. if (mem->mem_type == TTM_PL_SYSTEM)
  1317. return false;
  1318. if (man->flags & TTM_MEMTYPE_FLAG_CMA)
  1319. return false;
  1320. if (mem->placement & TTM_PL_FLAG_CACHED)
  1321. return false;
  1322. }
  1323. return true;
  1324. }
  1325. void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
  1326. {
  1327. struct ttm_bo_device *bdev = bo->bdev;
  1328. loff_t offset = (loff_t) bo->addr_space_offset;
  1329. loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
  1330. if (!bdev->dev_mapping)
  1331. return;
  1332. unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
  1333. ttm_mem_io_free(bdev, &bo->mem);
  1334. }
  1335. EXPORT_SYMBOL(ttm_bo_unmap_virtual);
  1336. static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
  1337. {
  1338. struct ttm_bo_device *bdev = bo->bdev;
  1339. struct rb_node **cur = &bdev->addr_space_rb.rb_node;
  1340. struct rb_node *parent = NULL;
  1341. struct ttm_buffer_object *cur_bo;
  1342. unsigned long offset = bo->vm_node->start;
  1343. unsigned long cur_offset;
  1344. while (*cur) {
  1345. parent = *cur;
  1346. cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
  1347. cur_offset = cur_bo->vm_node->start;
  1348. if (offset < cur_offset)
  1349. cur = &parent->rb_left;
  1350. else if (offset > cur_offset)
  1351. cur = &parent->rb_right;
  1352. else
  1353. BUG();
  1354. }
  1355. rb_link_node(&bo->vm_rb, parent, cur);
  1356. rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
  1357. }
  1358. /**
  1359. * ttm_bo_setup_vm:
  1360. *
  1361. * @bo: the buffer to allocate address space for
  1362. *
  1363. * Allocate address space in the drm device so that applications
  1364. * can mmap the buffer and access the contents. This only
  1365. * applies to ttm_bo_type_device objects as others are not
  1366. * placed in the drm device address space.
  1367. */
  1368. static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
  1369. {
  1370. struct ttm_bo_device *bdev = bo->bdev;
  1371. int ret;
  1372. retry_pre_get:
  1373. ret = drm_mm_pre_get(&bdev->addr_space_mm);
  1374. if (unlikely(ret != 0))
  1375. return ret;
  1376. write_lock(&bdev->vm_lock);
  1377. bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
  1378. bo->mem.num_pages, 0, 0);
  1379. if (unlikely(bo->vm_node == NULL)) {
  1380. ret = -ENOMEM;
  1381. goto out_unlock;
  1382. }
  1383. bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
  1384. bo->mem.num_pages, 0);
  1385. if (unlikely(bo->vm_node == NULL)) {
  1386. write_unlock(&bdev->vm_lock);
  1387. goto retry_pre_get;
  1388. }
  1389. ttm_bo_vm_insert_rb(bo);
  1390. write_unlock(&bdev->vm_lock);
  1391. bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
  1392. return 0;
  1393. out_unlock:
  1394. write_unlock(&bdev->vm_lock);
  1395. return ret;
  1396. }
  1397. int ttm_bo_wait(struct ttm_buffer_object *bo,
  1398. bool lazy, bool interruptible, bool no_wait)
  1399. {
  1400. struct ttm_bo_driver *driver = bo->bdev->driver;
  1401. void *sync_obj;
  1402. void *sync_obj_arg;
  1403. int ret = 0;
  1404. if (likely(bo->sync_obj == NULL))
  1405. return 0;
  1406. while (bo->sync_obj) {
  1407. if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
  1408. void *tmp_obj = bo->sync_obj;
  1409. bo->sync_obj = NULL;
  1410. clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
  1411. spin_unlock(&bo->lock);
  1412. driver->sync_obj_unref(&tmp_obj);
  1413. spin_lock(&bo->lock);
  1414. continue;
  1415. }
  1416. if (no_wait)
  1417. return -EBUSY;
  1418. sync_obj = driver->sync_obj_ref(bo->sync_obj);
  1419. sync_obj_arg = bo->sync_obj_arg;
  1420. spin_unlock(&bo->lock);
  1421. ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
  1422. lazy, interruptible);
  1423. if (unlikely(ret != 0)) {
  1424. driver->sync_obj_unref(&sync_obj);
  1425. spin_lock(&bo->lock);
  1426. return ret;
  1427. }
  1428. spin_lock(&bo->lock);
  1429. if (likely(bo->sync_obj == sync_obj &&
  1430. bo->sync_obj_arg == sync_obj_arg)) {
  1431. void *tmp_obj = bo->sync_obj;
  1432. bo->sync_obj = NULL;
  1433. clear_bit(TTM_BO_PRIV_FLAG_MOVING,
  1434. &bo->priv_flags);
  1435. spin_unlock(&bo->lock);
  1436. driver->sync_obj_unref(&sync_obj);
  1437. driver->sync_obj_unref(&tmp_obj);
  1438. spin_lock(&bo->lock);
  1439. } else {
  1440. spin_unlock(&bo->lock);
  1441. driver->sync_obj_unref(&sync_obj);
  1442. spin_lock(&bo->lock);
  1443. }
  1444. }
  1445. return 0;
  1446. }
  1447. EXPORT_SYMBOL(ttm_bo_wait);
  1448. void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
  1449. {
  1450. atomic_set(&bo->reserved, 0);
  1451. wake_up_all(&bo->event_queue);
  1452. }
  1453. int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
  1454. bool no_wait)
  1455. {
  1456. int ret;
  1457. while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
  1458. if (no_wait)
  1459. return -EBUSY;
  1460. else if (interruptible) {
  1461. ret = wait_event_interruptible
  1462. (bo->event_queue, atomic_read(&bo->reserved) == 0);
  1463. if (unlikely(ret != 0))
  1464. return ret;
  1465. } else {
  1466. wait_event(bo->event_queue,
  1467. atomic_read(&bo->reserved) == 0);
  1468. }
  1469. }
  1470. return 0;
  1471. }
  1472. int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
  1473. {
  1474. int ret = 0;
  1475. /*
  1476. * Using ttm_bo_reserve instead of ttm_bo_block_reservation
  1477. * makes sure the lru lists are updated.
  1478. */
  1479. ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
  1480. if (unlikely(ret != 0))
  1481. return ret;
  1482. spin_lock(&bo->lock);
  1483. ret = ttm_bo_wait(bo, false, true, no_wait);
  1484. spin_unlock(&bo->lock);
  1485. if (likely(ret == 0))
  1486. atomic_inc(&bo->cpu_writers);
  1487. ttm_bo_unreserve(bo);
  1488. return ret;
  1489. }
  1490. EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
  1491. void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
  1492. {
  1493. if (atomic_dec_and_test(&bo->cpu_writers))
  1494. wake_up_all(&bo->event_queue);
  1495. }
  1496. EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
  1497. /**
  1498. * A buffer object shrink method that tries to swap out the first
  1499. * buffer object on the bo_global::swap_lru list.
  1500. */
  1501. static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
  1502. {
  1503. struct ttm_bo_global *glob =
  1504. container_of(shrink, struct ttm_bo_global, shrink);
  1505. struct ttm_buffer_object *bo;
  1506. int ret = -EBUSY;
  1507. int put_count;
  1508. uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
  1509. spin_lock(&glob->lru_lock);
  1510. while (ret == -EBUSY) {
  1511. if (unlikely(list_empty(&glob->swap_lru))) {
  1512. spin_unlock(&glob->lru_lock);
  1513. return -EBUSY;
  1514. }
  1515. bo = list_first_entry(&glob->swap_lru,
  1516. struct ttm_buffer_object, swap);
  1517. kref_get(&bo->list_kref);
  1518. /**
  1519. * Reserve buffer. Since we unlock while sleeping, we need
  1520. * to re-check that nobody removed us from the swap-list while
  1521. * we slept.
  1522. */
  1523. ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
  1524. if (unlikely(ret == -EBUSY)) {
  1525. spin_unlock(&glob->lru_lock);
  1526. ttm_bo_wait_unreserved(bo, false);
  1527. kref_put(&bo->list_kref, ttm_bo_release_list);
  1528. spin_lock(&glob->lru_lock);
  1529. }
  1530. }
  1531. BUG_ON(ret != 0);
  1532. put_count = ttm_bo_del_from_lru(bo);
  1533. spin_unlock(&glob->lru_lock);
  1534. while (put_count--)
  1535. kref_put(&bo->list_kref, ttm_bo_ref_bug);
  1536. /**
  1537. * Wait for GPU, then move to system cached.
  1538. */
  1539. spin_lock(&bo->lock);
  1540. ret = ttm_bo_wait(bo, false, false, false);
  1541. spin_unlock(&bo->lock);
  1542. if (unlikely(ret != 0))
  1543. goto out;
  1544. if ((bo->mem.placement & swap_placement) != swap_placement) {
  1545. struct ttm_mem_reg evict_mem;
  1546. evict_mem = bo->mem;
  1547. evict_mem.mm_node = NULL;
  1548. evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
  1549. evict_mem.mem_type = TTM_PL_SYSTEM;
  1550. ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
  1551. false, false, false);
  1552. if (unlikely(ret != 0))
  1553. goto out;
  1554. }
  1555. ttm_bo_unmap_virtual(bo);
  1556. /**
  1557. * Swap out. Buffer will be swapped in again as soon as
  1558. * anyone tries to access a ttm page.
  1559. */
  1560. if (bo->bdev->driver->swap_notify)
  1561. bo->bdev->driver->swap_notify(bo);
  1562. ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
  1563. out:
  1564. /**
  1565. *
  1566. * Unreserve without putting on LRU to avoid swapping out an
  1567. * already swapped buffer.
  1568. */
  1569. atomic_set(&bo->reserved, 0);
  1570. wake_up_all(&bo->event_queue);
  1571. kref_put(&bo->list_kref, ttm_bo_release_list);
  1572. return ret;
  1573. }
  1574. void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
  1575. {
  1576. while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
  1577. ;
  1578. }
  1579. EXPORT_SYMBOL(ttm_bo_swapout_all);