
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */
/* Notes:
 *
 * We store a bo pointer in the drm_mm_node struct so we know which bo
 * owns a specific node. There is no protection on the pointer, so to
 * make sure things don't go berserk you have to access this pointer only
 * while holding the global lru lock, and make sure that anytime you free
 * a node you reset the pointer to NULL.
 */
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);

static struct attribute ttm_bo_count = {
	.name = "bo_count",
	.mode = S_IRUGO
};
static inline int ttm_mem_type_from_flags(uint32_t flags, uint32_t *mem_type)
{
	int i;

	for (i = 0; i <= TTM_PL_PRIV5; i++)
		if (flags & (1 << i)) {
			*mem_type = i;
			return 0;
		}
	return -EINVAL;
}
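/*
 * Illustrative note (added commentary): the TTM_PL_FLAG_* placement flags
 * are defined as (1 << TTM_PL_*), so the helper above simply reports the
 * index of the lowest set memory-type bit.  For example:
 *
 *	uint32_t type;
 *	ttm_mem_type_from_flags(TTM_PL_FLAG_VRAM, &type);
 *	type is now TTM_PL_VRAM.
 */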
  67. static void ttm_mem_type_debug(struct ttm_bo_device *bdev, int mem_type)
  68. {
  69. struct ttm_mem_type_manager *man = &bdev->man[mem_type];
  70. printk(KERN_ERR TTM_PFX " has_type: %d\n", man->has_type);
  71. printk(KERN_ERR TTM_PFX " use_type: %d\n", man->use_type);
  72. printk(KERN_ERR TTM_PFX " flags: 0x%08X\n", man->flags);
  73. printk(KERN_ERR TTM_PFX " gpu_offset: 0x%08lX\n", man->gpu_offset);
  74. printk(KERN_ERR TTM_PFX " io_offset: 0x%08lX\n", man->io_offset);
  75. printk(KERN_ERR TTM_PFX " io_size: %ld\n", man->io_size);
  76. printk(KERN_ERR TTM_PFX " size: %llu\n", man->size);
  77. printk(KERN_ERR TTM_PFX " available_caching: 0x%08X\n",
  78. man->available_caching);
  79. printk(KERN_ERR TTM_PFX " default_caching: 0x%08X\n",
  80. man->default_caching);
  81. if (mem_type != TTM_PL_SYSTEM) {
  82. spin_lock(&bdev->glob->lru_lock);
  83. drm_mm_debug_table(&man->manager, TTM_PFX);
  84. spin_unlock(&bdev->glob->lru_lock);
  85. }
  86. }
  87. static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
  88. struct ttm_placement *placement)
  89. {
  90. int i, ret, mem_type;
  91. printk(KERN_ERR TTM_PFX "No space for %p (%lu pages, %luK, %luM)\n",
  92. bo, bo->mem.num_pages, bo->mem.size >> 10,
  93. bo->mem.size >> 20);
  94. for (i = 0; i < placement->num_placement; i++) {
  95. ret = ttm_mem_type_from_flags(placement->placement[i],
  96. &mem_type);
  97. if (ret)
  98. return;
  99. printk(KERN_ERR TTM_PFX " placement[%d]=0x%08X (%d)\n",
  100. i, placement->placement[i], mem_type);
  101. ttm_mem_type_debug(bo->bdev, mem_type);
  102. }
  103. }
  104. static ssize_t ttm_bo_global_show(struct kobject *kobj,
  105. struct attribute *attr,
  106. char *buffer)
  107. {
  108. struct ttm_bo_global *glob =
  109. container_of(kobj, struct ttm_bo_global, kobj);
  110. return snprintf(buffer, PAGE_SIZE, "%lu\n",
  111. (unsigned long) atomic_read(&glob->bo_count));
  112. }
  113. static struct attribute *ttm_bo_global_attrs[] = {
  114. &ttm_bo_count,
  115. NULL
  116. };
  117. static const struct sysfs_ops ttm_bo_global_ops = {
  118. .show = &ttm_bo_global_show
  119. };
  120. static struct kobj_type ttm_bo_glob_kobj_type = {
  121. .release = &ttm_bo_global_kobj_release,
  122. .sysfs_ops = &ttm_bo_global_ops,
  123. .default_attrs = ttm_bo_global_attrs
  124. };
  125. static inline uint32_t ttm_bo_type_flags(unsigned type)
  126. {
  127. return 1 << (type);
  128. }
  129. static void ttm_bo_release_list(struct kref *list_kref)
  130. {
  131. struct ttm_buffer_object *bo =
  132. container_of(list_kref, struct ttm_buffer_object, list_kref);
  133. struct ttm_bo_device *bdev = bo->bdev;
  134. BUG_ON(atomic_read(&bo->list_kref.refcount));
  135. BUG_ON(atomic_read(&bo->kref.refcount));
  136. BUG_ON(atomic_read(&bo->cpu_writers));
  137. BUG_ON(bo->sync_obj != NULL);
  138. BUG_ON(bo->mem.mm_node != NULL);
  139. BUG_ON(!list_empty(&bo->lru));
  140. BUG_ON(!list_empty(&bo->ddestroy));
  141. if (bo->ttm)
  142. ttm_tt_destroy(bo->ttm);
  143. atomic_dec(&bo->glob->bo_count);
  144. if (bo->destroy)
  145. bo->destroy(bo);
  146. else {
  147. ttm_mem_global_free(bdev->glob->mem_glob, bo->acc_size);
  148. kfree(bo);
  149. }
  150. }
  151. int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
  152. {
  153. if (interruptible) {
  154. int ret = 0;
  155. ret = wait_event_interruptible(bo->event_queue,
  156. atomic_read(&bo->reserved) == 0);
  157. if (unlikely(ret != 0))
  158. return ret;
  159. } else {
  160. wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
  161. }
  162. return 0;
  163. }
  164. EXPORT_SYMBOL(ttm_bo_wait_unreserved);
  165. static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
  166. {
  167. struct ttm_bo_device *bdev = bo->bdev;
  168. struct ttm_mem_type_manager *man;
  169. BUG_ON(!atomic_read(&bo->reserved));
  170. if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
  171. BUG_ON(!list_empty(&bo->lru));
  172. man = &bdev->man[bo->mem.mem_type];
  173. list_add_tail(&bo->lru, &man->lru);
  174. kref_get(&bo->list_kref);
  175. if (bo->ttm != NULL) {
  176. list_add_tail(&bo->swap, &bo->glob->swap_lru);
  177. kref_get(&bo->list_kref);
  178. }
  179. }
  180. }
/**
 * Call with the lru_lock held.
 */
static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
	int put_count = 0;

	if (!list_empty(&bo->swap)) {
		list_del_init(&bo->swap);
		++put_count;
	}
	if (!list_empty(&bo->lru)) {
		list_del_init(&bo->lru);
		++put_count;
	}

	/*
	 * TODO: Add a driver hook to delete from
	 * driver-specific LRU's here.
	 */
	return put_count;
}
  201. int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
  202. bool interruptible,
  203. bool no_wait, bool use_sequence, uint32_t sequence)
  204. {
  205. struct ttm_bo_global *glob = bo->glob;
  206. int ret;
  207. while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
  208. if (use_sequence && bo->seq_valid &&
  209. (sequence - bo->val_seq < (1 << 31))) {
  210. return -EAGAIN;
  211. }
  212. if (no_wait)
  213. return -EBUSY;
  214. spin_unlock(&glob->lru_lock);
  215. ret = ttm_bo_wait_unreserved(bo, interruptible);
  216. spin_lock(&glob->lru_lock);
  217. if (unlikely(ret))
  218. return ret;
  219. }
  220. if (use_sequence) {
  221. bo->val_seq = sequence;
  222. bo->seq_valid = true;
  223. } else {
  224. bo->seq_valid = false;
  225. }
  226. return 0;
  227. }
  228. EXPORT_SYMBOL(ttm_bo_reserve);
  229. static void ttm_bo_ref_bug(struct kref *list_kref)
  230. {
  231. BUG();
  232. }
  233. int ttm_bo_reserve(struct ttm_buffer_object *bo,
  234. bool interruptible,
  235. bool no_wait, bool use_sequence, uint32_t sequence)
  236. {
  237. struct ttm_bo_global *glob = bo->glob;
  238. int put_count = 0;
  239. int ret;
  240. spin_lock(&glob->lru_lock);
  241. ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
  242. sequence);
  243. if (likely(ret == 0))
  244. put_count = ttm_bo_del_from_lru(bo);
  245. spin_unlock(&glob->lru_lock);
  246. while (put_count--)
  247. kref_put(&bo->list_kref, ttm_bo_ref_bug);
  248. return ret;
  249. }
  250. void ttm_bo_unreserve(struct ttm_buffer_object *bo)
  251. {
  252. struct ttm_bo_global *glob = bo->glob;
  253. spin_lock(&glob->lru_lock);
  254. ttm_bo_add_to_lru(bo);
  255. atomic_set(&bo->reserved, 0);
  256. wake_up_all(&bo->event_queue);
  257. spin_unlock(&glob->lru_lock);
  258. }
  259. EXPORT_SYMBOL(ttm_bo_unreserve);
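/*
 * Illustrative usage sketch (hypothetical driver context): a buffer is
 * normally manipulated only between a reserve/unreserve pair, which also
 * takes it off and puts it back on the LRU lists:
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, 0);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	(validate, map or otherwise touch the buffer here)
 *	ttm_bo_unreserve(bo);
 */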
  260. /*
  261. * Call bo->mutex locked.
  262. */
  263. static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
  264. {
  265. struct ttm_bo_device *bdev = bo->bdev;
  266. struct ttm_bo_global *glob = bo->glob;
  267. int ret = 0;
  268. uint32_t page_flags = 0;
  269. TTM_ASSERT_LOCKED(&bo->mutex);
  270. bo->ttm = NULL;
  271. if (bdev->need_dma32)
  272. page_flags |= TTM_PAGE_FLAG_DMA32;
  273. switch (bo->type) {
  274. case ttm_bo_type_device:
  275. if (zero_alloc)
  276. page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
  277. case ttm_bo_type_kernel:
  278. bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
  279. page_flags, glob->dummy_read_page);
  280. if (unlikely(bo->ttm == NULL))
  281. ret = -ENOMEM;
  282. break;
  283. case ttm_bo_type_user:
  284. bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
  285. page_flags | TTM_PAGE_FLAG_USER,
  286. glob->dummy_read_page);
  287. if (unlikely(bo->ttm == NULL)) {
  288. ret = -ENOMEM;
  289. break;
  290. }
  291. ret = ttm_tt_set_user(bo->ttm, current,
  292. bo->buffer_start, bo->num_pages);
  293. if (unlikely(ret != 0))
  294. ttm_tt_destroy(bo->ttm);
  295. break;
  296. default:
  297. printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
  298. ret = -EINVAL;
  299. break;
  300. }
  301. return ret;
  302. }
  303. static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
  304. struct ttm_mem_reg *mem,
  305. bool evict, bool interruptible, bool no_wait)
  306. {
  307. struct ttm_bo_device *bdev = bo->bdev;
  308. bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
  309. bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
  310. struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
  311. struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
  312. int ret = 0;
  313. if (old_is_pci || new_is_pci ||
  314. ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
  315. ttm_bo_unmap_virtual(bo);
  316. /*
  317. * Create and bind a ttm if required.
  318. */
  319. if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
  320. ret = ttm_bo_add_ttm(bo, false);
  321. if (ret)
  322. goto out_err;
  323. ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
  324. if (ret)
  325. goto out_err;
  326. if (mem->mem_type != TTM_PL_SYSTEM) {
  327. ret = ttm_tt_bind(bo->ttm, mem);
  328. if (ret)
  329. goto out_err;
  330. }
  331. if (bo->mem.mem_type == TTM_PL_SYSTEM) {
  332. bo->mem = *mem;
  333. mem->mm_node = NULL;
  334. goto moved;
  335. }
  336. }
  337. if (bdev->driver->move_notify)
  338. bdev->driver->move_notify(bo, mem);
  339. if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
  340. !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
  341. ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
  342. else if (bdev->driver->move)
  343. ret = bdev->driver->move(bo, evict, interruptible,
  344. no_wait, mem);
  345. else
  346. ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
  347. if (ret)
  348. goto out_err;
  349. moved:
  350. if (bo->evicted) {
  351. ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
  352. if (ret)
  353. printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
  354. bo->evicted = false;
  355. }
  356. if (bo->mem.mm_node) {
  357. spin_lock(&bo->lock);
  358. bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
  359. bdev->man[bo->mem.mem_type].gpu_offset;
  360. bo->cur_placement = bo->mem.placement;
  361. spin_unlock(&bo->lock);
  362. } else
  363. bo->offset = 0;
  364. return 0;
  365. out_err:
  366. new_man = &bdev->man[bo->mem.mem_type];
  367. if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
  368. ttm_tt_unbind(bo->ttm);
  369. ttm_tt_destroy(bo->ttm);
  370. bo->ttm = NULL;
  371. }
  372. return ret;
  373. }
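/*
 * Note (descriptive summary of the function above): when neither the old
 * nor the new memory type is FIXED, the move is done with ttm_bo_move_ttm();
 * otherwise the driver's move() callback is used if present, and
 * ttm_bo_move_memcpy() is the fallback.
 */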
/**
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, and already on delayed list, do nothing.
 * If not idle, and not on delayed list, put on delayed list,
 * up the list_kref and schedule a delayed list check.
 */
  380. static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
  381. {
  382. struct ttm_bo_device *bdev = bo->bdev;
  383. struct ttm_bo_global *glob = bo->glob;
  384. struct ttm_bo_driver *driver = bdev->driver;
  385. int ret;
  386. spin_lock(&bo->lock);
  387. (void) ttm_bo_wait(bo, false, false, !remove_all);
  388. if (!bo->sync_obj) {
  389. int put_count;
  390. spin_unlock(&bo->lock);
  391. spin_lock(&glob->lru_lock);
  392. put_count = ttm_bo_del_from_lru(bo);
  393. ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
  394. BUG_ON(ret);
  395. if (bo->ttm)
  396. ttm_tt_unbind(bo->ttm);
  397. if (!list_empty(&bo->ddestroy)) {
  398. list_del_init(&bo->ddestroy);
  399. ++put_count;
  400. }
  401. if (bo->mem.mm_node) {
  402. bo->mem.mm_node->private = NULL;
  403. drm_mm_put_block(bo->mem.mm_node);
  404. bo->mem.mm_node = NULL;
  405. }
  406. spin_unlock(&glob->lru_lock);
  407. atomic_set(&bo->reserved, 0);
  408. while (put_count--)
  409. kref_put(&bo->list_kref, ttm_bo_ref_bug);
  410. return 0;
  411. }
  412. spin_lock(&glob->lru_lock);
  413. if (list_empty(&bo->ddestroy)) {
  414. void *sync_obj = bo->sync_obj;
  415. void *sync_obj_arg = bo->sync_obj_arg;
  416. kref_get(&bo->list_kref);
  417. list_add_tail(&bo->ddestroy, &bdev->ddestroy);
  418. spin_unlock(&glob->lru_lock);
  419. spin_unlock(&bo->lock);
  420. if (sync_obj)
  421. driver->sync_obj_flush(sync_obj, sync_obj_arg);
  422. schedule_delayed_work(&bdev->wq,
  423. ((HZ / 100) < 1) ? 1 : HZ / 100);
  424. ret = 0;
  425. } else {
  426. spin_unlock(&glob->lru_lock);
  427. spin_unlock(&bo->lock);
  428. ret = -EBUSY;
  429. }
  430. return ret;
  431. }
/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */
  436. static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
  437. {
  438. struct ttm_bo_global *glob = bdev->glob;
  439. struct ttm_buffer_object *entry = NULL;
  440. int ret = 0;
  441. spin_lock(&glob->lru_lock);
  442. if (list_empty(&bdev->ddestroy))
  443. goto out_unlock;
  444. entry = list_first_entry(&bdev->ddestroy,
  445. struct ttm_buffer_object, ddestroy);
  446. kref_get(&entry->list_kref);
  447. for (;;) {
  448. struct ttm_buffer_object *nentry = NULL;
  449. if (entry->ddestroy.next != &bdev->ddestroy) {
  450. nentry = list_first_entry(&entry->ddestroy,
  451. struct ttm_buffer_object, ddestroy);
  452. kref_get(&nentry->list_kref);
  453. }
  454. spin_unlock(&glob->lru_lock);
  455. ret = ttm_bo_cleanup_refs(entry, remove_all);
  456. kref_put(&entry->list_kref, ttm_bo_release_list);
  457. entry = nentry;
  458. if (ret || !entry)
  459. goto out;
  460. spin_lock(&glob->lru_lock);
  461. if (list_empty(&entry->ddestroy))
  462. break;
  463. }
  464. out_unlock:
  465. spin_unlock(&glob->lru_lock);
  466. out:
  467. if (entry)
  468. kref_put(&entry->list_kref, ttm_bo_release_list);
  469. return ret;
  470. }
  471. static void ttm_bo_delayed_workqueue(struct work_struct *work)
  472. {
  473. struct ttm_bo_device *bdev =
  474. container_of(work, struct ttm_bo_device, wq.work);
  475. if (ttm_bo_delayed_delete(bdev, false)) {
  476. schedule_delayed_work(&bdev->wq,
  477. ((HZ / 100) < 1) ? 1 : HZ / 100);
  478. }
  479. }
  480. static void ttm_bo_release(struct kref *kref)
  481. {
  482. struct ttm_buffer_object *bo =
  483. container_of(kref, struct ttm_buffer_object, kref);
  484. struct ttm_bo_device *bdev = bo->bdev;
  485. if (likely(bo->vm_node != NULL)) {
  486. rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
  487. drm_mm_put_block(bo->vm_node);
  488. bo->vm_node = NULL;
  489. }
  490. write_unlock(&bdev->vm_lock);
  491. ttm_bo_cleanup_refs(bo, false);
  492. kref_put(&bo->list_kref, ttm_bo_release_list);
  493. write_lock(&bdev->vm_lock);
  494. }
  495. void ttm_bo_unref(struct ttm_buffer_object **p_bo)
  496. {
  497. struct ttm_buffer_object *bo = *p_bo;
  498. struct ttm_bo_device *bdev = bo->bdev;
  499. *p_bo = NULL;
  500. write_lock(&bdev->vm_lock);
  501. kref_put(&bo->kref, ttm_bo_release);
  502. write_unlock(&bdev->vm_lock);
  503. }
  504. EXPORT_SYMBOL(ttm_bo_unref);
  505. static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
  506. bool no_wait)
  507. {
  508. struct ttm_bo_device *bdev = bo->bdev;
  509. struct ttm_bo_global *glob = bo->glob;
  510. struct ttm_mem_reg evict_mem;
  511. struct ttm_placement placement;
  512. int ret = 0;
  513. spin_lock(&bo->lock);
  514. ret = ttm_bo_wait(bo, false, interruptible, no_wait);
  515. spin_unlock(&bo->lock);
  516. if (unlikely(ret != 0)) {
  517. if (ret != -ERESTARTSYS) {
  518. printk(KERN_ERR TTM_PFX
  519. "Failed to expire sync object before "
  520. "buffer eviction.\n");
  521. }
  522. goto out;
  523. }
  524. BUG_ON(!atomic_read(&bo->reserved));
  525. evict_mem = bo->mem;
  526. evict_mem.mm_node = NULL;
  527. placement.fpfn = 0;
  528. placement.lpfn = 0;
  529. placement.num_placement = 0;
  530. placement.num_busy_placement = 0;
  531. bdev->driver->evict_flags(bo, &placement);
  532. ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
  533. no_wait);
  534. if (ret) {
  535. if (ret != -ERESTARTSYS) {
  536. printk(KERN_ERR TTM_PFX
  537. "Failed to find memory space for "
  538. "buffer 0x%p eviction.\n", bo);
  539. ttm_bo_mem_space_debug(bo, &placement);
  540. }
  541. goto out;
  542. }
  543. ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
  544. no_wait);
  545. if (ret) {
  546. if (ret != -ERESTARTSYS)
  547. printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
  548. spin_lock(&glob->lru_lock);
  549. if (evict_mem.mm_node) {
  550. evict_mem.mm_node->private = NULL;
  551. drm_mm_put_block(evict_mem.mm_node);
  552. evict_mem.mm_node = NULL;
  553. }
  554. spin_unlock(&glob->lru_lock);
  555. goto out;
  556. }
  557. bo->evicted = true;
  558. out:
  559. return ret;
  560. }
  561. static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
  562. uint32_t mem_type,
  563. bool interruptible, bool no_wait)
  564. {
  565. struct ttm_bo_global *glob = bdev->glob;
  566. struct ttm_mem_type_manager *man = &bdev->man[mem_type];
  567. struct ttm_buffer_object *bo;
  568. int ret, put_count = 0;
  569. retry:
  570. spin_lock(&glob->lru_lock);
  571. if (list_empty(&man->lru)) {
  572. spin_unlock(&glob->lru_lock);
  573. return -EBUSY;
  574. }
  575. bo = list_first_entry(&man->lru, struct ttm_buffer_object, lru);
  576. kref_get(&bo->list_kref);
  577. ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
  578. if (unlikely(ret == -EBUSY)) {
  579. spin_unlock(&glob->lru_lock);
  580. if (likely(!no_wait))
  581. ret = ttm_bo_wait_unreserved(bo, interruptible);
  582. kref_put(&bo->list_kref, ttm_bo_release_list);
  583. /**
  584. * We *need* to retry after releasing the lru lock.
  585. */
  586. if (unlikely(ret != 0))
  587. return ret;
  588. goto retry;
  589. }
  590. put_count = ttm_bo_del_from_lru(bo);
  591. spin_unlock(&glob->lru_lock);
  592. BUG_ON(ret != 0);
  593. while (put_count--)
  594. kref_put(&bo->list_kref, ttm_bo_ref_bug);
  595. ret = ttm_bo_evict(bo, interruptible, no_wait);
  596. ttm_bo_unreserve(bo);
  597. kref_put(&bo->list_kref, ttm_bo_release_list);
  598. return ret;
  599. }
  600. static int ttm_bo_man_get_node(struct ttm_buffer_object *bo,
  601. struct ttm_mem_type_manager *man,
  602. struct ttm_placement *placement,
  603. struct ttm_mem_reg *mem,
  604. struct drm_mm_node **node)
  605. {
  606. struct ttm_bo_global *glob = bo->glob;
  607. unsigned long lpfn;
  608. int ret;
  609. lpfn = placement->lpfn;
  610. if (!lpfn)
  611. lpfn = man->size;
  612. *node = NULL;
  613. do {
  614. ret = drm_mm_pre_get(&man->manager);
  615. if (unlikely(ret))
  616. return ret;
  617. spin_lock(&glob->lru_lock);
  618. *node = drm_mm_search_free_in_range(&man->manager,
  619. mem->num_pages, mem->page_alignment,
  620. placement->fpfn, lpfn, 1);
  621. if (unlikely(*node == NULL)) {
  622. spin_unlock(&glob->lru_lock);
  623. return 0;
  624. }
  625. *node = drm_mm_get_block_atomic_range(*node, mem->num_pages,
  626. mem->page_alignment,
  627. placement->fpfn,
  628. lpfn);
  629. spin_unlock(&glob->lru_lock);
  630. } while (*node == NULL);
  631. return 0;
  632. }
/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */
  637. static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
  638. uint32_t mem_type,
  639. struct ttm_placement *placement,
  640. struct ttm_mem_reg *mem,
  641. bool interruptible, bool no_wait)
  642. {
  643. struct ttm_bo_device *bdev = bo->bdev;
  644. struct ttm_bo_global *glob = bdev->glob;
  645. struct ttm_mem_type_manager *man = &bdev->man[mem_type];
  646. struct drm_mm_node *node;
  647. int ret;
  648. do {
  649. ret = ttm_bo_man_get_node(bo, man, placement, mem, &node);
  650. if (unlikely(ret != 0))
  651. return ret;
  652. if (node)
  653. break;
  654. spin_lock(&glob->lru_lock);
  655. if (list_empty(&man->lru)) {
  656. spin_unlock(&glob->lru_lock);
  657. break;
  658. }
  659. spin_unlock(&glob->lru_lock);
  660. ret = ttm_mem_evict_first(bdev, mem_type, interruptible,
  661. no_wait);
  662. if (unlikely(ret != 0))
  663. return ret;
  664. } while (1);
  665. if (node == NULL)
  666. return -ENOMEM;
  667. mem->mm_node = node;
  668. mem->mem_type = mem_type;
  669. return 0;
  670. }
  671. static uint32_t ttm_bo_select_caching(struct ttm_mem_type_manager *man,
  672. uint32_t cur_placement,
  673. uint32_t proposed_placement)
  674. {
  675. uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING;
  676. uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING;
  677. /**
  678. * Keep current caching if possible.
  679. */
  680. if ((cur_placement & caching) != 0)
  681. result |= (cur_placement & caching);
  682. else if ((man->default_caching & caching) != 0)
  683. result |= man->default_caching;
  684. else if ((TTM_PL_FLAG_CACHED & caching) != 0)
  685. result |= TTM_PL_FLAG_CACHED;
  686. else if ((TTM_PL_FLAG_WC & caching) != 0)
  687. result |= TTM_PL_FLAG_WC;
  688. else if ((TTM_PL_FLAG_UNCACHED & caching) != 0)
  689. result |= TTM_PL_FLAG_UNCACHED;
  690. return result;
  691. }
  692. static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
  693. bool disallow_fixed,
  694. uint32_t mem_type,
  695. uint32_t proposed_placement,
  696. uint32_t *masked_placement)
  697. {
  698. uint32_t cur_flags = ttm_bo_type_flags(mem_type);
  699. if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
  700. return false;
  701. if ((cur_flags & proposed_placement & TTM_PL_MASK_MEM) == 0)
  702. return false;
  703. if ((proposed_placement & man->available_caching) == 0)
  704. return false;
  705. cur_flags |= (proposed_placement & man->available_caching);
  706. *masked_placement = cur_flags;
  707. return true;
  708. }
/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */
  717. int ttm_bo_mem_space(struct ttm_buffer_object *bo,
  718. struct ttm_placement *placement,
  719. struct ttm_mem_reg *mem,
  720. bool interruptible, bool no_wait)
  721. {
  722. struct ttm_bo_device *bdev = bo->bdev;
  723. struct ttm_mem_type_manager *man;
  724. uint32_t mem_type = TTM_PL_SYSTEM;
  725. uint32_t cur_flags = 0;
  726. bool type_found = false;
  727. bool type_ok = false;
  728. bool has_erestartsys = false;
  729. struct drm_mm_node *node = NULL;
  730. int i, ret;
  731. mem->mm_node = NULL;
  732. for (i = 0; i < placement->num_placement; ++i) {
  733. ret = ttm_mem_type_from_flags(placement->placement[i],
  734. &mem_type);
  735. if (ret)
  736. return ret;
  737. man = &bdev->man[mem_type];
  738. type_ok = ttm_bo_mt_compatible(man,
  739. bo->type == ttm_bo_type_user,
  740. mem_type,
  741. placement->placement[i],
  742. &cur_flags);
  743. if (!type_ok)
  744. continue;
  745. cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
  746. cur_flags);
  747. /*
  748. * Use the access and other non-mapping-related flag bits from
  749. * the memory placement flags to the current flags
  750. */
  751. ttm_flag_masked(&cur_flags, placement->placement[i],
  752. ~TTM_PL_MASK_MEMTYPE);
  753. if (mem_type == TTM_PL_SYSTEM)
  754. break;
  755. if (man->has_type && man->use_type) {
  756. type_found = true;
  757. ret = ttm_bo_man_get_node(bo, man, placement, mem,
  758. &node);
  759. if (unlikely(ret))
  760. return ret;
  761. }
  762. if (node)
  763. break;
  764. }
  765. if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
  766. mem->mm_node = node;
  767. mem->mem_type = mem_type;
  768. mem->placement = cur_flags;
  769. if (node)
  770. node->private = bo;
  771. return 0;
  772. }
  773. if (!type_found)
  774. return -EINVAL;
  775. for (i = 0; i < placement->num_busy_placement; ++i) {
  776. ret = ttm_mem_type_from_flags(placement->busy_placement[i],
  777. &mem_type);
  778. if (ret)
  779. return ret;
  780. man = &bdev->man[mem_type];
  781. if (!man->has_type)
  782. continue;
  783. if (!ttm_bo_mt_compatible(man,
  784. bo->type == ttm_bo_type_user,
  785. mem_type,
  786. placement->busy_placement[i],
  787. &cur_flags))
  788. continue;
  789. cur_flags = ttm_bo_select_caching(man, bo->mem.placement,
  790. cur_flags);
  791. /*
  792. * Use the access and other non-mapping-related flag bits from
  793. * the memory placement flags to the current flags
  794. */
  795. ttm_flag_masked(&cur_flags, placement->busy_placement[i],
  796. ~TTM_PL_MASK_MEMTYPE);
  797. if (mem_type == TTM_PL_SYSTEM) {
  798. mem->mem_type = mem_type;
  799. mem->placement = cur_flags;
  800. mem->mm_node = NULL;
  801. return 0;
  802. }
  803. ret = ttm_bo_mem_force_space(bo, mem_type, placement, mem,
  804. interruptible, no_wait);
  805. if (ret == 0 && mem->mm_node) {
  806. mem->placement = cur_flags;
  807. mem->mm_node->private = bo;
  808. return 0;
  809. }
  810. if (ret == -ERESTARTSYS)
  811. has_erestartsys = true;
  812. }
  813. ret = (has_erestartsys) ? -ERESTARTSYS : -ENOMEM;
  814. return ret;
  815. }
  816. EXPORT_SYMBOL(ttm_bo_mem_space);
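/*
 * Illustrative example (hypothetical driver values): given a placement whose
 * normal list contains TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC and whose busy list
 * contains TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED, ttm_bo_mem_space() first
 * tries to find free space in VRAM without evicting anything; only if that
 * fails does it walk the busy list and call ttm_bo_mem_force_space() to
 * evict buffers from the TT domain.
 */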
  817. int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
  818. {
  819. if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
  820. return -EBUSY;
  821. return wait_event_interruptible(bo->event_queue,
  822. atomic_read(&bo->cpu_writers) == 0);
  823. }
  824. EXPORT_SYMBOL(ttm_bo_wait_cpu);
  825. int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
  826. struct ttm_placement *placement,
  827. bool interruptible, bool no_wait)
  828. {
  829. struct ttm_bo_global *glob = bo->glob;
  830. int ret = 0;
  831. struct ttm_mem_reg mem;
  832. BUG_ON(!atomic_read(&bo->reserved));
  833. /*
  834. * FIXME: It's possible to pipeline buffer moves.
  835. * Have the driver move function wait for idle when necessary,
  836. * instead of doing it here.
  837. */
  838. spin_lock(&bo->lock);
  839. ret = ttm_bo_wait(bo, false, interruptible, no_wait);
  840. spin_unlock(&bo->lock);
  841. if (ret)
  842. return ret;
  843. mem.num_pages = bo->num_pages;
  844. mem.size = mem.num_pages << PAGE_SHIFT;
  845. mem.page_alignment = bo->mem.page_alignment;
  846. /*
  847. * Determine where to move the buffer.
  848. */
  849. ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait);
  850. if (ret)
  851. goto out_unlock;
  852. ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
  853. out_unlock:
  854. if (ret && mem.mm_node) {
  855. spin_lock(&glob->lru_lock);
  856. mem.mm_node->private = NULL;
  857. drm_mm_put_block(mem.mm_node);
  858. spin_unlock(&glob->lru_lock);
  859. }
  860. return ret;
  861. }
  862. static int ttm_bo_mem_compat(struct ttm_placement *placement,
  863. struct ttm_mem_reg *mem)
  864. {
  865. int i;
  866. struct drm_mm_node *node = mem->mm_node;
  867. if (node && placement->lpfn != 0 &&
  868. (node->start < placement->fpfn ||
  869. node->start + node->size > placement->lpfn))
  870. return -1;
  871. for (i = 0; i < placement->num_placement; i++) {
  872. if ((placement->placement[i] & mem->placement &
  873. TTM_PL_MASK_CACHING) &&
  874. (placement->placement[i] & mem->placement &
  875. TTM_PL_MASK_MEM))
  876. return i;
  877. }
  878. return -1;
  879. }
  880. int ttm_bo_validate(struct ttm_buffer_object *bo,
  881. struct ttm_placement *placement,
  882. bool interruptible, bool no_wait)
  883. {
  884. int ret;
  885. BUG_ON(!atomic_read(&bo->reserved));
  886. /* Check that range is valid */
  887. if (placement->lpfn || placement->fpfn)
  888. if (placement->fpfn > placement->lpfn ||
  889. (placement->lpfn - placement->fpfn) < bo->num_pages)
  890. return -EINVAL;
  891. /*
  892. * Check whether we need to move buffer.
  893. */
  894. ret = ttm_bo_mem_compat(placement, &bo->mem);
  895. if (ret < 0) {
  896. ret = ttm_bo_move_buffer(bo, placement, interruptible, no_wait);
  897. if (ret)
  898. return ret;
  899. } else {
  900. /*
  901. * Use the access and other non-mapping-related flag bits from
  902. * the compatible memory placement flags to the active flags
  903. */
  904. ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
  905. ~TTM_PL_MASK_MEMTYPE);
  906. }
  907. /*
  908. * We might need to add a TTM.
  909. */
  910. if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
  911. ret = ttm_bo_add_ttm(bo, true);
  912. if (ret)
  913. return ret;
  914. }
  915. return 0;
  916. }
  917. EXPORT_SYMBOL(ttm_bo_validate);
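/*
 * Illustrative usage sketch (hypothetical driver context, values are
 * examples only): moving a reserved buffer into VRAM is a matter of
 * building a placement and validating against it:
 *
 *	uint32_t flags = TTM_PL_FLAG_VRAM | TTM_PL_FLAG_WC;
 *	struct ttm_placement placement = {
 *		.num_placement = 1,
 *		.placement = &flags,
 *	};
 *	ret = ttm_bo_validate(bo, &placement, false, false);
 */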
int ttm_bo_check_placement(struct ttm_buffer_object *bo,
			   struct ttm_placement *placement)
{
	int i;

	if (placement->fpfn || placement->lpfn) {
		if (bo->mem.num_pages > (placement->lpfn - placement->fpfn)) {
			printk(KERN_ERR TTM_PFX "Page number range too small: "
			       "need %lu pages, range is [%u, %u]\n",
			       bo->mem.num_pages, placement->fpfn,
			       placement->lpfn);
			return -EINVAL;
		}
	}
	for (i = 0; i < placement->num_placement; i++) {
		if (!capable(CAP_SYS_ADMIN)) {
			if (placement->placement[i] & TTM_PL_FLAG_NO_EVICT) {
				printk(KERN_ERR TTM_PFX "Need to be root to "
				       "modify NO_EVICT status.\n");
				return -EINVAL;
			}
		}
	}
	for (i = 0; i < placement->num_busy_placement; i++) {
		if (!capable(CAP_SYS_ADMIN)) {
			if (placement->busy_placement[i] & TTM_PL_FLAG_NO_EVICT) {
				printk(KERN_ERR TTM_PFX "Need to be root to "
				       "modify NO_EVICT status.\n");
				return -EINVAL;
			}
		}
	}
	return 0;
}
  951. int ttm_bo_init(struct ttm_bo_device *bdev,
  952. struct ttm_buffer_object *bo,
  953. unsigned long size,
  954. enum ttm_bo_type type,
  955. struct ttm_placement *placement,
  956. uint32_t page_alignment,
  957. unsigned long buffer_start,
  958. bool interruptible,
  959. struct file *persistant_swap_storage,
  960. size_t acc_size,
  961. void (*destroy) (struct ttm_buffer_object *))
  962. {
  963. int ret = 0;
  964. unsigned long num_pages;
  965. size += buffer_start & ~PAGE_MASK;
  966. num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
  967. if (num_pages == 0) {
  968. printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
  969. return -EINVAL;
  970. }
  971. bo->destroy = destroy;
  972. spin_lock_init(&bo->lock);
  973. kref_init(&bo->kref);
  974. kref_init(&bo->list_kref);
  975. atomic_set(&bo->cpu_writers, 0);
  976. atomic_set(&bo->reserved, 1);
  977. init_waitqueue_head(&bo->event_queue);
  978. INIT_LIST_HEAD(&bo->lru);
  979. INIT_LIST_HEAD(&bo->ddestroy);
  980. INIT_LIST_HEAD(&bo->swap);
  981. bo->bdev = bdev;
  982. bo->glob = bdev->glob;
  983. bo->type = type;
  984. bo->num_pages = num_pages;
  985. bo->mem.size = num_pages << PAGE_SHIFT;
  986. bo->mem.mem_type = TTM_PL_SYSTEM;
  987. bo->mem.num_pages = bo->num_pages;
  988. bo->mem.mm_node = NULL;
  989. bo->mem.page_alignment = page_alignment;
  990. bo->buffer_start = buffer_start & PAGE_MASK;
  991. bo->priv_flags = 0;
  992. bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
  993. bo->seq_valid = false;
  994. bo->persistant_swap_storage = persistant_swap_storage;
  995. bo->acc_size = acc_size;
  996. atomic_inc(&bo->glob->bo_count);
  997. ret = ttm_bo_check_placement(bo, placement);
  998. if (unlikely(ret != 0))
  999. goto out_err;
  1000. /*
  1001. * For ttm_bo_type_device buffers, allocate
  1002. * address space from the device.
  1003. */
  1004. if (bo->type == ttm_bo_type_device) {
  1005. ret = ttm_bo_setup_vm(bo);
  1006. if (ret)
  1007. goto out_err;
  1008. }
  1009. ret = ttm_bo_validate(bo, placement, interruptible, false);
  1010. if (ret)
  1011. goto out_err;
  1012. ttm_bo_unreserve(bo);
  1013. return 0;
  1014. out_err:
  1015. ttm_bo_unreserve(bo);
  1016. ttm_bo_unref(&bo);
  1017. return ret;
  1018. }
  1019. EXPORT_SYMBOL(ttm_bo_init);
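/*
 * Note on ownership (derived from the code above): on success the buffer is
 * returned unreserved; on failure ttm_bo_init() already unreserves and
 * unrefs the object, so the caller must not call ttm_bo_unref() again.
 */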
  1020. static inline size_t ttm_bo_size(struct ttm_bo_global *glob,
  1021. unsigned long num_pages)
  1022. {
  1023. size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
  1024. PAGE_MASK;
  1025. return glob->ttm_bo_size + 2 * page_array_size;
  1026. }
  1027. int ttm_bo_create(struct ttm_bo_device *bdev,
  1028. unsigned long size,
  1029. enum ttm_bo_type type,
  1030. struct ttm_placement *placement,
  1031. uint32_t page_alignment,
  1032. unsigned long buffer_start,
  1033. bool interruptible,
  1034. struct file *persistant_swap_storage,
  1035. struct ttm_buffer_object **p_bo)
  1036. {
  1037. struct ttm_buffer_object *bo;
  1038. struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
  1039. int ret;
  1040. size_t acc_size =
  1041. ttm_bo_size(bdev->glob, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
  1042. ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
  1043. if (unlikely(ret != 0))
  1044. return ret;
  1045. bo = kzalloc(sizeof(*bo), GFP_KERNEL);
  1046. if (unlikely(bo == NULL)) {
  1047. ttm_mem_global_free(mem_glob, acc_size);
  1048. return -ENOMEM;
  1049. }
  1050. ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
  1051. buffer_start, interruptible,
  1052. persistant_swap_storage, acc_size, NULL);
  1053. if (likely(ret == 0))
  1054. *p_bo = bo;
  1055. return ret;
  1056. }
  1057. static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
  1058. unsigned mem_type, bool allow_errors)
  1059. {
  1060. struct ttm_mem_type_manager *man = &bdev->man[mem_type];
  1061. struct ttm_bo_global *glob = bdev->glob;
  1062. int ret;
  1063. /*
  1064. * Can't use standard list traversal since we're unlocking.
  1065. */
  1066. spin_lock(&glob->lru_lock);
  1067. while (!list_empty(&man->lru)) {
  1068. spin_unlock(&glob->lru_lock);
  1069. ret = ttm_mem_evict_first(bdev, mem_type, false, false);
  1070. if (ret) {
  1071. if (allow_errors) {
  1072. return ret;
  1073. } else {
  1074. printk(KERN_ERR TTM_PFX
  1075. "Cleanup eviction failed\n");
  1076. }
  1077. }
  1078. spin_lock(&glob->lru_lock);
  1079. }
  1080. spin_unlock(&glob->lru_lock);
  1081. return 0;
  1082. }
  1083. int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
  1084. {
  1085. struct ttm_bo_global *glob = bdev->glob;
  1086. struct ttm_mem_type_manager *man;
  1087. int ret = -EINVAL;
  1088. if (mem_type >= TTM_NUM_MEM_TYPES) {
  1089. printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
  1090. return ret;
  1091. }
  1092. man = &bdev->man[mem_type];
  1093. if (!man->has_type) {
  1094. printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
  1095. "memory manager type %u\n", mem_type);
  1096. return ret;
  1097. }
  1098. man->use_type = false;
  1099. man->has_type = false;
  1100. ret = 0;
  1101. if (mem_type > 0) {
  1102. ttm_bo_force_list_clean(bdev, mem_type, false);
  1103. spin_lock(&glob->lru_lock);
  1104. if (drm_mm_clean(&man->manager))
  1105. drm_mm_takedown(&man->manager);
  1106. else
  1107. ret = -EBUSY;
  1108. spin_unlock(&glob->lru_lock);
  1109. }
  1110. return ret;
  1111. }
  1112. EXPORT_SYMBOL(ttm_bo_clean_mm);
  1113. int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
  1114. {
  1115. struct ttm_mem_type_manager *man = &bdev->man[mem_type];
  1116. if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
  1117. printk(KERN_ERR TTM_PFX
  1118. "Illegal memory manager memory type %u.\n",
  1119. mem_type);
  1120. return -EINVAL;
  1121. }
  1122. if (!man->has_type) {
  1123. printk(KERN_ERR TTM_PFX
  1124. "Memory type %u has not been initialized.\n",
  1125. mem_type);
  1126. return 0;
  1127. }
  1128. return ttm_bo_force_list_clean(bdev, mem_type, true);
  1129. }
  1130. EXPORT_SYMBOL(ttm_bo_evict_mm);
  1131. int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
  1132. unsigned long p_size)
  1133. {
  1134. int ret = -EINVAL;
  1135. struct ttm_mem_type_manager *man;
  1136. if (type >= TTM_NUM_MEM_TYPES) {
  1137. printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
  1138. return ret;
  1139. }
  1140. man = &bdev->man[type];
  1141. if (man->has_type) {
  1142. printk(KERN_ERR TTM_PFX
  1143. "Memory manager already initialized for type %d\n",
  1144. type);
  1145. return ret;
  1146. }
  1147. ret = bdev->driver->init_mem_type(bdev, type, man);
  1148. if (ret)
  1149. return ret;
  1150. ret = 0;
  1151. if (type != TTM_PL_SYSTEM) {
  1152. if (!p_size) {
  1153. printk(KERN_ERR TTM_PFX
  1154. "Zero size memory manager type %d\n",
  1155. type);
  1156. return ret;
  1157. }
  1158. ret = drm_mm_init(&man->manager, 0, p_size);
  1159. if (ret)
  1160. return ret;
  1161. }
  1162. man->has_type = true;
  1163. man->use_type = true;
  1164. man->size = p_size;
  1165. INIT_LIST_HEAD(&man->lru);
  1166. return 0;
  1167. }
  1168. EXPORT_SYMBOL(ttm_bo_init_mm);
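/*
 * Illustrative example (hypothetical driver values): managed types are sized
 * in pages, so a driver would typically bring up its VRAM domain with
 * something like
 *
 *	ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_size >> PAGE_SHIFT);
 */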
  1169. static void ttm_bo_global_kobj_release(struct kobject *kobj)
  1170. {
  1171. struct ttm_bo_global *glob =
  1172. container_of(kobj, struct ttm_bo_global, kobj);
  1173. ttm_mem_unregister_shrink(glob->mem_glob, &glob->shrink);
  1174. __free_page(glob->dummy_read_page);
  1175. kfree(glob);
  1176. }
  1177. void ttm_bo_global_release(struct ttm_global_reference *ref)
  1178. {
  1179. struct ttm_bo_global *glob = ref->object;
  1180. kobject_del(&glob->kobj);
  1181. kobject_put(&glob->kobj);
  1182. }
  1183. EXPORT_SYMBOL(ttm_bo_global_release);
  1184. int ttm_bo_global_init(struct ttm_global_reference *ref)
  1185. {
  1186. struct ttm_bo_global_ref *bo_ref =
  1187. container_of(ref, struct ttm_bo_global_ref, ref);
  1188. struct ttm_bo_global *glob = ref->object;
  1189. int ret;
  1190. mutex_init(&glob->device_list_mutex);
  1191. spin_lock_init(&glob->lru_lock);
  1192. glob->mem_glob = bo_ref->mem_glob;
  1193. glob->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
  1194. if (unlikely(glob->dummy_read_page == NULL)) {
  1195. ret = -ENOMEM;
  1196. goto out_no_drp;
  1197. }
  1198. INIT_LIST_HEAD(&glob->swap_lru);
  1199. INIT_LIST_HEAD(&glob->device_list);
  1200. ttm_mem_init_shrink(&glob->shrink, ttm_bo_swapout);
  1201. ret = ttm_mem_register_shrink(glob->mem_glob, &glob->shrink);
  1202. if (unlikely(ret != 0)) {
  1203. printk(KERN_ERR TTM_PFX
  1204. "Could not register buffer object swapout.\n");
  1205. goto out_no_shrink;
  1206. }
  1207. glob->ttm_bo_extra_size =
  1208. ttm_round_pot(sizeof(struct ttm_tt)) +
  1209. ttm_round_pot(sizeof(struct ttm_backend));
  1210. glob->ttm_bo_size = glob->ttm_bo_extra_size +
  1211. ttm_round_pot(sizeof(struct ttm_buffer_object));
  1212. atomic_set(&glob->bo_count, 0);
  1213. kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type);
  1214. ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects");
  1215. if (unlikely(ret != 0))
  1216. kobject_put(&glob->kobj);
  1217. return ret;
  1218. out_no_shrink:
  1219. __free_page(glob->dummy_read_page);
  1220. out_no_drp:
  1221. kfree(glob);
  1222. return ret;
  1223. }
  1224. EXPORT_SYMBOL(ttm_bo_global_init);
  1225. int ttm_bo_device_release(struct ttm_bo_device *bdev)
  1226. {
  1227. int ret = 0;
  1228. unsigned i = TTM_NUM_MEM_TYPES;
  1229. struct ttm_mem_type_manager *man;
  1230. struct ttm_bo_global *glob = bdev->glob;
  1231. while (i--) {
  1232. man = &bdev->man[i];
  1233. if (man->has_type) {
  1234. man->use_type = false;
  1235. if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
  1236. ret = -EBUSY;
  1237. printk(KERN_ERR TTM_PFX
  1238. "DRM memory manager type %d "
  1239. "is not clean.\n", i);
  1240. }
  1241. man->has_type = false;
  1242. }
  1243. }
  1244. mutex_lock(&glob->device_list_mutex);
  1245. list_del(&bdev->device_list);
  1246. mutex_unlock(&glob->device_list_mutex);
  1247. if (!cancel_delayed_work(&bdev->wq))
  1248. flush_scheduled_work();
  1249. while (ttm_bo_delayed_delete(bdev, true))
  1250. ;
  1251. spin_lock(&glob->lru_lock);
  1252. if (list_empty(&bdev->ddestroy))
  1253. TTM_DEBUG("Delayed destroy list was clean\n");
  1254. if (list_empty(&bdev->man[0].lru))
  1255. TTM_DEBUG("Swap list was clean\n");
  1256. spin_unlock(&glob->lru_lock);
  1257. BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
  1258. write_lock(&bdev->vm_lock);
  1259. drm_mm_takedown(&bdev->addr_space_mm);
  1260. write_unlock(&bdev->vm_lock);
  1261. return ret;
  1262. }
  1263. EXPORT_SYMBOL(ttm_bo_device_release);
  1264. int ttm_bo_device_init(struct ttm_bo_device *bdev,
  1265. struct ttm_bo_global *glob,
  1266. struct ttm_bo_driver *driver,
  1267. uint64_t file_page_offset,
  1268. bool need_dma32)
  1269. {
  1270. int ret = -EINVAL;
  1271. rwlock_init(&bdev->vm_lock);
  1272. bdev->driver = driver;
  1273. memset(bdev->man, 0, sizeof(bdev->man));
  1274. /*
  1275. * Initialize the system memory buffer type.
  1276. * Other types need to be driver / IOCTL initialized.
  1277. */
  1278. ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0);
  1279. if (unlikely(ret != 0))
  1280. goto out_no_sys;
  1281. bdev->addr_space_rb = RB_ROOT;
  1282. ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
  1283. if (unlikely(ret != 0))
  1284. goto out_no_addr_mm;
  1285. INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
  1286. bdev->nice_mode = true;
  1287. INIT_LIST_HEAD(&bdev->ddestroy);
  1288. bdev->dev_mapping = NULL;
  1289. bdev->glob = glob;
  1290. bdev->need_dma32 = need_dma32;
  1291. mutex_lock(&glob->device_list_mutex);
  1292. list_add_tail(&bdev->device_list, &glob->device_list);
  1293. mutex_unlock(&glob->device_list_mutex);
  1294. return 0;
  1295. out_no_addr_mm:
  1296. ttm_bo_clean_mm(bdev, 0);
  1297. out_no_sys:
  1298. return ret;
  1299. }
  1300. EXPORT_SYMBOL(ttm_bo_device_init);
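/*
 * Illustrative bring-up order (hypothetical driver context, names are
 * examples only):
 *
 *	ttm_bo_device_init(bdev, glob, driver, file_page_offset, need_dma32);
 *	ttm_bo_init_mm(bdev, TTM_PL_VRAM, vram_pages);
 *	ttm_bo_init_mm(bdev, TTM_PL_TT, gart_pages);
 *
 * ttm_bo_device_init() itself only sets up the TTM_PL_SYSTEM type; the other
 * memory types are initialized by the driver afterwards.
 */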
  1301. /*
  1302. * buffer object vm functions.
  1303. */
  1304. bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
  1305. {
  1306. struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
  1307. if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
  1308. if (mem->mem_type == TTM_PL_SYSTEM)
  1309. return false;
  1310. if (man->flags & TTM_MEMTYPE_FLAG_CMA)
  1311. return false;
  1312. if (mem->placement & TTM_PL_FLAG_CACHED)
  1313. return false;
  1314. }
  1315. return true;
  1316. }
  1317. int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
  1318. struct ttm_mem_reg *mem,
  1319. unsigned long *bus_base,
  1320. unsigned long *bus_offset, unsigned long *bus_size)
  1321. {
  1322. struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
  1323. *bus_size = 0;
  1324. if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
  1325. return -EINVAL;
  1326. if (ttm_mem_reg_is_pci(bdev, mem)) {
  1327. *bus_offset = mem->mm_node->start << PAGE_SHIFT;
  1328. *bus_size = mem->num_pages << PAGE_SHIFT;
  1329. *bus_base = man->io_offset;
  1330. }
  1331. return 0;
  1332. }
  1333. void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
  1334. {
  1335. struct ttm_bo_device *bdev = bo->bdev;
  1336. loff_t offset = (loff_t) bo->addr_space_offset;
  1337. loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
  1338. if (!bdev->dev_mapping)
  1339. return;
  1340. unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
  1341. }
  1342. EXPORT_SYMBOL(ttm_bo_unmap_virtual);
  1343. static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
  1344. {
  1345. struct ttm_bo_device *bdev = bo->bdev;
  1346. struct rb_node **cur = &bdev->addr_space_rb.rb_node;
  1347. struct rb_node *parent = NULL;
  1348. struct ttm_buffer_object *cur_bo;
  1349. unsigned long offset = bo->vm_node->start;
  1350. unsigned long cur_offset;
  1351. while (*cur) {
  1352. parent = *cur;
  1353. cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
  1354. cur_offset = cur_bo->vm_node->start;
  1355. if (offset < cur_offset)
  1356. cur = &parent->rb_left;
  1357. else if (offset > cur_offset)
  1358. cur = &parent->rb_right;
  1359. else
  1360. BUG();
  1361. }
  1362. rb_link_node(&bo->vm_rb, parent, cur);
  1363. rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
  1364. }
/**
 * ttm_bo_setup_vm:
 *
 * @bo: the buffer to allocate address space for
 *
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to ttm_bo_type_device objects as others are not
 * placed in the drm device address space.
 */
  1375. static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
  1376. {
  1377. struct ttm_bo_device *bdev = bo->bdev;
  1378. int ret;
  1379. retry_pre_get:
  1380. ret = drm_mm_pre_get(&bdev->addr_space_mm);
  1381. if (unlikely(ret != 0))
  1382. return ret;
  1383. write_lock(&bdev->vm_lock);
  1384. bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
  1385. bo->mem.num_pages, 0, 0);
  1386. if (unlikely(bo->vm_node == NULL)) {
  1387. ret = -ENOMEM;
  1388. goto out_unlock;
  1389. }
  1390. bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
  1391. bo->mem.num_pages, 0);
  1392. if (unlikely(bo->vm_node == NULL)) {
  1393. write_unlock(&bdev->vm_lock);
  1394. goto retry_pre_get;
  1395. }
  1396. ttm_bo_vm_insert_rb(bo);
  1397. write_unlock(&bdev->vm_lock);
  1398. bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
  1399. return 0;
  1400. out_unlock:
  1401. write_unlock(&bdev->vm_lock);
  1402. return ret;
  1403. }
  1404. int ttm_bo_wait(struct ttm_buffer_object *bo,
  1405. bool lazy, bool interruptible, bool no_wait)
  1406. {
  1407. struct ttm_bo_driver *driver = bo->bdev->driver;
  1408. void *sync_obj;
  1409. void *sync_obj_arg;
  1410. int ret = 0;
  1411. if (likely(bo->sync_obj == NULL))
  1412. return 0;
  1413. while (bo->sync_obj) {
  1414. if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
  1415. void *tmp_obj = bo->sync_obj;
  1416. bo->sync_obj = NULL;
  1417. clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
  1418. spin_unlock(&bo->lock);
  1419. driver->sync_obj_unref(&tmp_obj);
  1420. spin_lock(&bo->lock);
  1421. continue;
  1422. }
  1423. if (no_wait)
  1424. return -EBUSY;
  1425. sync_obj = driver->sync_obj_ref(bo->sync_obj);
  1426. sync_obj_arg = bo->sync_obj_arg;
  1427. spin_unlock(&bo->lock);
  1428. ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
  1429. lazy, interruptible);
  1430. if (unlikely(ret != 0)) {
  1431. driver->sync_obj_unref(&sync_obj);
  1432. spin_lock(&bo->lock);
  1433. return ret;
  1434. }
  1435. spin_lock(&bo->lock);
  1436. if (likely(bo->sync_obj == sync_obj &&
  1437. bo->sync_obj_arg == sync_obj_arg)) {
  1438. void *tmp_obj = bo->sync_obj;
  1439. bo->sync_obj = NULL;
  1440. clear_bit(TTM_BO_PRIV_FLAG_MOVING,
  1441. &bo->priv_flags);
  1442. spin_unlock(&bo->lock);
  1443. driver->sync_obj_unref(&sync_obj);
  1444. driver->sync_obj_unref(&tmp_obj);
  1445. spin_lock(&bo->lock);
  1446. } else {
  1447. spin_unlock(&bo->lock);
  1448. driver->sync_obj_unref(&sync_obj);
  1449. spin_lock(&bo->lock);
  1450. }
  1451. }
  1452. return 0;
  1453. }
  1454. EXPORT_SYMBOL(ttm_bo_wait);
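/*
 * Locking note (descriptive): ttm_bo_wait() must be called with bo->lock
 * held; it temporarily drops and re-acquires that lock around the driver's
 * sync_obj_wait() and sync_obj_unref() calls.
 */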
  1455. void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
  1456. {
  1457. atomic_set(&bo->reserved, 0);
  1458. wake_up_all(&bo->event_queue);
  1459. }
  1460. int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
  1461. bool no_wait)
  1462. {
  1463. int ret;
  1464. while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
  1465. if (no_wait)
  1466. return -EBUSY;
  1467. else if (interruptible) {
  1468. ret = wait_event_interruptible
  1469. (bo->event_queue, atomic_read(&bo->reserved) == 0);
  1470. if (unlikely(ret != 0))
  1471. return ret;
  1472. } else {
  1473. wait_event(bo->event_queue,
  1474. atomic_read(&bo->reserved) == 0);
  1475. }
  1476. }
  1477. return 0;
  1478. }
  1479. int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
  1480. {
  1481. int ret = 0;
  1482. /*
  1483. * Using ttm_bo_reserve instead of ttm_bo_block_reservation
  1484. * makes sure the lru lists are updated.
  1485. */
  1486. ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
  1487. if (unlikely(ret != 0))
  1488. return ret;
  1489. spin_lock(&bo->lock);
  1490. ret = ttm_bo_wait(bo, false, true, no_wait);
  1491. spin_unlock(&bo->lock);
  1492. if (likely(ret == 0))
  1493. atomic_inc(&bo->cpu_writers);
  1494. ttm_bo_unreserve(bo);
  1495. return ret;
  1496. }
  1497. EXPORT_SYMBOL(ttm_bo_synccpu_write_grab);
  1498. void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
  1499. {
  1500. if (atomic_dec_and_test(&bo->cpu_writers))
  1501. wake_up_all(&bo->event_queue);
  1502. }
  1503. EXPORT_SYMBOL(ttm_bo_synccpu_write_release);
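/*
 * Illustrative usage sketch (hypothetical driver context): CPU writes are
 * bracketed by a grab/release pair so that ttm_bo_wait_cpu() callers wait
 * for the writer to finish:
 *
 *	ret = ttm_bo_synccpu_write_grab(bo, false);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	(CPU writes to the mapped buffer go here)
 *	ttm_bo_synccpu_write_release(bo);
 */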
/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_global::swap_lru list.
 */
  1508. static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
  1509. {
  1510. struct ttm_bo_global *glob =
  1511. container_of(shrink, struct ttm_bo_global, shrink);
  1512. struct ttm_buffer_object *bo;
  1513. int ret = -EBUSY;
  1514. int put_count;
  1515. uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
  1516. spin_lock(&glob->lru_lock);
  1517. while (ret == -EBUSY) {
  1518. if (unlikely(list_empty(&glob->swap_lru))) {
  1519. spin_unlock(&glob->lru_lock);
  1520. return -EBUSY;
  1521. }
  1522. bo = list_first_entry(&glob->swap_lru,
  1523. struct ttm_buffer_object, swap);
  1524. kref_get(&bo->list_kref);
  1525. /**
  1526. * Reserve buffer. Since we unlock while sleeping, we need
  1527. * to re-check that nobody removed us from the swap-list while
  1528. * we slept.
  1529. */
  1530. ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
  1531. if (unlikely(ret == -EBUSY)) {
  1532. spin_unlock(&glob->lru_lock);
  1533. ttm_bo_wait_unreserved(bo, false);
  1534. kref_put(&bo->list_kref, ttm_bo_release_list);
  1535. spin_lock(&glob->lru_lock);
  1536. }
  1537. }
  1538. BUG_ON(ret != 0);
  1539. put_count = ttm_bo_del_from_lru(bo);
  1540. spin_unlock(&glob->lru_lock);
  1541. while (put_count--)
  1542. kref_put(&bo->list_kref, ttm_bo_ref_bug);
  1543. /**
  1544. * Wait for GPU, then move to system cached.
  1545. */
  1546. spin_lock(&bo->lock);
  1547. ret = ttm_bo_wait(bo, false, false, false);
  1548. spin_unlock(&bo->lock);
  1549. if (unlikely(ret != 0))
  1550. goto out;
  1551. if ((bo->mem.placement & swap_placement) != swap_placement) {
  1552. struct ttm_mem_reg evict_mem;
  1553. evict_mem = bo->mem;
  1554. evict_mem.mm_node = NULL;
  1555. evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
  1556. evict_mem.mem_type = TTM_PL_SYSTEM;
  1557. ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
  1558. false, false);
  1559. if (unlikely(ret != 0))
  1560. goto out;
  1561. }
  1562. ttm_bo_unmap_virtual(bo);
  1563. /**
  1564. * Swap out. Buffer will be swapped in again as soon as
  1565. * anyone tries to access a ttm page.
  1566. */
  1567. if (bo->bdev->driver->swap_notify)
  1568. bo->bdev->driver->swap_notify(bo);
  1569. ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
  1570. out:
  1571. /**
  1572. *
  1573. * Unreserve without putting on LRU to avoid swapping out an
  1574. * already swapped buffer.
  1575. */
  1576. atomic_set(&bo->reserved, 0);
  1577. wake_up_all(&bo->event_queue);
  1578. kref_put(&bo->list_kref, ttm_bo_release_list);
  1579. return ret;
  1580. }
void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
	while (ttm_bo_swapout(&bdev->glob->shrink) == 0)
		;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);