ttm_bo.c

  1. /**************************************************************************
  2. *
  3. * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
  4. * All Rights Reserved.
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a
  7. * copy of this software and associated documentation files (the
  8. * "Software"), to deal in the Software without restriction, including
  9. * without limitation the rights to use, copy, modify, merge, publish,
  10. * distribute, sub license, and/or sell copies of the Software, and to
  11. * permit persons to whom the Software is furnished to do so, subject to
  12. * the following conditions:
  13. *
  14. * The above copyright notice and this permission notice (including the
  15. * next paragraph) shall be included in all copies or substantial portions
  16. * of the Software.
  17. *
  18. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20. * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21. * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22. * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23. * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24. * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25. *
  26. **************************************************************************/
  27. /*
  28. * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  29. */
  30. #include "ttm/ttm_module.h"
  31. #include "ttm/ttm_bo_driver.h"
  32. #include "ttm/ttm_placement.h"
  33. #include <linux/jiffies.h>
  34. #include <linux/slab.h>
  35. #include <linux/sched.h>
  36. #include <linux/mm.h>
  37. #include <linux/file.h>
  38. #include <linux/module.h>
  39. #define TTM_ASSERT_LOCKED(param)
  40. #define TTM_DEBUG(fmt, arg...)
  41. #define TTM_BO_HASH_ORDER 13
  42. static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
  43. static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
  44. static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
  45. static inline uint32_t ttm_bo_type_flags(unsigned type)
  46. {
  47. return 1 << (type);
  48. }
  49. static void ttm_bo_release_list(struct kref *list_kref)
  50. {
  51. struct ttm_buffer_object *bo =
  52. container_of(list_kref, struct ttm_buffer_object, list_kref);
  53. struct ttm_bo_device *bdev = bo->bdev;
  54. BUG_ON(atomic_read(&bo->list_kref.refcount));
  55. BUG_ON(atomic_read(&bo->kref.refcount));
  56. BUG_ON(atomic_read(&bo->cpu_writers));
  57. BUG_ON(bo->sync_obj != NULL);
  58. BUG_ON(bo->mem.mm_node != NULL);
  59. BUG_ON(!list_empty(&bo->lru));
  60. BUG_ON(!list_empty(&bo->ddestroy));
  61. if (bo->ttm)
  62. ttm_tt_destroy(bo->ttm);
  63. if (bo->destroy)
  64. bo->destroy(bo);
  65. else {
  66. ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false);
  67. kfree(bo);
  68. }
  69. }
  70. int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
  71. {
  72. if (interruptible) {
  73. int ret = 0;
  74. ret = wait_event_interruptible(bo->event_queue,
  75. atomic_read(&bo->reserved) == 0);
  76. if (unlikely(ret != 0))
  77. return -ERESTART;
  78. } else {
  79. wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
  80. }
  81. return 0;
  82. }
  83. static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
  84. {
  85. struct ttm_bo_device *bdev = bo->bdev;
  86. struct ttm_mem_type_manager *man;
  87. BUG_ON(!atomic_read(&bo->reserved));
  88. if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
  89. BUG_ON(!list_empty(&bo->lru));
  90. man = &bdev->man[bo->mem.mem_type];
  91. list_add_tail(&bo->lru, &man->lru);
  92. kref_get(&bo->list_kref);
  93. if (bo->ttm != NULL) {
  94. list_add_tail(&bo->swap, &bdev->swap_lru);
  95. kref_get(&bo->list_kref);
  96. }
  97. }
  98. }
  99. /**
  100. * Call with the lru_lock held.
  101. */
  102. static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
  103. {
  104. int put_count = 0;
  105. if (!list_empty(&bo->swap)) {
  106. list_del_init(&bo->swap);
  107. ++put_count;
  108. }
  109. if (!list_empty(&bo->lru)) {
  110. list_del_init(&bo->lru);
  111. ++put_count;
  112. }
  113. /*
  114. * TODO: Add a driver hook to delete from
  115. * driver-specific LRU's here.
  116. */
  117. return put_count;
  118. }
  119. int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
  120. bool interruptible,
  121. bool no_wait, bool use_sequence, uint32_t sequence)
  122. {
  123. struct ttm_bo_device *bdev = bo->bdev;
  124. int ret;
  125. while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
  126. if (use_sequence && bo->seq_valid &&
  127. (sequence - bo->val_seq < (1 << 31))) {
  128. return -EAGAIN;
  129. }
  130. if (no_wait)
  131. return -EBUSY;
  132. spin_unlock(&bdev->lru_lock);
  133. ret = ttm_bo_wait_unreserved(bo, interruptible);
  134. spin_lock(&bdev->lru_lock);
  135. if (unlikely(ret))
  136. return ret;
  137. }
  138. if (use_sequence) {
  139. bo->val_seq = sequence;
  140. bo->seq_valid = true;
  141. } else {
  142. bo->seq_valid = false;
  143. }
  144. return 0;
  145. }
  146. EXPORT_SYMBOL(ttm_bo_reserve);
  147. static void ttm_bo_ref_bug(struct kref *list_kref)
  148. {
  149. BUG();
  150. }
  151. int ttm_bo_reserve(struct ttm_buffer_object *bo,
  152. bool interruptible,
  153. bool no_wait, bool use_sequence, uint32_t sequence)
  154. {
  155. struct ttm_bo_device *bdev = bo->bdev;
  156. int put_count = 0;
  157. int ret;
  158. spin_lock(&bdev->lru_lock);
  159. ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
  160. sequence);
  161. if (likely(ret == 0))
  162. put_count = ttm_bo_del_from_lru(bo);
  163. spin_unlock(&bdev->lru_lock);
  164. while (put_count--)
  165. kref_put(&bo->list_kref, ttm_bo_ref_bug);
  166. return ret;
  167. }
  168. void ttm_bo_unreserve(struct ttm_buffer_object *bo)
  169. {
  170. struct ttm_bo_device *bdev = bo->bdev;
  171. spin_lock(&bdev->lru_lock);
  172. ttm_bo_add_to_lru(bo);
  173. atomic_set(&bo->reserved, 0);
  174. wake_up_all(&bo->event_queue);
  175. spin_unlock(&bdev->lru_lock);
  176. }
  177. EXPORT_SYMBOL(ttm_bo_unreserve);
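/*
 * A minimal usage sketch of the reservation API above (illustrative only;
 * "bo" is assumed to be an already-initialized buffer object and error
 * handling is abbreviated):
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, 0);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... bo is reserved and off the LRU lists here ...
 *	ttm_bo_unreserve(bo);
 */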
  178. /*
  179. * Call with bo->mutex held.
  180. */
  181. static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
  182. {
  183. struct ttm_bo_device *bdev = bo->bdev;
  184. int ret = 0;
  185. uint32_t page_flags = 0;
  186. TTM_ASSERT_LOCKED(&bo->mutex);
  187. bo->ttm = NULL;
  188. switch (bo->type) {
  189. case ttm_bo_type_device:
  190. if (zero_alloc)
  191. page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
  192. case ttm_bo_type_kernel:
  193. bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
  194. page_flags, bdev->dummy_read_page);
  195. if (unlikely(bo->ttm == NULL))
  196. ret = -ENOMEM;
  197. break;
  198. case ttm_bo_type_user:
  199. bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
  200. page_flags | TTM_PAGE_FLAG_USER,
  201. bdev->dummy_read_page);
  202. if (unlikely(bo->ttm == NULL)) {
  203. ret = -ENOMEM; break;
  204. }
  205. ret = ttm_tt_set_user(bo->ttm, current,
  206. bo->buffer_start, bo->num_pages);
  207. if (unlikely(ret != 0))
  208. ttm_tt_destroy(bo->ttm);
  209. break;
  210. default:
  211. printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
  212. ret = -EINVAL;
  213. break;
  214. }
  215. return ret;
  216. }
  217. static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
  218. struct ttm_mem_reg *mem,
  219. bool evict, bool interruptible, bool no_wait)
  220. {
  221. struct ttm_bo_device *bdev = bo->bdev;
  222. bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
  223. bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
  224. struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
  225. struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
  226. int ret = 0;
  227. if (old_is_pci || new_is_pci ||
  228. ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
  229. ttm_bo_unmap_virtual(bo);
  230. /*
  231. * Create and bind a ttm if required.
  232. */
  233. if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
  234. ret = ttm_bo_add_ttm(bo, false);
  235. if (ret)
  236. goto out_err;
  237. ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
  238. if (ret)
  239. return ret;
  240. if (mem->mem_type != TTM_PL_SYSTEM) {
  241. ret = ttm_tt_bind(bo->ttm, mem);
  242. if (ret)
  243. goto out_err;
  244. }
  245. if (bo->mem.mem_type == TTM_PL_SYSTEM) {
  246. struct ttm_mem_reg *old_mem = &bo->mem;
  247. uint32_t save_flags = old_mem->placement;
  248. *old_mem = *mem;
  249. mem->mm_node = NULL;
  250. ttm_flag_masked(&save_flags, mem->placement,
  251. TTM_PL_MASK_MEMTYPE);
  252. goto moved;
  253. }
  254. }
  255. if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
  256. !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
  257. ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
  258. else if (bdev->driver->move)
  259. ret = bdev->driver->move(bo, evict, interruptible,
  260. no_wait, mem);
  261. else
  262. ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
  263. if (ret)
  264. goto out_err;
  265. moved:
  266. if (bo->evicted) {
  267. ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
  268. if (ret)
  269. printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
  270. bo->evicted = false;
  271. }
  272. if (bo->mem.mm_node) {
  273. spin_lock(&bo->lock);
  274. bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
  275. bdev->man[bo->mem.mem_type].gpu_offset;
  276. bo->cur_placement = bo->mem.placement;
  277. spin_unlock(&bo->lock);
  278. }
  279. return 0;
  280. out_err:
  281. new_man = &bdev->man[bo->mem.mem_type];
  282. if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
  283. ttm_tt_unbind(bo->ttm);
  284. ttm_tt_destroy(bo->ttm);
  285. bo->ttm = NULL;
  286. }
  287. return ret;
  288. }
  289. /**
  290. * If the bo is idle, remove it from the delayed-destroy and LRU lists and unref it.
  291. * If it is not idle and already on the delayed-destroy list, do nothing.
  292. * If it is not idle and not yet on that list, add it to the list,
  293. * take a list_kref reference and schedule a delayed list check.
  294. */
  295. static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
  296. {
  297. struct ttm_bo_device *bdev = bo->bdev;
  298. struct ttm_bo_driver *driver = bdev->driver;
  299. int ret;
  300. spin_lock(&bo->lock);
  301. (void) ttm_bo_wait(bo, false, false, !remove_all);
  302. if (!bo->sync_obj) {
  303. int put_count;
  304. spin_unlock(&bo->lock);
  305. spin_lock(&bdev->lru_lock);
  306. ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
  307. BUG_ON(ret);
  308. if (bo->ttm)
  309. ttm_tt_unbind(bo->ttm);
  310. if (!list_empty(&bo->ddestroy)) {
  311. list_del_init(&bo->ddestroy);
  312. kref_put(&bo->list_kref, ttm_bo_ref_bug);
  313. }
  314. if (bo->mem.mm_node) {
  315. drm_mm_put_block(bo->mem.mm_node);
  316. bo->mem.mm_node = NULL;
  317. }
  318. put_count = ttm_bo_del_from_lru(bo);
  319. spin_unlock(&bdev->lru_lock);
  320. atomic_set(&bo->reserved, 0);
  321. while (put_count--)
  322. kref_put(&bo->list_kref, ttm_bo_release_list);
  323. return 0;
  324. }
  325. spin_lock(&bdev->lru_lock);
  326. if (list_empty(&bo->ddestroy)) {
  327. void *sync_obj = bo->sync_obj;
  328. void *sync_obj_arg = bo->sync_obj_arg;
  329. kref_get(&bo->list_kref);
  330. list_add_tail(&bo->ddestroy, &bdev->ddestroy);
  331. spin_unlock(&bdev->lru_lock);
  332. spin_unlock(&bo->lock);
  333. if (sync_obj)
  334. driver->sync_obj_flush(sync_obj, sync_obj_arg);
  335. schedule_delayed_work(&bdev->wq,
  336. ((HZ / 100) < 1) ? 1 : HZ / 100);
  337. ret = 0;
  338. } else {
  339. spin_unlock(&bdev->lru_lock);
  340. spin_unlock(&bo->lock);
  341. ret = -EBUSY;
  342. }
  343. return ret;
  344. }
  345. /**
  346. * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
  347. * encountered buffers.
  348. */
  349. static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
  350. {
  351. struct ttm_buffer_object *entry, *nentry;
  352. struct list_head *list, *next;
  353. int ret;
  354. spin_lock(&bdev->lru_lock);
  355. list_for_each_safe(list, next, &bdev->ddestroy) {
  356. entry = list_entry(list, struct ttm_buffer_object, ddestroy);
  357. nentry = NULL;
  358. /*
  359. * Protect the next list entry from destruction while we
  360. * unlock the lru_lock.
  361. */
  362. if (next != &bdev->ddestroy) {
  363. nentry = list_entry(next, struct ttm_buffer_object,
  364. ddestroy);
  365. kref_get(&nentry->list_kref);
  366. }
  367. kref_get(&entry->list_kref);
  368. spin_unlock(&bdev->lru_lock);
  369. ret = ttm_bo_cleanup_refs(entry, remove_all);
  370. kref_put(&entry->list_kref, ttm_bo_release_list);
  371. spin_lock(&bdev->lru_lock);
  372. if (nentry) {
  373. bool next_onlist = !list_empty(next);
  374. spin_unlock(&bdev->lru_lock);
  375. kref_put(&nentry->list_kref, ttm_bo_release_list);
  376. spin_lock(&bdev->lru_lock);
  377. /*
  378. * Someone might have raced us and removed the
  379. * next entry from the list. We don't bother restarting
  380. * list traversal.
  381. */
  382. if (!next_onlist)
  383. break;
  384. }
  385. if (ret)
  386. break;
  387. }
  388. ret = !list_empty(&bdev->ddestroy);
  389. spin_unlock(&bdev->lru_lock);
  390. return ret;
  391. }
  392. static void ttm_bo_delayed_workqueue(struct work_struct *work)
  393. {
  394. struct ttm_bo_device *bdev =
  395. container_of(work, struct ttm_bo_device, wq.work);
  396. if (ttm_bo_delayed_delete(bdev, false)) {
  397. schedule_delayed_work(&bdev->wq,
  398. ((HZ / 100) < 1) ? 1 : HZ / 100);
  399. }
  400. }
  401. static void ttm_bo_release(struct kref *kref)
  402. {
  403. struct ttm_buffer_object *bo =
  404. container_of(kref, struct ttm_buffer_object, kref);
  405. struct ttm_bo_device *bdev = bo->bdev;
  406. if (likely(bo->vm_node != NULL)) {
  407. rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
  408. drm_mm_put_block(bo->vm_node);
  409. bo->vm_node = NULL;
  410. }
  411. write_unlock(&bdev->vm_lock);
  412. ttm_bo_cleanup_refs(bo, false);
  413. kref_put(&bo->list_kref, ttm_bo_release_list);
  414. write_lock(&bdev->vm_lock);
  415. }
  416. void ttm_bo_unref(struct ttm_buffer_object **p_bo)
  417. {
  418. struct ttm_buffer_object *bo = *p_bo;
  419. struct ttm_bo_device *bdev = bo->bdev;
  420. *p_bo = NULL;
  421. write_lock(&bdev->vm_lock);
  422. kref_put(&bo->kref, ttm_bo_release);
  423. write_unlock(&bdev->vm_lock);
  424. }
  425. EXPORT_SYMBOL(ttm_bo_unref);
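/*
 * Illustrative note: ttm_bo_unref() drops a reference and clears the
 * caller's pointer, so the pointer must not be used afterwards:
 *
 *	ttm_bo_unref(&bo);
 *	... bo == NULL here ...
 */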
  426. static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
  427. bool interruptible, bool no_wait)
  428. {
  429. int ret = 0;
  430. struct ttm_bo_device *bdev = bo->bdev;
  431. struct ttm_mem_reg evict_mem;
  432. uint32_t proposed_placement;
  433. if (bo->mem.mem_type != mem_type)
  434. goto out;
  435. spin_lock(&bo->lock);
  436. ret = ttm_bo_wait(bo, false, interruptible, no_wait);
  437. spin_unlock(&bo->lock);
  438. if (ret && ret != -ERESTART) {
  439. printk(KERN_ERR TTM_PFX "Failed to expire sync object before "
  440. "buffer eviction.\n");
  441. goto out;
  442. }
  443. BUG_ON(!atomic_read(&bo->reserved));
  444. evict_mem = bo->mem;
  445. evict_mem.mm_node = NULL;
  446. proposed_placement = bdev->driver->evict_flags(bo);
  447. ret = ttm_bo_mem_space(bo, proposed_placement,
  448. &evict_mem, interruptible, no_wait);
  449. if (unlikely(ret != 0 && ret != -ERESTART))
  450. ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM,
  451. &evict_mem, interruptible, no_wait);
  452. if (ret) {
  453. if (ret != -ERESTART)
  454. printk(KERN_ERR TTM_PFX
  455. "Failed to find memory space for "
  456. "buffer 0x%p eviction.\n", bo);
  457. goto out;
  458. }
  459. ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
  460. no_wait);
  461. if (ret) {
  462. if (ret != -ERESTART)
  463. printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
  464. goto out;
  465. }
  466. spin_lock(&bdev->lru_lock);
  467. if (evict_mem.mm_node) {
  468. drm_mm_put_block(evict_mem.mm_node);
  469. evict_mem.mm_node = NULL;
  470. }
  471. spin_unlock(&bdev->lru_lock);
  472. bo->evicted = true;
  473. out:
  474. return ret;
  475. }
  476. /**
  477. * Repeatedly evict memory from the LRU for @mem_type until we create enough
  478. * space, or we've evicted everything and there isn't enough space.
  479. */
  480. static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
  481. struct ttm_mem_reg *mem,
  482. uint32_t mem_type,
  483. bool interruptible, bool no_wait)
  484. {
  485. struct drm_mm_node *node;
  486. struct ttm_buffer_object *entry;
  487. struct ttm_mem_type_manager *man = &bdev->man[mem_type];
  488. struct list_head *lru;
  489. unsigned long num_pages = mem->num_pages;
  490. int put_count = 0;
  491. int ret;
  492. retry_pre_get:
  493. ret = drm_mm_pre_get(&man->manager);
  494. if (unlikely(ret != 0))
  495. return ret;
  496. spin_lock(&bdev->lru_lock);
  497. do {
  498. node = drm_mm_search_free(&man->manager, num_pages,
  499. mem->page_alignment, 1);
  500. if (node)
  501. break;
  502. lru = &man->lru;
  503. if (list_empty(lru))
  504. break;
  505. entry = list_first_entry(lru, struct ttm_buffer_object, lru);
  506. kref_get(&entry->list_kref);
  507. ret =
  508. ttm_bo_reserve_locked(entry, interruptible, no_wait,
  509. false, 0);
  510. if (likely(ret == 0))
  511. put_count = ttm_bo_del_from_lru(entry);
  512. spin_unlock(&bdev->lru_lock);
  513. if (unlikely(ret != 0))
  514. return ret;
  515. while (put_count--)
  516. kref_put(&entry->list_kref, ttm_bo_ref_bug);
  517. ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);
  518. ttm_bo_unreserve(entry);
  519. kref_put(&entry->list_kref, ttm_bo_release_list);
  520. if (ret)
  521. return ret;
  522. spin_lock(&bdev->lru_lock);
  523. } while (1);
  524. if (!node) {
  525. spin_unlock(&bdev->lru_lock);
  526. return -ENOMEM;
  527. }
  528. node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
  529. if (unlikely(!node)) {
  530. spin_unlock(&bdev->lru_lock);
  531. goto retry_pre_get;
  532. }
  533. spin_unlock(&bdev->lru_lock);
  534. mem->mm_node = node;
  535. mem->mem_type = mem_type;
  536. return 0;
  537. }
  538. static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
  539. bool disallow_fixed,
  540. uint32_t mem_type,
  541. uint32_t mask, uint32_t *res_mask)
  542. {
  543. uint32_t cur_flags = ttm_bo_type_flags(mem_type);
  544. if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
  545. return false;
  546. if ((cur_flags & mask & TTM_PL_MASK_MEM) == 0)
  547. return false;
  548. if ((mask & man->available_caching) == 0)
  549. return false;
  550. if (mask & man->default_caching)
  551. cur_flags |= man->default_caching;
  552. else if (mask & TTM_PL_FLAG_CACHED)
  553. cur_flags |= TTM_PL_FLAG_CACHED;
  554. else if (mask & TTM_PL_FLAG_WC)
  555. cur_flags |= TTM_PL_FLAG_WC;
  556. else
  557. cur_flags |= TTM_PL_FLAG_UNCACHED;
  558. *res_mask = cur_flags;
  559. return true;
  560. }
  561. /**
  562. * Creates space for memory region @mem according to its type.
  563. *
  564. * This function first searches for free space in compatible memory types in
  565. * the priority order defined by the driver. If free space isn't found, then
  566. * ttm_bo_mem_force_space is attempted in priority order to evict and find
  567. * space.
  568. */
  569. int ttm_bo_mem_space(struct ttm_buffer_object *bo,
  570. uint32_t proposed_placement,
  571. struct ttm_mem_reg *mem,
  572. bool interruptible, bool no_wait)
  573. {
  574. struct ttm_bo_device *bdev = bo->bdev;
  575. struct ttm_mem_type_manager *man;
  576. uint32_t num_prios = bdev->driver->num_mem_type_prio;
  577. const uint32_t *prios = bdev->driver->mem_type_prio;
  578. uint32_t i;
  579. uint32_t mem_type = TTM_PL_SYSTEM;
  580. uint32_t cur_flags = 0;
  581. bool type_found = false;
  582. bool type_ok = false;
  583. bool has_eagain = false;
  584. struct drm_mm_node *node = NULL;
  585. int ret;
  586. mem->mm_node = NULL;
  587. for (i = 0; i < num_prios; ++i) {
  588. mem_type = prios[i];
  589. man = &bdev->man[mem_type];
  590. type_ok = ttm_bo_mt_compatible(man,
  591. bo->type == ttm_bo_type_user,
  592. mem_type, proposed_placement,
  593. &cur_flags);
  594. if (!type_ok)
  595. continue;
  596. if (mem_type == TTM_PL_SYSTEM)
  597. break;
  598. if (man->has_type && man->use_type) {
  599. type_found = true;
  600. do {
  601. ret = drm_mm_pre_get(&man->manager);
  602. if (unlikely(ret))
  603. return ret;
  604. spin_lock(&bdev->lru_lock);
  605. node = drm_mm_search_free(&man->manager,
  606. mem->num_pages,
  607. mem->page_alignment,
  608. 1);
  609. if (unlikely(!node)) {
  610. spin_unlock(&bdev->lru_lock);
  611. break;
  612. }
  613. node = drm_mm_get_block_atomic(node,
  614. mem->num_pages,
  615. mem->
  616. page_alignment);
  617. spin_unlock(&bdev->lru_lock);
  618. } while (!node);
  619. }
  620. if (node)
  621. break;
  622. }
  623. if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
  624. mem->mm_node = node;
  625. mem->mem_type = mem_type;
  626. mem->placement = cur_flags;
  627. return 0;
  628. }
  629. if (!type_found)
  630. return -EINVAL;
  631. num_prios = bdev->driver->num_mem_busy_prio;
  632. prios = bdev->driver->mem_busy_prio;
  633. for (i = 0; i < num_prios; ++i) {
  634. mem_type = prios[i];
  635. man = &bdev->man[mem_type];
  636. if (!man->has_type)
  637. continue;
  638. if (!ttm_bo_mt_compatible(man,
  639. bo->type == ttm_bo_type_user,
  640. mem_type,
  641. proposed_placement, &cur_flags))
  642. continue;
  643. ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
  644. interruptible, no_wait);
  645. if (ret == 0 && mem->mm_node) {
  646. mem->placement = cur_flags;
  647. return 0;
  648. }
  649. if (ret == -ERESTART)
  650. has_eagain = true;
  651. }
  652. ret = (has_eagain) ? -ERESTART : -ENOMEM;
  653. return ret;
  654. }
  655. EXPORT_SYMBOL(ttm_bo_mem_space);
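/*
 * Illustrative sketch of the driver-supplied priority arrays consumed by
 * ttm_bo_mem_space(). A hypothetical driver preferring VRAM over GART
 * might populate its ttm_bo_driver roughly like this (the placement
 * domains are driver policy, not mandated here):
 *
 *	static const uint32_t my_mem_prios[] = {
 *		TTM_PL_VRAM, TTM_PL_TT, TTM_PL_SYSTEM
 *	};
 *
 *	driver.num_mem_type_prio = ARRAY_SIZE(my_mem_prios);
 *	driver.mem_type_prio = my_mem_prios;
 *	driver.num_mem_busy_prio = ARRAY_SIZE(my_mem_prios);
 *	driver.mem_busy_prio = my_mem_prios;
 */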
  656. int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
  657. {
  658. int ret = 0;
  659. if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
  660. return -EBUSY;
  661. ret = wait_event_interruptible(bo->event_queue,
  662. atomic_read(&bo->cpu_writers) == 0);
  663. if (ret == -ERESTARTSYS)
  664. ret = -ERESTART;
  665. return ret;
  666. }
  667. int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
  668. uint32_t proposed_placement,
  669. bool interruptible, bool no_wait)
  670. {
  671. struct ttm_bo_device *bdev = bo->bdev;
  672. int ret = 0;
  673. struct ttm_mem_reg mem;
  674. BUG_ON(!atomic_read(&bo->reserved));
  675. /*
  676. * FIXME: It's possible to pipeline buffer moves.
  677. * Have the driver move function wait for idle when necessary,
  678. * instead of doing it here.
  679. */
  680. spin_lock(&bo->lock);
  681. ret = ttm_bo_wait(bo, false, interruptible, no_wait);
  682. spin_unlock(&bo->lock);
  683. if (ret)
  684. return ret;
  685. mem.num_pages = bo->num_pages;
  686. mem.size = mem.num_pages << PAGE_SHIFT;
  687. mem.page_alignment = bo->mem.page_alignment;
  688. /*
  689. * Determine where to move the buffer.
  690. */
  691. ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
  692. interruptible, no_wait);
  693. if (ret)
  694. goto out_unlock;
  695. ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
  696. out_unlock:
  697. if (ret && mem.mm_node) {
  698. spin_lock(&bdev->lru_lock);
  699. drm_mm_put_block(mem.mm_node);
  700. spin_unlock(&bdev->lru_lock);
  701. }
  702. return ret;
  703. }
  704. static int ttm_bo_mem_compat(uint32_t proposed_placement,
  705. struct ttm_mem_reg *mem)
  706. {
  707. if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0)
  708. return 0;
  709. if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0)
  710. return 0;
  711. return 1;
  712. }
  713. int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
  714. uint32_t proposed_placement,
  715. bool interruptible, bool no_wait)
  716. {
  717. int ret;
  718. BUG_ON(!atomic_read(&bo->reserved));
  719. bo->proposed_placement = proposed_placement;
  720. TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n",
  721. (unsigned long)proposed_placement,
  722. (unsigned long)bo->mem.placement);
  723. /*
  724. * Check whether we need to move buffer.
  725. */
  726. if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) {
  727. ret = ttm_bo_move_buffer(bo, bo->proposed_placement,
  728. interruptible, no_wait);
  729. if (ret) {
  730. if (ret != -ERESTART)
  731. printk(KERN_ERR TTM_PFX
  732. "Failed moving buffer. "
  733. "Proposed placement 0x%08x\n",
  734. bo->proposed_placement);
  735. if (ret == -ENOMEM)
  736. printk(KERN_ERR TTM_PFX
  737. "Out of aperture space or "
  738. "DRM memory quota.\n");
  739. return ret;
  740. }
  741. }
  742. /*
  743. * We might need to add a TTM.
  744. */
  745. if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
  746. ret = ttm_bo_add_ttm(bo, true);
  747. if (ret)
  748. return ret;
  749. }
  750. /*
  751. * Validation has succeeded, move the access and other
  752. * non-mapping-related flag bits from the proposed flags to
  753. * the active flags
  754. */
  755. ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
  756. ~TTM_PL_MASK_MEMTYPE);
  757. return 0;
  758. }
  759. EXPORT_SYMBOL(ttm_buffer_object_validate);
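/*
 * Illustrative sketch: validating a reserved buffer into a new placement.
 * TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED is only an example placement a
 * driver might request; "bo" is assumed to be initialized.
 *
 *	ret = ttm_bo_reserve(bo, true, false, false, 0);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	ret = ttm_buffer_object_validate(bo,
 *			TTM_PL_FLAG_VRAM | TTM_PL_FLAG_CACHED,
 *			true, false);
 *	ttm_bo_unreserve(bo);
 */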
  760. int
  761. ttm_bo_check_placement(struct ttm_buffer_object *bo,
  762. uint32_t set_flags, uint32_t clr_flags)
  763. {
  764. uint32_t new_mask = set_flags | clr_flags;
  765. if ((bo->type == ttm_bo_type_user) &&
  766. (clr_flags & TTM_PL_FLAG_CACHED)) {
  767. printk(KERN_ERR TTM_PFX
  768. "User buffers require cache-coherent memory.\n");
  769. return -EINVAL;
  770. }
  771. if (!capable(CAP_SYS_ADMIN)) {
  772. if (new_mask & TTM_PL_FLAG_NO_EVICT) {
  773. printk(KERN_ERR TTM_PFX "Need to be root to modify"
  774. " NO_EVICT status.\n");
  775. return -EINVAL;
  776. }
  777. if ((clr_flags & bo->mem.placement & TTM_PL_MASK_MEMTYPE) &&
  778. (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
  779. printk(KERN_ERR TTM_PFX
  780. "Incompatible memory specification"
  781. " for NO_EVICT buffer.\n");
  782. return -EINVAL;
  783. }
  784. }
  785. return 0;
  786. }
  787. int ttm_buffer_object_init(struct ttm_bo_device *bdev,
  788. struct ttm_buffer_object *bo,
  789. unsigned long size,
  790. enum ttm_bo_type type,
  791. uint32_t flags,
  792. uint32_t page_alignment,
  793. unsigned long buffer_start,
  794. bool interruptible,
  795. struct file *persistant_swap_storage,
  796. size_t acc_size,
  797. void (*destroy) (struct ttm_buffer_object *))
  798. {
  799. int ret = 0;
  800. unsigned long num_pages;
  801. size += buffer_start & ~PAGE_MASK;
  802. num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
  803. if (num_pages == 0) {
  804. printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
  805. return -EINVAL;
  806. }
  807. bo->destroy = destroy;
  808. spin_lock_init(&bo->lock);
  809. kref_init(&bo->kref);
  810. kref_init(&bo->list_kref);
  811. atomic_set(&bo->cpu_writers, 0);
  812. atomic_set(&bo->reserved, 1);
  813. init_waitqueue_head(&bo->event_queue);
  814. INIT_LIST_HEAD(&bo->lru);
  815. INIT_LIST_HEAD(&bo->ddestroy);
  816. INIT_LIST_HEAD(&bo->swap);
  817. bo->bdev = bdev;
  818. bo->type = type;
  819. bo->num_pages = num_pages;
  820. bo->mem.mem_type = TTM_PL_SYSTEM;
  821. bo->mem.num_pages = bo->num_pages;
  822. bo->mem.mm_node = NULL;
  823. bo->mem.page_alignment = page_alignment;
  824. bo->buffer_start = buffer_start & PAGE_MASK;
  825. bo->priv_flags = 0;
  826. bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
  827. bo->seq_valid = false;
  828. bo->persistant_swap_storage = persistant_swap_storage;
  829. bo->acc_size = acc_size;
  830. ret = ttm_bo_check_placement(bo, flags, 0ULL);
  831. if (unlikely(ret != 0))
  832. goto out_err;
  833. /*
  834. * If no caching attributes are set, accept any form of caching.
  835. */
  836. if ((flags & TTM_PL_MASK_CACHING) == 0)
  837. flags |= TTM_PL_MASK_CACHING;
  838. /*
  839. * For ttm_bo_type_device buffers, allocate
  840. * address space from the device.
  841. */
  842. if (bo->type == ttm_bo_type_device) {
  843. ret = ttm_bo_setup_vm(bo);
  844. if (ret)
  845. goto out_err;
  846. }
  847. ret = ttm_buffer_object_validate(bo, flags, interruptible, false);
  848. if (ret)
  849. goto out_err;
  850. ttm_bo_unreserve(bo);
  851. return 0;
  852. out_err:
  853. ttm_bo_unreserve(bo);
  854. ttm_bo_unref(&bo);
  855. return ret;
  856. }
  857. EXPORT_SYMBOL(ttm_buffer_object_init);
  858. static inline size_t ttm_bo_size(struct ttm_bo_device *bdev,
  859. unsigned long num_pages)
  860. {
  861. size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
  862. PAGE_MASK;
  863. return bdev->ttm_bo_size + 2 * page_array_size;
  864. }
  865. int ttm_buffer_object_create(struct ttm_bo_device *bdev,
  866. unsigned long size,
  867. enum ttm_bo_type type,
  868. uint32_t flags,
  869. uint32_t page_alignment,
  870. unsigned long buffer_start,
  871. bool interruptible,
  872. struct file *persistant_swap_storage,
  873. struct ttm_buffer_object **p_bo)
  874. {
  875. struct ttm_buffer_object *bo;
  876. int ret;
  877. struct ttm_mem_global *mem_glob = bdev->mem_glob;
  878. size_t acc_size =
  879. ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
  880. ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
  881. if (unlikely(ret != 0))
  882. return ret;
  883. bo = kzalloc(sizeof(*bo), GFP_KERNEL);
  884. if (unlikely(bo == NULL)) {
  885. ttm_mem_global_free(mem_glob, acc_size, false);
  886. return -ENOMEM;
  887. }
  888. ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
  889. page_alignment, buffer_start,
  890. interruptible,
  891. persistant_swap_storage, acc_size, NULL);
  892. if (likely(ret == 0))
  893. *p_bo = bo;
  894. return ret;
  895. }
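/*
 * Illustrative sketch: creating a kernel-internal, cached system-memory
 * buffer with ttm_buffer_object_create(). The size is an arbitrary
 * example value.
 *
 *	struct ttm_buffer_object *bo;
 *	int ret;
 *
 *	ret = ttm_buffer_object_create(bdev, 64 * PAGE_SIZE,
 *				       ttm_bo_type_kernel,
 *				       TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED,
 *				       0, 0, false, NULL, &bo);
 *	if (unlikely(ret != 0))
 *		return ret;
 */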
  896. static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
  897. uint32_t mem_type, bool allow_errors)
  898. {
  899. int ret;
  900. spin_lock(&bo->lock);
  901. ret = ttm_bo_wait(bo, false, false, false);
  902. spin_unlock(&bo->lock);
  903. if (ret && allow_errors)
  904. goto out;
  905. if (bo->mem.mem_type == mem_type)
  906. ret = ttm_bo_evict(bo, mem_type, false, false);
  907. if (ret) {
  908. if (allow_errors) {
  909. goto out;
  910. } else {
  911. ret = 0;
  912. printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
  913. }
  914. }
  915. out:
  916. return ret;
  917. }
  918. static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
  919. struct list_head *head,
  920. unsigned mem_type, bool allow_errors)
  921. {
  922. struct ttm_buffer_object *entry;
  923. int ret;
  924. int put_count;
  925. /*
  926. * Can't use standard list traversal since we're unlocking.
  927. */
  928. spin_lock(&bdev->lru_lock);
  929. while (!list_empty(head)) {
  930. entry = list_first_entry(head, struct ttm_buffer_object, lru);
  931. kref_get(&entry->list_kref);
  932. ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
  933. put_count = ttm_bo_del_from_lru(entry);
  934. spin_unlock(&bdev->lru_lock);
  935. while (put_count--)
  936. kref_put(&entry->list_kref, ttm_bo_ref_bug);
  937. BUG_ON(ret);
  938. ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
  939. ttm_bo_unreserve(entry);
  940. kref_put(&entry->list_kref, ttm_bo_release_list);
  941. spin_lock(&bdev->lru_lock);
  942. }
  943. spin_unlock(&bdev->lru_lock);
  944. return 0;
  945. }
  946. int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
  947. {
  948. struct ttm_mem_type_manager *man = &bdev->man[mem_type];
  949. int ret = -EINVAL;
  950. if (mem_type >= TTM_NUM_MEM_TYPES) {
  951. printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
  952. return ret;
  953. }
  954. if (!man->has_type) {
  955. printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
  956. "memory manager type %u\n", mem_type);
  957. return ret;
  958. }
  959. man->use_type = false;
  960. man->has_type = false;
  961. ret = 0;
  962. if (mem_type > 0) {
  963. ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);
  964. spin_lock(&bdev->lru_lock);
  965. if (drm_mm_clean(&man->manager))
  966. drm_mm_takedown(&man->manager);
  967. else
  968. ret = -EBUSY;
  969. spin_unlock(&bdev->lru_lock);
  970. }
  971. return ret;
  972. }
  973. EXPORT_SYMBOL(ttm_bo_clean_mm);
  974. int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
  975. {
  976. struct ttm_mem_type_manager *man = &bdev->man[mem_type];
  977. if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
  978. printk(KERN_ERR TTM_PFX
  979. "Illegal memory manager memory type %u.\n",
  980. mem_type);
  981. return -EINVAL;
  982. }
  983. if (!man->has_type) {
  984. printk(KERN_ERR TTM_PFX
  985. "Memory type %u has not been initialized.\n",
  986. mem_type);
  987. return 0;
  988. }
  989. return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
  990. }
  991. EXPORT_SYMBOL(ttm_bo_evict_mm);
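/*
 * Illustrative sketch: on suspend or unload a driver typically evicts a
 * memory type and then takes its manager down (TTM_PL_VRAM is only an
 * example type):
 *
 *	ret = ttm_bo_evict_mm(bdev, TTM_PL_VRAM);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	ttm_bo_clean_mm(bdev, TTM_PL_VRAM);
 */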
  992. int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
  993. unsigned long p_offset, unsigned long p_size)
  994. {
  995. int ret = -EINVAL;
  996. struct ttm_mem_type_manager *man;
  997. if (type >= TTM_NUM_MEM_TYPES) {
  998. printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
  999. return ret;
  1000. }
  1001. man = &bdev->man[type];
  1002. if (man->has_type) {
  1003. printk(KERN_ERR TTM_PFX
  1004. "Memory manager already initialized for type %d\n",
  1005. type);
  1006. return ret;
  1007. }
  1008. ret = bdev->driver->init_mem_type(bdev, type, man);
  1009. if (ret)
  1010. return ret;
  1011. ret = 0;
  1012. if (type != TTM_PL_SYSTEM) {
  1013. if (!p_size) {
  1014. printk(KERN_ERR TTM_PFX
  1015. "Zero size memory manager type %d\n",
  1016. type);
  1017. return ret;
  1018. }
  1019. ret = drm_mm_init(&man->manager, p_offset, p_size);
  1020. if (ret)
  1021. return ret;
  1022. }
  1023. man->has_type = true;
  1024. man->use_type = true;
  1025. man->size = p_size;
  1026. INIT_LIST_HEAD(&man->lru);
  1027. return 0;
  1028. }
  1029. EXPORT_SYMBOL(ttm_bo_init_mm);
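/*
 * Illustrative sketch: after ttm_bo_device_init() has registered the
 * mandatory TTM_PL_SYSTEM type, a driver adds its own managed ranges.
 * "vram_size" is a hypothetical byte size; p_size is given in pages.
 *
 *	ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, 0, vram_size >> PAGE_SHIFT);
 *	if (unlikely(ret != 0))
 *		return ret;
 */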
  1030. int ttm_bo_device_release(struct ttm_bo_device *bdev)
  1031. {
  1032. int ret = 0;
  1033. unsigned i = TTM_NUM_MEM_TYPES;
  1034. struct ttm_mem_type_manager *man;
  1035. while (i--) {
  1036. man = &bdev->man[i];
  1037. if (man->has_type) {
  1038. man->use_type = false;
  1039. if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
  1040. ret = -EBUSY;
  1041. printk(KERN_ERR TTM_PFX
  1042. "DRM memory manager type %d "
  1043. "is not clean.\n", i);
  1044. }
  1045. man->has_type = false;
  1046. }
  1047. }
  1048. if (!cancel_delayed_work(&bdev->wq))
  1049. flush_scheduled_work();
  1050. while (ttm_bo_delayed_delete(bdev, true))
  1051. ;
  1052. spin_lock(&bdev->lru_lock);
  1053. if (list_empty(&bdev->ddestroy))
  1054. TTM_DEBUG("Delayed destroy list was clean\n");
  1055. if (list_empty(&bdev->man[0].lru))
  1056. TTM_DEBUG("Swap list was clean\n");
  1057. spin_unlock(&bdev->lru_lock);
  1058. ttm_mem_unregister_shrink(bdev->mem_glob, &bdev->shrink);
  1059. BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
  1060. write_lock(&bdev->vm_lock);
  1061. drm_mm_takedown(&bdev->addr_space_mm);
  1062. write_unlock(&bdev->vm_lock);
  1063. __free_page(bdev->dummy_read_page);
  1064. return ret;
  1065. }
  1066. EXPORT_SYMBOL(ttm_bo_device_release);
  1067. /*
  1068. * This function is intended to be called on drm driver load.
  1069. * If you decide to call it from firstopen, you must protect the call
  1070. * from a potentially racing ttm_bo_driver_finish in lastclose.
  1071. * (This may happen on X server restart).
  1072. */
  1073. int ttm_bo_device_init(struct ttm_bo_device *bdev,
  1074. struct ttm_mem_global *mem_glob,
  1075. struct ttm_bo_driver *driver, uint64_t file_page_offset)
  1076. {
  1077. int ret = -EINVAL;
  1078. bdev->dummy_read_page = NULL;
  1079. rwlock_init(&bdev->vm_lock);
  1080. spin_lock_init(&bdev->lru_lock);
  1081. bdev->driver = driver;
  1082. bdev->mem_glob = mem_glob;
  1083. memset(bdev->man, 0, sizeof(bdev->man));
  1084. bdev->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
  1085. if (unlikely(bdev->dummy_read_page == NULL)) {
  1086. ret = -ENOMEM;
  1087. goto out_err0;
  1088. }
  1089. /*
  1090. * Initialize the system memory buffer type.
  1091. * Other types need to be driver / IOCTL initialized.
  1092. */
  1093. ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
  1094. if (unlikely(ret != 0))
  1095. goto out_err1;
  1096. bdev->addr_space_rb = RB_ROOT;
  1097. ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
  1098. if (unlikely(ret != 0))
  1099. goto out_err2;
  1100. INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
  1101. bdev->nice_mode = true;
  1102. INIT_LIST_HEAD(&bdev->ddestroy);
  1103. INIT_LIST_HEAD(&bdev->swap_lru);
  1104. bdev->dev_mapping = NULL;
  1105. ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
  1106. ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
  1107. if (unlikely(ret != 0)) {
  1108. printk(KERN_ERR TTM_PFX
  1109. "Could not register buffer object swapout.\n");
  1110. goto out_err2;
  1111. }
  1112. bdev->ttm_bo_extra_size =
  1113. ttm_round_pot(sizeof(struct ttm_tt)) +
  1114. ttm_round_pot(sizeof(struct ttm_backend));
  1115. bdev->ttm_bo_size = bdev->ttm_bo_extra_size +
  1116. ttm_round_pot(sizeof(struct ttm_buffer_object));
  1117. return 0;
  1118. out_err2:
  1119. ttm_bo_clean_mm(bdev, 0);
  1120. out_err1:
  1121. __free_page(bdev->dummy_read_page);
  1122. out_err0:
  1123. return ret;
  1124. }
  1125. EXPORT_SYMBOL(ttm_bo_device_init);
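/*
 * Illustrative sketch of the driver-load call described above; "my_dev",
 * "my_bo_driver" and DRM_FILE_PAGE_OFFSET stand for whatever device
 * structure, ttm_bo_driver and mmap offset base the driver uses.
 *
 *	ret = ttm_bo_device_init(&my_dev->bdev, mem_glob, &my_bo_driver,
 *				 DRM_FILE_PAGE_OFFSET);
 *	if (unlikely(ret != 0))
 *		goto out_no_bdev;
 */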
  1126. /*
  1127. * buffer object vm functions.
  1128. */
  1129. bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
  1130. {
  1131. struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
  1132. if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
  1133. if (mem->mem_type == TTM_PL_SYSTEM)
  1134. return false;
  1135. if (man->flags & TTM_MEMTYPE_FLAG_CMA)
  1136. return false;
  1137. if (mem->placement & TTM_PL_FLAG_CACHED)
  1138. return false;
  1139. }
  1140. return true;
  1141. }
  1142. int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
  1143. struct ttm_mem_reg *mem,
  1144. unsigned long *bus_base,
  1145. unsigned long *bus_offset, unsigned long *bus_size)
  1146. {
  1147. struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
  1148. *bus_size = 0;
  1149. if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
  1150. return -EINVAL;
  1151. if (ttm_mem_reg_is_pci(bdev, mem)) {
  1152. *bus_offset = mem->mm_node->start << PAGE_SHIFT;
  1153. *bus_size = mem->num_pages << PAGE_SHIFT;
  1154. *bus_base = man->io_offset;
  1155. }
  1156. return 0;
  1157. }
  1158. void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
  1159. {
  1160. struct ttm_bo_device *bdev = bo->bdev;
  1161. loff_t offset = (loff_t) bo->addr_space_offset;
  1162. loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
  1163. if (!bdev->dev_mapping)
  1164. return;
  1165. unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
  1166. }
  1167. static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
  1168. {
  1169. struct ttm_bo_device *bdev = bo->bdev;
  1170. struct rb_node **cur = &bdev->addr_space_rb.rb_node;
  1171. struct rb_node *parent = NULL;
  1172. struct ttm_buffer_object *cur_bo;
  1173. unsigned long offset = bo->vm_node->start;
  1174. unsigned long cur_offset;
  1175. while (*cur) {
  1176. parent = *cur;
  1177. cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
  1178. cur_offset = cur_bo->vm_node->start;
  1179. if (offset < cur_offset)
  1180. cur = &parent->rb_left;
  1181. else if (offset > cur_offset)
  1182. cur = &parent->rb_right;
  1183. else
  1184. BUG();
  1185. }
  1186. rb_link_node(&bo->vm_rb, parent, cur);
  1187. rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
  1188. }
  1189. /**
  1190. * ttm_bo_setup_vm:
  1191. *
  1192. * @bo: the buffer to allocate address space for
  1193. *
  1194. * Allocate address space in the drm device so that applications
  1195. * can mmap the buffer and access the contents. This only
  1196. * applies to ttm_bo_type_device objects as others are not
  1197. * placed in the drm device address space.
  1198. */
  1199. static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
  1200. {
  1201. struct ttm_bo_device *bdev = bo->bdev;
  1202. int ret;
  1203. retry_pre_get:
  1204. ret = drm_mm_pre_get(&bdev->addr_space_mm);
  1205. if (unlikely(ret != 0))
  1206. return ret;
  1207. write_lock(&bdev->vm_lock);
  1208. bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
  1209. bo->mem.num_pages, 0, 0);
  1210. if (unlikely(bo->vm_node == NULL)) {
  1211. ret = -ENOMEM;
  1212. goto out_unlock;
  1213. }
  1214. bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
  1215. bo->mem.num_pages, 0);
  1216. if (unlikely(bo->vm_node == NULL)) {
  1217. write_unlock(&bdev->vm_lock);
  1218. goto retry_pre_get;
  1219. }
  1220. ttm_bo_vm_insert_rb(bo);
  1221. write_unlock(&bdev->vm_lock);
  1222. bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
  1223. return 0;
  1224. out_unlock:
  1225. write_unlock(&bdev->vm_lock);
  1226. return ret;
  1227. }
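/*
 * Illustrative sketch: the addr_space_offset assigned above is what a
 * driver reports back to user space, which can then map the object
 * through the drm device node (user-space code, names hypothetical):
 *
 *	ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *		   drm_fd, bo_addr_space_offset);
 */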
  1228. int ttm_bo_wait(struct ttm_buffer_object *bo,
  1229. bool lazy, bool interruptible, bool no_wait)
  1230. {
  1231. struct ttm_bo_driver *driver = bo->bdev->driver;
  1232. void *sync_obj;
  1233. void *sync_obj_arg;
  1234. int ret = 0;
  1235. if (likely(bo->sync_obj == NULL))
  1236. return 0;
  1237. while (bo->sync_obj) {
  1238. if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
  1239. void *tmp_obj = bo->sync_obj;
  1240. bo->sync_obj = NULL;
  1241. clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
  1242. spin_unlock(&bo->lock);
  1243. driver->sync_obj_unref(&tmp_obj);
  1244. spin_lock(&bo->lock);
  1245. continue;
  1246. }
  1247. if (no_wait)
  1248. return -EBUSY;
  1249. sync_obj = driver->sync_obj_ref(bo->sync_obj);
  1250. sync_obj_arg = bo->sync_obj_arg;
  1251. spin_unlock(&bo->lock);
  1252. ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
  1253. lazy, interruptible);
  1254. if (unlikely(ret != 0)) {
  1255. driver->sync_obj_unref(&sync_obj);
  1256. spin_lock(&bo->lock);
  1257. return ret;
  1258. }
  1259. spin_lock(&bo->lock);
  1260. if (likely(bo->sync_obj == sync_obj &&
  1261. bo->sync_obj_arg == sync_obj_arg)) {
  1262. void *tmp_obj = bo->sync_obj;
  1263. bo->sync_obj = NULL;
  1264. clear_bit(TTM_BO_PRIV_FLAG_MOVING,
  1265. &bo->priv_flags);
  1266. spin_unlock(&bo->lock);
  1267. driver->sync_obj_unref(&sync_obj);
  1268. driver->sync_obj_unref(&tmp_obj);
  1269. spin_lock(&bo->lock);
  1270. }
  1271. }
  1272. return 0;
  1273. }
  1274. EXPORT_SYMBOL(ttm_bo_wait);
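/*
 * Illustrative sketch of the locking protocol around ttm_bo_wait(): the
 * caller holds bo->lock, which the function may drop and re-take while
 * waiting on the sync object.
 *
 *	spin_lock(&bo->lock);
 *	ret = ttm_bo_wait(bo, false, true, false);
 *	spin_unlock(&bo->lock);
 */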
  1275. void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
  1276. {
  1277. atomic_set(&bo->reserved, 0);
  1278. wake_up_all(&bo->event_queue);
  1279. }
  1280. int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
  1281. bool no_wait)
  1282. {
  1283. int ret;
  1284. while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
  1285. if (no_wait)
  1286. return -EBUSY;
  1287. else if (interruptible) {
  1288. ret = wait_event_interruptible
  1289. (bo->event_queue, atomic_read(&bo->reserved) == 0);
  1290. if (unlikely(ret != 0))
  1291. return -ERESTART;
  1292. } else {
  1293. wait_event(bo->event_queue,
  1294. atomic_read(&bo->reserved) == 0);
  1295. }
  1296. }
  1297. return 0;
  1298. }
  1299. int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
  1300. {
  1301. int ret = 0;
  1302. /*
  1303. * Using ttm_bo_reserve instead of ttm_bo_block_reservation
  1304. * makes sure the lru lists are updated.
  1305. */
  1306. ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
  1307. if (unlikely(ret != 0))
  1308. return ret;
  1309. spin_lock(&bo->lock);
  1310. ret = ttm_bo_wait(bo, false, true, no_wait);
  1311. spin_unlock(&bo->lock);
  1312. if (likely(ret == 0))
  1313. atomic_inc(&bo->cpu_writers);
  1314. ttm_bo_unreserve(bo);
  1315. return ret;
  1316. }
  1317. void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
  1318. {
  1319. if (atomic_dec_and_test(&bo->cpu_writers))
  1320. wake_up_all(&bo->event_queue);
  1321. }
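/*
 * Illustrative sketch: bracketing CPU writes with the synccpu helpers
 * above. The grab waits for the GPU to go idle and makes the buffer
 * appear busy to ttm_bo_wait_cpu() callers until release.
 *
 *	ret = ttm_bo_synccpu_write_grab(bo, false);
 *	if (unlikely(ret != 0))
 *		return ret;
 *	... CPU writes to the buffer contents ...
 *	ttm_bo_synccpu_write_release(bo);
 */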
  1322. /**
  1323. * A buffer object shrink method that tries to swap out the first
  1324. * buffer object on the ttm_bo_device::swap_lru list.
  1325. */
  1326. static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
  1327. {
  1328. struct ttm_bo_device *bdev =
  1329. container_of(shrink, struct ttm_bo_device, shrink);
  1330. struct ttm_buffer_object *bo;
  1331. int ret = -EBUSY;
  1332. int put_count;
  1333. uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);
  1334. spin_lock(&bdev->lru_lock);
  1335. while (ret == -EBUSY) {
  1336. if (unlikely(list_empty(&bdev->swap_lru))) {
  1337. spin_unlock(&bdev->lru_lock);
  1338. return -EBUSY;
  1339. }
  1340. bo = list_first_entry(&bdev->swap_lru,
  1341. struct ttm_buffer_object, swap);
  1342. kref_get(&bo->list_kref);
  1343. /**
  1344. * Reserve buffer. Since we unlock while sleeping, we need
  1345. * to re-check that nobody removed us from the swap-list while
  1346. * we slept.
  1347. */
  1348. ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
  1349. if (unlikely(ret == -EBUSY)) {
  1350. spin_unlock(&bdev->lru_lock);
  1351. ttm_bo_wait_unreserved(bo, false);
  1352. kref_put(&bo->list_kref, ttm_bo_release_list);
  1353. spin_lock(&bdev->lru_lock);
  1354. }
  1355. }
  1356. BUG_ON(ret != 0);
  1357. put_count = ttm_bo_del_from_lru(bo);
  1358. spin_unlock(&bdev->lru_lock);
  1359. while (put_count--)
  1360. kref_put(&bo->list_kref, ttm_bo_ref_bug);
  1361. /**
  1362. * Wait for GPU, then move to system cached.
  1363. */
  1364. spin_lock(&bo->lock);
  1365. ret = ttm_bo_wait(bo, false, false, false);
  1366. spin_unlock(&bo->lock);
  1367. if (unlikely(ret != 0))
  1368. goto out;
  1369. if ((bo->mem.placement & swap_placement) != swap_placement) {
  1370. struct ttm_mem_reg evict_mem;
  1371. evict_mem = bo->mem;
  1372. evict_mem.mm_node = NULL;
  1373. evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
  1374. evict_mem.mem_type = TTM_PL_SYSTEM;
  1375. ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
  1376. false, false);
  1377. if (unlikely(ret != 0))
  1378. goto out;
  1379. }
  1380. ttm_bo_unmap_virtual(bo);
  1381. /**
  1382. * Swap out. Buffer will be swapped in again as soon as
  1383. * anyone tries to access a ttm page.
  1384. */
  1385. ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
  1386. out:
  1387. /**
  1388. *
  1389. * Unreserve without putting on LRU to avoid swapping out an
  1390. * already swapped buffer.
  1391. */
  1392. atomic_set(&bo->reserved, 0);
  1393. wake_up_all(&bo->event_queue);
  1394. kref_put(&bo->list_kref, ttm_bo_release_list);
  1395. return ret;
  1396. }
  1397. void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
  1398. {
  1399. while (ttm_bo_swapout(&bdev->shrink) == 0)
  1400. ;
  1401. }