ttm_bo.c

/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/file.h>
#include <linux/module.h>

#define TTM_ASSERT_LOCKED(param)
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);

static inline uint32_t ttm_bo_type_flags(unsigned type)
{
        return 1 << (type);
}

static void ttm_bo_release_list(struct kref *list_kref)
{
        struct ttm_buffer_object *bo =
            container_of(list_kref, struct ttm_buffer_object, list_kref);
        struct ttm_bo_device *bdev = bo->bdev;

        BUG_ON(atomic_read(&bo->list_kref.refcount));
        BUG_ON(atomic_read(&bo->kref.refcount));
        BUG_ON(atomic_read(&bo->cpu_writers));
        BUG_ON(bo->sync_obj != NULL);
        BUG_ON(bo->mem.mm_node != NULL);
        BUG_ON(!list_empty(&bo->lru));
        BUG_ON(!list_empty(&bo->ddestroy));

        if (bo->ttm)
                ttm_tt_destroy(bo->ttm);
        if (bo->destroy)
                bo->destroy(bo);
        else {
                ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false);
                kfree(bo);
        }
}

int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
{
        if (interruptible) {
                int ret = 0;

                ret = wait_event_interruptible(bo->event_queue,
                                atomic_read(&bo->reserved) == 0);
                if (unlikely(ret != 0))
                        return -ERESTART;
        } else {
                wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
        }
        return 0;
}

static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man;

        BUG_ON(!atomic_read(&bo->reserved));

        if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {

                BUG_ON(!list_empty(&bo->lru));

                man = &bdev->man[bo->mem.mem_type];
                list_add_tail(&bo->lru, &man->lru);
                kref_get(&bo->list_kref);

                if (bo->ttm != NULL) {
                        list_add_tail(&bo->swap, &bdev->swap_lru);
                        kref_get(&bo->list_kref);
                }
        }
}

/**
 * Call with the lru_lock held.
 */

static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
        int put_count = 0;

        if (!list_empty(&bo->swap)) {
                list_del_init(&bo->swap);
                ++put_count;
        }
        if (!list_empty(&bo->lru)) {
                list_del_init(&bo->lru);
                ++put_count;
        }

        /*
         * TODO: Add a driver hook to delete from
         * driver-specific LRU's here.
         */

        return put_count;
}
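
/*
 * Usage sketch (illustrative only): the returned put_count tells the
 * caller how many list references were dropped. The puts are deferred
 * until after the lru_lock is released, as ttm_bo_reserve() below does:
 *
 *      spin_lock(&bdev->lru_lock);
 *      put_count = ttm_bo_del_from_lru(bo);
 *      spin_unlock(&bdev->lru_lock);
 *      while (put_count--)
 *              kref_put(&bo->list_kref, ttm_bo_ref_bug);
 */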

int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
                          bool interruptible,
                          bool no_wait, bool use_sequence, uint32_t sequence)
{
        struct ttm_bo_device *bdev = bo->bdev;
        int ret;

        while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
                if (use_sequence && bo->seq_valid &&
                    (sequence - bo->val_seq < (1 << 31))) {
                        return -EAGAIN;
                }

                if (no_wait)
                        return -EBUSY;

                spin_unlock(&bdev->lru_lock);
                ret = ttm_bo_wait_unreserved(bo, interruptible);
                spin_lock(&bdev->lru_lock);

                if (unlikely(ret))
                        return ret;
        }

        if (use_sequence) {
                bo->val_seq = sequence;
                bo->seq_valid = true;
        } else {
                bo->seq_valid = false;
        }

        return 0;
}

static void ttm_bo_ref_bug(struct kref *list_kref)
{
        BUG();
}

int ttm_bo_reserve(struct ttm_buffer_object *bo,
                   bool interruptible,
                   bool no_wait, bool use_sequence, uint32_t sequence)
{
        struct ttm_bo_device *bdev = bo->bdev;
        int put_count = 0;
        int ret;

        spin_lock(&bdev->lru_lock);
        ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
                                    sequence);
        if (likely(ret == 0))
                put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&bdev->lru_lock);

        while (put_count--)
                kref_put(&bo->list_kref, ttm_bo_ref_bug);

        return ret;
}
EXPORT_SYMBOL(ttm_bo_reserve);

void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;

        spin_lock(&bdev->lru_lock);
        ttm_bo_add_to_lru(bo);
        atomic_set(&bo->reserved, 0);
        wake_up_all(&bo->event_queue);
        spin_unlock(&bdev->lru_lock);
}
EXPORT_SYMBOL(ttm_bo_unreserve);
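
/*
 * Usage sketch (illustrative only): reserve/unreserve bracket any
 * operation that may change buffer placement. A typical caller:
 *
 *      ret = ttm_bo_reserve(bo, true, false, false, 0);
 *      if (unlikely(ret != 0))
 *              return ret;
 *      ret = ttm_buffer_object_validate(bo, placement, true, false);
 *      ttm_bo_unreserve(bo);
 *
 * where "placement" is a driver-chosen mask of TTM_PL_FLAG_* bits.
 * Note that ttm_bo_reserve() also takes the buffer off the LRU lists,
 * and ttm_bo_unreserve() puts it back.
 */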

/*
 * Call bo->mutex locked.
 */

static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
{
        struct ttm_bo_device *bdev = bo->bdev;
        int ret = 0;
        uint32_t page_flags = 0;

        TTM_ASSERT_LOCKED(&bo->mutex);
        bo->ttm = NULL;

        switch (bo->type) {
        case ttm_bo_type_device:
                if (zero_alloc)
                        page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
                /* fall through */
        case ttm_bo_type_kernel:
                bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
                                        page_flags, bdev->dummy_read_page);
                if (unlikely(bo->ttm == NULL))
                        ret = -ENOMEM;
                break;
        case ttm_bo_type_user:
                bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
                                        page_flags | TTM_PAGE_FLAG_USER,
                                        bdev->dummy_read_page);
                if (unlikely(bo->ttm == NULL)) {
                        ret = -ENOMEM;
                        break;
                }

                ret = ttm_tt_set_user(bo->ttm, current,
                                      bo->buffer_start, bo->num_pages);
                if (unlikely(ret != 0))
                        ttm_tt_destroy(bo->ttm);
                break;
        default:
                printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
                ret = -EINVAL;
                break;
        }

        return ret;
}

static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
                                  struct ttm_mem_reg *mem,
                                  bool evict, bool interruptible, bool no_wait)
{
        struct ttm_bo_device *bdev = bo->bdev;
        bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
        bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
        struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
        struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
        int ret = 0;

        if (old_is_pci || new_is_pci ||
            ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
                ttm_bo_unmap_virtual(bo);

        /*
         * Create and bind a ttm if required.
         */

        if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
                ret = ttm_bo_add_ttm(bo, false);
                if (ret)
                        goto out_err;

                ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
                if (ret)
                        goto out_err;

                if (mem->mem_type != TTM_PL_SYSTEM) {
                        ret = ttm_tt_bind(bo->ttm, mem);
                        if (ret)
                                goto out_err;
                }

                if (bo->mem.mem_type == TTM_PL_SYSTEM) {
                        struct ttm_mem_reg *old_mem = &bo->mem;
                        uint32_t save_flags = old_mem->placement;

                        *old_mem = *mem;
                        mem->mm_node = NULL;
                        ttm_flag_masked(&save_flags, mem->placement,
                                        TTM_PL_MASK_MEMTYPE);
                        goto moved;
                }
        }

        if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
            !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
                ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
        else if (bdev->driver->move)
                ret = bdev->driver->move(bo, evict, interruptible,
                                         no_wait, mem);
        else
                ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);

        if (ret)
                goto out_err;

moved:
        if (bo->evicted) {
                ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
                if (ret)
                        printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
                bo->evicted = false;
        }

        if (bo->mem.mm_node) {
                spin_lock(&bo->lock);
                bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
                    bdev->man[bo->mem.mem_type].gpu_offset;
                bo->cur_placement = bo->mem.placement;
                spin_unlock(&bo->lock);
        }

        return 0;

out_err:
        new_man = &bdev->man[bo->mem.mem_type];
        if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
                ttm_tt_unbind(bo->ttm);
                ttm_tt_destroy(bo->ttm);
                bo->ttm = NULL;
        }

        return ret;
}

/**
 * If bo idle, remove from delayed- and lru lists, and unref.
 * If not idle, and already on delayed list, do nothing.
 * If not idle, and not on delayed list, put on delayed list,
 * up the list_kref and schedule a delayed list check.
 */

static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_bo_driver *driver = bdev->driver;
        int ret;

        spin_lock(&bo->lock);
        (void) ttm_bo_wait(bo, false, false, !remove_all);

        if (!bo->sync_obj) {
                int put_count;

                spin_unlock(&bo->lock);

                spin_lock(&bdev->lru_lock);
                ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
                BUG_ON(ret);
                if (bo->ttm)
                        ttm_tt_unbind(bo->ttm);

                if (!list_empty(&bo->ddestroy)) {
                        list_del_init(&bo->ddestroy);
                        kref_put(&bo->list_kref, ttm_bo_ref_bug);
                }
                if (bo->mem.mm_node) {
                        drm_mm_put_block(bo->mem.mm_node);
                        bo->mem.mm_node = NULL;
                }
                put_count = ttm_bo_del_from_lru(bo);
                spin_unlock(&bdev->lru_lock);

                atomic_set(&bo->reserved, 0);

                while (put_count--)
                        kref_put(&bo->list_kref, ttm_bo_release_list);

                return 0;
        }

        spin_lock(&bdev->lru_lock);
        if (list_empty(&bo->ddestroy)) {
                void *sync_obj = bo->sync_obj;
                void *sync_obj_arg = bo->sync_obj_arg;

                kref_get(&bo->list_kref);
                list_add_tail(&bo->ddestroy, &bdev->ddestroy);
                spin_unlock(&bdev->lru_lock);
                spin_unlock(&bo->lock);

                if (sync_obj)
                        driver->sync_obj_flush(sync_obj, sync_obj_arg);
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
                ret = 0;

        } else {
                spin_unlock(&bdev->lru_lock);
                spin_unlock(&bo->lock);
                ret = -EBUSY;
        }

        return ret;
}

/**
 * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
 * encountered buffers.
 */

static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
{
        struct ttm_buffer_object *entry, *nentry;
        struct list_head *list, *next;
        int ret;

        spin_lock(&bdev->lru_lock);
        list_for_each_safe(list, next, &bdev->ddestroy) {
                entry = list_entry(list, struct ttm_buffer_object, ddestroy);
                nentry = NULL;

                /*
                 * Protect the next list entry from destruction while we
                 * unlock the lru_lock.
                 */

                if (next != &bdev->ddestroy) {
                        nentry = list_entry(next, struct ttm_buffer_object,
                                            ddestroy);
                        kref_get(&nentry->list_kref);
                }
                kref_get(&entry->list_kref);

                spin_unlock(&bdev->lru_lock);
                ret = ttm_bo_cleanup_refs(entry, remove_all);
                kref_put(&entry->list_kref, ttm_bo_release_list);

                spin_lock(&bdev->lru_lock);
                if (nentry) {
                        bool next_onlist = !list_empty(next);
                        spin_unlock(&bdev->lru_lock);
                        kref_put(&nentry->list_kref, ttm_bo_release_list);
                        spin_lock(&bdev->lru_lock);
                        /*
                         * Someone might have raced us and removed the
                         * next entry from the list. We don't bother restarting
                         * list traversal.
                         */

                        if (!next_onlist)
                                break;
                }
                if (ret)
                        break;
        }
        ret = !list_empty(&bdev->ddestroy);
        spin_unlock(&bdev->lru_lock);

        return ret;
}

static void ttm_bo_delayed_workqueue(struct work_struct *work)
{
        struct ttm_bo_device *bdev =
            container_of(work, struct ttm_bo_device, wq.work);

        if (ttm_bo_delayed_delete(bdev, false)) {
                schedule_delayed_work(&bdev->wq,
                                      ((HZ / 100) < 1) ? 1 : HZ / 100);
        }
}

static void ttm_bo_release(struct kref *kref)
{
        struct ttm_buffer_object *bo =
            container_of(kref, struct ttm_buffer_object, kref);
        struct ttm_bo_device *bdev = bo->bdev;

        if (likely(bo->vm_node != NULL)) {
                rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
                drm_mm_put_block(bo->vm_node);
                bo->vm_node = NULL;
        }
        write_unlock(&bdev->vm_lock);
        ttm_bo_cleanup_refs(bo, false);
        kref_put(&bo->list_kref, ttm_bo_release_list);
        write_lock(&bdev->vm_lock);
}

void ttm_bo_unref(struct ttm_buffer_object **p_bo)
{
        struct ttm_buffer_object *bo = *p_bo;
        struct ttm_bo_device *bdev = bo->bdev;

        *p_bo = NULL;
        write_lock(&bdev->vm_lock);
        kref_put(&bo->kref, ttm_bo_release);
        write_unlock(&bdev->vm_lock);
}
EXPORT_SYMBOL(ttm_bo_unref);

static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
                        bool interruptible, bool no_wait)
{
        int ret = 0;
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_reg evict_mem;
        uint32_t proposed_placement;

        if (bo->mem.mem_type != mem_type)
                goto out;

        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, interruptible, no_wait);
        spin_unlock(&bo->lock);

        if (unlikely(ret != 0)) {
                if (ret != -ERESTART) {
                        printk(KERN_ERR TTM_PFX
                               "Failed to expire sync object before "
                               "buffer eviction.\n");
                }
                goto out;
        }

        BUG_ON(!atomic_read(&bo->reserved));

        evict_mem = bo->mem;
        evict_mem.mm_node = NULL;

        proposed_placement = bdev->driver->evict_flags(bo);

        ret = ttm_bo_mem_space(bo, proposed_placement,
                               &evict_mem, interruptible, no_wait);
        if (unlikely(ret != 0 && ret != -ERESTART))
                ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM,
                                       &evict_mem, interruptible, no_wait);

        if (ret) {
                if (ret != -ERESTART)
                        printk(KERN_ERR TTM_PFX
                               "Failed to find memory space for "
                               "buffer 0x%p eviction.\n", bo);
                goto out;
        }

        ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
                                     no_wait);
        if (ret) {
                if (ret != -ERESTART)
                        printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
                goto out;
        }

        spin_lock(&bdev->lru_lock);
        if (evict_mem.mm_node) {
                drm_mm_put_block(evict_mem.mm_node);
                evict_mem.mm_node = NULL;
        }
        spin_unlock(&bdev->lru_lock);

        bo->evicted = true;
out:
        return ret;
}

/**
 * Repeatedly evict memory from the LRU for @mem_type until we create enough
 * space, or we've evicted everything and there isn't enough space.
 */

static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
                                  struct ttm_mem_reg *mem,
                                  uint32_t mem_type,
                                  bool interruptible, bool no_wait)
{
        struct drm_mm_node *node;
        struct ttm_buffer_object *entry;
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        struct list_head *lru;
        unsigned long num_pages = mem->num_pages;
        int put_count = 0;
        int ret;

retry_pre_get:
        ret = drm_mm_pre_get(&man->manager);
        if (unlikely(ret != 0))
                return ret;

        spin_lock(&bdev->lru_lock);
        do {
                node = drm_mm_search_free(&man->manager, num_pages,
                                          mem->page_alignment, 1);
                if (node)
                        break;

                lru = &man->lru;
                if (list_empty(lru))
                        break;

                entry = list_first_entry(lru, struct ttm_buffer_object, lru);
                kref_get(&entry->list_kref);

                ret = ttm_bo_reserve_locked(entry, interruptible, no_wait,
                                            false, 0);

                if (likely(ret == 0))
                        put_count = ttm_bo_del_from_lru(entry);

                spin_unlock(&bdev->lru_lock);

                if (unlikely(ret != 0))
                        return ret;

                while (put_count--)
                        kref_put(&entry->list_kref, ttm_bo_ref_bug);

                ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);

                ttm_bo_unreserve(entry);

                kref_put(&entry->list_kref, ttm_bo_release_list);
                if (ret)
                        return ret;

                spin_lock(&bdev->lru_lock);
        } while (1);

        if (!node) {
                spin_unlock(&bdev->lru_lock);
                return -ENOMEM;
        }

        node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
        if (unlikely(!node)) {
                spin_unlock(&bdev->lru_lock);
                goto retry_pre_get;
        }

        spin_unlock(&bdev->lru_lock);
        mem->mm_node = node;
        mem->mem_type = mem_type;
        return 0;
}

static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
                                 bool disallow_fixed,
                                 uint32_t mem_type,
                                 uint32_t mask, uint32_t *res_mask)
{
        uint32_t cur_flags = ttm_bo_type_flags(mem_type);

        if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
                return false;

        if ((cur_flags & mask & TTM_PL_MASK_MEM) == 0)
                return false;

        if ((mask & man->available_caching) == 0)
                return false;
        if (mask & man->default_caching)
                cur_flags |= man->default_caching;
        else if (mask & TTM_PL_FLAG_CACHED)
                cur_flags |= TTM_PL_FLAG_CACHED;
        else if (mask & TTM_PL_FLAG_WC)
                cur_flags |= TTM_PL_FLAG_WC;
        else
                cur_flags |= TTM_PL_FLAG_UNCACHED;

        *res_mask = cur_flags;
        return true;
}

/**
 * Creates space for memory region @mem according to its type.
 *
 * This function first searches for free space in compatible memory types in
 * the priority order defined by the driver. If free space isn't found, then
 * ttm_bo_mem_force_space is attempted in priority order to evict and find
 * space.
 */

int ttm_bo_mem_space(struct ttm_buffer_object *bo,
                     uint32_t proposed_placement,
                     struct ttm_mem_reg *mem,
                     bool interruptible, bool no_wait)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct ttm_mem_type_manager *man;

        uint32_t num_prios = bdev->driver->num_mem_type_prio;
        const uint32_t *prios = bdev->driver->mem_type_prio;
        uint32_t i;
        uint32_t mem_type = TTM_PL_SYSTEM;
        uint32_t cur_flags = 0;
        bool type_found = false;
        bool type_ok = false;
        bool has_eagain = false;
        struct drm_mm_node *node = NULL;
        int ret;

        mem->mm_node = NULL;
        for (i = 0; i < num_prios; ++i) {
                mem_type = prios[i];
                man = &bdev->man[mem_type];

                type_ok = ttm_bo_mt_compatible(man,
                                               bo->type == ttm_bo_type_user,
                                               mem_type, proposed_placement,
                                               &cur_flags);

                if (!type_ok)
                        continue;

                if (mem_type == TTM_PL_SYSTEM)
                        break;

                if (man->has_type && man->use_type) {
                        type_found = true;
                        do {
                                ret = drm_mm_pre_get(&man->manager);
                                if (unlikely(ret))
                                        return ret;

                                spin_lock(&bdev->lru_lock);
                                node = drm_mm_search_free(&man->manager,
                                                          mem->num_pages,
                                                          mem->page_alignment,
                                                          1);
                                if (unlikely(!node)) {
                                        spin_unlock(&bdev->lru_lock);
                                        break;
                                }
                                node = drm_mm_get_block_atomic(node,
                                                               mem->num_pages,
                                                               mem->page_alignment);
                                spin_unlock(&bdev->lru_lock);
                        } while (!node);
                }
                if (node)
                        break;
        }

        if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
                mem->mm_node = node;
                mem->mem_type = mem_type;
                mem->placement = cur_flags;
                return 0;
        }

        if (!type_found)
                return -EINVAL;

        num_prios = bdev->driver->num_mem_busy_prio;
        prios = bdev->driver->mem_busy_prio;

        for (i = 0; i < num_prios; ++i) {
                mem_type = prios[i];
                man = &bdev->man[mem_type];

                if (!man->has_type)
                        continue;

                if (!ttm_bo_mt_compatible(man,
                                          bo->type == ttm_bo_type_user,
                                          mem_type,
                                          proposed_placement, &cur_flags))
                        continue;

                ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
                                             interruptible, no_wait);

                if (ret == 0 && mem->mm_node) {
                        mem->placement = cur_flags;
                        return 0;
                }

                if (ret == -ERESTART)
                        has_eagain = true;
        }

        ret = (has_eagain) ? -ERESTART : -ENOMEM;
        return ret;
}
EXPORT_SYMBOL(ttm_bo_mem_space);
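
/*
 * Usage sketch (illustrative only): callers pre-fill @mem with the
 * size and alignment of the request, as ttm_bo_move_buffer() below
 * does:
 *
 *      struct ttm_mem_reg mem;
 *
 *      mem.num_pages = bo->num_pages;
 *      mem.size = mem.num_pages << PAGE_SHIFT;
 *      mem.page_alignment = bo->mem.page_alignment;
 *      ret = ttm_bo_mem_space(bo, proposed_placement, &mem, true, false);
 *
 * On success, mem.mem_type, mem.placement and (for managed types)
 * mem.mm_node describe the space found; the caller owns mem.mm_node
 * until it is consumed by ttm_bo_handle_move_mem() or returned with
 * drm_mm_put_block().
 */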

int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
{
        int ret = 0;

        if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
                return -EBUSY;

        ret = wait_event_interruptible(bo->event_queue,
                                       atomic_read(&bo->cpu_writers) == 0);

        if (ret == -ERESTARTSYS)
                ret = -ERESTART;

        return ret;
}

int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
                       uint32_t proposed_placement,
                       bool interruptible, bool no_wait)
{
        struct ttm_bo_device *bdev = bo->bdev;
        int ret = 0;
        struct ttm_mem_reg mem;

        BUG_ON(!atomic_read(&bo->reserved));

        /*
         * FIXME: It's possible to pipeline buffer moves.
         * Have the driver move function wait for idle when necessary,
         * instead of doing it here.
         */

        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, interruptible, no_wait);
        spin_unlock(&bo->lock);

        if (ret)
                return ret;

        mem.num_pages = bo->num_pages;
        mem.size = mem.num_pages << PAGE_SHIFT;
        mem.page_alignment = bo->mem.page_alignment;

        /*
         * Determine where to move the buffer.
         */

        ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
                               interruptible, no_wait);
        if (ret)
                goto out_unlock;

        ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);

out_unlock:
        if (ret && mem.mm_node) {
                spin_lock(&bdev->lru_lock);
                drm_mm_put_block(mem.mm_node);
                spin_unlock(&bdev->lru_lock);
        }
        return ret;
}

static int ttm_bo_mem_compat(uint32_t proposed_placement,
                             struct ttm_mem_reg *mem)
{
        if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0)
                return 0;
        if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0)
                return 0;

        return 1;
}

int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
                               uint32_t proposed_placement,
                               bool interruptible, bool no_wait)
{
        int ret;

        BUG_ON(!atomic_read(&bo->reserved));
        bo->proposed_placement = proposed_placement;

        TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n",
                  (unsigned long)proposed_placement,
                  (unsigned long)bo->mem.placement);

        /*
         * Check whether we need to move buffer.
         */

        if (!ttm_bo_mem_compat(bo->proposed_placement, &bo->mem)) {
                ret = ttm_bo_move_buffer(bo, bo->proposed_placement,
                                         interruptible, no_wait);
                if (ret) {
                        if (ret != -ERESTART)
                                printk(KERN_ERR TTM_PFX
                                       "Failed moving buffer. "
                                       "Proposed placement 0x%08x\n",
                                       bo->proposed_placement);
                        if (ret == -ENOMEM)
                                printk(KERN_ERR TTM_PFX
                                       "Out of aperture space or "
                                       "DRM memory quota.\n");
                        return ret;
                }
        }

        /*
         * We might need to add a TTM.
         */

        if (bo->mem.mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
                ret = ttm_bo_add_ttm(bo, true);
                if (ret)
                        return ret;
        }

        /*
         * Validation has succeeded, move the access and other
         * non-mapping-related flag bits from the proposed flags to
         * the active flags.
         */

        ttm_flag_masked(&bo->mem.placement, bo->proposed_placement,
                        ~TTM_PL_MASK_MEMTYPE);

        return 0;
}
EXPORT_SYMBOL(ttm_buffer_object_validate);
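
/*
 * Usage sketch (illustrative only): the proposed placement is a
 * bitwise OR of memory-type and caching flags from ttm_placement.h,
 * for example:
 *
 *      ret = ttm_buffer_object_validate(bo,
 *                                       TTM_PL_FLAG_TT | TTM_PL_FLAG_CACHED,
 *                                       true, false);
 *
 * The buffer must be reserved by the caller. A return value of
 * -ERESTART means an interrupted wait; user-space callers would
 * typically restart the ioctl.
 */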

int
ttm_bo_check_placement(struct ttm_buffer_object *bo,
                       uint32_t set_flags, uint32_t clr_flags)
{
        uint32_t new_mask = set_flags | clr_flags;

        if ((bo->type == ttm_bo_type_user) &&
            (clr_flags & TTM_PL_FLAG_CACHED)) {
                printk(KERN_ERR TTM_PFX
                       "User buffers require cache-coherent memory.\n");
                return -EINVAL;
        }

        if (!capable(CAP_SYS_ADMIN)) {
                if (new_mask & TTM_PL_FLAG_NO_EVICT) {
                        printk(KERN_ERR TTM_PFX "Need to be root to modify"
                               " NO_EVICT status.\n");
                        return -EINVAL;
                }

                if ((clr_flags & bo->mem.placement & TTM_PL_MASK_MEMTYPE) &&
                    (bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
                        printk(KERN_ERR TTM_PFX
                               "Incompatible memory specification"
                               " for NO_EVICT buffer.\n");
                        return -EINVAL;
                }
        }
        return 0;
}

int ttm_buffer_object_init(struct ttm_bo_device *bdev,
                           struct ttm_buffer_object *bo,
                           unsigned long size,
                           enum ttm_bo_type type,
                           uint32_t flags,
                           uint32_t page_alignment,
                           unsigned long buffer_start,
                           bool interruptible,
                           struct file *persistant_swap_storage,
                           size_t acc_size,
                           void (*destroy) (struct ttm_buffer_object *))
{
        int ret = 0;
        unsigned long num_pages;

        size += buffer_start & ~PAGE_MASK;
        num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        if (num_pages == 0) {
                printk(KERN_ERR TTM_PFX "Illegal buffer object size.\n");
                return -EINVAL;
        }
        bo->destroy = destroy;

        spin_lock_init(&bo->lock);
        kref_init(&bo->kref);
        kref_init(&bo->list_kref);
        atomic_set(&bo->cpu_writers, 0);
        atomic_set(&bo->reserved, 1);
        init_waitqueue_head(&bo->event_queue);
        INIT_LIST_HEAD(&bo->lru);
        INIT_LIST_HEAD(&bo->ddestroy);
        INIT_LIST_HEAD(&bo->swap);
        bo->bdev = bdev;
        bo->type = type;
        bo->num_pages = num_pages;
        bo->mem.mem_type = TTM_PL_SYSTEM;
        bo->mem.num_pages = bo->num_pages;
        bo->mem.mm_node = NULL;
        bo->mem.page_alignment = page_alignment;
        bo->buffer_start = buffer_start & PAGE_MASK;
        bo->priv_flags = 0;
        bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
        bo->seq_valid = false;
        bo->persistant_swap_storage = persistant_swap_storage;
        bo->acc_size = acc_size;

        ret = ttm_bo_check_placement(bo, flags, 0ULL);
        if (unlikely(ret != 0))
                goto out_err;

        /*
         * If no caching attributes are set, accept any form of caching.
         */

        if ((flags & TTM_PL_MASK_CACHING) == 0)
                flags |= TTM_PL_MASK_CACHING;

        /*
         * For ttm_bo_type_device buffers, allocate
         * address space from the device.
         */

        if (bo->type == ttm_bo_type_device) {
                ret = ttm_bo_setup_vm(bo);
                if (ret)
                        goto out_err;
        }

        ret = ttm_buffer_object_validate(bo, flags, interruptible, false);
        if (ret)
                goto out_err;

        ttm_bo_unreserve(bo);
        return 0;

out_err:
        ttm_bo_unreserve(bo);
        ttm_bo_unref(&bo);

        return ret;
}
EXPORT_SYMBOL(ttm_buffer_object_init);

static inline size_t ttm_bo_size(struct ttm_bo_device *bdev,
                                 unsigned long num_pages)
{
        size_t page_array_size = (num_pages * sizeof(void *) + PAGE_SIZE - 1) &
            PAGE_MASK;

        return bdev->ttm_bo_size + 2 * page_array_size;
}

int ttm_buffer_object_create(struct ttm_bo_device *bdev,
                             unsigned long size,
                             enum ttm_bo_type type,
                             uint32_t flags,
                             uint32_t page_alignment,
                             unsigned long buffer_start,
                             bool interruptible,
                             struct file *persistant_swap_storage,
                             struct ttm_buffer_object **p_bo)
{
        struct ttm_buffer_object *bo;
        int ret;
        struct ttm_mem_global *mem_glob = bdev->mem_glob;

        size_t acc_size =
            ttm_bo_size(bdev, (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
        ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false, false);
        if (unlikely(ret != 0))
                return ret;

        bo = kzalloc(sizeof(*bo), GFP_KERNEL);

        if (unlikely(bo == NULL)) {
                ttm_mem_global_free(mem_glob, acc_size, false);
                return -ENOMEM;
        }

        ret = ttm_buffer_object_init(bdev, bo, size, type, flags,
                                     page_alignment, buffer_start,
                                     interruptible,
                                     persistant_swap_storage, acc_size, NULL);
        if (likely(ret == 0))
                *p_bo = bo;

        return ret;
}
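
/*
 * Usage sketch (illustrative only): creating a 64 KiB cached,
 * device-mappable buffer and dropping the reference again. Error
 * handling is elided:
 *
 *      struct ttm_buffer_object *bo;
 *      int ret;
 *
 *      ret = ttm_buffer_object_create(bdev, 65536, ttm_bo_type_device,
 *                                     TTM_PL_FLAG_SYSTEM |
 *                                     TTM_PL_FLAG_CACHED,
 *                                     0, 0, false, NULL, &bo);
 *      ...
 *      ttm_bo_unref(&bo);
 *
 * On init failure the object is destroyed by ttm_buffer_object_init()
 * through ttm_bo_unref(), so the caller must not kfree() it.
 */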

static int ttm_bo_leave_list(struct ttm_buffer_object *bo,
                             uint32_t mem_type, bool allow_errors)
{
        int ret;

        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, false, false);
        spin_unlock(&bo->lock);

        if (ret && allow_errors)
                goto out;

        if (bo->mem.mem_type == mem_type)
                ret = ttm_bo_evict(bo, mem_type, false, false);

        if (ret) {
                if (allow_errors) {
                        goto out;
                } else {
                        ret = 0;
                        printk(KERN_ERR TTM_PFX "Cleanup eviction failed\n");
                }
        }
out:
        return ret;
}

static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
                                   struct list_head *head,
                                   unsigned mem_type, bool allow_errors)
{
        struct ttm_buffer_object *entry;
        int ret;
        int put_count;

        /*
         * Can't use standard list traversal since we're unlocking.
         */

        spin_lock(&bdev->lru_lock);

        while (!list_empty(head)) {
                entry = list_first_entry(head, struct ttm_buffer_object, lru);
                kref_get(&entry->list_kref);
                ret = ttm_bo_reserve_locked(entry, false, false, false, 0);
                put_count = ttm_bo_del_from_lru(entry);
                spin_unlock(&bdev->lru_lock);
                while (put_count--)
                        kref_put(&entry->list_kref, ttm_bo_ref_bug);
                BUG_ON(ret);
                ret = ttm_bo_leave_list(entry, mem_type, allow_errors);
                ttm_bo_unreserve(entry);
                kref_put(&entry->list_kref, ttm_bo_release_list);
                spin_lock(&bdev->lru_lock);
        }

        spin_unlock(&bdev->lru_lock);

        return 0;
}

int ttm_bo_clean_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];
        int ret = -EINVAL;

        if (mem_type >= TTM_NUM_MEM_TYPES) {
                printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", mem_type);
                return ret;
        }

        if (!man->has_type) {
                printk(KERN_ERR TTM_PFX "Trying to take down uninitialized "
                       "memory manager type %u\n", mem_type);
                return ret;
        }

        man->use_type = false;
        man->has_type = false;

        ret = 0;
        if (mem_type > 0) {
                ttm_bo_force_list_clean(bdev, &man->lru, mem_type, false);

                spin_lock(&bdev->lru_lock);
                if (drm_mm_clean(&man->manager))
                        drm_mm_takedown(&man->manager);
                else
                        ret = -EBUSY;

                spin_unlock(&bdev->lru_lock);
        }

        return ret;
}
EXPORT_SYMBOL(ttm_bo_clean_mm);

int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem_type];

        if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) {
                printk(KERN_ERR TTM_PFX
                       "Illegal memory manager memory type %u.\n",
                       mem_type);
                return -EINVAL;
        }

        if (!man->has_type) {
                printk(KERN_ERR TTM_PFX
                       "Memory type %u has not been initialized.\n",
                       mem_type);
                return 0;
        }

        return ttm_bo_force_list_clean(bdev, &man->lru, mem_type, true);
}
EXPORT_SYMBOL(ttm_bo_evict_mm);
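
/*
 * Usage sketch (illustrative only): a driver that must vacate a whole
 * memory type, for instance VRAM before suspend, can evict all
 * buffers of that type in one call:
 *
 *      ret = ttm_bo_evict_mm(bdev, TTM_PL_VRAM);
 *      if (ret)
 *              printk(KERN_ERR "Failed evicting VRAM buffers: %d\n", ret);
 *
 * Evicted buffers are placed according to the driver's evict_flags()
 * hook, with system memory as the fallback.
 */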

int ttm_bo_init_mm(struct ttm_bo_device *bdev, unsigned type,
                   unsigned long p_offset, unsigned long p_size)
{
        int ret = -EINVAL;
        struct ttm_mem_type_manager *man;

        if (type >= TTM_NUM_MEM_TYPES) {
                printk(KERN_ERR TTM_PFX "Illegal memory type %d\n", type);
                return ret;
        }

        man = &bdev->man[type];
        if (man->has_type) {
                printk(KERN_ERR TTM_PFX
                       "Memory manager already initialized for type %d\n",
                       type);
                return ret;
        }

        ret = bdev->driver->init_mem_type(bdev, type, man);
        if (ret)
                return ret;

        ret = 0;
        if (type != TTM_PL_SYSTEM) {
                if (!p_size) {
                        printk(KERN_ERR TTM_PFX
                               "Zero size memory manager type %d\n",
                               type);
                        return ret;
                }
                ret = drm_mm_init(&man->manager, p_offset, p_size);
                if (ret)
                        return ret;
        }
        man->has_type = true;
        man->use_type = true;
        man->size = p_size;

        INIT_LIST_HEAD(&man->lru);

        return 0;
}
EXPORT_SYMBOL(ttm_bo_init_mm);
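
/*
 * Usage sketch (illustrative only): p_offset and p_size are expressed
 * in pages, so registering a 256 MiB VRAM aperture after device init
 * would look like:
 *
 *      ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM, 0,
 *                           (256 * 1024 * 1024) >> PAGE_SHIFT);
 *
 * The driver's init_mem_type() hook fills in the manager's flags,
 * caching modes and gpu_offset for the type being registered.
 */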

int ttm_bo_device_release(struct ttm_bo_device *bdev)
{
        int ret = 0;
        unsigned i = TTM_NUM_MEM_TYPES;
        struct ttm_mem_type_manager *man;

        while (i--) {
                man = &bdev->man[i];
                if (man->has_type) {
                        man->use_type = false;
                        if ((i != TTM_PL_SYSTEM) && ttm_bo_clean_mm(bdev, i)) {
                                ret = -EBUSY;
                                printk(KERN_ERR TTM_PFX
                                       "DRM memory manager type %d "
                                       "is not clean.\n", i);
                        }
                        man->has_type = false;
                }
        }

        if (!cancel_delayed_work(&bdev->wq))
                flush_scheduled_work();

        while (ttm_bo_delayed_delete(bdev, true))
                ;

        spin_lock(&bdev->lru_lock);
        if (list_empty(&bdev->ddestroy))
                TTM_DEBUG("Delayed destroy list was clean\n");

        if (list_empty(&bdev->man[0].lru))
                TTM_DEBUG("Swap list was clean\n");
        spin_unlock(&bdev->lru_lock);

        ttm_mem_unregister_shrink(bdev->mem_glob, &bdev->shrink);
        BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
        write_lock(&bdev->vm_lock);
        drm_mm_takedown(&bdev->addr_space_mm);
        write_unlock(&bdev->vm_lock);

        __free_page(bdev->dummy_read_page);

        return ret;
}
EXPORT_SYMBOL(ttm_bo_device_release);

/*
 * This function is intended to be called on drm driver load.
 * If you decide to call it from firstopen, you must protect the call
 * from a potentially racing ttm_bo_device_release in lastclose.
 * (This may happen on X server restart).
 */

int ttm_bo_device_init(struct ttm_bo_device *bdev,
                       struct ttm_mem_global *mem_glob,
                       struct ttm_bo_driver *driver, uint64_t file_page_offset)
{
        int ret = -EINVAL;

        bdev->dummy_read_page = NULL;
        rwlock_init(&bdev->vm_lock);
        spin_lock_init(&bdev->lru_lock);

        bdev->driver = driver;
        bdev->mem_glob = mem_glob;

        memset(bdev->man, 0, sizeof(bdev->man));

        bdev->dummy_read_page = alloc_page(__GFP_ZERO | GFP_DMA32);
        if (unlikely(bdev->dummy_read_page == NULL)) {
                ret = -ENOMEM;
                goto out_err0;
        }

        /*
         * Initialize the system memory buffer type.
         * Other types need to be driver / IOCTL initialized.
         */
        ret = ttm_bo_init_mm(bdev, TTM_PL_SYSTEM, 0, 0);
        if (unlikely(ret != 0))
                goto out_err1;

        bdev->addr_space_rb = RB_ROOT;
        ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
        if (unlikely(ret != 0))
                goto out_err2;

        INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
        bdev->nice_mode = true;
        INIT_LIST_HEAD(&bdev->ddestroy);
        INIT_LIST_HEAD(&bdev->swap_lru);
        bdev->dev_mapping = NULL;
        ttm_mem_init_shrink(&bdev->shrink, ttm_bo_swapout);
        ret = ttm_mem_register_shrink(mem_glob, &bdev->shrink);
        if (unlikely(ret != 0)) {
                printk(KERN_ERR TTM_PFX
                       "Could not register buffer object swapout.\n");
                goto out_err2;
        }

        bdev->ttm_bo_extra_size =
            ttm_round_pot(sizeof(struct ttm_tt)) +
            ttm_round_pot(sizeof(struct ttm_backend));

        bdev->ttm_bo_size = bdev->ttm_bo_extra_size +
            ttm_round_pot(sizeof(struct ttm_buffer_object));

        return 0;
out_err2:
        ttm_bo_clean_mm(bdev, 0);
out_err1:
        __free_page(bdev->dummy_read_page);
out_err0:
        return ret;
}
EXPORT_SYMBOL(ttm_bo_device_init);
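
/*
 * Usage sketch (illustrative only, with placeholder names such as
 * dev_priv, my_bo_driver and file_page_offset that are not defined in
 * this file): a typical driver-load sequence.
 *
 *      ret = ttm_bo_device_init(&dev_priv->bdev, dev_priv->mem_glob,
 *                               &my_bo_driver, file_page_offset);
 *      if (ret)
 *              return ret;
 *      ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM, 0, vram_pages);
 *
 * ttm_bo_device_release() undoes this on driver unload.
 */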

/*
 * buffer object vm functions.
 */

bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED)) {
                if (mem->mem_type == TTM_PL_SYSTEM)
                        return false;

                if (man->flags & TTM_MEMTYPE_FLAG_CMA)
                        return false;

                if (mem->placement & TTM_PL_FLAG_CACHED)
                        return false;
        }
        return true;
}

int ttm_bo_pci_offset(struct ttm_bo_device *bdev,
                      struct ttm_mem_reg *mem,
                      unsigned long *bus_base,
                      unsigned long *bus_offset, unsigned long *bus_size)
{
        struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];

        *bus_size = 0;
        if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
                return -EINVAL;

        if (ttm_mem_reg_is_pci(bdev, mem)) {
                *bus_offset = mem->mm_node->start << PAGE_SHIFT;
                *bus_size = mem->num_pages << PAGE_SHIFT;
                *bus_base = man->io_offset;
        }

        return 0;
}
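
/*
 * Usage sketch (illustrative only): a driver that wants a CPU mapping
 * of a PCI-resident buffer combines the three outputs. A *bus_size of
 * zero after a successful call means the buffer is not in a PCI
 * aperture:
 *
 *      unsigned long bus_base, bus_offset, bus_size;
 *
 *      ret = ttm_bo_pci_offset(bdev, &bo->mem, &bus_base,
 *                              &bus_offset, &bus_size);
 *      if (ret == 0 && bus_size != 0)
 *              virtual = ioremap_wc(bus_base + bus_offset, bus_size);
 */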

void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        loff_t offset = (loff_t) bo->addr_space_offset;
        loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;

        if (!bdev->dev_mapping)
                return;

        unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
}

static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        struct rb_node **cur = &bdev->addr_space_rb.rb_node;
        struct rb_node *parent = NULL;
        struct ttm_buffer_object *cur_bo;
        unsigned long offset = bo->vm_node->start;
        unsigned long cur_offset;

        while (*cur) {
                parent = *cur;
                cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
                cur_offset = cur_bo->vm_node->start;
                if (offset < cur_offset)
                        cur = &parent->rb_left;
                else if (offset > cur_offset)
                        cur = &parent->rb_right;
                else
                        BUG();
        }

        rb_link_node(&bo->vm_rb, parent, cur);
        rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
}

/**
 * ttm_bo_setup_vm:
 *
 * @bo: the buffer to allocate address space for
 *
 * Allocate address space in the drm device so that applications
 * can mmap the buffer and access the contents. This only
 * applies to ttm_bo_type_device objects as others are not
 * placed in the drm device address space.
 */

static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
{
        struct ttm_bo_device *bdev = bo->bdev;
        int ret;

retry_pre_get:
        ret = drm_mm_pre_get(&bdev->addr_space_mm);
        if (unlikely(ret != 0))
                return ret;

        write_lock(&bdev->vm_lock);
        bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
                                         bo->mem.num_pages, 0, 0);

        if (unlikely(bo->vm_node == NULL)) {
                ret = -ENOMEM;
                goto out_unlock;
        }

        bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
                                              bo->mem.num_pages, 0);

        if (unlikely(bo->vm_node == NULL)) {
                write_unlock(&bdev->vm_lock);
                goto retry_pre_get;
        }

        ttm_bo_vm_insert_rb(bo);
        write_unlock(&bdev->vm_lock);
        bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;

        return 0;
out_unlock:
        write_unlock(&bdev->vm_lock);
        return ret;
}

int ttm_bo_wait(struct ttm_buffer_object *bo,
                bool lazy, bool interruptible, bool no_wait)
{
        struct ttm_bo_driver *driver = bo->bdev->driver;
        void *sync_obj;
        void *sync_obj_arg;
        int ret = 0;

        if (likely(bo->sync_obj == NULL))
                return 0;

        while (bo->sync_obj) {

                if (driver->sync_obj_signaled(bo->sync_obj, bo->sync_obj_arg)) {
                        void *tmp_obj = bo->sync_obj;
                        bo->sync_obj = NULL;
                        clear_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
                        spin_unlock(&bo->lock);
                        driver->sync_obj_unref(&tmp_obj);
                        spin_lock(&bo->lock);
                        continue;
                }

                if (no_wait)
                        return -EBUSY;

                sync_obj = driver->sync_obj_ref(bo->sync_obj);
                sync_obj_arg = bo->sync_obj_arg;
                spin_unlock(&bo->lock);
                ret = driver->sync_obj_wait(sync_obj, sync_obj_arg,
                                            lazy, interruptible);
                if (unlikely(ret != 0)) {
                        driver->sync_obj_unref(&sync_obj);
                        spin_lock(&bo->lock);
                        return ret;
                }
                spin_lock(&bo->lock);
                if (likely(bo->sync_obj == sync_obj &&
                           bo->sync_obj_arg == sync_obj_arg)) {
                        void *tmp_obj = bo->sync_obj;
                        bo->sync_obj = NULL;
                        clear_bit(TTM_BO_PRIV_FLAG_MOVING,
                                  &bo->priv_flags);
                        spin_unlock(&bo->lock);
                        driver->sync_obj_unref(&sync_obj);
                        driver->sync_obj_unref(&tmp_obj);
                        spin_lock(&bo->lock);
                }
        }
        return 0;
}
EXPORT_SYMBOL(ttm_bo_wait);
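
/*
 * Usage sketch (illustrative only): ttm_bo_wait() is called with
 * bo->lock held and may drop and re-acquire it while waiting:
 *
 *      spin_lock(&bo->lock);
 *      ret = ttm_bo_wait(bo, false, true, false);
 *      spin_unlock(&bo->lock);
 *
 * With lazy == false, interruptible == true and no_wait == false the
 * call blocks until the sync object signals or a signal arrives.
 */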

void ttm_bo_unblock_reservation(struct ttm_buffer_object *bo)
{
        atomic_set(&bo->reserved, 0);
        wake_up_all(&bo->event_queue);
}

int ttm_bo_block_reservation(struct ttm_buffer_object *bo, bool interruptible,
                             bool no_wait)
{
        int ret;

        while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
                if (no_wait)
                        return -EBUSY;
                else if (interruptible) {
                        ret = wait_event_interruptible
                            (bo->event_queue, atomic_read(&bo->reserved) == 0);
                        if (unlikely(ret != 0))
                                return -ERESTART;
                } else {
                        wait_event(bo->event_queue,
                                   atomic_read(&bo->reserved) == 0);
                }
        }
        return 0;
}

int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait)
{
        int ret = 0;

        /*
         * Using ttm_bo_reserve instead of ttm_bo_block_reservation
         * makes sure the lru lists are updated.
         */

        ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
        if (unlikely(ret != 0))
                return ret;
        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, true, no_wait);
        spin_unlock(&bo->lock);
        if (likely(ret == 0))
                atomic_inc(&bo->cpu_writers);
        ttm_bo_unreserve(bo);
        return ret;
}

void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo)
{
        if (atomic_dec_and_test(&bo->cpu_writers))
                wake_up_all(&bo->event_queue);
}
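
/*
 * Usage sketch (illustrative only): the grab/release pair brackets CPU
 * writes to buffer contents; GPU-side users can honor it by calling
 * ttm_bo_wait_cpu() before command submission:
 *
 *      ret = ttm_bo_synccpu_write_grab(bo, false);
 *      if (unlikely(ret != 0))
 *              return ret;
 *      ... write to the buffer through a kernel or user mapping ...
 *      ttm_bo_synccpu_write_release(bo);
 */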

/**
 * A buffer object shrink method that tries to swap out the first
 * buffer object on the bo_device::swap_lru list.
 */

static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
{
        struct ttm_bo_device *bdev =
            container_of(shrink, struct ttm_bo_device, shrink);
        struct ttm_buffer_object *bo;
        int ret = -EBUSY;
        int put_count;
        uint32_t swap_placement = (TTM_PL_FLAG_CACHED | TTM_PL_FLAG_SYSTEM);

        spin_lock(&bdev->lru_lock);
        while (ret == -EBUSY) {
                if (unlikely(list_empty(&bdev->swap_lru))) {
                        spin_unlock(&bdev->lru_lock);
                        return -EBUSY;
                }

                bo = list_first_entry(&bdev->swap_lru,
                                      struct ttm_buffer_object, swap);
                kref_get(&bo->list_kref);

                /*
                 * Reserve buffer. Since we unlock while sleeping, we need
                 * to re-check that nobody removed us from the swap-list while
                 * we slept.
                 */

                ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
                if (unlikely(ret == -EBUSY)) {
                        spin_unlock(&bdev->lru_lock);
                        ttm_bo_wait_unreserved(bo, false);
                        kref_put(&bo->list_kref, ttm_bo_release_list);
                        spin_lock(&bdev->lru_lock);
                }
        }

        BUG_ON(ret != 0);
        put_count = ttm_bo_del_from_lru(bo);
        spin_unlock(&bdev->lru_lock);

        while (put_count--)
                kref_put(&bo->list_kref, ttm_bo_ref_bug);

        /*
         * Wait for GPU, then move to system cached.
         */

        spin_lock(&bo->lock);
        ret = ttm_bo_wait(bo, false, false, false);
        spin_unlock(&bo->lock);

        if (unlikely(ret != 0))
                goto out;

        if ((bo->mem.placement & swap_placement) != swap_placement) {
                struct ttm_mem_reg evict_mem;

                evict_mem = bo->mem;
                evict_mem.mm_node = NULL;
                evict_mem.placement = TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED;
                evict_mem.mem_type = TTM_PL_SYSTEM;

                ret = ttm_bo_handle_move_mem(bo, &evict_mem, true,
                                             false, false);
                if (unlikely(ret != 0))
                        goto out;
        }

        ttm_bo_unmap_virtual(bo);

        /*
         * Swap out. Buffer will be swapped in again as soon as
         * anyone tries to access a ttm page.
         */

        ret = ttm_tt_swapout(bo->ttm, bo->persistant_swap_storage);
out:

        /*
         * Unreserve without putting on LRU to avoid swapping out an
         * already swapped buffer.
         */

        atomic_set(&bo->reserved, 0);
        wake_up_all(&bo->event_queue);
        kref_put(&bo->list_kref, ttm_bo_release_list);
        return ret;
}

void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
{
        while (ttm_bo_swapout(&bdev->shrink) == 0)
                ;
}