vmwgfx_fence.c

/**************************************************************************
 *
 * Copyright © 2011 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "drmP.h"
#include "vmwgfx_drv.h"

#define VMW_FENCE_WRAP (1 << 31)

struct vmw_fence_manager {
        int num_fence_objects;
        struct vmw_private *dev_priv;
        spinlock_t lock;
        u32 next_seqno;
        struct list_head fence_list;
        struct work_struct work;
        u32 user_fence_size;
        u32 fence_size;
        bool fifo_down;
        struct list_head cleanup_list;
};

struct vmw_user_fence {
        struct ttm_base_object base;
        struct vmw_fence_obj fence;
};

/**
 * vmw_fence_obj_destroy_locked - Final kref callback for a fence object.
 *
 * Removes the fence from the fence manager's list and decrements the
 * object count, then calls the fence's destroy callback (or kfree).
 * Called with the fence manager's lock held; the lock is temporarily
 * released around the destroy callback and re-acquired before returning.
 */
static void vmw_fence_obj_destroy_locked(struct kref *kref)
{
        struct vmw_fence_obj *fence =
                container_of(kref, struct vmw_fence_obj, kref);

        struct vmw_fence_manager *fman = fence->fman;
        unsigned int num_fences;

        list_del_init(&fence->head);
        num_fences = --fman->num_fence_objects;
        spin_unlock_irq(&fman->lock);
        if (fence->destroy)
                fence->destroy(fence);
        else
                kfree(fence);

        spin_lock_irq(&fman->lock);
}

/**
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */
static void vmw_fence_work_func(struct work_struct *work)
{
        struct vmw_fence_manager *fman =
                container_of(work, struct vmw_fence_manager, work);
        struct list_head list;
        struct vmw_fence_action *action, *next_action;

        do {
                INIT_LIST_HEAD(&list);
                spin_lock_irq(&fman->lock);
                list_splice_init(&fman->cleanup_list, &list);
                spin_unlock_irq(&fman->lock);

                if (list_empty(&list))
                        return;

                /*
                 * At this point, only we should be able to manipulate the
                 * list heads of the actions we have on the private list.
                 */

                list_for_each_entry_safe(action, next_action, &list, head) {
                        list_del_init(&action->head);
                        action->cleanup(action);
                }
        } while (1);
}
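
/**
 * vmw_fence_manager_init - Allocate and initialize a fence manager.
 *
 * Sets up the fence and cleanup lists, the cleanup work item and the
 * memory accounting sizes for kernel and user-space fence objects.
 * Returns NULL on allocation failure.
 */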
struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
        struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

        if (unlikely(fman == NULL))
                return NULL;

        fman->dev_priv = dev_priv;
        spin_lock_init(&fman->lock);
        INIT_LIST_HEAD(&fman->fence_list);
        INIT_LIST_HEAD(&fman->cleanup_list);
        INIT_WORK(&fman->work, &vmw_fence_work_func);
        fman->fifo_down = true;
        fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
        fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));

        return fman;
}
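
/**
 * vmw_fence_manager_takedown - Tear down and free a fence manager.
 *
 * Cancels any pending cleanup work and frees the manager. Both the
 * fence list and the cleanup list must be empty at this point.
 */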
void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
        unsigned long irq_flags;
        bool lists_empty;

        (void) cancel_work_sync(&fman->work);

        spin_lock_irqsave(&fman->lock, irq_flags);
        lists_empty = list_empty(&fman->fence_list) &&
                list_empty(&fman->cleanup_list);
        spin_unlock_irqrestore(&fman->lock, irq_flags);

        BUG_ON(!lists_empty);
        kfree(fman);
}
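
/**
 * vmw_fence_obj_init - Initialize a preallocated fence object.
 *
 * Sets up the fence fields and adds the fence to the manager's fence
 * list. Returns -EBUSY if the fifo is down, 0 otherwise.
 */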
static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
                              struct vmw_fence_obj *fence,
                              u32 seqno,
                              uint32_t mask,
                              void (*destroy) (struct vmw_fence_obj *fence))
{
        unsigned long irq_flags;
        unsigned int num_fences;
        int ret = 0;

        fence->seqno = seqno;
        INIT_LIST_HEAD(&fence->seq_passed_actions);
        fence->fman = fman;
        fence->signaled = 0;
        fence->signal_mask = mask;
        kref_init(&fence->kref);
        fence->destroy = destroy;
        init_waitqueue_head(&fence->queue);

        spin_lock_irqsave(&fman->lock, irq_flags);
        if (unlikely(fman->fifo_down)) {
                ret = -EBUSY;
                goto out_unlock;
        }
        list_add_tail(&fence->head, &fman->fence_list);
        num_fences = ++fman->num_fence_objects;

out_unlock:
        spin_unlock_irqrestore(&fman->lock, irq_flags);
        return ret;
}

struct vmw_fence_obj *vmw_fence_obj_reference(struct vmw_fence_obj *fence)
{
        if (unlikely(fence == NULL))
                return NULL;

        kref_get(&fence->kref);
        return fence;
}

/**
 * vmw_fence_obj_unreference
 *
 * Note that this function may not be entered with disabled irqs since
 * it may re-enable them in the destroy function.
 *
 */
void vmw_fence_obj_unreference(struct vmw_fence_obj **fence_p)
{
        struct vmw_fence_obj *fence = *fence_p;
        struct vmw_fence_manager *fman;

        if (unlikely(fence == NULL))
                return;

        fman = fence->fman;
        *fence_p = NULL;
        spin_lock_irq(&fman->lock);
        BUG_ON(atomic_read(&fence->kref.refcount) == 0);
        kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
        spin_unlock_irq(&fman->lock);
}
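
/**
 * vmw_fences_perform_actions - Run the seq_passed callbacks of a list
 * of fence actions.
 *
 * Called with the fence manager's lock held. Actions with a cleanup
 * callback are moved to the cleanup list so that the cleanup is
 * performed later by the worker task rather than in atomic context.
 */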
void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
                                struct list_head *list)
{
        struct vmw_fence_action *action, *next_action;

        list_for_each_entry_safe(action, next_action, list, head) {
                list_del_init(&action->head);
                if (action->seq_passed != NULL)
                        action->seq_passed(action);

                /*
                 * Add the cleanup action to the cleanup list so that
                 * it will be performed by a worker task.
                 */

                if (action->cleanup != NULL)
                        list_add_tail(&action->head, &fman->cleanup_list);
        }
}
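
/**
 * vmw_fences_update - Mark fences whose seqno has passed as signaled.
 *
 * Walks the fence list and, for every fence whose seqno is at or before
 * @seqno (modulo wrap), sets the EXEC signaled flag, runs its seq_passed
 * actions and wakes up waiters. Schedules the cleanup worker if any
 * cleanup actions were queued.
 */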
void vmw_fences_update(struct vmw_fence_manager *fman, u32 seqno)
{
        unsigned long flags;
        struct vmw_fence_obj *fence, *next_fence;
        struct list_head action_list;

        spin_lock_irqsave(&fman->lock, flags);
        list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
                if (seqno - fence->seqno < VMW_FENCE_WRAP) {
                        list_del_init(&fence->head);
                        fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
                        INIT_LIST_HEAD(&action_list);
                        list_splice_init(&fence->seq_passed_actions,
                                         &action_list);
                        vmw_fences_perform_actions(fman, &action_list);
                        wake_up_all(&fence->queue);
                }
        }
        if (!list_empty(&fman->cleanup_list))
                (void) schedule_work(&fman->work);
        spin_unlock_irqrestore(&fman->lock, flags);
}
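
/**
 * vmw_fence_obj_signaled - Check whether a fence object has signaled.
 *
 * Returns true if all flags in @flags (restricted to the fence's signal
 * mask) have signaled. If the EXEC flag is still pending, the device
 * seqno is read from the fifo and the fence list is updated before
 * rechecking.
 */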
bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence,
                            uint32_t flags)
{
        struct vmw_fence_manager *fman = fence->fman;
        unsigned long irq_flags;
        uint32_t signaled;

        spin_lock_irqsave(&fman->lock, irq_flags);
        signaled = fence->signaled;
        spin_unlock_irqrestore(&fman->lock, irq_flags);

        flags &= fence->signal_mask;
        if ((signaled & flags) == flags)
                return 1;

        if ((signaled & DRM_VMW_FENCE_FLAG_EXEC) == 0) {
                struct vmw_private *dev_priv = fman->dev_priv;
                __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
                u32 seqno;

                seqno = ioread32(fifo_mem + SVGA_FIFO_FENCE);
                vmw_fences_update(fman, seqno);
        }

        spin_lock_irqsave(&fman->lock, irq_flags);
        signaled = fence->signaled;
        spin_unlock_irqrestore(&fman->lock, irq_flags);

        return ((signaled & flags) == flags);
}
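
/**
 * vmw_fence_obj_wait - Wait for a fence object to signal.
 *
 * Pings the host, registers a seqno waiter and waits (interruptibly if
 * requested) until the fence signals or @timeout jiffies have elapsed.
 * Returns 0 on success, -EBUSY on timeout or -ERESTARTSYS if the wait
 * was interrupted.
 */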
int vmw_fence_obj_wait(struct vmw_fence_obj *fence,
                       uint32_t flags, bool lazy,
                       bool interruptible, unsigned long timeout)
{
        struct vmw_private *dev_priv = fence->fman->dev_priv;
        long ret;

        if (likely(vmw_fence_obj_signaled(fence, flags)))
                return 0;

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
        vmw_seqno_waiter_add(dev_priv);

        if (interruptible)
                ret = wait_event_interruptible_timeout
                        (fence->queue,
                         vmw_fence_obj_signaled(fence, flags),
                         timeout);
        else
                ret = wait_event_timeout
                        (fence->queue,
                         vmw_fence_obj_signaled(fence, flags),
                         timeout);

        vmw_seqno_waiter_remove(dev_priv);

        if (unlikely(ret == 0))
                ret = -EBUSY;
        else if (likely(ret > 0))
                ret = 0;

        return ret;
}

void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
{
        struct vmw_private *dev_priv = fence->fman->dev_priv;

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
        struct vmw_fence_manager *fman = fence->fman;

        kfree(fence);
        /*
         * Free kernel space accounting.
         */
        ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
                            fman->fence_size);
}
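
/**
 * vmw_fence_create - Allocate and initialize a kernel fence object.
 *
 * Charges the TTM memory accounting for the fence size, allocates the
 * fence and initializes it with vmw_fence_destroy as its destructor.
 * On success the new fence is returned in @p_fence.
 */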
int vmw_fence_create(struct vmw_fence_manager *fman,
                     uint32_t seqno,
                     uint32_t mask,
                     struct vmw_fence_obj **p_fence)
{
        struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
        struct vmw_fence_obj *fence;
        int ret;

        ret = ttm_mem_global_alloc(mem_glob, fman->fence_size,
                                   false, false);
        if (unlikely(ret != 0))
                return ret;

        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (unlikely(fence == NULL)) {
                ret = -ENOMEM;
                goto out_no_object;
        }

        ret = vmw_fence_obj_init(fman, fence, seqno, mask,
                                 vmw_fence_destroy);
        if (unlikely(ret != 0))
                goto out_err_init;

        *p_fence = fence;
        return 0;

out_err_init:
        kfree(fence);
out_no_object:
        ttm_mem_global_free(mem_glob, fman->fence_size);
        return ret;
}

static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
        struct vmw_user_fence *ufence =
                container_of(fence, struct vmw_user_fence, fence);
        struct vmw_fence_manager *fman = fence->fman;

        kfree(ufence);
        /*
         * Free kernel space accounting.
         */
        ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
                            fman->user_fence_size);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_fence *ufence =
                container_of(base, struct vmw_user_fence, base);
        struct vmw_fence_obj *fence = &ufence->fence;

        *p_base = NULL;
        vmw_fence_obj_unreference(&fence);
}
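
/**
 * vmw_user_fence_create - Create a fence object visible to user-space.
 *
 * Like vmw_fence_create, but wraps the fence in a TTM base object so
 * that it can be looked up by handle from the given file. Returns the
 * fence in @p_fence and its handle in @p_handle.
 */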
int vmw_user_fence_create(struct drm_file *file_priv,
                          struct vmw_fence_manager *fman,
                          uint32_t seqno,
                          uint32_t mask,
                          struct vmw_fence_obj **p_fence,
                          uint32_t *p_handle)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_user_fence *ufence;
        struct vmw_fence_obj *tmp;
        struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
        int ret;

        /*
         * Kernel memory space accounting, since this object may
         * be created by a user-space request.
         */

        ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
                                   false, false);
        if (unlikely(ret != 0))
                return ret;

        ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
        if (unlikely(ufence == NULL)) {
                ret = -ENOMEM;
                goto out_no_object;
        }

        ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
                                 mask, vmw_user_fence_destroy);
        if (unlikely(ret != 0)) {
                kfree(ufence);
                goto out_no_object;
        }

        /*
         * The base object holds a reference which is freed in
         * vmw_user_fence_base_release.
         */
        tmp = vmw_fence_obj_reference(&ufence->fence);
        ret = ttm_base_object_init(tfile, &ufence->base, false,
                                   VMW_RES_FENCE,
                                   &vmw_user_fence_base_release, NULL);

        if (unlikely(ret != 0)) {
                /*
                 * Free the base object's reference
                 */
                vmw_fence_obj_unreference(&tmp);
                goto out_err;
        }

        *p_fence = &ufence->fence;
        *p_handle = ufence->base.hash.key;

        return 0;
out_err:
        tmp = &ufence->fence;
        vmw_fence_obj_unreference(&tmp);
out_no_object:
        ttm_mem_global_free(mem_glob, fman->user_fence_size);
        return ret;
}

/**
 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 */
void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
        unsigned long irq_flags;
        struct list_head action_list;
        int ret;

        /*
         * The list may be altered while we traverse it, so always
         * restart when we've released the fman->lock.
         */

        spin_lock_irqsave(&fman->lock, irq_flags);
        fman->fifo_down = true;
        while (!list_empty(&fman->fence_list)) {
                struct vmw_fence_obj *fence =
                        list_entry(fman->fence_list.prev, struct vmw_fence_obj,
                                   head);
                kref_get(&fence->kref);
                spin_unlock_irq(&fman->lock);

                ret = vmw_fence_obj_wait(fence, fence->signal_mask,
                                         false, false,
                                         VMW_FENCE_WAIT_TIMEOUT);

                if (unlikely(ret != 0)) {
                        list_del_init(&fence->head);
                        fence->signaled |= DRM_VMW_FENCE_FLAG_EXEC;
                        INIT_LIST_HEAD(&action_list);
                        list_splice_init(&fence->seq_passed_actions,
                                         &action_list);
                        vmw_fences_perform_actions(fman, &action_list);
                        wake_up_all(&fence->queue);
                }

                spin_lock_irq(&fman->lock);

                BUG_ON(!list_empty(&fence->head));
                kref_put(&fence->kref, vmw_fence_obj_destroy_locked);
        }
        spin_unlock_irqrestore(&fman->lock, irq_flags);
}

void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
        unsigned long irq_flags;

        spin_lock_irqsave(&fman->lock, irq_flags);
        fman->fifo_down = false;
        spin_unlock_irqrestore(&fman->lock, irq_flags);
}
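
/**
 * vmw_fence_obj_wait_ioctl - Ioctl to wait on a user-space fence handle.
 *
 * Converts the user-supplied timeout in microseconds to jiffies, looks
 * up the fence by handle and waits for it to signal. If requested
 * through the wait options, the handle's usage reference is dropped
 * after a successful wait.
 */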
int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct drm_vmw_fence_wait_arg *arg =
                (struct drm_vmw_fence_wait_arg *)data;
        unsigned long timeout;
        struct ttm_base_object *base;
        struct vmw_fence_obj *fence;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;
        uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

        /*
         * 64-bit division not present on 32-bit systems, so do an
         * approximation. (Divide by 1000000).
         */

        wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
                (wait_timeout >> 26);

        if (!arg->cookie_valid) {
                arg->cookie_valid = 1;
                arg->kernel_cookie = jiffies + wait_timeout;
        }

        base = ttm_base_object_lookup(tfile, arg->handle);
        if (unlikely(base == NULL)) {
                printk(KERN_ERR "Wait invalid fence object handle "
                       "0x%08lx.\n",
                       (unsigned long)arg->handle);
                return -EINVAL;
        }

        fence = &(container_of(base, struct vmw_user_fence, base)->fence);

        timeout = jiffies;
        if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
                ret = ((vmw_fence_obj_signaled(fence, arg->flags)) ?
                       0 : -EBUSY);
                goto out;
        }

        timeout = (unsigned long)arg->kernel_cookie - timeout;

        ret = vmw_fence_obj_wait(fence, arg->flags, arg->lazy, true, timeout);

out:
        ttm_base_object_unref(&base);

        /*
         * Optionally unref the fence object.
         */

        if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
                return ttm_ref_object_base_unref(tfile, arg->handle,
                                                 TTM_REF_USAGE);
        return ret;
}
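
/**
 * vmw_fence_obj_signaled_ioctl - Ioctl to query a fence's signaled state.
 *
 * Looks up the fence by handle and reports whether the requested flags
 * have signaled, together with the currently signaled flags and the last
 * seqno the device has passed.
 */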
int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        struct drm_vmw_fence_signaled_arg *arg =
                (struct drm_vmw_fence_signaled_arg *) data;
        struct ttm_base_object *base;
        struct vmw_fence_obj *fence;
        struct vmw_fence_manager *fman;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_private *dev_priv = vmw_priv(dev);

        base = ttm_base_object_lookup(tfile, arg->handle);
        if (unlikely(base == NULL)) {
                printk(KERN_ERR "Fence signaled invalid fence object handle "
                       "0x%08lx.\n",
                       (unsigned long)arg->handle);
                return -EINVAL;
        }

        fence = &(container_of(base, struct vmw_user_fence, base)->fence);
        fman = fence->fman;

        arg->signaled = vmw_fence_obj_signaled(fence, arg->flags);
        spin_lock_irq(&fman->lock);

        arg->signaled_flags = fence->signaled;
        arg->passed_seqno = dev_priv->last_read_seqno;
        spin_unlock_irq(&fman->lock);

        ttm_base_object_unref(&base);

        return 0;
}
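
/**
 * vmw_fence_obj_unref_ioctl - Ioctl to drop a user-space fence reference.
 *
 * Removes the calling file's usage reference on the fence object handle.
 */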
int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_fence_arg *arg =
                (struct drm_vmw_fence_arg *) data;

        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         arg->handle,
                                         TTM_REF_USAGE);
}