/* vmwgfx_overlay.c */
  1. /**************************************************************************
  2. *
  3. * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
  4. * All Rights Reserved.
  5. *
  6. * Permission is hereby granted, free of charge, to any person obtaining a
  7. * copy of this software and associated documentation files (the
  8. * "Software"), to deal in the Software without restriction, including
  9. * without limitation the rights to use, copy, modify, merge, publish,
  10. * distribute, sub license, and/or sell copies of the Software, and to
  11. * permit persons to whom the Software is furnished to do so, subject to
  12. * the following conditions:
  13. *
  14. * The above copyright notice and this permission notice (including the
  15. * next paragraph) shall be included in all copies or substantial portions
  16. * of the Software.
  17. *
  18. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  19. * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  20. * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
  21. * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
  22. * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
  23. * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
  24. * USE OR OTHER DEALINGS IN THE SOFTWARE.
  25. *
  26. **************************************************************************/
  27. #include "drmP.h"
  28. #include "vmwgfx_drv.h"
  29. #include "ttm/ttm_placement.h"
  30. #include "svga_overlay.h"
  31. #include "svga_escape.h"
  32. #define VMW_MAX_NUM_STREAMS 1
/*
 * State for a single overlay stream (one Xv port).
 */
struct vmw_stream {
	/* Backing DMA buffer; NULL when the stream is fully stopped. */
	struct vmw_dma_buffer *buf;
	/* Reserved by a userspace client via vmw_overlay_claim(). */
	bool claimed;
	/* Stopped on the hardware but buffer kept so it can be resumed. */
	bool paused;
	/* Last successful put arguments; replayed on resume. */
	struct drm_vmw_control_stream_arg saved;
};
/**
 * Overlay control
 */
struct vmw_overlay {
	/*
	 * Each stream is a single overlay. In Xv these are called ports.
	 */
	/* Protects the stream array below. */
	struct mutex mutex;
	struct vmw_stream stream[VMW_MAX_NUM_STREAMS];
};
  49. static inline struct vmw_overlay *vmw_overlay(struct drm_device *dev)
  50. {
  51. struct vmw_private *dev_priv = vmw_priv(dev);
  52. return dev_priv ? dev_priv->overlay_priv : NULL;
  53. }
/* FIFO escape command prefix: SVGA_CMD_ESCAPE followed by the escape body. */
struct vmw_escape_header {
	uint32_t cmd;
	SVGAFifoCmdEscape body;
};
/* Escape-wrapped video flush command, appended after register updates. */
struct vmw_escape_video_flush {
	struct vmw_escape_header escape;
	SVGAEscapeVideoFlush flush;
};
/*
 * Initialize an escape header: VMware namespace, with @size being the
 * byte size of the payload that follows the header.
 */
static inline void fill_escape(struct vmw_escape_header *header,
			       uint32_t size)
{
	header->cmd = SVGA_CMD_ESCAPE;
	header->body.nsid = SVGA_ESCAPE_NSID_VMWARE;
	header->body.size = size;
}
/* Build a video-flush escape command for @stream_id. */
static inline void fill_flush(struct vmw_escape_video_flush *cmd,
			      uint32_t stream_id)
{
	fill_escape(&cmd->escape, sizeof(cmd->flush));
	cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;
	cmd->flush.streamId = stream_id;
}
  76. /**
  77. * Pin or unpin a buffer in vram.
  78. *
  79. * @dev_priv: Driver private.
  80. * @buf: DMA buffer to pin or unpin.
  81. * @pin: Pin buffer in vram if true.
  82. * @interruptible: Use interruptible wait.
  83. *
  84. * Takes the current masters ttm lock in read.
  85. *
  86. * Returns
  87. * -ERESTARTSYS if interrupted by a signal.
  88. */
  89. static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
  90. struct vmw_dma_buffer *buf,
  91. bool pin, bool interruptible)
  92. {
  93. struct ttm_buffer_object *bo = &buf->base;
  94. struct ttm_bo_global *glob = bo->glob;
  95. struct ttm_placement *overlay_placement = &vmw_vram_placement;
  96. int ret;
  97. ret = ttm_read_lock(&dev_priv->active_master->lock, interruptible);
  98. if (unlikely(ret != 0))
  99. return ret;
  100. ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
  101. if (unlikely(ret != 0))
  102. goto err;
  103. if (buf->gmr_bound) {
  104. vmw_gmr_unbind(dev_priv, buf->gmr_id);
  105. spin_lock(&glob->lru_lock);
  106. ida_remove(&dev_priv->gmr_ida, buf->gmr_id);
  107. spin_unlock(&glob->lru_lock);
  108. buf->gmr_bound = NULL;
  109. }
  110. if (pin)
  111. overlay_placement = &vmw_vram_ne_placement;
  112. ret = ttm_bo_validate(bo, overlay_placement, interruptible, false);
  113. ttm_bo_unreserve(bo);
  114. err:
  115. ttm_read_unlock(&dev_priv->active_master->lock);
  116. return ret;
  117. }
/**
 * Send put command to hw.
 *
 * Writes a SET_REGS escape programming every video register for the
 * stream, followed by a flush, directly into reserved FIFO space.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_overlay_send_put(struct vmw_private *dev_priv,
				struct vmw_dma_buffer *buf,
				struct drm_vmw_control_stream_arg *arg,
				bool interruptible)
{
	/*
	 * FIFO command layout: escape header, SET_REGS body covering
	 * register ids 0..SVGA_VIDEO_PITCH_3, then a flush command.
	 */
	struct {
		struct vmw_escape_header escape;
		struct {
			struct {
				uint32_t cmdType;
				uint32_t streamId;
			} header;
			struct {
				uint32_t registerId;
				uint32_t value;
			} items[SVGA_VIDEO_PITCH_3 + 1];
		} body;
		struct vmw_escape_video_flush flush;
	} *cmds;
	uint32_t offset;
	int i, ret;

	/*
	 * Retry FIFO reservation until it succeeds; wait for FIFO space
	 * between attempts. Only an interruptible caller may bail out.
	 */
	for (;;) {
		cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
		if (cmds)
			break;

		ret = vmw_fallback_wait(dev_priv, false, true, 0,
					interruptible, 3*HZ);
		if (interruptible && ret == -ERESTARTSYS)
			return ret;
		else
			BUG_ON(ret != 0);
	}

	fill_escape(&cmds->escape, sizeof(cmds->body));
	cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
	cmds->body.header.streamId = arg->stream_id;

	/* One item per register, ids are consecutive from 0. */
	for (i = 0; i <= SVGA_VIDEO_PITCH_3; i++)
		cmds->body.items[i].registerId = i;

	/* Data offset is relative to the buffer's current vram placement. */
	offset = buf->base.offset + arg->offset;

	cmds->body.items[SVGA_VIDEO_ENABLED].value = true;
	cmds->body.items[SVGA_VIDEO_FLAGS].value = arg->flags;
	cmds->body.items[SVGA_VIDEO_DATA_OFFSET].value = offset;
	cmds->body.items[SVGA_VIDEO_FORMAT].value = arg->format;
	cmds->body.items[SVGA_VIDEO_COLORKEY].value = arg->color_key;
	cmds->body.items[SVGA_VIDEO_SIZE].value = arg->size;
	cmds->body.items[SVGA_VIDEO_WIDTH].value = arg->width;
	cmds->body.items[SVGA_VIDEO_HEIGHT].value = arg->height;
	cmds->body.items[SVGA_VIDEO_SRC_X].value = arg->src.x;
	cmds->body.items[SVGA_VIDEO_SRC_Y].value = arg->src.y;
	cmds->body.items[SVGA_VIDEO_SRC_WIDTH].value = arg->src.w;
	cmds->body.items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h;
	cmds->body.items[SVGA_VIDEO_DST_X].value = arg->dst.x;
	cmds->body.items[SVGA_VIDEO_DST_Y].value = arg->dst.y;
	cmds->body.items[SVGA_VIDEO_DST_WIDTH].value = arg->dst.w;
	cmds->body.items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h;
	cmds->body.items[SVGA_VIDEO_PITCH_1].value = arg->pitch[0];
	cmds->body.items[SVGA_VIDEO_PITCH_2].value = arg->pitch[1];
	cmds->body.items[SVGA_VIDEO_PITCH_3].value = arg->pitch[2];

	fill_flush(&cmds->flush, arg->stream_id);

	vmw_fifo_commit(dev_priv, sizeof(*cmds));

	return 0;
}
/**
 * Send stop command to hw.
 *
 * Disables the stream by writing SVGA_VIDEO_ENABLED = false via a
 * SET_REGS escape, followed by a flush.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
				 uint32_t stream_id,
				 bool interruptible)
{
	struct {
		struct vmw_escape_header escape;
		SVGAEscapeVideoSetRegs body;
		struct vmw_escape_video_flush flush;
	} *cmds;
	int ret;

	/* Same retry scheme as vmw_overlay_send_put(). */
	for (;;) {
		cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
		if (cmds)
			break;

		ret = vmw_fallback_wait(dev_priv, false, true, 0,
					interruptible, 3*HZ);
		if (interruptible && ret == -ERESTARTSYS)
			return ret;
		else
			BUG_ON(ret != 0);
	}

	fill_escape(&cmds->escape, sizeof(cmds->body));
	cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
	cmds->body.header.streamId = stream_id;
	/* A single register item: turn the stream off. */
	cmds->body.items[0].registerId = SVGA_VIDEO_ENABLED;
	cmds->body.items[0].value = false;

	fill_flush(&cmds->flush, stream_id);

	vmw_fifo_commit(dev_priv, sizeof(*cmds));

	return 0;
}
/**
 * Stop or pause a stream.
 *
 * If the stream is paused the no evict flag is removed from the buffer
 * but left in vram. This allows for instance mode_set to evict it
 * should it need to.
 *
 * The caller must hold the overlay lock.
 *
 * @stream_id which stream to stop/pause.
 * @pause true to pause, false to stop completely.
 */
static int vmw_overlay_stop(struct vmw_private *dev_priv,
			    uint32_t stream_id, bool pause,
			    bool interruptible)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct vmw_stream *stream = &overlay->stream[stream_id];
	int ret;

	/* no buffer attached the stream is completely stopped */
	if (!stream->buf)
		return 0;

	/* If the stream is paused this is already done */
	if (!stream->paused) {
		ret = vmw_overlay_send_stop(dev_priv, stream_id,
					    interruptible);
		if (ret)
			return ret;

		/* We just remove the NO_EVICT flag so no -ENOMEM */
		ret = vmw_dmabuf_pin_in_vram(dev_priv, stream->buf, false,
					     interruptible);
		if (interruptible && ret == -ERESTARTSYS)
			return ret;
		else
			BUG_ON(ret != 0);
	}

	if (!pause) {
		/* Full stop: drop our reference on the backing buffer. */
		vmw_dmabuf_unreference(&stream->buf);
		stream->paused = false;
	} else {
		/* Pause: keep the buffer so the stream can be resumed. */
		stream->paused = true;
	}

	return 0;
}
/**
 * Update a stream and send any put or stop fifo commands needed.
 *
 * The caller must hold the overlay lock.
 *
 * Returns
 * -ENOMEM if buffer doesn't fit in vram.
 * -ERESTARTSYS if interrupted.
 */
static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
				     struct vmw_dma_buffer *buf,
				     struct drm_vmw_control_stream_arg *arg,
				     bool interruptible)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct vmw_stream *stream = &overlay->stream[arg->stream_id];
	int ret = 0;

	if (!buf)
		return -EINVAL;

	DRM_DEBUG(" %s: old %p, new %p, %spaused\n", __func__,
		  stream->buf, buf, stream->paused ? "" : "not ");

	if (stream->buf != buf) {
		/* New buffer: fully stop the old stream first. */
		ret = vmw_overlay_stop(dev_priv, arg->stream_id,
				       false, interruptible);
		if (ret)
			return ret;
	} else if (!stream->paused) {
		/* If the buffers match and not paused then just send
		 * the put command, no need to do anything else.
		 */
		ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
		if (ret == 0)
			stream->saved = *arg;
		else
			BUG_ON(!interruptible);

		return ret;
	}

	/* We don't start the old stream if we are interrupted.
	 * Might return -ENOMEM if it can't fit the buffer in vram.
	 */
	ret = vmw_dmabuf_pin_in_vram(dev_priv, buf, true, interruptible);
	if (ret)
		return ret;

	ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
	if (ret) {
		/* This one needs to happen no matter what. We only remove
		 * the NO_EVICT flag so this is safe from -ENOMEM.
		 */
		BUG_ON(vmw_dmabuf_pin_in_vram(dev_priv, buf, false, false) != 0);
		return ret;
	}

	/* Take a reference on the new buffer; saved args enable resume. */
	if (stream->buf != buf)
		stream->buf = vmw_dmabuf_reference(buf);
	stream->saved = *arg;

	return 0;
}
  321. /**
  322. * Stop all streams.
  323. *
  324. * Used by the fb code when starting.
  325. *
  326. * Takes the overlay lock.
  327. */
  328. int vmw_overlay_stop_all(struct vmw_private *dev_priv)
  329. {
  330. struct vmw_overlay *overlay = dev_priv->overlay_priv;
  331. int i, ret;
  332. if (!overlay)
  333. return 0;
  334. mutex_lock(&overlay->mutex);
  335. for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
  336. struct vmw_stream *stream = &overlay->stream[i];
  337. if (!stream->buf)
  338. continue;
  339. ret = vmw_overlay_stop(dev_priv, i, false, false);
  340. WARN_ON(ret != 0);
  341. }
  342. mutex_unlock(&overlay->mutex);
  343. return 0;
  344. }
  345. /**
  346. * Try to resume all paused streams.
  347. *
  348. * Used by the kms code after moving a new scanout buffer to vram.
  349. *
  350. * Takes the overlay lock.
  351. */
  352. int vmw_overlay_resume_all(struct vmw_private *dev_priv)
  353. {
  354. struct vmw_overlay *overlay = dev_priv->overlay_priv;
  355. int i, ret;
  356. if (!overlay)
  357. return 0;
  358. mutex_lock(&overlay->mutex);
  359. for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
  360. struct vmw_stream *stream = &overlay->stream[i];
  361. if (!stream->paused)
  362. continue;
  363. ret = vmw_overlay_update_stream(dev_priv, stream->buf,
  364. &stream->saved, false);
  365. if (ret != 0)
  366. DRM_INFO("%s: *warning* failed to resume stream %i\n",
  367. __func__, i);
  368. }
  369. mutex_unlock(&overlay->mutex);
  370. return 0;
  371. }
  372. /**
  373. * Pauses all active streams.
  374. *
  375. * Used by the kms code when moving a new scanout buffer to vram.
  376. *
  377. * Takes the overlay lock.
  378. */
  379. int vmw_overlay_pause_all(struct vmw_private *dev_priv)
  380. {
  381. struct vmw_overlay *overlay = dev_priv->overlay_priv;
  382. int i, ret;
  383. if (!overlay)
  384. return 0;
  385. mutex_lock(&overlay->mutex);
  386. for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
  387. if (overlay->stream[i].paused)
  388. DRM_INFO("%s: *warning* stream %i already paused\n",
  389. __func__, i);
  390. ret = vmw_overlay_stop(dev_priv, i, true, false);
  391. WARN_ON(ret != 0);
  392. }
  393. mutex_unlock(&overlay->mutex);
  394. return 0;
  395. }
/**
 * Ioctl entry point for controlling an overlay stream.
 *
 * Looks up the stream resource, then either stops the stream
 * (arg->enabled == 0) or updates it with the supplied buffer and
 * placement arguments, all under the overlay lock.
 */
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct drm_vmw_control_stream_arg *arg =
	    (struct drm_vmw_control_stream_arg *)data;
	struct vmw_dma_buffer *buf;
	struct vmw_resource *res;
	int ret;

	if (!overlay)
		return -ENOSYS;

	/* Validates stream_id and takes a reference on the resource. */
	ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
	if (ret)
		return ret;

	mutex_lock(&overlay->mutex);

	if (!arg->enabled) {
		ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true);
		goto out_unlock;
	}

	ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf);
	if (ret)
		goto out_unlock;

	ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);

	/* update_stream takes its own reference if it keeps the buffer. */
	vmw_dmabuf_unreference(&buf);

out_unlock:
	mutex_unlock(&overlay->mutex);
	vmw_resource_unreference(&res);

	return ret;
}
  427. int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
  428. {
  429. if (!dev_priv->overlay_priv)
  430. return 0;
  431. return VMW_MAX_NUM_STREAMS;
  432. }
  433. int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
  434. {
  435. struct vmw_overlay *overlay = dev_priv->overlay_priv;
  436. int i, k;
  437. if (!overlay)
  438. return 0;
  439. mutex_lock(&overlay->mutex);
  440. for (i = 0, k = 0; i < VMW_MAX_NUM_STREAMS; i++)
  441. if (!overlay->stream[i].claimed)
  442. k++;
  443. mutex_unlock(&overlay->mutex);
  444. return k;
  445. }
  446. int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out)
  447. {
  448. struct vmw_overlay *overlay = dev_priv->overlay_priv;
  449. int i;
  450. if (!overlay)
  451. return -ENOSYS;
  452. mutex_lock(&overlay->mutex);
  453. for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
  454. if (overlay->stream[i].claimed)
  455. continue;
  456. overlay->stream[i].claimed = true;
  457. *out = i;
  458. mutex_unlock(&overlay->mutex);
  459. return 0;
  460. }
  461. mutex_unlock(&overlay->mutex);
  462. return -ESRCH;
  463. }
/*
 * Release a previously claimed stream: fully stop it on the hardware
 * and mark it unclaimed. @stream_id must be a valid claimed id.
 */
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;

	BUG_ON(stream_id >= VMW_MAX_NUM_STREAMS);

	if (!overlay)
		return -ENOSYS;

	mutex_lock(&overlay->mutex);
	/* Unref of an unclaimed stream indicates a caller bug. */
	WARN_ON(!overlay->stream[stream_id].claimed);
	vmw_overlay_stop(dev_priv, stream_id, false, false);
	overlay->stream[stream_id].claimed = false;
	mutex_unlock(&overlay->mutex);
	return 0;
}
  477. int vmw_overlay_init(struct vmw_private *dev_priv)
  478. {
  479. struct vmw_overlay *overlay;
  480. int i;
  481. if (dev_priv->overlay_priv)
  482. return -EINVAL;
  483. if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_VIDEO) &&
  484. (dev_priv->fifo.capabilities & SVGA_FIFO_CAP_ESCAPE)) {
  485. DRM_INFO("hardware doesn't support overlays\n");
  486. return -ENOSYS;
  487. }
  488. overlay = kmalloc(GFP_KERNEL, sizeof(*overlay));
  489. if (!overlay)
  490. return -ENOMEM;
  491. memset(overlay, 0, sizeof(*overlay));
  492. mutex_init(&overlay->mutex);
  493. for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
  494. overlay->stream[i].buf = NULL;
  495. overlay->stream[i].paused = false;
  496. overlay->stream[i].claimed = false;
  497. }
  498. dev_priv->overlay_priv = overlay;
  499. return 0;
  500. }
  501. int vmw_overlay_close(struct vmw_private *dev_priv)
  502. {
  503. struct vmw_overlay *overlay = dev_priv->overlay_priv;
  504. bool forgotten_buffer = false;
  505. int i;
  506. if (!overlay)
  507. return -ENOSYS;
  508. for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
  509. if (overlay->stream[i].buf) {
  510. forgotten_buffer = true;
  511. vmw_overlay_stop(dev_priv, i, false, false);
  512. }
  513. }
  514. WARN_ON(forgotten_buffer);
  515. dev_priv->overlay_priv = NULL;
  516. kfree(overlay);
  517. return 0;
  518. }