vmwgfx_overlay.c

/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "drmP.h"
#include "vmwgfx_drv.h"

#include "ttm/ttm_placement.h"

#include "svga_overlay.h"
#include "svga_escape.h"

#define VMW_MAX_NUM_STREAMS 1

struct vmw_stream {
	struct vmw_dma_buffer *buf;
	bool claimed;
	bool paused;
	struct drm_vmw_control_stream_arg saved;
};

/**
 * Overlay control
 */
struct vmw_overlay {
	/*
	 * Each stream is a single overlay. In Xv these are called ports.
	 */
	struct mutex mutex;
	struct vmw_stream stream[VMW_MAX_NUM_STREAMS];
};

static inline struct vmw_overlay *vmw_overlay(struct drm_device *dev)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	return dev_priv ? dev_priv->overlay_priv : NULL;
}

struct vmw_escape_header {
	uint32_t cmd;
	SVGAFifoCmdEscape body;
};

struct vmw_escape_video_flush {
	struct vmw_escape_header escape;
	SVGAEscapeVideoFlush flush;
};

static inline void fill_escape(struct vmw_escape_header *header,
			       uint32_t size)
{
	header->cmd = SVGA_CMD_ESCAPE;
	header->body.nsid = SVGA_ESCAPE_NSID_VMWARE;
	header->body.size = size;
}

static inline void fill_flush(struct vmw_escape_video_flush *cmd,
			      uint32_t stream_id)
{
	fill_escape(&cmd->escape, sizeof(cmd->flush));
	cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH;
	cmd->flush.streamId = stream_id;
}
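
/*
 * All video commands below are sent to the device wrapped in an
 * SVGA_CMD_ESCAPE command using the VMware namespace id. Each register
 * update is paired with a video flush for the same stream, which prompts
 * the device to act on the new register values.
 */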

/**
 * Pin or unpin a buffer in vram.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to pin or unpin.
 * @pin: Pin buffer in vram if true.
 * @interruptible: Use interruptible wait.
 *
 * Takes the current master's ttm lock in read mode.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
				  struct vmw_dma_buffer *buf,
				  bool pin, bool interruptible)
{
	struct ttm_buffer_object *bo = &buf->base;
	struct ttm_placement *overlay_placement = &vmw_vram_placement;
	int ret;

	ret = ttm_read_lock(&dev_priv->active_master->lock, interruptible);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
	if (unlikely(ret != 0))
		goto err;

	if (pin)
		overlay_placement = &vmw_vram_ne_placement;

	ret = ttm_bo_validate(bo, overlay_placement, interruptible, false);

	ttm_bo_unreserve(bo);

err:
	ttm_read_unlock(&dev_priv->active_master->lock);

	return ret;
}

/**
 * Send put command to hw.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_overlay_send_put(struct vmw_private *dev_priv,
				struct vmw_dma_buffer *buf,
				struct drm_vmw_control_stream_arg *arg,
				bool interruptible)
{
	struct {
		struct vmw_escape_header escape;
		struct {
			struct {
				uint32_t cmdType;
				uint32_t streamId;
			} header;
			struct {
				uint32_t registerId;
				uint32_t value;
			} items[SVGA_VIDEO_PITCH_3 + 1];
		} body;
		struct vmw_escape_video_flush flush;
	} *cmds;
	uint32_t offset;
	int i, ret;

	for (;;) {
		cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
		if (cmds)
			break;

		ret = vmw_fallback_wait(dev_priv, false, true, 0,
					interruptible, 3*HZ);
		if (interruptible && ret == -ERESTARTSYS)
			return ret;
		else
			BUG_ON(ret != 0);
	}

	fill_escape(&cmds->escape, sizeof(cmds->body));
	cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
	cmds->body.header.streamId = arg->stream_id;

	for (i = 0; i <= SVGA_VIDEO_PITCH_3; i++)
		cmds->body.items[i].registerId = i;

	offset = buf->base.offset + arg->offset;

	cmds->body.items[SVGA_VIDEO_ENABLED].value = true;
	cmds->body.items[SVGA_VIDEO_FLAGS].value = arg->flags;
	cmds->body.items[SVGA_VIDEO_DATA_OFFSET].value = offset;
	cmds->body.items[SVGA_VIDEO_FORMAT].value = arg->format;
	cmds->body.items[SVGA_VIDEO_COLORKEY].value = arg->color_key;
	cmds->body.items[SVGA_VIDEO_SIZE].value = arg->size;
	cmds->body.items[SVGA_VIDEO_WIDTH].value = arg->width;
	cmds->body.items[SVGA_VIDEO_HEIGHT].value = arg->height;
	cmds->body.items[SVGA_VIDEO_SRC_X].value = arg->src.x;
	cmds->body.items[SVGA_VIDEO_SRC_Y].value = arg->src.y;
	cmds->body.items[SVGA_VIDEO_SRC_WIDTH].value = arg->src.w;
	cmds->body.items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h;
	cmds->body.items[SVGA_VIDEO_DST_X].value = arg->dst.x;
	cmds->body.items[SVGA_VIDEO_DST_Y].value = arg->dst.y;
	cmds->body.items[SVGA_VIDEO_DST_WIDTH].value = arg->dst.w;
	cmds->body.items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h;
	cmds->body.items[SVGA_VIDEO_PITCH_1].value = arg->pitch[0];
	cmds->body.items[SVGA_VIDEO_PITCH_2].value = arg->pitch[1];
	cmds->body.items[SVGA_VIDEO_PITCH_3].value = arg->pitch[2];

	fill_flush(&cmds->flush, arg->stream_id);

	vmw_fifo_commit(dev_priv, sizeof(*cmds));

	return 0;
}

/**
 * Send stop command to hw.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_overlay_send_stop(struct vmw_private *dev_priv,
				 uint32_t stream_id,
				 bool interruptible)
{
	struct {
		struct vmw_escape_header escape;
		SVGAEscapeVideoSetRegs body;
		struct vmw_escape_video_flush flush;
	} *cmds;
	int ret;

	for (;;) {
		cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds));
		if (cmds)
			break;

		ret = vmw_fallback_wait(dev_priv, false, true, 0,
					interruptible, 3*HZ);
		if (interruptible && ret == -ERESTARTSYS)
			return ret;
		else
			BUG_ON(ret != 0);
	}

	fill_escape(&cmds->escape, sizeof(cmds->body));
	cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS;
	cmds->body.header.streamId = stream_id;
	cmds->body.items[0].registerId = SVGA_VIDEO_ENABLED;
	cmds->body.items[0].value = false;

	fill_flush(&cmds->flush, stream_id);

	vmw_fifo_commit(dev_priv, sizeof(*cmds));

	return 0;
}

/**
 * Stop or pause a stream.
 *
 * If the stream is paused, the no-evict flag is removed from the buffer
 * but the buffer is left in vram. This allows, for instance, mode_set to
 * evict it should it need to.
 *
 * The caller must hold the overlay lock.
 *
 * @stream_id which stream to stop/pause.
 * @pause true to pause, false to stop completely.
 */
static int vmw_overlay_stop(struct vmw_private *dev_priv,
			    uint32_t stream_id, bool pause,
			    bool interruptible)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct vmw_stream *stream = &overlay->stream[stream_id];
	int ret;

	/* no buffer attached, the stream is completely stopped */
	if (!stream->buf)
		return 0;

	/* If the stream is paused this is already done */
	if (!stream->paused) {
		ret = vmw_overlay_send_stop(dev_priv, stream_id,
					    interruptible);
		if (ret)
			return ret;

		/* We just remove the NO_EVICT flag so no -ENOMEM */
		ret = vmw_dmabuf_pin_in_vram(dev_priv, stream->buf, false,
					     interruptible);
		if (interruptible && ret == -ERESTARTSYS)
			return ret;
		else
			BUG_ON(ret != 0);
	}

	if (!pause) {
		vmw_dmabuf_unreference(&stream->buf);
		stream->paused = false;
	} else {
		stream->paused = true;
	}

	return 0;
}

/**
 * Update a stream and send any put or stop fifo commands needed.
 *
 * The caller must hold the overlay lock.
 *
 * Returns
 * -ENOMEM if buffer doesn't fit in vram.
 * -ERESTARTSYS if interrupted.
 */
static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
				     struct vmw_dma_buffer *buf,
				     struct drm_vmw_control_stream_arg *arg,
				     bool interruptible)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct vmw_stream *stream = &overlay->stream[arg->stream_id];
	int ret = 0;

	if (!buf)
		return -EINVAL;

	DRM_DEBUG(" %s: old %p, new %p, %spaused\n", __func__,
		  stream->buf, buf, stream->paused ? "" : "not ");

	if (stream->buf != buf) {
		ret = vmw_overlay_stop(dev_priv, arg->stream_id,
				       false, interruptible);
		if (ret)
			return ret;
	} else if (!stream->paused) {
		/* If the buffers match and the stream isn't paused, just
		 * send the put command; there is no need to do anything else.
		 */
		ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
		if (ret == 0)
			stream->saved = *arg;
		else
			BUG_ON(!interruptible);

		return ret;
	}

	/* We don't start the old stream if we are interrupted.
	 * Might return -ENOMEM if it can't fit the buffer in vram.
	 */
	ret = vmw_dmabuf_pin_in_vram(dev_priv, buf, true, interruptible);
	if (ret)
		return ret;

	ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
	if (ret) {
		/* This one needs to happen no matter what. We only remove
		 * the NO_EVICT flag so this is safe from -ENOMEM.
		 */
		BUG_ON(vmw_dmabuf_pin_in_vram(dev_priv, buf, false, false) != 0);
		return ret;
	}

	if (stream->buf != buf)
		stream->buf = vmw_dmabuf_reference(buf);
	stream->saved = *arg;

	return 0;
}

/**
 * Stop all streams.
 *
 * Used by the fb code when starting.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_stop_all(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, ret;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		struct vmw_stream *stream = &overlay->stream[i];
		if (!stream->buf)
			continue;

		ret = vmw_overlay_stop(dev_priv, i, false, false);
		WARN_ON(ret != 0);
	}

	mutex_unlock(&overlay->mutex);

	return 0;
}

/**
 * Try to resume all paused streams.
 *
 * Used by the kms code after moving a new scanout buffer to vram.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_resume_all(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, ret;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		struct vmw_stream *stream = &overlay->stream[i];
		if (!stream->paused)
			continue;

		ret = vmw_overlay_update_stream(dev_priv, stream->buf,
						&stream->saved, false);
		if (ret != 0)
			DRM_INFO("%s: *warning* failed to resume stream %i\n",
				 __func__, i);
	}

	mutex_unlock(&overlay->mutex);

	return 0;
}

/**
 * Pauses all active streams.
 *
 * Used by the kms code when moving a new scanout buffer to vram.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_pause_all(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, ret;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		if (overlay->stream[i].paused)
			DRM_INFO("%s: *warning* stream %i already paused\n",
				 __func__, i);
		ret = vmw_overlay_stop(dev_priv, i, true, false);
		WARN_ON(ret != 0);
	}

	mutex_unlock(&overlay->mutex);

	return 0;
}
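
/**
 * Control a single stream on behalf of user space.
 *
 * Stops the stream if the enabled flag is cleared, otherwise looks up the
 * referenced dma buffer and updates the stream with the new arguments.
 *
 * Takes the overlay lock.
 */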
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct drm_vmw_control_stream_arg *arg =
	    (struct drm_vmw_control_stream_arg *)data;
	struct vmw_dma_buffer *buf;
	struct vmw_resource *res;
	int ret;

	if (!overlay)
		return -ENOSYS;

	ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
	if (ret)
		return ret;

	mutex_lock(&overlay->mutex);

	if (!arg->enabled) {
		ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true);
		goto out_unlock;
	}

	ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf);
	if (ret)
		goto out_unlock;

	ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);

	vmw_dmabuf_unreference(&buf);

out_unlock:
	mutex_unlock(&overlay->mutex);
	vmw_resource_unreference(&res);

	return ret;
}
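
/*
 * Illustrative sketch only, not part of the driver: a user space client
 * would typically reach vmw_overlay_ioctl() through libdrm, roughly as
 * follows, assuming it has already claimed a stream and created a dma
 * buffer whose handle is buf_handle:
 *
 *	struct drm_vmw_control_stream_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.stream_id = stream_id;
 *	arg.enabled = 1;
 *	arg.handle = buf_handle;
 *	arg.width = width;
 *	arg.height = height;
 *	(fill in flags, format, size, pitch[] and the src/dst rectangles)
 *	drmCommandWrite(fd, DRM_VMW_CONTROL_STREAM, &arg, sizeof(arg));
 *
 * Passing enabled == 0 stops the stream instead.
 */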

int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
{
	if (!dev_priv->overlay_priv)
		return 0;

	return VMW_MAX_NUM_STREAMS;
}

int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, k;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0, k = 0; i < VMW_MAX_NUM_STREAMS; i++)
		if (!overlay->stream[i].claimed)
			k++;

	mutex_unlock(&overlay->mutex);

	return k;
}
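
/**
 * Claim a free stream for exclusive use and return its id in @out.
 *
 * Takes the overlay lock.
 *
 * Returns
 * -ENOSYS if the device has no overlay support.
 * -ESRCH if all streams are already claimed.
 */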
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i;

	if (!overlay)
		return -ENOSYS;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		if (overlay->stream[i].claimed)
			continue;

		overlay->stream[i].claimed = true;
		*out = i;
		mutex_unlock(&overlay->mutex);
		return 0;
	}

	mutex_unlock(&overlay->mutex);
	return -ESRCH;
}
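
/**
 * Release a previously claimed stream, stopping it completely first.
 *
 * Takes the overlay lock.
 */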
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;

	BUG_ON(stream_id >= VMW_MAX_NUM_STREAMS);

	if (!overlay)
		return -ENOSYS;

	mutex_lock(&overlay->mutex);

	WARN_ON(!overlay->stream[stream_id].claimed);
	vmw_overlay_stop(dev_priv, stream_id, false, false);
	overlay->stream[stream_id].claimed = false;

	mutex_unlock(&overlay->mutex);
	return 0;
}
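
/**
 * Allocate and initialize the per-device overlay state, after checking
 * that the device supports video overlays.
 */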
int vmw_overlay_init(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay;
	int i;

	if (dev_priv->overlay_priv)
		return -EINVAL;

	if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_VIDEO) &&
	     (dev_priv->fifo.capabilities & SVGA_FIFO_CAP_ESCAPE)) {
		DRM_INFO("hardware doesn't support overlays\n");
		return -ENOSYS;
	}

	overlay = kmalloc(sizeof(*overlay), GFP_KERNEL);
	if (!overlay)
		return -ENOMEM;

	memset(overlay, 0, sizeof(*overlay));
	mutex_init(&overlay->mutex);
	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		overlay->stream[i].buf = NULL;
		overlay->stream[i].paused = false;
		overlay->stream[i].claimed = false;
	}

	dev_priv->overlay_priv = overlay;

	return 0;
}
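
/**
 * Tear down the overlay state, stopping any streams that still have a
 * buffer attached (a WARN_ON flags such forgotten buffers).
 */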
int vmw_overlay_close(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	bool forgotten_buffer = false;
	int i;

	if (!overlay)
		return -ENOSYS;

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		if (overlay->stream[i].buf) {
			forgotten_buffer = true;
			vmw_overlay_stop(dev_priv, i, false, false);
		}
	}

	WARN_ON(forgotten_buffer);

	dev_priv->overlay_priv = NULL;
	kfree(overlay);

	return 0;
}