vmwgfx_kms.c

/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_kms.h"

/* Might need an hrtimer here? */
#define VMWGFX_PRESENT_RATE ((HZ / 60 > 0) ? HZ / 60 : 1)

static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb);
static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb);

void vmw_display_unit_cleanup(struct vmw_display_unit *du)
{
        if (du->cursor_surface)
                vmw_surface_unreference(&du->cursor_surface);
        if (du->cursor_dmabuf)
                vmw_dmabuf_unreference(&du->cursor_dmabuf);
        drm_crtc_cleanup(&du->crtc);
        drm_encoder_cleanup(&du->encoder);
        drm_connector_cleanup(&du->connector);
}

/*
 * Display Unit Cursor functions
 */
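
/*
 * Send an SVGA_CMD_DEFINE_ALPHA_CURSOR command through the FIFO. The
 * width x height 32-bit cursor image is copied inline directly after
 * the command header.
 */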
int vmw_cursor_update_image(struct vmw_private *dev_priv,
                            u32 *image, u32 width, u32 height,
                            u32 hotspotX, u32 hotspotY)
{
        struct {
                u32 cmd;
                SVGAFifoCmdDefineAlphaCursor cursor;
        } *cmd;
        u32 image_size = width * height * 4;
        u32 cmd_size = sizeof(*cmd) + image_size;

        if (!image)
                return -EINVAL;

        cmd = vmw_fifo_reserve(dev_priv, cmd_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed.\n");
                return -ENOMEM;
        }

        memset(cmd, 0, sizeof(*cmd));
        memcpy(&cmd[1], image, image_size);

        cmd->cmd = cpu_to_le32(SVGA_CMD_DEFINE_ALPHA_CURSOR);
        cmd->cursor.id = cpu_to_le32(0);
        cmd->cursor.width = cpu_to_le32(width);
        cmd->cursor.height = cpu_to_le32(height);
        cmd->cursor.hotspotX = cpu_to_le32(hotspotX);
        cmd->cursor.hotspotY = cpu_to_le32(hotspotY);

        vmw_fifo_commit(dev_priv, cmd_size);

        return 0;
}
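
/*
 * Show, hide or move the cursor by updating the cursor registers in
 * FIFO memory and bumping SVGA_FIFO_CURSOR_COUNT so the device picks
 * up the change.
 */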
void vmw_cursor_update_position(struct vmw_private *dev_priv,
                                bool show, int x, int y)
{
        __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
        uint32_t count;

        iowrite32(show ? 1 : 0, fifo_mem + SVGA_FIFO_CURSOR_ON);
        iowrite32(x, fifo_mem + SVGA_FIFO_CURSOR_X);
        iowrite32(y, fifo_mem + SVGA_FIFO_CURSOR_Y);
        count = ioread32(fifo_mem + SVGA_FIFO_CURSOR_COUNT);
        iowrite32(++count, fifo_mem + SVGA_FIFO_CURSOR_COUNT);
}

int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
                           uint32_t handle, uint32_t width, uint32_t height)
{
        struct vmw_private *dev_priv = vmw_priv(crtc->dev);
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
        struct vmw_surface *surface = NULL;
        struct vmw_dma_buffer *dmabuf = NULL;
        int ret;

        if (handle) {
                ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
                                                     handle, &surface);
                if (!ret) {
                        if (!surface->snooper.image) {
                                DRM_ERROR("surface not suitable for cursor\n");
                                vmw_surface_unreference(&surface);
                                return -EINVAL;
                        }
                } else {
                        ret = vmw_user_dmabuf_lookup(tfile,
                                                     handle, &dmabuf);
                        if (ret) {
                                DRM_ERROR("failed to find surface or dmabuf: %i\n", ret);
                                return -EINVAL;
                        }
                }
        }

        /* takedown old cursor */
        if (du->cursor_surface) {
                du->cursor_surface->snooper.crtc = NULL;
                vmw_surface_unreference(&du->cursor_surface);
        }
        if (du->cursor_dmabuf)
                vmw_dmabuf_unreference(&du->cursor_dmabuf);

        /* setup new image */
        if (surface) {
                /* vmw_user_surface_lookup takes one reference */
                du->cursor_surface = surface;

                du->cursor_surface->snooper.crtc = crtc;
                du->cursor_age = du->cursor_surface->snooper.age;
                vmw_cursor_update_image(dev_priv, surface->snooper.image,
                                        64, 64, du->hotspot_x, du->hotspot_y);
        } else if (dmabuf) {
                struct ttm_bo_kmap_obj map;
                unsigned long kmap_offset;
                unsigned long kmap_num;
                void *virtual;
                bool dummy;

                /* vmw_user_dmabuf_lookup takes one reference */
                du->cursor_dmabuf = dmabuf;

                kmap_offset = 0;
                kmap_num = (64*64*4) >> PAGE_SHIFT;

                ret = ttm_bo_reserve(&dmabuf->base, true, false, false, 0);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("reserve failed\n");
                        return -EINVAL;
                }

                ret = ttm_bo_kmap(&dmabuf->base, kmap_offset, kmap_num, &map);
                if (unlikely(ret != 0))
                        goto err_unreserve;

                virtual = ttm_kmap_obj_virtual(&map, &dummy);
                vmw_cursor_update_image(dev_priv, virtual, 64, 64,
                                        du->hotspot_x, du->hotspot_y);

                ttm_bo_kunmap(&map);
err_unreserve:
                ttm_bo_unreserve(&dmabuf->base);

        } else {
                vmw_cursor_update_position(dev_priv, false, 0, 0);
                return 0;
        }

        vmw_cursor_update_position(dev_priv, true, du->cursor_x, du->cursor_y);

        return 0;
}

int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
{
        struct vmw_private *dev_priv = vmw_priv(crtc->dev);
        struct vmw_display_unit *du = vmw_crtc_to_du(crtc);
        bool shown = du->cursor_surface || du->cursor_dmabuf ? true : false;

        du->cursor_x = x + crtc->x;
        du->cursor_y = y + crtc->y;

        vmw_cursor_update_position(dev_priv, shown,
                                   du->cursor_x, du->cursor_y);

        return 0;
}
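
/*
 * Snoop the cursor contents of an SVGA3dCmdSurfaceDMA submitted through
 * execbuf, so the image can later be replayed from srf->snooper.image.
 * Only the simple case of a single, page-aligned, full 64x64 upload is
 * handled.
 */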
void vmw_kms_cursor_snoop(struct vmw_surface *srf,
                          struct ttm_object_file *tfile,
                          struct ttm_buffer_object *bo,
                          SVGA3dCmdHeader *header)
{
        struct ttm_bo_kmap_obj map;
        unsigned long kmap_offset;
        unsigned long kmap_num;
        SVGA3dCopyBox *box;
        unsigned box_count;
        void *virtual;
        bool dummy;
        struct vmw_dma_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceDMA dma;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_dma_cmd, header);

        /* No snooper installed */
        if (!srf->snooper.image)
                return;

        if (cmd->dma.host.face != 0 || cmd->dma.host.mipmap != 0) {
                DRM_ERROR("face and mipmap for cursors should never != 0\n");
                return;
        }

        if (cmd->header.size < 64) {
                DRM_ERROR("at least one full copy box must be given\n");
                return;
        }

        box = (SVGA3dCopyBox *)&cmd[1];
        box_count = (cmd->header.size - sizeof(SVGA3dCmdSurfaceDMA)) /
                        sizeof(SVGA3dCopyBox);

        if (cmd->dma.guest.pitch != (64 * 4) ||
            cmd->dma.guest.ptr.offset % PAGE_SIZE ||
            box->x != 0 || box->y != 0 || box->z != 0 ||
            box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
            box->w != 64 || box->h != 64 || box->d != 1 ||
            box_count != 1) {
                /* TODO handle non-page-aligned offsets */
                /* TODO handle partial uploads and pitch != 256 */
                /* TODO handle more than one copy (size != 64) */
                DRM_ERROR("lazy programmer, can't handle weird stuff\n");
                return;
        }

        kmap_offset = cmd->dma.guest.ptr.offset >> PAGE_SHIFT;
        kmap_num = (64*64*4) >> PAGE_SHIFT;

        ret = ttm_bo_reserve(bo, true, false, false, 0);
        if (unlikely(ret != 0)) {
                DRM_ERROR("reserve failed\n");
                return;
        }

        ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
        if (unlikely(ret != 0))
                goto err_unreserve;

        virtual = ttm_kmap_obj_virtual(&map, &dummy);
        memcpy(srf->snooper.image, virtual, 64*64*4);
        srf->snooper.age++;

        /* we can't call the cursor update function from here, since execbuf
         * has reserved fifo space.
         *
         * if (srf->snooper.crtc)
         *      vmw_ldu_crtc_cursor_update_image(dev_priv,
         *                                       srf->snooper.image, 64, 64,
         *                                       du->hotspot_x, du->hotspot_y);
         */

        ttm_bo_kunmap(&map);
err_unreserve:
        ttm_bo_unreserve(bo);
}
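
/*
 * Called after command submission: if the snooped image of a crtc's
 * cursor surface has aged, push the new contents to the device.
 */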
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        struct vmw_display_unit *du;
        struct drm_crtc *crtc;

        mutex_lock(&dev->mode_config.mutex);

        list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                du = vmw_crtc_to_du(crtc);
                if (!du->cursor_surface ||
                    du->cursor_age == du->cursor_surface->snooper.age)
                        continue;

                du->cursor_age = du->cursor_surface->snooper.age;
                vmw_cursor_update_image(dev_priv,
                                        du->cursor_surface->snooper.image,
                                        64, 64, du->hotspot_x, du->hotspot_y);
        }

        mutex_unlock(&dev->mode_config.mutex);
}

/*
 * Generic framebuffer code
 */

int vmw_framebuffer_create_handle(struct drm_framebuffer *fb,
                                  struct drm_file *file_priv,
                                  unsigned int *handle)
{
        if (handle)
                *handle = 0;

        return 0;
}

/*
 * Surface framebuffer code
 */

#define vmw_framebuffer_to_vfbs(x) \
        container_of(x, struct vmw_framebuffer_surface, base.base)

struct vmw_framebuffer_surface {
        struct vmw_framebuffer base;
        struct vmw_surface *surface;
        struct vmw_dma_buffer *buffer;
        struct delayed_work d_work;
        struct mutex work_lock;
        bool present_fs;
};

void vmw_framebuffer_surface_destroy(struct drm_framebuffer *framebuffer)
{
        struct vmw_framebuffer_surface *vfb =
                vmw_framebuffer_to_vfbs(framebuffer);

        cancel_delayed_work_sync(&vfb->d_work);
        drm_framebuffer_cleanup(framebuffer);
        vmw_surface_unreference(&vfb->surface);

        kfree(framebuffer);
}
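
/*
 * Delayed work callback: if a full-screen present is pending, issue a
 * single SVGA_3D_CMD_PRESENT covering the whole framebuffer. On success,
 * or if FIFO reservation fails, the work is rescheduled at
 * VMWGFX_PRESENT_RATE.
 */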
static void vmw_framebuffer_present_fs_callback(struct work_struct *work)
{
        struct delayed_work *d_work =
                container_of(work, struct delayed_work, work);
        struct vmw_framebuffer_surface *vfbs =
                container_of(d_work, struct vmw_framebuffer_surface, d_work);
        struct vmw_surface *surf = vfbs->surface;
        struct drm_framebuffer *framebuffer = &vfbs->base.base;
        struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdPresent body;
                SVGA3dCopyRect cr;
        } *cmd;

        mutex_lock(&vfbs->work_lock);
        if (!vfbs->present_fs)
                goto out_unlock;

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL))
                goto out_resched;

        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
        cmd->header.size = cpu_to_le32(sizeof(cmd->body) + sizeof(cmd->cr));
        cmd->body.sid = cpu_to_le32(surf->res.id);
        cmd->cr.x = cpu_to_le32(0);
        cmd->cr.y = cpu_to_le32(0);
        cmd->cr.srcx = cmd->cr.x;
        cmd->cr.srcy = cmd->cr.y;
        cmd->cr.w = cpu_to_le32(framebuffer->width);
        cmd->cr.h = cpu_to_le32(framebuffer->height);

        vfbs->present_fs = false;

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
out_resched:
        /* Will not re-add if already pending. */
        schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
out_unlock:
        mutex_unlock(&vfbs->work_lock);
}
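
/*
 * .dirty callback for surface-backed framebuffers. Without Screen
 * Object support, or when no clip rects are supplied, a full-screen
 * present is scheduled instead of presenting the individual dirty
 * regions.
 */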
int vmw_framebuffer_surface_dirty(struct drm_framebuffer *framebuffer,
                                  struct drm_file *file_priv,
                                  unsigned flags, unsigned color,
                                  struct drm_clip_rect *clips,
                                  unsigned num_clips)
{
        struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
        struct vmw_framebuffer_surface *vfbs =
                vmw_framebuffer_to_vfbs(framebuffer);
        struct vmw_surface *surf = vfbs->surface;
        struct drm_clip_rect norect;
        SVGA3dCopyRect *cr;
        int i, inc = 1;

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdPresent body;
                SVGA3dCopyRect cr;
        } *cmd;

        if (!num_clips ||
            !(dev_priv->fifo.capabilities &
              SVGA_FIFO_CAP_SCREEN_OBJECT)) {
                int ret;

                mutex_lock(&vfbs->work_lock);
                vfbs->present_fs = true;
                ret = schedule_delayed_work(&vfbs->d_work, VMWGFX_PRESENT_RATE);
                mutex_unlock(&vfbs->work_lock);
                if (ret) {
                        /* No work was pending, so force an immediate present. */
                        vmw_framebuffer_present_fs_callback(&vfbs->d_work.work);
                }
                return 0;
        }

        if (!num_clips) {
                num_clips = 1;
                clips = &norect;
                norect.x1 = norect.y1 = 0;
                norect.x2 = framebuffer->width;
                norect.y2 = framebuffer->height;
        } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
                num_clips /= 2;
                inc = 2; /* skip source rects */
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed.\n");
                return -ENOMEM;
        }

        memset(cmd, 0, sizeof(*cmd));

        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_PRESENT);
        cmd->header.size = cpu_to_le32(sizeof(cmd->body) + num_clips * sizeof(cmd->cr));
        cmd->body.sid = cpu_to_le32(surf->res.id);

        for (i = 0, cr = &cmd->cr; i < num_clips; i++, cr++, clips += inc) {
                cr->x = cpu_to_le16(clips->x1);
                cr->y = cpu_to_le16(clips->y1);
                cr->srcx = cr->x;
                cr->srcy = cr->y;
                cr->w = cpu_to_le16(clips->x2 - clips->x1);
                cr->h = cpu_to_le16(clips->y2 - clips->y1);
        }

        vmw_fifo_commit(dev_priv, sizeof(*cmd) + (num_clips - 1) * sizeof(cmd->cr));

        return 0;
}

static struct drm_framebuffer_funcs vmw_framebuffer_surface_funcs = {
        .destroy = vmw_framebuffer_surface_destroy,
        .dirty = vmw_framebuffer_surface_dirty,
        .create_handle = vmw_framebuffer_create_handle,
};

static int vmw_kms_new_framebuffer_surface(struct vmw_private *dev_priv,
                                           struct vmw_surface *surface,
                                           struct vmw_framebuffer **out,
                                           const struct drm_mode_fb_cmd
                                           *mode_cmd)
{
        struct drm_device *dev = dev_priv->dev;
        struct vmw_framebuffer_surface *vfbs;
        enum SVGA3dSurfaceFormat format;
        int ret;

        /*
         * Sanity checks.
         */

        if (unlikely(surface->mip_levels[0] != 1 ||
                     surface->num_sizes != 1 ||
                     surface->sizes[0].width < mode_cmd->width ||
                     surface->sizes[0].height < mode_cmd->height ||
                     surface->sizes[0].depth != 1)) {
                DRM_ERROR("Incompatible surface dimensions "
                          "for requested mode.\n");
                return -EINVAL;
        }

        switch (mode_cmd->depth) {
        case 32:
                format = SVGA3D_A8R8G8B8;
                break;
        case 24:
                format = SVGA3D_X8R8G8B8;
                break;
        case 16:
                format = SVGA3D_R5G6B5;
                break;
        case 15:
                format = SVGA3D_A1R5G5B5;
                break;
        default:
                DRM_ERROR("Invalid color depth: %d\n", mode_cmd->depth);
                return -EINVAL;
        }

        if (unlikely(format != surface->format)) {
                DRM_ERROR("Invalid surface format for requested mode.\n");
                return -EINVAL;
        }

        vfbs = kzalloc(sizeof(*vfbs), GFP_KERNEL);
        if (!vfbs) {
                ret = -ENOMEM;
                goto out_err1;
        }

        ret = drm_framebuffer_init(dev, &vfbs->base.base,
                                   &vmw_framebuffer_surface_funcs);
        if (ret)
                goto out_err2;

        if (!vmw_surface_reference(surface)) {
                DRM_ERROR("failed to reference surface %p\n", surface);
                ret = -EINVAL;
                goto out_err3;
        }

        /* XXX get the first 3 from the surface info */
        vfbs->base.base.bits_per_pixel = mode_cmd->bpp;
        vfbs->base.base.pitch = mode_cmd->pitch;
        vfbs->base.base.depth = mode_cmd->depth;
        vfbs->base.base.width = mode_cmd->width;
        vfbs->base.base.height = mode_cmd->height;
        vfbs->base.pin = &vmw_surface_dmabuf_pin;
        vfbs->base.unpin = &vmw_surface_dmabuf_unpin;
        vfbs->surface = surface;
        mutex_init(&vfbs->work_lock);
        INIT_DELAYED_WORK(&vfbs->d_work, &vmw_framebuffer_present_fs_callback);
        *out = &vfbs->base;

        return 0;

out_err3:
        drm_framebuffer_cleanup(&vfbs->base.base);
out_err2:
        kfree(vfbs);
out_err1:
        return ret;
}

/*
 * Dmabuf framebuffer code
 */

#define vmw_framebuffer_to_vfbd(x) \
        container_of(x, struct vmw_framebuffer_dmabuf, base.base)

struct vmw_framebuffer_dmabuf {
        struct vmw_framebuffer base;
        struct vmw_dma_buffer *buffer;
};

void vmw_framebuffer_dmabuf_destroy(struct drm_framebuffer *framebuffer)
{
        struct vmw_framebuffer_dmabuf *vfbd =
                vmw_framebuffer_to_vfbd(framebuffer);

        drm_framebuffer_cleanup(framebuffer);
        vmw_dmabuf_unreference(&vfbd->buffer);

        kfree(vfbd);
}
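
/*
 * .dirty callback for dmabuf-backed framebuffers: emit one
 * SVGA_CMD_UPDATE per clip rect, or a single full-screen update when no
 * clips are given.
 */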
int vmw_framebuffer_dmabuf_dirty(struct drm_framebuffer *framebuffer,
                                 struct drm_file *file_priv,
                                 unsigned flags, unsigned color,
                                 struct drm_clip_rect *clips,
                                 unsigned num_clips)
{
        struct vmw_private *dev_priv = vmw_priv(framebuffer->dev);
        struct drm_clip_rect norect;
        struct {
                uint32_t header;
                SVGAFifoCmdUpdate body;
        } *cmd;
        int i, increment = 1;

        if (!num_clips) {
                num_clips = 1;
                clips = &norect;
                norect.x1 = norect.y1 = 0;
                norect.x2 = framebuffer->width;
                norect.y2 = framebuffer->height;
        } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) {
                num_clips /= 2;
                increment = 2;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd) * num_clips);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed.\n");
                return -ENOMEM;
        }

        for (i = 0; i < num_clips; i++, clips += increment) {
                cmd[i].header = cpu_to_le32(SVGA_CMD_UPDATE);
                cmd[i].body.x = cpu_to_le32(clips->x1);
                cmd[i].body.y = cpu_to_le32(clips->y1);
                cmd[i].body.width = cpu_to_le32(clips->x2 - clips->x1);
                cmd[i].body.height = cpu_to_le32(clips->y2 - clips->y1);
        }

        vmw_fifo_commit(dev_priv, sizeof(*cmd) * num_clips);

        return 0;
}

static struct drm_framebuffer_funcs vmw_framebuffer_dmabuf_funcs = {
        .destroy = vmw_framebuffer_dmabuf_destroy,
        .dirty = vmw_framebuffer_dmabuf_dirty,
        .create_handle = vmw_framebuffer_create_handle,
};

static int vmw_surface_dmabuf_pin(struct vmw_framebuffer *vfb)
{
        struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
        struct vmw_framebuffer_surface *vfbs =
                vmw_framebuffer_to_vfbs(&vfb->base);
        unsigned long size = vfbs->base.base.pitch * vfbs->base.base.height;
        int ret;

        vfbs->buffer = kzalloc(sizeof(*vfbs->buffer), GFP_KERNEL);
        if (unlikely(vfbs->buffer == NULL))
                return -ENOMEM;

        vmw_overlay_pause_all(dev_priv);
        ret = vmw_dmabuf_init(dev_priv, vfbs->buffer, size,
                              &vmw_vram_ne_placement,
                              false, &vmw_dmabuf_bo_free);
        vmw_overlay_resume_all(dev_priv);

        return ret;
}

static int vmw_surface_dmabuf_unpin(struct vmw_framebuffer *vfb)
{
        struct ttm_buffer_object *bo;
        struct vmw_framebuffer_surface *vfbs =
                vmw_framebuffer_to_vfbs(&vfb->base);

        bo = &vfbs->buffer->base;
        ttm_bo_unref(&bo);
        vfbs->buffer = NULL;

        return 0;
}

static int vmw_framebuffer_dmabuf_pin(struct vmw_framebuffer *vfb)
{
        struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
        struct vmw_framebuffer_dmabuf *vfbd =
                vmw_framebuffer_to_vfbd(&vfb->base);
        int ret;

        vmw_overlay_pause_all(dev_priv);

        ret = vmw_dmabuf_to_start_of_vram(dev_priv, vfbd->buffer);

        vmw_overlay_resume_all(dev_priv);

        WARN_ON(ret != 0);

        return 0;
}

static int vmw_framebuffer_dmabuf_unpin(struct vmw_framebuffer *vfb)
{
        struct vmw_private *dev_priv = vmw_priv(vfb->base.dev);
        struct vmw_framebuffer_dmabuf *vfbd =
                vmw_framebuffer_to_vfbd(&vfb->base);

        if (!vfbd->buffer) {
                WARN_ON(!vfbd->buffer);
                return 0;
        }

        return vmw_dmabuf_from_vram(dev_priv, vfbd->buffer);
}

static int vmw_kms_new_framebuffer_dmabuf(struct vmw_private *dev_priv,
                                          struct vmw_dma_buffer *dmabuf,
                                          struct vmw_framebuffer **out,
                                          const struct drm_mode_fb_cmd
                                          *mode_cmd)
{
        struct drm_device *dev = dev_priv->dev;
        struct vmw_framebuffer_dmabuf *vfbd;
        unsigned int requested_size;
        int ret;

        requested_size = mode_cmd->height * mode_cmd->pitch;
        if (unlikely(requested_size > dmabuf->base.num_pages * PAGE_SIZE)) {
                DRM_ERROR("Screen buffer object size is too small "
                          "for requested mode.\n");
                return -EINVAL;
        }

        vfbd = kzalloc(sizeof(*vfbd), GFP_KERNEL);
        if (!vfbd) {
                ret = -ENOMEM;
                goto out_err1;
        }

        ret = drm_framebuffer_init(dev, &vfbd->base.base,
                                   &vmw_framebuffer_dmabuf_funcs);
        if (ret)
                goto out_err2;

        if (!vmw_dmabuf_reference(dmabuf)) {
                DRM_ERROR("failed to reference dmabuf %p\n", dmabuf);
                ret = -EINVAL;
                goto out_err3;
        }

        vfbd->base.base.bits_per_pixel = mode_cmd->bpp;
        vfbd->base.base.pitch = mode_cmd->pitch;
        vfbd->base.base.depth = mode_cmd->depth;
        vfbd->base.base.width = mode_cmd->width;
        vfbd->base.base.height = mode_cmd->height;
        vfbd->base.pin = vmw_framebuffer_dmabuf_pin;
        vfbd->base.unpin = vmw_framebuffer_dmabuf_unpin;
        vfbd->buffer = dmabuf;
        *out = &vfbd->base;

        return 0;

out_err3:
        drm_framebuffer_cleanup(&vfbd->base.base);
out_err2:
        kfree(vfbd);
out_err1:
        return ret;
}

/*
 * Generic Kernel modesetting functions
 */
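
/*
 * .fb_create hook: try to back the framebuffer with a user surface
 * first, and fall back to a dmabuf if the handle does not name a
 * surface.
 */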
static struct drm_framebuffer *vmw_kms_fb_create(struct drm_device *dev,
                                                 struct drm_file *file_priv,
                                                 struct drm_mode_fb_cmd *mode_cmd)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_framebuffer *vfb = NULL;
        struct vmw_surface *surface = NULL;
        struct vmw_dma_buffer *bo = NULL;
        unsigned int required_size;
        int ret;

        /*
         * This code should be conditioned on Screen Objects not being used.
         * If screen objects are used, we can allocate a GMR to hold the
         * requested framebuffer.
         */
        required_size = mode_cmd->pitch * mode_cmd->height;
        if (unlikely(required_size > dev_priv->vram_size)) {
                DRM_ERROR("VRAM size is too small for requested mode.\n");
                return NULL;
        }
        /*
         * End conditioned code.
         */

        ret = vmw_user_surface_lookup_handle(dev_priv, tfile,
                                             mode_cmd->handle, &surface);
        if (ret)
                goto try_dmabuf;

        if (!surface->scanout)
                goto err_not_scanout;

        ret = vmw_kms_new_framebuffer_surface(dev_priv, surface, &vfb,
                                              mode_cmd);

        /* vmw_user_surface_lookup takes one ref; so does new_fb */
        vmw_surface_unreference(&surface);

        if (ret) {
                DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
                return ERR_PTR(ret);
        }
        return &vfb->base;

try_dmabuf:
        DRM_INFO("%s: trying buffer\n", __func__);

        ret = vmw_user_dmabuf_lookup(tfile, mode_cmd->handle, &bo);
        if (ret) {
                DRM_ERROR("failed to find buffer: %i\n", ret);
                return ERR_PTR(-ENOENT);
        }

        ret = vmw_kms_new_framebuffer_dmabuf(dev_priv, bo, &vfb,
                                             mode_cmd);

        /* vmw_user_dmabuf_lookup takes one ref; so does new_fb */
        vmw_dmabuf_unreference(&bo);

        if (ret) {
                DRM_ERROR("failed to create vmw_framebuffer: %i\n", ret);
                return ERR_PTR(ret);
        }

        return &vfb->base;

err_not_scanout:
        DRM_ERROR("surface not marked as scanout\n");
        /* vmw_user_surface_lookup takes one ref */
        vmw_surface_unreference(&surface);

        return ERR_PTR(-EINVAL);
}

static struct drm_mode_config_funcs vmw_kms_funcs = {
        .fb_create = vmw_kms_fb_create,
};

int vmw_kms_init(struct vmw_private *dev_priv)
{
        struct drm_device *dev = dev_priv->dev;
        int ret;

        drm_mode_config_init(dev);
        dev->mode_config.funcs = &vmw_kms_funcs;
        dev->mode_config.min_width = 1;
        dev->mode_config.min_height = 1;
        /* assumed largest fb size */
        dev->mode_config.max_width = 8192;
        dev->mode_config.max_height = 8192;

        ret = vmw_kms_init_legacy_display_system(dev_priv);

        return ret;
}

int vmw_kms_close(struct vmw_private *dev_priv)
{
        /*
         * Docs say we should take the lock before calling this function,
         * but since it destroys encoders and our destructor calls
         * drm_encoder_cleanup, which takes the lock, we would deadlock.
         */
        drm_mode_config_cleanup(dev_priv->dev);
        vmw_kms_close_legacy_display_system(dev_priv);

        return 0;
}

int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct drm_vmw_cursor_bypass_arg *arg = data;
        struct vmw_display_unit *du;
        struct drm_mode_object *obj;
        struct drm_crtc *crtc;
        int ret = 0;

        mutex_lock(&dev->mode_config.mutex);
        if (arg->flags & DRM_VMW_CURSOR_BYPASS_ALL) {

                list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
                        du = vmw_crtc_to_du(crtc);
                        du->hotspot_x = arg->xhot;
                        du->hotspot_y = arg->yhot;
                }

                mutex_unlock(&dev->mode_config.mutex);
                return 0;
        }

        obj = drm_mode_object_find(dev, arg->crtc_id, DRM_MODE_OBJECT_CRTC);
        if (!obj) {
                ret = -EINVAL;
                goto out;
        }

        crtc = obj_to_crtc(obj);
        du = vmw_crtc_to_du(crtc);

        du->hotspot_x = arg->xhot;
        du->hotspot_y = arg->yhot;

out:
        mutex_unlock(&dev->mode_config.mutex);

        return ret;
}

void vmw_kms_write_svga(struct vmw_private *vmw_priv,
                        unsigned width, unsigned height, unsigned pitch,
                        unsigned bbp, unsigned depth)
{
        if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
                vmw_write(vmw_priv, SVGA_REG_PITCHLOCK, pitch);
        else if (vmw_fifo_have_pitchlock(vmw_priv))
                iowrite32(pitch, vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
        vmw_write(vmw_priv, SVGA_REG_WIDTH, width);
        vmw_write(vmw_priv, SVGA_REG_HEIGHT, height);
        vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, bbp);
        vmw_write(vmw_priv, SVGA_REG_DEPTH, depth);
        vmw_write(vmw_priv, SVGA_REG_RED_MASK, 0x00ff0000);
        vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, 0x0000ff00);
        vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, 0x000000ff);
}
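
/*
 * Save the current SVGA register state and, if the device supports
 * display topology, the per-display layout, so it can be restored by
 * vmw_kms_restore_vga().
 */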
int vmw_kms_save_vga(struct vmw_private *vmw_priv)
{
        struct vmw_vga_topology_state *save;
        uint32_t i;

        vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
        vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
        vmw_priv->vga_depth = vmw_read(vmw_priv, SVGA_REG_DEPTH);
        vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
        vmw_priv->vga_pseudo = vmw_read(vmw_priv, SVGA_REG_PSEUDOCOLOR);
        vmw_priv->vga_red_mask = vmw_read(vmw_priv, SVGA_REG_RED_MASK);
        vmw_priv->vga_blue_mask = vmw_read(vmw_priv, SVGA_REG_BLUE_MASK);
        vmw_priv->vga_green_mask = vmw_read(vmw_priv, SVGA_REG_GREEN_MASK);
        if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
                vmw_priv->vga_pitchlock =
                        vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
        else if (vmw_fifo_have_pitchlock(vmw_priv))
                vmw_priv->vga_pitchlock = ioread32(vmw_priv->mmio_virt +
                                                   SVGA_FIFO_PITCHLOCK);

        if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
                return 0;

        vmw_priv->num_displays = vmw_read(vmw_priv,
                                          SVGA_REG_NUM_GUEST_DISPLAYS);

        for (i = 0; i < vmw_priv->num_displays; ++i) {
                save = &vmw_priv->vga_save[i];
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
                save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
                save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
                save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
                save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
                save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
                if (i == 0 && vmw_priv->num_displays == 1 &&
                    save->width == 0 && save->height == 0) {

                        /*
                         * It should be fairly safe to assume that these
                         * values are uninitialized.
                         */

                        save->width = vmw_priv->vga_width - save->pos_x;
                        save->height = vmw_priv->vga_height - save->pos_y;
                }
        }

        return 0;
}
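
/*
 * Write the register state and display layout captured by
 * vmw_kms_save_vga() back to the device.
 */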
int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
{
        struct vmw_vga_topology_state *save;
        uint32_t i;

        vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
        vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
        vmw_write(vmw_priv, SVGA_REG_DEPTH, vmw_priv->vga_depth);
        vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
        vmw_write(vmw_priv, SVGA_REG_PSEUDOCOLOR, vmw_priv->vga_pseudo);
        vmw_write(vmw_priv, SVGA_REG_RED_MASK, vmw_priv->vga_red_mask);
        vmw_write(vmw_priv, SVGA_REG_GREEN_MASK, vmw_priv->vga_green_mask);
        vmw_write(vmw_priv, SVGA_REG_BLUE_MASK, vmw_priv->vga_blue_mask);
        if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
                vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
                          vmw_priv->vga_pitchlock);
        else if (vmw_fifo_have_pitchlock(vmw_priv))
                iowrite32(vmw_priv->vga_pitchlock,
                          vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);

        if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
                return 0;

        for (i = 0; i < vmw_priv->num_displays; ++i) {
                save = &vmw_priv->vga_save[i];
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
        }

        return 0;
}

int vmw_kms_update_layout_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_vmw_update_layout_arg *arg =
                (struct drm_vmw_update_layout_arg *)data;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        void __user *user_rects;
        struct drm_vmw_rect *rects;
        unsigned rects_size;
        int ret;

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                return ret;

        if (!arg->num_outputs) {
                struct drm_vmw_rect def_rect = {0, 0, 800, 600};
                vmw_kms_ldu_update_layout(dev_priv, 1, &def_rect);
                goto out_unlock;
        }

        rects_size = arg->num_outputs * sizeof(struct drm_vmw_rect);
        rects = kzalloc(rects_size, GFP_KERNEL);
        if (unlikely(!rects)) {
                ret = -ENOMEM;
                goto out_unlock;
        }

        user_rects = (void __user *)(unsigned long)arg->rects;
        ret = copy_from_user(rects, user_rects, rects_size);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to get rects.\n");
                ret = -EFAULT;
                goto out_free;
        }

        vmw_kms_ldu_update_layout(dev_priv, arg->num_outputs, rects);

out_free:
        kfree(rects);
out_unlock:
        ttm_read_unlock(&vmaster->lock);
        return ret;
}

u32 vmw_get_vblank_counter(struct drm_device *dev, int crtc)
{
        return 0;
}