vmwgfx_resource.c

/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_placement.h"
#include "drmP.h"

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2

/* XXX: This isn't a real hardware flag, but just a hack for the kernel
 * to know about primary surfaces. Find a better way to accomplish this.
 */
#define SVGA3D_SURFACE_HINT_SCANOUT (1 << 9)
struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
};

struct vmw_user_surface {
	struct ttm_base_object base;
	struct vmw_surface srf;
};

struct vmw_user_dma_buffer {
	struct ttm_base_object base;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;

	idr_remove(res->idr, res->id);
	write_unlock(&dev_priv->resource_lock);

	if (likely(res->hw_destroy != NULL))
		res->hw_destroy(res);

	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}
static int vmw_resource_init(struct vmw_private *dev_priv,
			     struct vmw_resource *res,
			     struct idr *idr,
			     enum ttm_object_type obj_type,
			     void (*res_free) (struct vmw_resource *res))
{
	int ret;

	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->res_type = obj_type;
	res->idr = idr;
	res->avail = false;
	res->dev_priv = dev_priv;

	do {
		if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
			return -ENOMEM;

		write_lock(&dev_priv->resource_lock);
		ret = idr_get_new_above(idr, res, 1, &res->id);
		write_unlock(&dev_priv->resource_lock);

	} while (ret == -EAGAIN);

	return ret;
}
/**
 * vmw_resource_activate
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
static void vmw_resource_activate(struct vmw_resource *res,
				  void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}

struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}
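
/*
 * Illustrative sketch (not part of the driver): the lifecycle a caller
 * sees is lookup -> use -> unreference. vmw_example_touch_context() is
 * a hypothetical helper, shown only to document the reference rules:
 * vmw_resource_lookup() returns either NULL or a resource carrying an
 * extra reference, which the caller must drop with
 * vmw_resource_unreference().
 */
#if 0
static int vmw_example_touch_context(struct vmw_private *dev_priv, int cid)
{
	struct vmw_resource *res;

	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, cid);
	if (unlikely(res == NULL))
		return -EINVAL;

	/* ... use the context; the reference keeps it from being freed ... */

	vmw_resource_unreference(&res);	/* also NULLs the local pointer */
	return 0;
}
#endif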
/**
 * Context management:
 */

static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
				VMW_RES_CONTEXT, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(res);
		else
			res_free(res);
		return ret;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);
	vmw_fifo_commit(dev_priv, sizeof(*cmd));

	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;
}
struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (unlikely(res == NULL))
		return NULL;

	ret = vmw_context_init(dev_priv, res, NULL);
	return (ret == 0) ? res : NULL;
}

/**
 * User-space context management:
 */

static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
	    container_of(res, struct vmw_user_context, res);

	kfree(ctx);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
	    container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_context *ctx;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_context_free) {
		ret = -EINVAL;
		goto out;
	}

	ctx = container_of(res, struct vmw_user_context, res);
	if (ctx->base.tfile != tfile && !ctx->base.shareable) {
		ret = -EPERM;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(ctx == NULL))
		return -ENOMEM;

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}
int vmw_context_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      int id)
{
	struct vmw_resource *res;
	int ret = 0;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(&dev_priv->context_idr, id);
	if (res && res->avail) {
		struct vmw_user_context *ctx =
		    container_of(res, struct vmw_user_context, res);
		if (ctx->base.tfile != tfile && !ctx->base.shareable)
			ret = -EPERM;
	} else
		ret = -EINVAL;
	read_unlock(&dev_priv->resource_lock);

	return ret;
}
/**
 * Surface management.
 */

static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroySurface body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.sid = cpu_to_le32(res->id);
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
void vmw_surface_res_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(srf);
}

int vmw_surface_init(struct vmw_private *dev_priv,
		     struct vmw_surface *srf,
		     void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineSurface body;
	} *cmd;
	SVGA3dSize *cmd_size;
	struct vmw_resource *res = &srf->res;
	struct drm_vmw_size *src_size;
	size_t submit_size;
	uint32_t cmd_len;
	int i;

	BUG_ON(res_free == NULL);
	ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
				VMW_RES_SURFACE, res_free);

	if (unlikely(ret != 0)) {
		res_free(res);
		return ret;
	}

	submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed for create surface.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
	cmd->header.size = cpu_to_le32(cmd_len);
	cmd->body.sid = cpu_to_le32(res->id);
	cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
	cmd->body.format = cpu_to_le32(srf->format);
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		cmd->body.face[i].numMipLevels =
		    cpu_to_le32(srf->mip_levels[i]);
	}

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = cpu_to_le32(src_size->width);
		cmd_size->height = cpu_to_le32(src_size->height);
		cmd_size->depth = cpu_to_le32(src_size->depth);
	}

	vmw_fifo_commit(dev_priv, submit_size);
	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return 0;
}
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(user_srf);
}

int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
				   struct ttm_object_file *tfile,
				   uint32_t handle, struct vmw_surface **out)
{
	struct vmw_resource *res;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;
	res = &srf->res;

	read_lock(&dev_priv->resource_lock);

	if (!res->avail || res->res_free != &vmw_user_surface_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*out = srf;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);
	return ret;
}
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf =
	    kmalloc(sizeof(*user_srf), GFP_KERNEL);
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i;

	if (unlikely(user_srf == NULL))
		return -ENOMEM;

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		srf->num_sizes += srf->mip_levels[i];

	if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS) {
		ret = -EINVAL;
		goto out_err0;
	}

	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_err0;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    req->size_addr;

	/* copy_from_user() returns the number of bytes left uncopied,
	 * not an errno, so translate a partial copy to -EFAULT.
	 */
	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		goto out_err1;
	}

	if (srf->flags & SVGA3D_SURFACE_HINT_SCANOUT) {
		/* We should not send this flag down to hardware, since
		 * it's not an official one.
		 */
		srf->flags &= ~SVGA3D_SURFACE_HINT_SCANOUT;
		srf->scanout = true;
	} else {
		srf->scanout = false;
	}

	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
		/* clear the image */
		if (srf->snooper.image) {
			memset(srf->snooper.image, 0x00, 64 * 64 * 4);
		} else {
			DRM_ERROR("Failed to allocate cursor_image\n");
			ret = -ENOMEM;
			goto out_err1;
		}
	} else {
		srf->snooper.image = NULL;
	}
	srf->snooper.crtc = NULL;

	user_srf->base.shareable = false;
	user_srf->base.tfile = NULL;

	/**
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_base_object_init(tfile, &user_srf->base,
				   req->shareable, VMW_RES_SURFACE,
				   &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		return ret;
	}

	rep->sid = user_srf->base.hash.key;
	if (rep->sid == SVGA3D_INVALID_ID)
		DRM_ERROR("Created bad Surface ID.\n");

	vmw_resource_unreference(&res);
	return 0;
out_err1:
	kfree(srf->sizes);
out_err0:
	kfree(user_srf);
	return ret;
}
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, req->sid);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		return -EINVAL;
	}

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;

	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a surface.\n");
		goto out_no_reference;
	}

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, srf->sizes,
				   srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		/* copy_to_user() returns the number of bytes not copied;
		 * report the failure to user space as -EFAULT.
		 */
		ret = -EFAULT;
	}
out_bad_resource:
out_no_reference:
	ttm_base_object_unref(&base);
	return ret;
}
int vmw_surface_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t handle, int *id)
{
	struct ttm_base_object *base;
	struct vmw_user_surface *user_srf;
	int ret = -EPERM;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_surface;

	user_srf = container_of(base, struct vmw_user_surface, base);
	*id = user_srf->srf.res.id;
	ret = 0;

out_bad_surface:
	/**
	 * FIXME: May deadlock here when called from the
	 * command parsing code.
	 */

	ttm_base_object_unref(&base);
	return ret;
}
/**
 * Buffer management.
 */

static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
				  unsigned long num_pages)
{
	static size_t bo_user_size = ~0;

	size_t page_array_size =
	    (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;

	if (unlikely(bo_user_size == ~0)) {
		bo_user_size = glob->ttm_bo_extra_size +
		    ttm_round_pot(sizeof(struct vmw_dma_buffer));
	}

	return bo_user_size + page_array_size;
}

void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;
	struct vmw_private *dev_priv =
	    container_of(bo->bdev, struct vmw_private, bdev);

	if (vmw_bo->gmr_bound) {
		vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
		spin_unlock(&glob->lru_lock);
		vmw_bo->gmr_bound = false;
	}
}

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	vmw_dmabuf_gmr_unbind(bo);
	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_bo);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	size_t acc_size;
	int ret;

	BUG_ON(!bo_free);

	acc_size =
	    vmw_dmabuf_acc_size(bdev->glob,
				(size + PAGE_SIZE - 1) >> PAGE_SHIFT);

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0)) {
		/* We must free the bo here, as ttm_bo_init frees it
		 * on its error paths as well.
		 */
		bo_free(&vmw_bo->base);
		return ret;
	}

	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->gmr_lru);
	INIT_LIST_HEAD(&vmw_bo->validate_list);
	vmw_bo->gmr_id = 0;
	vmw_bo->gmr_bound = false;

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, 0, interruptible,
			  NULL, acc_size, bo_free);
	return ret;
}
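
/*
 * Illustrative sketch (not part of the driver): a kernel-internal user
 * would typically allocate a struct vmw_dma_buffer and hand ownership
 * to vmw_dmabuf_init(), which frees it through the supplied bo_free
 * callback on every failure path. The placement chosen here is just an
 * example.
 */
#if 0
static int vmw_example_alloc_bo(struct vmw_private *dev_priv,
				struct vmw_dma_buffer **out, size_t size)
{
	struct vmw_dma_buffer *vmw_bo;
	int ret;

	vmw_bo = kzalloc(sizeof(*vmw_bo), GFP_KERNEL);
	if (unlikely(vmw_bo == NULL))
		return -ENOMEM;

	/* On error, vmw_dmabuf_bo_free() has already freed vmw_bo. */
	ret = vmw_dmabuf_init(dev_priv, vmw_bo, size,
			      &vmw_vram_sys_placement, true,
			      &vmw_dmabuf_bo_free);
	if (unlikely(ret != 0))
		return ret;

	*out = vmw_bo;
	return 0;
}
#endif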
static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	vmw_dmabuf_gmr_unbind(bo);
	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_user_bo);
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_buffer_object *tmp;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
	if (unlikely(vmw_user_bo == NULL))
		return -ENOMEM;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0)) {
		kfree(vmw_user_bo);
		return ret;
	}

	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0)) {
		/* vmw_user_bo has been freed by the bo_free callback;
		 * don't leak the read lock on the way out.
		 */
		ttm_read_unlock(&vmaster->lock);
		return ret;
	}

	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
				   &vmw_user_bo->base,
				   false,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (likely(ret == 0)) {
		rep->handle = vmw_user_bo->base.hash.key;
		rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
		rep->cur_gmr_id = vmw_user_bo->base.hash.key;
		rep->cur_gmr_offset = 0;
	}

	/* Drop the extra local reference taken above exactly once;
	 * ttm_bo_unref() NULLs the pointer it is given. On failure,
	 * ttm_base_object_init() has already dropped its reference
	 * through vmw_user_dmabuf_release().
	 */
	ttm_bo_unref(&tmp);
	ttm_read_unlock(&vmaster->lock);

	return ret;
}
int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
				  uint32_t cur_validate_node)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	if (likely(vmw_bo->on_validate_list))
		return vmw_bo->cur_validate_node;

	vmw_bo->cur_validate_node = cur_validate_node;
	vmw_bo->on_validate_list = true;

	return cur_validate_node;
}

void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->on_validate_list = false;
}

uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo;

	if (bo->mem.mem_type == TTM_PL_VRAM)
		return SVGA_GMR_FRAMEBUFFER;

	vmw_bo = vmw_dma_buffer(bo);

	return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL;
}

void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->gmr_bound = true;
	vmw_bo->gmr_id = id;
}
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(base->object_type != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}
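
/*
 * Illustrative sketch (not part of the driver): callers of
 * vmw_user_dmabuf_lookup() receive a vmw_dma_buffer carrying an extra
 * ttm_bo reference, which they must drop with ttm_bo_unref() on the
 * embedded buffer object when done.
 */
#if 0
static int vmw_example_use_dmabuf(struct ttm_object_file *tfile,
				  uint32_t handle)
{
	struct vmw_dma_buffer *vmw_bo;
	struct ttm_buffer_object *bo;
	int ret;

	ret = vmw_user_dmabuf_lookup(tfile, handle, &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	/* ... use vmw_bo while the reference is held ... */

	bo = &vmw_bo->base;
	ttm_bo_unref(&bo);
	return 0;
}
#endif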
/**
 * TODO: Implement a gmr id eviction mechanism. Currently we just fail
 * when we're out of ids, causing GMR space to be allocated
 * out of VRAM.
 */

int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
{
	struct ttm_bo_global *glob = dev_priv->bdev.glob;
	int id;
	int ret;

	do {
		if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0))
			return -ENOMEM;

		spin_lock(&glob->lru_lock);
		ret = ida_get_new(&dev_priv->gmr_ida, &id);
		spin_unlock(&glob->lru_lock);
	} while (ret == -EAGAIN);

	if (unlikely(ret != 0))
		return ret;

	if (unlikely(id >= dev_priv->max_gmr_ids)) {
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, id);
		spin_unlock(&glob->lru_lock);
		return -EBUSY;
	}

	*p_id = (uint32_t) id;
	return 0;
}
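
/*
 * Illustrative sketch (not part of the driver): a GMR id obtained from
 * vmw_gmr_id_alloc() is recorded on a buffer object with
 * vmw_dmabuf_set_gmr() once the pages are bound; vmw_dmabuf_gmr_unbind()
 * later releases both the binding and the id. The binding step itself
 * lives elsewhere (vmwgfx_gmr.c) and is elided here.
 */
#if 0
static int vmw_example_assign_gmr(struct vmw_private *dev_priv,
				  struct ttm_buffer_object *bo)
{
	uint32_t id;
	int ret;

	ret = vmw_gmr_id_alloc(dev_priv, &id);
	if (unlikely(ret != 0))
		return ret;	/* e.g. -EBUSY when out of GMR ids */

	/* ... bind the bo's pages to GMR 'id' (see vmwgfx_gmr.c) ... */

	vmw_dmabuf_set_gmr(bo, id);	/* marks the bo gmr_bound */
	return 0;
}
#endif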
/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
				VMW_RES_STREAM, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}
/**
 * User-space stream management:
 */

static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);

	kfree(stream);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr,
				  arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(stream == NULL))
		return -ENOMEM;

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}
int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}
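
/*
 * Illustrative sketch (not part of the driver): vmw_user_stream_lookup()
 * translates in place, replacing the user-visible resource id in
 * *inout_id with the overlay stream_id, and returns a referenced
 * resource that the caller must unreference.
 */
#if 0
static int vmw_example_resolve_stream(struct vmw_private *dev_priv,
				      struct ttm_object_file *tfile,
				      uint32_t user_id)
{
	struct vmw_resource *res;
	uint32_t stream_id = user_id;
	int ret;

	ret = vmw_user_stream_lookup(dev_priv, tfile, &stream_id, &res);
	if (unlikely(ret != 0))
		return ret;

	/* stream_id now holds the overlay stream id, not user_id. */

	vmw_resource_unreference(&res);
	return 0;
}
#endif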