vmwgfx_resource.c

/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_placement.h"
#include "drmP.h"

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2

struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
};

struct vmw_user_surface {
	struct ttm_base_object base;
	struct vmw_surface srf;
};

struct vmw_user_dma_buffer {
	struct ttm_base_object base;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}
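
/**
 * vmw_resource_release - kref release callback for resources.
 *
 * Called via kref_put() from vmw_resource_unreference() with the
 * resource_lock write-held. The lock is dropped around the hw_destroy()
 * and res_free() callbacks and re-acquired before returning to kref_put().
 */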
static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
	    container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;

	idr_remove(res->idr, res->id);
	write_unlock(&dev_priv->resource_lock);

	if (likely(res->hw_destroy != NULL))
		res->hw_destroy(res);

	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}
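
/**
 * vmw_resource_init - initialize a resource and add it to its idr.
 *
 * The idr_pre_get() / idr_get_new_above() pair may race with other
 * allocators and report -EAGAIN, in which case the allocation is retried.
 * Ids start at 1; the resource stays invisible to vmw_resource_lookup()
 * until vmw_resource_activate() is called.
 */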
static int vmw_resource_init(struct vmw_private *dev_priv,
			     struct vmw_resource *res,
			     struct idr *idr,
			     enum ttm_object_type obj_type,
			     void (*res_free) (struct vmw_resource *res))
{
	int ret;

	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->res_type = obj_type;
	res->idr = idr;
	res->avail = false;
	res->dev_priv = dev_priv;

	do {
		if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
			return -ENOMEM;

		write_lock(&dev_priv->resource_lock);
		ret = idr_get_new_above(idr, res, 1, &res->id);
		write_unlock(&dev_priv->resource_lock);

	} while (ret == -EAGAIN);

	return ret;
}
/**
 * vmw_resource_activate
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activation means that vmw_resource_lookup() will be able to find
 * the resource from now on.
 */
static void vmw_resource_activate(struct vmw_resource *res,
				  void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}
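
/**
 * vmw_resource_lookup - look up and reference an activated resource.
 *
 * Returns a referenced pointer to the resource with id @id in @idr,
 * or NULL if the id is unknown or the resource is not yet activated.
 */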
struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}
/**
 * Context management:
 */

static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
}
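
/**
 * vmw_context_init - initialize a resource as a hardware context.
 *
 * Adds the resource to the context idr and emits an
 * SVGA_3D_CMD_CONTEXT_DEFINE command through the FIFO. On failure the
 * resource is destroyed, using @res_free if given and kfree() otherwise.
 */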
static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;

	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
				VMW_RES_CONTEXT, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(res);
		else
			res_free(res);
		return ret;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;
}
struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (unlikely(res == NULL))
		return NULL;

	ret = vmw_context_init(dev_priv, res, NULL);
	return (ret == 0) ? res : NULL;
}

/**
 * User-space context management:
 */

static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
	    container_of(res, struct vmw_user_context, res);

	kfree(ctx);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
	    container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_context *ctx;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_context_free) {
		ret = -EINVAL;
		goto out;
	}

	ctx = container_of(res, struct vmw_user_context, res);
	if (ctx->base.tfile != tfile && !ctx->base.shareable) {
		ret = -EPERM;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(ctx == NULL))
		return -ENOMEM;

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}
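
/**
 * vmw_context_check - verify that a context id names a valid context that
 * the caller may use. Intended for the command submission path; no
 * reference is taken on the context.
 */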
int vmw_context_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      int id)
{
	struct vmw_resource *res;
	int ret = 0;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(&dev_priv->context_idr, id);
	if (res && res->avail) {
		struct vmw_user_context *ctx =
		    container_of(res, struct vmw_user_context, res);
		if (ctx->base.tfile != tfile && !ctx->base.shareable)
			ret = -EPERM;
	} else
		ret = -EINVAL;
	read_unlock(&dev_priv->resource_lock);

	return ret;
}

/**
 * Surface management.
 */

static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroySurface body;
	} *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.sid = cpu_to_le32(res->id);
	vmw_fifo_commit(dev_priv, sizeof(*cmd));
}

void vmw_surface_res_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(srf);
}
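
/**
 * vmw_surface_init - initialize a resource as a surface.
 *
 * Emits an SVGA_3D_CMD_SURFACE_DEFINE command consisting of a header,
 * a body carrying the surface id, flags, format and per-face mip level
 * counts, followed by one SVGA3dSize per mip level of each face.
 */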
int vmw_surface_init(struct vmw_private *dev_priv,
		     struct vmw_surface *srf,
		     void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineSurface body;
	} *cmd;
	SVGA3dSize *cmd_size;
	struct vmw_resource *res = &srf->res;
	struct drm_vmw_size *src_size;
	size_t submit_size;
	uint32_t cmd_len;
	int i;

	BUG_ON(res_free == NULL);
	ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
				VMW_RES_SURFACE, res_free);

	if (unlikely(ret != 0)) {
		res_free(res);
		return ret;
	}

	submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed for create surface.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
	cmd->header.size = cpu_to_le32(cmd_len);
	cmd->body.sid = cpu_to_le32(res->id);
	cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
	cmd->body.format = cpu_to_le32(srf->format);
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		cmd->body.face[i].numMipLevels =
		    cpu_to_le32(srf->mip_levels[i]);
	}

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = cpu_to_le32(src_size->width);
		cmd_size->height = cpu_to_le32(src_size->height);
		cmd_size->depth = cpu_to_le32(src_size->depth);
	}

	vmw_fifo_commit(dev_priv, submit_size);
	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return 0;
}

static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
	struct vmw_user_surface *user_srf =
	    container_of(srf, struct vmw_user_surface, srf);

	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(user_srf);
}
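
/**
 * vmw_user_surface_lookup_handle - resolve a user-space surface handle to
 * a referenced vmw_surface. The base object type and the res_free callback
 * are both checked so that a handle of the wrong type cannot be used to
 * reach another kind of resource.
 */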
int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
				   struct ttm_object_file *tfile,
				   uint32_t handle, struct vmw_surface **out)
{
	struct vmw_resource *res;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;
	res = &srf->res;

	read_lock(&dev_priv->resource_lock);

	if (!res->avail || res->res_free != &vmw_user_surface_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*out = srf;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
	    container_of(base, struct vmw_user_surface, base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf =
	    kmalloc(sizeof(*user_srf), GFP_KERNEL);
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i;

	if (unlikely(user_srf == NULL))
		return -ENOMEM;

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		srf->num_sizes += srf->mip_levels[i];

	if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS) {
		ret = -EINVAL;
		goto out_err0;
	}

	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_err0;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    req->size_addr;

	/*
	 * copy_from_user() returns the number of bytes left to copy,
	 * not an errno, so translate a partial copy to -EFAULT.
	 */
	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		goto out_err1;
	}

	user_srf->base.shareable = false;
	user_srf->base.tfile = NULL;

	/**
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_base_object_init(tfile, &user_srf->base,
				   req->shareable, VMW_RES_SURFACE,
				   &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		return ret;
	}
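
	/*
	 * A single-mip 64x64 SVGA3D_A8R8G8B8 surface with flag bit 9 set
	 * appears to be how user-space marks a cursor surface; allocate a
	 * snooper image for it so that cursor updates can be tracked.
	 */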
	if (srf->flags & (1 << 9) &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
		/* clear the image */
		if (srf->snooper.image)
			memset(srf->snooper.image, 0x00, 64 * 64 * 4);
		else
			DRM_ERROR("Failed to allocate cursor_image\n");

	} else {
		srf->snooper.image = NULL;
	}
	srf->snooper.crtc = NULL;

	rep->sid = user_srf->base.hash.key;
	if (rep->sid == SVGA3D_INVALID_ID)
		DRM_ERROR("Created bad Surface ID.\n");

	vmw_resource_unreference(&res);
	return 0;
out_err1:
	kfree(srf->sizes);
out_err0:
	kfree(user_srf);
	return ret;
}
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, req->sid);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		return -EINVAL;
	}

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;

	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a surface.\n");
		goto out_no_reference;
	}

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, srf->sizes,
				   srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		/* copy_to_user() returns bytes not copied; map to -EFAULT. */
		ret = -EFAULT;
	}
out_bad_resource:
out_no_reference:
	ttm_base_object_unref(&base);

	return ret;
}
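
/**
 * vmw_surface_check - translate a surface handle to a surface id for
 * command stream validation. Returns -EINVAL for an unknown handle and
 * -EPERM if the handle does not name a surface.
 */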
int vmw_surface_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t handle, int *id)
{
	struct ttm_base_object *base;
	struct vmw_user_surface *user_srf;

	int ret = -EPERM;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_surface;

	user_srf = container_of(base, struct vmw_user_surface, base);
	*id = user_srf->srf.res.id;
	ret = 0;

out_bad_surface:
	/**
	 * FIXME: May deadlock here when called from the
	 * command parsing code.
	 */

	ttm_base_object_unref(&base);
	return ret;
}
/**
 * Buffer management.
 */
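
/**
 * vmw_dmabuf_acc_size - estimate the memory-accounting size of a buffer
 * object: the per-bo TTM overhead plus our own struct, plus the
 * page-aligned size of the page pointer array.
 */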
static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
				  unsigned long num_pages)
{
	static size_t bo_user_size = ~0;

	size_t page_array_size =
	    (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;

	if (unlikely(bo_user_size == ~0)) {
		bo_user_size = glob->ttm_bo_extra_size +
		    ttm_round_pot(sizeof(struct vmw_dma_buffer));
	}

	return bo_user_size + page_array_size;
}
void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;
	struct vmw_private *dev_priv =
	    container_of(bo->bdev, struct vmw_private, bdev);

	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	if (vmw_bo->gmr_bound) {
		vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
		spin_unlock(&glob->lru_lock);
	}
	kfree(vmw_bo);
}
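
/**
 * vmw_dmabuf_init - initialize a vmw_dma_buffer.
 *
 * Accounts the object with the TTM memory global before handing off to
 * ttm_bo_init(). @bo_free is mandatory; the destroy callback is also
 * responsible for releasing the accounting again.
 */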
int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	size_t acc_size;
	int ret;

	BUG_ON(!bo_free);

	acc_size =
	    vmw_dmabuf_acc_size(bdev->glob,
				(size + PAGE_SIZE - 1) >> PAGE_SHIFT);

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0)) {
		/* we must free the bo here as
		 * ttm_buffer_object_init does so as well */
		bo_free(&vmw_bo->base);
		return ret;
	}

	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->gmr_lru);
	INIT_LIST_HEAD(&vmw_bo->validate_list);
	vmw_bo->gmr_id = 0;
	vmw_bo->gmr_bound = false;

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, 0, interruptible,
			  NULL, acc_size, bo_free);
	return ret;
}
static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
	struct vmw_dma_buffer *vmw_bo = &vmw_user_bo->dma;
	struct ttm_bo_global *glob = bo->glob;
	struct vmw_private *dev_priv =
	    container_of(bo->bdev, struct vmw_private, bdev);

	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	if (vmw_bo->gmr_bound) {
		vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
		spin_unlock(&glob->lru_lock);
	}
	kfree(vmw_user_bo);
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
	    (union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_buffer_object *tmp;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
	if (unlikely(vmw_user_bo == NULL))
		return -ENOMEM;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0)) {
		kfree(vmw_user_bo);
		return ret;
	}

	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
			      &vmw_vram_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		/* vmw_dmabuf_init() has already destroyed vmw_user_bo. */
		goto out_no_dmabuf;

	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
				   &vmw_user_bo->base,
				   false,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (likely(ret == 0)) {
		rep->handle = vmw_user_bo->base.hash.key;
		rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
		rep->cur_gmr_id = vmw_user_bo->base.hash.key;
		rep->cur_gmr_offset = 0;
	}

	/* Drop the local reference exactly once on both paths. */
	ttm_bo_unref(&tmp);
out_no_dmabuf:
	ttm_read_unlock(&vmaster->lock);

	return ret;
}
int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
	    (struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}
uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
				  uint32_t cur_validate_node)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	if (likely(vmw_bo->on_validate_list))
		return vmw_bo->cur_validate_node;

	vmw_bo->cur_validate_node = cur_validate_node;
	vmw_bo->on_validate_list = true;

	return cur_validate_node;
}

void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->on_validate_list = false;
}

uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo;

	if (bo->mem.mem_type == TTM_PL_VRAM)
		return SVGA_GMR_FRAMEBUFFER;

	vmw_bo = vmw_dma_buffer(bo);

	return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL;
}

void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->gmr_bound = true;
	vmw_bo->gmr_id = id;
}
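
/**
 * vmw_user_dmabuf_lookup - resolve a user-space buffer handle to a
 * referenced vmw_dma_buffer. The base object reference is dropped again
 * before returning; the caller holds the buffer through the reference
 * taken on the embedded ttm_buffer_object.
 */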
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(base->object_type != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}
/**
 * TODO: Implement a gmr id eviction mechanism. Currently we just fail
 * when we're out of ids, causing GMR space to be allocated
 * out of VRAM.
 */

int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
{
	struct ttm_bo_global *glob = dev_priv->bdev.glob;
	int id;
	int ret;

	do {
		if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0))
			return -ENOMEM;

		spin_lock(&glob->lru_lock);
		ret = ida_get_new(&dev_priv->gmr_ida, &id);
		spin_unlock(&glob->lru_lock);
	} while (ret == -EAGAIN);

	if (unlikely(ret != 0))
		return ret;

	if (unlikely(id >= dev_priv->max_gmr_ids)) {
		spin_lock(&glob->lru_lock);
		ida_remove(&dev_priv->gmr_ida, id);
		spin_unlock(&glob->lru_lock);
		return -EBUSY;
	}

	*p_id = (uint32_t) id;
	return 0;
}
/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}
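
/**
 * vmw_stream_init - initialize a resource as an overlay stream.
 *
 * Claims an overlay unit from the device. On failure the resource is
 * destroyed, using @res_free if given and kfree() otherwise.
 */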
static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
				VMW_RES_STREAM, res_free);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}
/**
 * User-space stream management:
 */

static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
	    container_of(res, struct vmw_user_stream, stream.res);

	kfree(stream);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
	    container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(stream == NULL))
		return -ENOMEM;

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}
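
/**
 * vmw_user_stream_lookup - resolve a user-space stream id.
 *
 * On success, @inout_id is replaced with the underlying overlay stream id
 * and a referenced resource pointer is returned in @out.
 */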
int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;
err_ref:
	vmw_resource_unreference(&res);
	return ret;
}