vmwgfx_resource.c

/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_placement.h"
#include "drmP.h"

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2

struct vmw_user_context {
        struct ttm_base_object base;
        struct vmw_resource res;
};

struct vmw_user_surface {
        struct ttm_base_object base;
        struct vmw_surface srf;
};

struct vmw_user_dma_buffer {
        struct ttm_base_object base;
        struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
        uint32_t handle;
        uint64_t map_handle;
};

struct vmw_stream {
        struct vmw_resource res;
        uint32_t stream_id;
};

struct vmw_user_stream {
        struct ttm_base_object base;
        struct vmw_stream stream;
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
        return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
        kref_get(&res->kref);
        return res;
}

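/**
 * vmw_resource_release - kref release callback for resources.
 *
 * @kref: Embedded kref of the resource to destroy.
 *
 * Called via kref_put() with the device resource_lock held for writing
 * (see vmw_resource_unreference()). The lock is dropped around the
 * hw_destroy / res_free callbacks, which may sleep, and is re-acquired
 * before returning so that the caller can release it.
 */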
static void vmw_resource_release(struct kref *kref)
{
        struct vmw_resource *res =
            container_of(kref, struct vmw_resource, kref);
        struct vmw_private *dev_priv = res->dev_priv;

        idr_remove(res->idr, res->id);
        write_unlock(&dev_priv->resource_lock);

        if (likely(res->hw_destroy != NULL))
                res->hw_destroy(res);

        if (res->res_free != NULL)
                res->res_free(res);
        else
                kfree(res);

        write_lock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
        struct vmw_resource *res = *p_res;
        struct vmw_private *dev_priv = res->dev_priv;

        *p_res = NULL;
        write_lock(&dev_priv->resource_lock);
        kref_put(&res->kref, vmw_resource_release);
        write_unlock(&dev_priv->resource_lock);
}

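/**
 * vmw_resource_init - initialize a resource and assign it a device-unique id.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: The resource to initialize.
 * @idr: The idr (context, surface or stream) to allocate the id from.
 * @obj_type: The ttm object type of the resource.
 * @res_free: Destructor, or NULL to use kfree().
 *
 * idr_pre_get() preallocates outside the lock, and the allocation is
 * retried for as long as idr_get_new_above() returns -EAGAIN; ids start
 * at 1 so that 0 is never handed out. The resource is not yet visible
 * to lookups; that requires vmw_resource_activate().
 */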
static int vmw_resource_init(struct vmw_private *dev_priv,
                             struct vmw_resource *res,
                             struct idr *idr,
                             enum ttm_object_type obj_type,
                             void (*res_free) (struct vmw_resource *res))
{
        int ret;

        kref_init(&res->kref);
        res->hw_destroy = NULL;
        res->res_free = res_free;
        res->res_type = obj_type;
        res->idr = idr;
        res->avail = false;
        res->dev_priv = dev_priv;

        do {
                if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
                        return -ENOMEM;

                write_lock(&dev_priv->resource_lock);
                ret = idr_get_new_above(idr, res, 1, &res->id);
                write_unlock(&dev_priv->resource_lock);

        } while (ret == -EAGAIN);

        return ret;
}

/**
 * vmw_resource_activate
 *
 * @res:        Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
static void vmw_resource_activate(struct vmw_resource *res,
                                  void (*hw_destroy) (struct vmw_resource *))
{
        struct vmw_private *dev_priv = res->dev_priv;

        write_lock(&dev_priv->resource_lock);
        res->avail = true;
        res->hw_destroy = hw_destroy;
        write_unlock(&dev_priv->resource_lock);
}

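/**
 * vmw_resource_lookup - look up and reference an activated resource by id.
 *
 * @dev_priv: Pointer to the device private structure.
 * @idr: The idr to search in.
 * @id: The resource id.
 *
 * Returns a referenced pointer to the resource, or NULL if the id is
 * unknown or the resource has not been activated yet. The kref is taken
 * under the read lock, so the resource cannot be released concurrently,
 * since release happens under the write lock.
 */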
struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
                                         struct idr *idr, int id)
{
        struct vmw_resource *res;

        read_lock(&dev_priv->resource_lock);
        res = idr_find(idr, id);
        if (res && res->avail)
                kref_get(&res->kref);
        else
                res = NULL;
        read_unlock(&dev_priv->resource_lock);

        if (unlikely(res == NULL))
                return NULL;

        return res;
}

/**
 * Context management:
 */

static void vmw_hw_context_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyContext body;
        } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "destruction.\n");
                return;
        }

        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
        cmd->header.size = cpu_to_le32(sizeof(cmd->body));
        cmd->body.cid = cpu_to_le32(res->id);

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
}

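/**
 * vmw_context_init - initialize a context resource and define it on the host.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: The context resource, embedded in the caller's structure.
 * @res_free: Destructor, or NULL to use kfree().
 *
 * If vmw_resource_init() fails, the resource was never added to the idr,
 * so it is freed directly here. Once the SVGA_3D_CMD_CONTEXT_DEFINE
 * command has been committed, the context is activated and from then on
 * destroyed through vmw_hw_context_destroy().
 */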
static int vmw_context_init(struct vmw_private *dev_priv,
                            struct vmw_resource *res,
                            void (*res_free) (struct vmw_resource *res))
{
        int ret;

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineContext body;
        } *cmd;

        ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
                                VMW_RES_CONTEXT, res_free);

        if (unlikely(ret != 0)) {
                if (res_free == NULL)
                        kfree(res);
                else
                        res_free(res);
                return ret;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed.\n");
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
        cmd->header.size = cpu_to_le32(sizeof(cmd->body));
        cmd->body.cid = cpu_to_le32(res->id);

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_resource_activate(res, vmw_hw_context_destroy);
        return 0;
}

struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
        struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
        int ret;

        if (unlikely(res == NULL))
                return NULL;

        ret = vmw_context_init(dev_priv, res, NULL);
        return (ret == 0) ? res : NULL;
}

/**
 * User-space context management:
 */

static void vmw_user_context_free(struct vmw_resource *res)
{
        struct vmw_user_context *ctx =
            container_of(res, struct vmw_user_context, res);

        kfree(ctx);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */
static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_context *ctx =
            container_of(base, struct vmw_user_context, base);
        struct vmw_resource *res = &ctx->res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_resource *res;
        struct vmw_user_context *ctx;
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret = 0;

        res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_context_free) {
                ret = -EINVAL;
                goto out;
        }

        ctx = container_of(res, struct vmw_user_context, res);
        if (ctx->base.tfile != tfile && !ctx->base.shareable) {
                ret = -EPERM;
                goto out;
        }

        ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
out:
        vmw_resource_unreference(&res);
        return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;

        if (unlikely(ctx == NULL))
                return -ENOMEM;

        res = &ctx->res;
        ctx->base.shareable = false;
        ctx->base.tfile = NULL;

        ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
        if (unlikely(ret != 0))
                return ret;

        tmp = vmw_resource_reference(&ctx->res);
        ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
                                   &vmw_user_context_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        arg->cid = res->id;
out_err:
        vmw_resource_unreference(&res);
        return ret;
}

int vmw_context_check(struct vmw_private *dev_priv,
                      struct ttm_object_file *tfile,
                      int id)
{
        struct vmw_resource *res;
        int ret = 0;

        read_lock(&dev_priv->resource_lock);
        res = idr_find(&dev_priv->context_idr, id);
        if (res && res->avail) {
                struct vmw_user_context *ctx =
                    container_of(res, struct vmw_user_context, res);
                if (ctx->base.tfile != tfile && !ctx->base.shareable)
                        ret = -EPERM;
        } else
                ret = -EINVAL;
        read_unlock(&dev_priv->resource_lock);

        return ret;
}

/**
 * Surface management.
 */

static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroySurface body;
        } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for surface "
                          "destruction.\n");
                return;
        }

        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
        cmd->header.size = cpu_to_le32(sizeof(cmd->body));
        cmd->body.sid = cpu_to_le32(res->id);

        vmw_fifo_commit(dev_priv, sizeof(*cmd));
}

void vmw_surface_res_free(struct vmw_resource *res)
{
        struct vmw_surface *srf = container_of(res, struct vmw_surface, res);

        kfree(srf->sizes);
        kfree(srf->snooper.image);
        kfree(srf);
}

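/**
 * vmw_surface_init - initialize a surface resource and define it on the host.
 *
 * @dev_priv: Pointer to the device private structure.
 * @srf: The surface, with flags, format, mip_levels, sizes and num_sizes
 * already filled in by the caller.
 * @res_free: Destructor. Must not be NULL here.
 *
 * The SVGA_3D_CMD_SURFACE_DEFINE command is variable-length: a fixed
 * header and body followed by one SVGA3dSize entry per mip level of each
 * face, which is why submit_size and cmd_len below are computed from
 * srf->num_sizes rather than from sizeof(*cmd) alone.
 */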
int vmw_surface_init(struct vmw_private *dev_priv,
                     struct vmw_surface *srf,
                     void (*res_free) (struct vmw_resource *res))
{
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineSurface body;
        } *cmd;
        SVGA3dSize *cmd_size;
        struct vmw_resource *res = &srf->res;
        struct drm_vmw_size *src_size;
        size_t submit_size;
        uint32_t cmd_len;
        int i;

        BUG_ON(res_free == NULL);
        ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
                                VMW_RES_SURFACE, res_free);

        if (unlikely(ret != 0)) {
                res_free(res);
                return ret;
        }

        submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
        cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

        cmd = vmw_fifo_reserve(dev_priv, submit_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed for create surface.\n");
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
        cmd->header.size = cpu_to_le32(cmd_len);
        cmd->body.sid = cpu_to_le32(res->id);
        cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
        cmd->body.format = cpu_to_le32(srf->format);
        for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
                cmd->body.face[i].numMipLevels =
                    cpu_to_le32(srf->mip_levels[i]);
        }

        cmd += 1;
        cmd_size = (SVGA3dSize *) cmd;
        src_size = srf->sizes;

        for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
                cmd_size->width = cpu_to_le32(src_size->width);
                cmd_size->height = cpu_to_le32(src_size->height);
                cmd_size->depth = cpu_to_le32(src_size->depth);
        }

        vmw_fifo_commit(dev_priv, submit_size);
        vmw_resource_activate(res, vmw_hw_surface_destroy);
        return 0;
}

static void vmw_user_surface_free(struct vmw_resource *res)
{
        struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
        struct vmw_user_surface *user_srf =
            container_of(srf, struct vmw_user_surface, srf);

        kfree(srf->sizes);
        kfree(srf->snooper.image);
        kfree(user_srf);
}

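/**
 * vmw_user_surface_lookup_handle - convert a user-space handle to a surface.
 *
 * @dev_priv: Pointer to the device private structure.
 * @tfile: The calling client's ttm object file.
 * @handle: The user-space surface handle.
 * @out: On success, a pointer to the kref-referenced surface.
 *
 * Verifies both the base-object type and, under the resource lock, that
 * the resource is an activated user surface before taking a reference on
 * it. The temporary base-object reference is dropped before returning.
 */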
int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
                                   struct ttm_object_file *tfile,
                                   uint32_t handle, struct vmw_surface **out)
{
        struct vmw_resource *res;
        struct vmw_surface *srf;
        struct vmw_user_surface *user_srf;
        struct ttm_base_object *base;
        int ret = -EINVAL;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL))
                return -EINVAL;

        if (unlikely(base->object_type != VMW_RES_SURFACE))
                goto out_bad_resource;

        user_srf = container_of(base, struct vmw_user_surface, base);
        srf = &user_srf->srf;
        res = &srf->res;

        read_lock(&dev_priv->resource_lock);

        if (!res->avail || res->res_free != &vmw_user_surface_free) {
                read_unlock(&dev_priv->resource_lock);
                goto out_bad_resource;
        }

        kref_get(&res->kref);
        read_unlock(&dev_priv->resource_lock);

        *out = srf;
        ret = 0;

out_bad_resource:
        ttm_base_object_unref(&base);

        return ret;
}

static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_surface *user_srf =
            container_of(base, struct vmw_user_surface, base);
        struct vmw_resource *res = &user_srf->srf.res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

        return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_surface *user_srf =
            kmalloc(sizeof(*user_srf), GFP_KERNEL);
        struct vmw_surface *srf;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        union drm_vmw_surface_create_arg *arg =
            (union drm_vmw_surface_create_arg *)data;
        struct drm_vmw_surface_create_req *req = &arg->req;
        struct drm_vmw_surface_arg *rep = &arg->rep;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct drm_vmw_size __user *user_sizes;
        int ret;
        int i;

        if (unlikely(user_srf == NULL))
                return -ENOMEM;

        srf = &user_srf->srf;
        res = &srf->res;

        srf->flags = req->flags;
        srf->format = req->format;
        srf->scanout = req->scanout;
        memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
        srf->num_sizes = 0;
        for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
                srf->num_sizes += srf->mip_levels[i];

        if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
            DRM_VMW_MAX_MIP_LEVELS) {
                ret = -EINVAL;
                goto out_err0;
        }

        srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
        if (unlikely(srf->sizes == NULL)) {
                ret = -ENOMEM;
                goto out_err0;
        }

        user_sizes = (struct drm_vmw_size __user *)(unsigned long)
            req->size_addr;

        ret = copy_from_user(srf->sizes, user_sizes,
                             srf->num_sizes * sizeof(*srf->sizes));
        if (unlikely(ret != 0)) {
                ret = -EFAULT;
                goto out_err1;
        }

        if (srf->scanout &&
            srf->num_sizes == 1 &&
            srf->sizes[0].width == 64 &&
            srf->sizes[0].height == 64 &&
            srf->format == SVGA3D_A8R8G8B8) {

                srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
                /* clear the image */
                if (srf->snooper.image) {
                        memset(srf->snooper.image, 0x00, 64 * 64 * 4);
                } else {
                        DRM_ERROR("Failed to allocate cursor_image\n");
                        ret = -ENOMEM;
                        goto out_err1;
                }
        } else {
                srf->snooper.image = NULL;
        }
        srf->snooper.crtc = NULL;

        user_srf->base.shareable = false;
        user_srf->base.tfile = NULL;

        /**
         * From this point, the generic resource management functions
         * destroy the object on failure.
         */

        ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
        if (unlikely(ret != 0))
                return ret;

        tmp = vmw_resource_reference(&srf->res);
        ret = ttm_base_object_init(tfile, &user_srf->base,
                                   req->shareable, VMW_RES_SURFACE,
                                   &vmw_user_surface_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                vmw_resource_unreference(&res);
                return ret;
        }

        rep->sid = user_srf->base.hash.key;
        if (rep->sid == SVGA3D_INVALID_ID)
                DRM_ERROR("Created bad Surface ID.\n");

        vmw_resource_unreference(&res);
        return 0;
out_err1:
        kfree(srf->sizes);
out_err0:
        kfree(user_srf);
        return ret;
}

int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        union drm_vmw_surface_reference_arg *arg =
            (union drm_vmw_surface_reference_arg *)data;
        struct drm_vmw_surface_arg *req = &arg->req;
        struct drm_vmw_surface_create_req *rep = &arg->rep;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_surface *srf;
        struct vmw_user_surface *user_srf;
        struct drm_vmw_size __user *user_sizes;
        struct ttm_base_object *base;
        int ret = -EINVAL;

        base = ttm_base_object_lookup(tfile, req->sid);
        if (unlikely(base == NULL)) {
                DRM_ERROR("Could not find surface to reference.\n");
                return -EINVAL;
        }

        if (unlikely(base->object_type != VMW_RES_SURFACE))
                goto out_bad_resource;

        user_srf = container_of(base, struct vmw_user_surface, base);
        srf = &user_srf->srf;

        ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not add a reference to a surface.\n");
                goto out_no_reference;
        }

        rep->flags = srf->flags;
        rep->format = srf->format;
        memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
        user_sizes = (struct drm_vmw_size __user *)(unsigned long)
            rep->size_addr;

        if (user_sizes)
                ret = copy_to_user(user_sizes, srf->sizes,
                                   srf->num_sizes * sizeof(*srf->sizes));
        if (unlikely(ret != 0)) {
                DRM_ERROR("copy_to_user failed %p %u\n",
                          user_sizes, srf->num_sizes);
                ret = -EFAULT;
        }
out_bad_resource:
out_no_reference:
        ttm_base_object_unref(&base);

        return ret;
}

int vmw_surface_check(struct vmw_private *dev_priv,
                      struct ttm_object_file *tfile,
                      uint32_t handle, int *id)
{
        struct ttm_base_object *base;
        struct vmw_user_surface *user_srf;

        int ret = -EPERM;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL))
                return -EINVAL;

        if (unlikely(base->object_type != VMW_RES_SURFACE))
                goto out_bad_surface;

        user_srf = container_of(base, struct vmw_user_surface, base);
        *id = user_srf->srf.res.id;
        ret = 0;

out_bad_surface:
        /**
         * FIXME: May deadlock here when called from the
         * command parsing code.
         */

        ttm_base_object_unref(&base);
        return ret;
}

/**
 * Buffer management.
 */

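/**
 * vmw_dmabuf_acc_size - compute the TTM accounting size of a DMA buffer.
 *
 * @glob: The ttm bo global state.
 * @num_pages: Buffer size in pages.
 *
 * The fixed per-object overhead is computed once and cached in a static
 * variable, and the page-array size is rounded up to a whole page. This
 * is an accounting figure for ttm_mem_global_alloc(), not an exact
 * allocation size.
 */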
static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
                                  unsigned long num_pages)
{
        static size_t bo_user_size = ~0;

        size_t page_array_size =
            (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;

        if (unlikely(bo_user_size == ~0)) {
                bo_user_size = glob->ttm_bo_extra_size +
                    ttm_round_pot(sizeof(struct vmw_dma_buffer));
        }

        return bo_user_size + page_array_size;
}

void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
        struct ttm_bo_global *glob = bo->glob;
        struct vmw_private *dev_priv =
            container_of(bo->bdev, struct vmw_private, bdev);

        if (vmw_bo->gmr_bound) {
                vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
                spin_lock(&glob->lru_lock);
                ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
                spin_unlock(&glob->lru_lock);
                vmw_bo->gmr_bound = false;
        }
}

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
        struct ttm_bo_global *glob = bo->glob;

        vmw_dmabuf_gmr_unbind(bo);
        ttm_mem_global_free(glob->mem_glob, bo->acc_size);
        kfree(vmw_bo);
}

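/**
 * vmw_dmabuf_init - initialize a vmw_dma_buffer around its embedded ttm bo.
 *
 * @dev_priv: Pointer to the device private structure.
 * @vmw_bo: The buffer to initialize. It is zeroed here, so callers need
 * not preinitialize it.
 * @size: Buffer size in bytes.
 * @placement: Initial ttm placement.
 * @interruptible: Whether waits inside ttm_bo_init() are interruptible.
 * @bo_free: TTM destroy callback; must free @vmw_bo and must not be NULL.
 *
 * The accounting memory is charged up front because ttm_bo_init() frees
 * the object through @bo_free on failure; the same is done here if the
 * global memory accounting itself fails.
 */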
int vmw_dmabuf_init(struct vmw_private *dev_priv,
                    struct vmw_dma_buffer *vmw_bo,
                    size_t size, struct ttm_placement *placement,
                    bool interruptible,
                    void (*bo_free) (struct ttm_buffer_object *bo))
{
        struct ttm_bo_device *bdev = &dev_priv->bdev;
        struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
        size_t acc_size;
        int ret;

        BUG_ON(!bo_free);

        acc_size =
            vmw_dmabuf_acc_size(bdev->glob,
                                (size + PAGE_SIZE - 1) >> PAGE_SHIFT);

        ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
        if (unlikely(ret != 0)) {
                /* we must free the bo here as
                 * ttm_bo_init does so as well */
                bo_free(&vmw_bo->base);
                return ret;
        }

        memset(vmw_bo, 0, sizeof(*vmw_bo));

        INIT_LIST_HEAD(&vmw_bo->gmr_lru);
        INIT_LIST_HEAD(&vmw_bo->validate_list);
        vmw_bo->gmr_id = 0;
        vmw_bo->gmr_bound = false;

        ret = ttm_bo_init(bdev, &vmw_bo->base, size,
                          ttm_bo_type_device, placement,
                          0, 0, interruptible,
                          NULL, acc_size, bo_free);
        return ret;
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
        struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
        struct ttm_bo_global *glob = bo->glob;

        vmw_dmabuf_gmr_unbind(bo);
        ttm_mem_global_free(glob->mem_glob, bo->acc_size);
        kfree(vmw_user_bo);
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base = *p_base;
        struct ttm_buffer_object *bo;

        *p_base = NULL;

        if (unlikely(base == NULL))
                return;

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
        bo = &vmw_user_bo->dma.base;
        ttm_bo_unref(&bo);
}

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_alloc_dmabuf_arg *arg =
            (union drm_vmw_alloc_dmabuf_arg *)data;
        struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
        struct drm_vmw_dmabuf_rep *rep = &arg->rep;
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_buffer_object *tmp;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
        if (unlikely(vmw_user_bo == NULL))
                return -ENOMEM;

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0)) {
                kfree(vmw_user_bo);
                return ret;
        }

        ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
                              &vmw_vram_sys_placement, true,
                              &vmw_user_dmabuf_destroy);
        if (unlikely(ret != 0))
                goto out_no_dmabuf;

        tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
        ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
                                   &vmw_user_bo->base,
                                   false,
                                   ttm_buffer_type,
                                   &vmw_user_dmabuf_release, NULL);
        if (unlikely(ret != 0))
                goto out_no_base_object;

        rep->handle = vmw_user_bo->base.hash.key;
        rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
        rep->cur_gmr_id = vmw_user_bo->base.hash.key;
        rep->cur_gmr_offset = 0;

out_no_base_object:
        /* Drop the local reference only once; the base object, if
         * created, holds the remaining one. */
        ttm_bo_unref(&tmp);
out_no_dmabuf:
        /* The read lock must be released on all return paths. */
        ttm_read_unlock(&vmaster->lock);

        return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_vmw_unref_dmabuf_arg *arg =
            (struct drm_vmw_unref_dmabuf_arg *)data;

        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         arg->handle,
                                         TTM_REF_USAGE);
}

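/**
 * vmw_dmabuf_validate_node - assign a validation-list slot to a buffer.
 *
 * @bo: The buffer object.
 * @cur_validate_node: The slot to use if the buffer is not yet on the list.
 *
 * Used to deduplicate buffers during command submission: the first time
 * a buffer is seen it records @cur_validate_node, and subsequent calls
 * return the recorded slot. vmw_dmabuf_validate_clear() resets the state
 * once submission is done.
 */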
uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
                                  uint32_t cur_validate_node)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        if (likely(vmw_bo->on_validate_list))
                return vmw_bo->cur_validate_node;

        vmw_bo->cur_validate_node = cur_validate_node;
        vmw_bo->on_validate_list = true;

        return cur_validate_node;
}

void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        vmw_bo->on_validate_list = false;
}

uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo;

        if (bo->mem.mem_type == TTM_PL_VRAM)
                return SVGA_GMR_FRAMEBUFFER;

        vmw_bo = vmw_dma_buffer(bo);

        return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL;
}

void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        vmw_bo->gmr_bound = true;
        vmw_bo->gmr_id = id;
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
                           uint32_t handle, struct vmw_dma_buffer **out)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL)) {
                printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return -ESRCH;
        }

        if (unlikely(base->object_type != ttm_buffer_type)) {
                ttm_base_object_unref(&base);
                printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return -EINVAL;
        }

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
        (void)ttm_bo_reference(&vmw_user_bo->dma.base);
        ttm_base_object_unref(&base);
        *out = &vmw_user_bo->dma;

        return 0;
}

/**
 * TODO: Implement a gmr id eviction mechanism. Currently we just fail
 * when we're out of ids, causing GMR space to be allocated
 * out of VRAM.
 */
int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
{
        struct ttm_bo_global *glob = dev_priv->bdev.glob;
        int id;
        int ret;

        do {
                if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0))
                        return -ENOMEM;

                spin_lock(&glob->lru_lock);
                ret = ida_get_new(&dev_priv->gmr_ida, &id);
                spin_unlock(&glob->lru_lock);
        } while (ret == -EAGAIN);

        if (unlikely(ret != 0))
                return ret;

        if (unlikely(id >= dev_priv->max_gmr_ids)) {
                spin_lock(&glob->lru_lock);
                ida_remove(&dev_priv->gmr_ida, id);
                spin_unlock(&glob->lru_lock);
                return -EBUSY;
        }

        *p_id = (uint32_t) id;
        return 0;
}

/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_stream *stream;
        int ret;

        DRM_INFO("%s: unref\n", __func__);
        stream = container_of(res, struct vmw_stream, res);

        ret = vmw_overlay_unref(dev_priv, stream->stream_id);
        WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
                           struct vmw_stream *stream,
                           void (*res_free) (struct vmw_resource *res))
{
        struct vmw_resource *res = &stream->res;
        int ret;

        ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
                                VMW_RES_STREAM, res_free);

        if (unlikely(ret != 0)) {
                if (res_free == NULL)
                        kfree(stream);
                else
                        res_free(&stream->res);
                return ret;
        }

        ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
        if (ret) {
                vmw_resource_unreference(&res);
                return ret;
        }

        DRM_INFO("%s: claimed\n", __func__);

        vmw_resource_activate(&stream->res, vmw_stream_destroy);
        return 0;
}

/**
 * User-space stream management:
 */

static void vmw_user_stream_free(struct vmw_resource *res)
{
        struct vmw_user_stream *stream =
            container_of(res, struct vmw_user_stream, stream.res);

        kfree(stream);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */
static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_stream *stream =
            container_of(base, struct vmw_user_stream, base);
        struct vmw_resource *res = &stream->stream.res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_resource *res;
        struct vmw_user_stream *stream;
        struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret = 0;

        res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr,
                                  arg->stream_id);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_stream_free) {
                ret = -EINVAL;
                goto out;
        }

        stream = container_of(res, struct vmw_user_stream, stream.res);
        if (stream->base.tfile != tfile) {
                ret = -EINVAL;
                goto out;
        }

        ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
        vmw_resource_unreference(&res);
        return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;

        if (unlikely(stream == NULL))
                return -ENOMEM;

        res = &stream->stream.res;
        stream->base.shareable = false;
        stream->base.tfile = NULL;

        ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
        if (unlikely(ret != 0))
                return ret;

        tmp = vmw_resource_reference(res);
        ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
                                   &vmw_user_stream_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        arg->stream_id = res->id;
out_err:
        vmw_resource_unreference(&res);
        return ret;
}

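/**
 * vmw_user_stream_lookup - look up a stream owned by the calling client.
 *
 * @dev_priv: Pointer to the device private structure.
 * @tfile: The calling client's ttm object file.
 * @inout_id: On input the user-space stream id; on success replaced with
 * the overlay stream_id claimed from the hardware.
 * @out: On success, a referenced pointer to the stream resource, which
 * the caller is responsible for unreferencing.
 */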
int vmw_user_stream_lookup(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t *inout_id, struct vmw_resource **out)
{
        struct vmw_user_stream *stream;
        struct vmw_resource *res;
        int ret;

        res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_stream_free) {
                ret = -EINVAL;
                goto err_ref;
        }

        stream = container_of(res, struct vmw_user_stream, stream.res);
        if (stream->base.tfile != tfile) {
                ret = -EPERM;
                goto err_ref;
        }

        *inout_id = stream->stream.stream_id;
        *out = res;
        return 0;
err_ref:
        vmw_resource_unreference(&res);
        return ret;
}