vmwgfx_resource.c

/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_placement.h"
#include "drmP.h"

struct vmw_user_context {
	struct ttm_base_object base;
	struct vmw_resource res;
};

struct vmw_user_surface {
	struct ttm_base_object base;
	struct vmw_surface srf;
};

struct vmw_user_dma_buffer {
	struct ttm_base_object base;
	struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
	uint32_t handle;
	uint64_t map_handle;
};

struct vmw_stream {
	struct vmw_resource res;
	uint32_t stream_id;
};

struct vmw_user_stream {
	struct ttm_base_object base;
	struct vmw_stream stream;
};

struct vmw_surface_offset {
	uint32_t face;
	uint32_t mip;
	uint32_t bo_offset;
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
	return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
	kref_get(&res->kref);
	return res;
}

/**
 * vmw_resource_release_id - release a resource id to the id manager.
 *
 * @res: Pointer to the resource.
 *
 * Release the resource id to the resource id manager and set it to -1.
 */
static void vmw_resource_release_id(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	if (res->id != -1)
		idr_remove(res->idr, res->id);
	res->id = -1;
	write_unlock(&dev_priv->resource_lock);
}

static void vmw_resource_release(struct kref *kref)
{
	struct vmw_resource *res =
		container_of(kref, struct vmw_resource, kref);
	struct vmw_private *dev_priv = res->dev_priv;
	int id = res->id;
	struct idr *idr = res->idr;

	res->avail = false;
	if (res->remove_from_lists != NULL)
		res->remove_from_lists(res);
	write_unlock(&dev_priv->resource_lock);

	if (likely(res->hw_destroy != NULL))
		res->hw_destroy(res);

	if (res->res_free != NULL)
		res->res_free(res);
	else
		kfree(res);

	write_lock(&dev_priv->resource_lock);

	if (id != -1)
		idr_remove(idr, id);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
	struct vmw_resource *res = *p_res;
	struct vmw_private *dev_priv = res->dev_priv;

	*p_res = NULL;
	write_lock(&dev_priv->resource_lock);
	kref_put(&res->kref, vmw_resource_release);
	write_unlock(&dev_priv->resource_lock);
}

/**
 * vmw_resource_alloc_id - allocate a resource id from the id manager.
 *
 * @dev_priv: Pointer to the device private structure.
 * @res: Pointer to the resource.
 *
 * Allocate the lowest free resource id from the resource id manager, and set
 * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
 */
static int vmw_resource_alloc_id(struct vmw_private *dev_priv,
				 struct vmw_resource *res)
{
	int ret;

	BUG_ON(res->id != -1);

	do {
		if (unlikely(idr_pre_get(res->idr, GFP_KERNEL) == 0))
			return -ENOMEM;

		write_lock(&dev_priv->resource_lock);
		ret = idr_get_new_above(res->idr, res, 1, &res->id);
		write_unlock(&dev_priv->resource_lock);
	} while (ret == -EAGAIN);

	return ret;
}

static int vmw_resource_init(struct vmw_private *dev_priv,
			     struct vmw_resource *res,
			     struct idr *idr,
			     enum ttm_object_type obj_type,
			     bool delay_id,
			     void (*res_free) (struct vmw_resource *res),
			     void (*remove_from_lists)
			     (struct vmw_resource *res))
{
	kref_init(&res->kref);
	res->hw_destroy = NULL;
	res->res_free = res_free;
	res->remove_from_lists = remove_from_lists;
	res->res_type = obj_type;
	res->idr = idr;
	res->avail = false;
	res->dev_priv = dev_priv;
	INIT_LIST_HEAD(&res->query_head);
	INIT_LIST_HEAD(&res->validate_head);
	res->id = -1;
	if (delay_id)
		return 0;
	else
		return vmw_resource_alloc_id(dev_priv, res);
}

/**
 * vmw_resource_activate
 *
 * @res: Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activate basically means that the function vmw_resource_lookup will
 * find it.
 */
static void vmw_resource_activate(struct vmw_resource *res,
				  void (*hw_destroy) (struct vmw_resource *))
{
	struct vmw_private *dev_priv = res->dev_priv;

	write_lock(&dev_priv->resource_lock);
	res->avail = true;
	res->hw_destroy = hw_destroy;
	write_unlock(&dev_priv->resource_lock);
}

struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
					 struct idr *idr, int id)
{
	struct vmw_resource *res;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(idr, id);
	if (res && res->avail)
		kref_get(&res->kref);
	else
		res = NULL;
	read_unlock(&dev_priv->resource_lock);

	if (unlikely(res == NULL))
		return NULL;

	return res;
}
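
/*
 * Illustration only (not part of the driver): vmw_resource_lookup() returns
 * a referenced resource or NULL, so a typical caller pairs it with
 * vmw_resource_unreference() once it is done (the call also clears the
 * caller's pointer). A hypothetical sketch:
 *
 *	struct vmw_resource *res;
 *
 *	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, cid);
 *	if (unlikely(res == NULL))
 *		return -EINVAL;
 *
 *	... use the resource; it cannot be destroyed while referenced ...
 *
 *	vmw_resource_unreference(&res);
 */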

/**
 * Context management:
 */

static void vmw_hw_context_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDestroyContext body;
	} *cmd;

	vmw_execbuf_release_pinned_bo(dev_priv, true, res->id);

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for context "
			  "destruction.\n");
		return;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	vmw_3d_resource_dec(dev_priv, false);
}

static int vmw_context_init(struct vmw_private *dev_priv,
			    struct vmw_resource *res,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct {
		SVGA3dCmdHeader header;
		SVGA3dCmdDefineContext body;
	} *cmd;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
				VMW_RES_CONTEXT, false, res_free, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a resource id.\n");
		goto out_early;
	}

	if (unlikely(res->id >= SVGA3D_MAX_CONTEXT_IDS)) {
		DRM_ERROR("Out of hw context ids.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Fifo reserve failed.\n");
		vmw_resource_unreference(&res);
		return -ENOMEM;
	}

	cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
	cmd->header.size = cpu_to_le32(sizeof(cmd->body));
	cmd->body.cid = cpu_to_le32(res->id);

	vmw_fifo_commit(dev_priv, sizeof(*cmd));
	(void) vmw_3d_resource_inc(dev_priv, false);
	vmw_resource_activate(res, vmw_hw_context_destroy);
	return 0;

out_early:
	if (res_free == NULL)
		kfree(res);
	else
		res_free(res);
	return ret;
}

struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
	struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
	int ret;

	if (unlikely(res == NULL))
		return NULL;

	ret = vmw_context_init(dev_priv, res, NULL);
	return (ret == 0) ? res : NULL;
}

/**
 * User-space context management:
 */

static void vmw_user_context_free(struct vmw_resource *res)
{
	struct vmw_user_context *ctx =
		container_of(res, struct vmw_user_context, res);

	kfree(ctx);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */
static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_context *ctx =
		container_of(base, struct vmw_user_context, base);
	struct vmw_resource *res = &ctx->res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_context *ctx;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_context_free) {
		ret = -EINVAL;
		goto out;
	}

	ctx = container_of(res, struct vmw_user_context, res);
	if (ctx->base.tfile != tfile && !ctx->base.shareable) {
		ret = -EPERM;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(ctx == NULL))
		return -ENOMEM;

	res = &ctx->res;
	ctx->base.shareable = false;
	ctx->base.tfile = NULL;

	ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&ctx->res);
	ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
				   &vmw_user_context_base_release, NULL);
	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->cid = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}
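
/*
 * Reference-count sketch for the define ioctl above (illustration only):
 * vmw_context_init() consumes the resource on failure, so after it succeeds
 * the ioctl holds the single initial reference. vmw_resource_reference()
 * then takes a second reference on behalf of the base object; if
 * ttm_base_object_init() fails that extra reference is dropped again, and
 * the final vmw_resource_unreference(&res) always drops the ioctl's own
 * reference, leaving the base object (if created) as the sole owner.
 */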

int vmw_context_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      int id,
		      struct vmw_resource **p_res)
{
	struct vmw_resource *res;
	int ret = 0;

	read_lock(&dev_priv->resource_lock);
	res = idr_find(&dev_priv->context_idr, id);
	if (res && res->avail) {
		struct vmw_user_context *ctx =
			container_of(res, struct vmw_user_context, res);
		if (ctx->base.tfile != tfile && !ctx->base.shareable)
			ret = -EPERM;
		if (p_res)
			*p_res = vmw_resource_reference(res);
	} else
		ret = -EINVAL;
	read_unlock(&dev_priv->resource_lock);

	return ret;
}

struct vmw_bpp {
	uint8_t bpp;
	uint8_t s_bpp;
};

/*
 * Size table for the supported SVGA3D surface formats. It consists of
 * two values. The bpp value and the s_bpp value which is short for
 * "stride bits per pixel". The values are given in such a way that the
 * minimum stride for the image is calculated using
 *
 * min_stride = w*s_bpp
 *
 * and the total memory requirement for the image is
 *
 * h*min_stride*bpp/s_bpp
 *
 */
static const struct vmw_bpp vmw_sf_bpp[] = {
	[SVGA3D_FORMAT_INVALID] = {0, 0},
	[SVGA3D_X8R8G8B8] = {32, 32},
	[SVGA3D_A8R8G8B8] = {32, 32},
	[SVGA3D_R5G6B5] = {16, 16},
	[SVGA3D_X1R5G5B5] = {16, 16},
	[SVGA3D_A1R5G5B5] = {16, 16},
	[SVGA3D_A4R4G4B4] = {16, 16},
	[SVGA3D_Z_D32] = {32, 32},
	[SVGA3D_Z_D16] = {16, 16},
	[SVGA3D_Z_D24S8] = {32, 32},
	[SVGA3D_Z_D15S1] = {16, 16},
	[SVGA3D_LUMINANCE8] = {8, 8},
	[SVGA3D_LUMINANCE4_ALPHA4] = {8, 8},
	[SVGA3D_LUMINANCE16] = {16, 16},
	[SVGA3D_LUMINANCE8_ALPHA8] = {16, 16},
	[SVGA3D_DXT1] = {4, 16},
	[SVGA3D_DXT2] = {8, 32},
	[SVGA3D_DXT3] = {8, 32},
	[SVGA3D_DXT4] = {8, 32},
	[SVGA3D_DXT5] = {8, 32},
	[SVGA3D_BUMPU8V8] = {16, 16},
	[SVGA3D_BUMPL6V5U5] = {16, 16},
	[SVGA3D_BUMPX8L8V8U8] = {32, 32},
	[SVGA3D_ARGB_S10E5] = {16, 16},
	[SVGA3D_ARGB_S23E8] = {32, 32},
	[SVGA3D_A2R10G10B10] = {32, 32},
	[SVGA3D_V8U8] = {16, 16},
	[SVGA3D_Q8W8V8U8] = {32, 32},
	[SVGA3D_CxV8U8] = {16, 16},
	[SVGA3D_X8L8V8U8] = {32, 32},
	[SVGA3D_A2W10V10U10] = {32, 32},
	[SVGA3D_ALPHA8] = {8, 8},
	[SVGA3D_R_S10E5] = {16, 16},
	[SVGA3D_R_S23E8] = {32, 32},
	[SVGA3D_RG_S10E5] = {16, 16},
	[SVGA3D_RG_S23E8] = {32, 32},
	[SVGA3D_BUFFER] = {8, 8},
	[SVGA3D_Z_D24X8] = {32, 32},
	[SVGA3D_V16U16] = {32, 32},
	[SVGA3D_G16R16] = {32, 32},
	[SVGA3D_A16B16G16R16] = {64, 64},
	[SVGA3D_UYVY] = {12, 12},
	[SVGA3D_YUY2] = {12, 12},
	[SVGA3D_NV12] = {12, 8},
	[SVGA3D_AYUV] = {32, 32},
	[SVGA3D_BC4_UNORM] = {4, 16},
	[SVGA3D_BC5_UNORM] = {8, 32},
	[SVGA3D_Z_DF16] = {16, 16},
	[SVGA3D_Z_DF24] = {24, 24},
	[SVGA3D_Z_D24S8_INT] = {32, 32}
};
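
/*
 * Worked example of the table above (illustration only). The stride and
 * size formulas are evaluated in bits and rounded to bytes, so for a
 * 64x64 SVGA3D_DXT1 mip level (bpp = 4, s_bpp = 16)
 *
 *	min_stride = (64 * 16 + 7) / 8	= 128 bytes
 *	size	   = 64 * 128 * 4 / 16	= 2048 bytes
 *
 * which matches DXT1's 8 bytes per 4x4 block. For a 64x64 SVGA3D_NV12
 * surface (bpp = 12, s_bpp = 8)
 *
 *	min_stride = (64 * 8 + 7) / 8	= 64 bytes
 *	size	   = 64 * 64 * 12 / 8	= 6144 bytes
 *
 * i.e. the 4096-byte luma plane plus the 2048-byte interleaved chroma plane.
 */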

/**
 * Surface management.
 */

struct vmw_surface_dma {
	SVGA3dCmdHeader header;
	SVGA3dCmdSurfaceDMA body;
	SVGA3dCopyBox cb;
	SVGA3dCmdSurfaceDMASuffix suffix;
};

struct vmw_surface_define {
	SVGA3dCmdHeader header;
	SVGA3dCmdDefineSurface body;
};

struct vmw_surface_destroy {
	SVGA3dCmdHeader header;
	SVGA3dCmdDestroySurface body;
};

/**
 * vmw_surface_dma_size - Compute fifo size for a dma command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface dma command for backup or
 * restoration of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
{
	return srf->num_sizes * sizeof(struct vmw_surface_dma);
}

/**
 * vmw_surface_define_size - Compute fifo size for a surface define command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface define command for the definition
 * of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
{
	return sizeof(struct vmw_surface_define) + srf->num_sizes *
		sizeof(SVGA3dSize);
}

/**
 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
 *
 * Computes the required size for a surface destroy command for the destruction
 * of a hw surface.
 */
static inline uint32_t vmw_surface_destroy_size(void)
{
	return sizeof(struct vmw_surface_destroy);
}
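
/*
 * Illustration only: for a surface with a single face and six mip levels
 * (num_sizes == 6), the FIFO reservations built from the helpers above are
 *
 *	define command: sizeof(struct vmw_surface_define) + 6 * sizeof(SVGA3dSize)
 *	DMA commands:	6 * sizeof(struct vmw_surface_dma)
 *
 * vmw_surface_do_validate() reserves define (plus DMA when a backup buffer
 * exists) in one submission, while vmw_surface_evict() reserves DMA plus
 * sizeof(struct vmw_surface_destroy).
 */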

/**
 * vmw_surface_destroy_encode - Encode a surface_destroy command.
 *
 * @id: The surface id
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_destroy_encode(uint32_t id,
				       void *cmd_space)
{
	struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
		cmd_space;

	cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = id;
}

/**
 * vmw_surface_define_encode - Encode a surface_define command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_define_encode(const struct vmw_surface *srf,
				      void *cmd_space)
{
	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
		cmd_space;
	struct drm_vmw_size *src_size;
	SVGA3dSize *cmd_size;
	uint32_t cmd_len;
	int i;

	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
	cmd->header.size = cmd_len;
	cmd->body.sid = srf->res.id;
	cmd->body.surfaceFlags = srf->flags;
	cmd->body.format = cpu_to_le32(srf->format);
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		cmd->body.face[i].numMipLevels = srf->mip_levels[i];

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = src_size->width;
		cmd_size->height = src_size->height;
		cmd_size->depth = src_size->depth;
	}
}

/**
 * vmw_surface_dma_encode - Encode a surface_dma command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
 * should be placed or read from.
 * @to_surface: Boolean whether to DMA to the surface or from the surface.
 */
static void vmw_surface_dma_encode(struct vmw_surface *srf,
				   void *cmd_space,
				   const SVGAGuestPtr *ptr,
				   bool to_surface)
{
	uint32_t i;
	uint32_t bpp = vmw_sf_bpp[srf->format].bpp;
	uint32_t stride_bpp = vmw_sf_bpp[srf->format].s_bpp;
	struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;

	for (i = 0; i < srf->num_sizes; ++i) {
		SVGA3dCmdHeader *header = &cmd->header;
		SVGA3dCmdSurfaceDMA *body = &cmd->body;
		SVGA3dCopyBox *cb = &cmd->cb;
		SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
		const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
		const struct drm_vmw_size *cur_size = &srf->sizes[i];

		header->id = SVGA_3D_CMD_SURFACE_DMA;
		header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);

		body->guest.ptr = *ptr;
		body->guest.ptr.offset += cur_offset->bo_offset;
		body->guest.pitch = (cur_size->width * stride_bpp + 7) >> 3;
		body->host.sid = srf->res.id;
		body->host.face = cur_offset->face;
		body->host.mipmap = cur_offset->mip;
		body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
				  SVGA3D_READ_HOST_VRAM);
		cb->x = 0;
		cb->y = 0;
		cb->z = 0;
		cb->srcx = 0;
		cb->srcy = 0;
		cb->srcz = 0;
		cb->w = cur_size->width;
		cb->h = cur_size->height;
		cb->d = cur_size->depth;

		suffix->suffixSize = sizeof(*suffix);
		suffix->maximumOffset = body->guest.pitch*cur_size->height*
			cur_size->depth*bpp / stride_bpp;
		suffix->flags.discard = 0;
		suffix->flags.unsynchronized = 0;
		suffix->flags.reserved = 0;
		++cmd;
	}
}

static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	void *cmd;

	if (res->id != -1) {

		cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
		if (unlikely(cmd == NULL)) {
			DRM_ERROR("Failed reserving FIFO space for surface "
				  "destruction.\n");
			return;
		}

		vmw_surface_destroy_encode(res->id, cmd);
		vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());

		/*
		 * used_memory_size_atomic, or separate lock
		 * to avoid taking dev_priv::cmdbuf_mutex in
		 * the destroy path.
		 */

		mutex_lock(&dev_priv->cmdbuf_mutex);
		srf = container_of(res, struct vmw_surface, res);
		dev_priv->used_memory_size -= srf->backup_size;
		mutex_unlock(&dev_priv->cmdbuf_mutex);
	}
	vmw_3d_resource_dec(dev_priv, false);
}

void vmw_surface_res_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);

	if (srf->backup)
		ttm_bo_unref(&srf->backup);
	kfree(srf->offsets);
	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(srf);
}

/**
 * vmw_surface_do_validate - make a surface available to the device.
 *
 * @dev_priv: Pointer to a device private struct.
 * @srf: Pointer to a struct vmw_surface.
 *
 * If the surface doesn't have a hw id, allocate one, and optionally
 * DMA the backed up surface contents to the device.
 *
 * Returns -EBUSY if there weren't sufficient device resources to
 * complete the validation. Retry after freeing up resources.
 *
 * May return other errors if the kernel is out of guest resources.
 */
int vmw_surface_do_validate(struct vmw_private *dev_priv,
			    struct vmw_surface *srf)
{
	struct vmw_resource *res = &srf->res;
	struct list_head val_list;
	struct ttm_validate_buffer val_buf;
	uint32_t submit_size;
	uint8_t *cmd;
	int ret;

	if (likely(res->id != -1))
		return 0;

	if (unlikely(dev_priv->used_memory_size + srf->backup_size >=
		     dev_priv->memory_size))
		return -EBUSY;

	/*
	 * Reserve and validate the backup DMA bo.
	 */

	if (srf->backup) {
		INIT_LIST_HEAD(&val_list);
		val_buf.bo = ttm_bo_reference(srf->backup);
		val_buf.new_sync_obj_arg = (void *)((unsigned long)
						    DRM_VMW_FENCE_FLAG_EXEC);
		list_add_tail(&val_buf.head, &val_list);
		ret = ttm_eu_reserve_buffers(&val_list);
		if (unlikely(ret != 0))
			goto out_no_reserve;

		ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
				      true, false, false);
		if (unlikely(ret != 0))
			goto out_no_validate;
	}

	/*
	 * Alloc id for the resource.
	 */

	ret = vmw_resource_alloc_id(dev_priv, res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}
	if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	/*
	 * Encode the surface define and DMA commands.
	 */

	submit_size = vmw_surface_define_size(srf);
	if (srf->backup)
		submit_size += vmw_surface_dma_size(srf);

	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "validation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	vmw_surface_define_encode(srf, cmd);
	if (srf->backup) {
		SVGAGuestPtr ptr;

		cmd += vmw_surface_define_size(srf);
		vmw_bo_get_guest_ptr(srf->backup, &ptr);
		vmw_surface_dma_encode(srf, cmd, &ptr, true);
	}

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	if (srf->backup) {
		struct vmw_fence_obj *fence;

		(void) vmw_execbuf_fence_commands(NULL, dev_priv,
						  &fence, NULL);
		ttm_eu_fence_buffer_objects(&val_list, fence);
		if (likely(fence != NULL))
			vmw_fence_obj_unreference(&fence);
		ttm_bo_unref(&val_buf.bo);
		ttm_bo_unref(&srf->backup);
	}

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size += srf->backup_size;

	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
out_no_validate:
	if (srf->backup)
		ttm_eu_backoff_reservation(&val_list);
out_no_reserve:
	if (srf->backup)
		ttm_bo_unref(&val_buf.bo);
	return ret;
}

/**
 * vmw_surface_evict - Evict a hw surface.
 *
 * @dev_priv: Pointer to a device private struct.
 * @srf: Pointer to a struct vmw_surface
 *
 * DMA the contents of a hw surface to a backup guest buffer object,
 * and destroy the hw surface, releasing its id.
 */
int vmw_surface_evict(struct vmw_private *dev_priv,
		      struct vmw_surface *srf)
{
	struct vmw_resource *res = &srf->res;
	struct list_head val_list;
	struct ttm_validate_buffer val_buf;
	uint32_t submit_size;
	uint8_t *cmd;
	int ret;
	struct vmw_fence_obj *fence;
	SVGAGuestPtr ptr;

	BUG_ON(res->id == -1);

	/*
	 * Create a surface backup buffer object.
	 */

	if (!srf->backup) {
		ret = ttm_bo_create(&dev_priv->bdev, srf->backup_size,
				    ttm_bo_type_device,
				    &vmw_srf_placement, 0, 0, true,
				    NULL, &srf->backup);
		if (unlikely(ret != 0))
			return ret;
	}

	/*
	 * Reserve and validate the backup DMA bo.
	 */

	INIT_LIST_HEAD(&val_list);
	val_buf.bo = ttm_bo_reference(srf->backup);
	val_buf.new_sync_obj_arg = (void *)(unsigned long)
		DRM_VMW_FENCE_FLAG_EXEC;
	list_add_tail(&val_buf.head, &val_list);
	ret = ttm_eu_reserve_buffers(&val_list);
	if (unlikely(ret != 0))
		goto out_no_reserve;

	ret = ttm_bo_validate(srf->backup, &vmw_srf_placement,
			      true, false, false);
	if (unlikely(ret != 0))
		goto out_no_validate;

	/*
	 * Encode the DMA and surface destroy commands.
	 */

	submit_size = vmw_surface_dma_size(srf) + vmw_surface_destroy_size();
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "eviction.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	vmw_bo_get_guest_ptr(srf->backup, &ptr);
	vmw_surface_dma_encode(srf, cmd, &ptr, false);
	cmd += vmw_surface_dma_size(srf);
	vmw_surface_destroy_encode(res->id, cmd);
	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size -= srf->backup_size;

	/*
	 * Create a fence object and fence the DMA buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);
	ttm_eu_fence_buffer_objects(&val_list, fence);
	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);
	ttm_bo_unref(&val_buf.bo);

	/*
	 * Release the surface ID.
	 */

	vmw_resource_release_id(res);

	return 0;

out_no_fifo:
out_no_validate:
	if (srf->backup)
		ttm_eu_backoff_reservation(&val_list);
out_no_reserve:
	ttm_bo_unref(&val_buf.bo);
	ttm_bo_unref(&srf->backup);
	return ret;
}

/**
 * vmw_surface_validate - make a surface available to the device, evicting
 * other surfaces if needed.
 *
 * @dev_priv: Pointer to a device private struct.
 * @srf: Pointer to a struct vmw_surface.
 *
 * Try to validate a surface and if it fails due to limited device resources,
 * repeatedly try to evict other surfaces until the request can be
 * accommodated.
 *
 * May return errors if out of resources.
 */
int vmw_surface_validate(struct vmw_private *dev_priv,
			 struct vmw_surface *srf)
{
	int ret;
	struct vmw_surface *evict_srf;

	do {
		write_lock(&dev_priv->resource_lock);
		list_del_init(&srf->lru_head);
		write_unlock(&dev_priv->resource_lock);

		ret = vmw_surface_do_validate(dev_priv, srf);
		if (likely(ret != -EBUSY))
			break;

		write_lock(&dev_priv->resource_lock);
		if (list_empty(&dev_priv->surface_lru)) {
			DRM_ERROR("Out of device memory for surfaces.\n");
			ret = -EBUSY;
			write_unlock(&dev_priv->resource_lock);
			break;
		}

		evict_srf = vmw_surface_reference
			(list_first_entry(&dev_priv->surface_lru,
					  struct vmw_surface,
					  lru_head));
		list_del_init(&evict_srf->lru_head);

		write_unlock(&dev_priv->resource_lock);
		(void) vmw_surface_evict(dev_priv, evict_srf);

		vmw_surface_unreference(&evict_srf);

	} while (1);

	if (unlikely(ret != 0 && srf->res.id != -1)) {
		write_lock(&dev_priv->resource_lock);
		list_add_tail(&srf->lru_head, &dev_priv->surface_lru);
		write_unlock(&dev_priv->resource_lock);
	}

	return ret;
}

/**
 * vmw_surface_remove_from_lists - Remove surface resources from lookup lists
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct vmw_surface
 *
 * As part of the resource destruction, remove the surface from any
 * lookup lists.
 */
static void vmw_surface_remove_from_lists(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);

	list_del_init(&srf->lru_head);
}

int vmw_surface_init(struct vmw_private *dev_priv,
		     struct vmw_surface *srf,
		     void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_resource *res = &srf->res;

	BUG_ON(res_free == NULL);
	INIT_LIST_HEAD(&srf->lru_head);
	ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
				VMW_RES_SURFACE, true, res_free,
				vmw_surface_remove_from_lists);

	if (unlikely(ret != 0))
		res_free(res);

	/*
	 * The surface won't be visible to hardware until a
	 * surface validate.
	 */

	(void) vmw_3d_resource_inc(dev_priv, false);
	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return ret;
}

static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
	struct vmw_user_surface *user_srf =
		container_of(srf, struct vmw_user_surface, srf);

	if (srf->backup)
		ttm_bo_unref(&srf->backup);
	kfree(srf->offsets);
	kfree(srf->sizes);
	kfree(srf->snooper.image);
	kfree(user_srf);
}

/**
 * vmw_resource_unreserve - unreserve resources previously reserved for
 * command submission.
 *
 * @list: list of resources to unreserve.
 *
 * Currently only surfaces are considered: unreserving a surface means
 * putting it back on the device's surface LRU list, so that it can be
 * evicted if necessary. This function traverses the resource list, checks
 * whether each resource is a surface, and if so puts it back on the
 * device's surface LRU list.
 */
void vmw_resource_unreserve(struct list_head *list)
{
	struct vmw_resource *res;
	struct vmw_surface *srf;
	rwlock_t *lock = NULL;

	list_for_each_entry(res, list, validate_head) {

		if (res->res_free != &vmw_surface_res_free &&
		    res->res_free != &vmw_user_surface_free)
			continue;

		if (unlikely(lock == NULL)) {
			lock = &res->dev_priv->resource_lock;
			write_lock(lock);
		}

		srf = container_of(res, struct vmw_surface, res);
		list_del_init(&srf->lru_head);
		list_add_tail(&srf->lru_head, &res->dev_priv->surface_lru);
	}

	if (lock != NULL)
		write_unlock(lock);
}

int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
				   struct ttm_object_file *tfile,
				   uint32_t handle, struct vmw_surface **out)
{
	struct vmw_resource *res;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;
	res = &srf->res;

	read_lock(&dev_priv->resource_lock);

	if (!res->avail || res->res_free != &vmw_user_surface_free) {
		read_unlock(&dev_priv->resource_lock);
		goto out_bad_resource;
	}

	kref_get(&res->kref);
	read_unlock(&dev_priv->resource_lock);

	*out = srf;
	ret = 0;

out_bad_resource:
	ttm_base_object_unref(&base);

	return ret;
}

static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
		container_of(base, struct vmw_user_surface, base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf =
		kmalloc(sizeof(*user_srf), GFP_KERNEL);
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
		(union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i, j;
	uint32_t cur_bo_offset;
	struct drm_vmw_size *cur_size;
	struct vmw_surface_offset *cur_offset;
	uint32_t stride_bpp;
	uint32_t bpp;

	if (unlikely(user_srf == NULL))
		return -ENOMEM;

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	srf->scanout = req->scanout;
	srf->backup = NULL;

	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		srf->num_sizes += srf->mip_levels[i];

	if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS) {
		ret = -EINVAL;
		goto out_err0;
	}

	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_err0;
	}
	srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
			       GFP_KERNEL);
	if (unlikely(srf->offsets == NULL)) {
		ret = -ENOMEM;
		goto out_no_offsets;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
		req->size_addr;

	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		goto out_err1;
	}

	cur_bo_offset = 0;
	cur_offset = srf->offsets;
	cur_size = srf->sizes;

	bpp = vmw_sf_bpp[srf->format].bpp;
	stride_bpp = vmw_sf_bpp[srf->format].s_bpp;

	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		for (j = 0; j < srf->mip_levels[i]; ++j) {
			uint32_t stride =
				(cur_size->width * stride_bpp + 7) >> 3;

			cur_offset->face = i;
			cur_offset->mip = j;
			cur_offset->bo_offset = cur_bo_offset;
			cur_bo_offset += stride * cur_size->height *
				cur_size->depth * bpp / stride_bpp;
			++cur_offset;
			++cur_size;
		}
	}
	srf->backup_size = cur_bo_offset;

	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		/* allocate image area and clear it */
		srf->snooper.image = kzalloc(64 * 64 * 4, GFP_KERNEL);
		if (!srf->snooper.image) {
			DRM_ERROR("Failed to allocate cursor_image\n");
			ret = -ENOMEM;
			goto out_err1;
		}
	} else {
		srf->snooper.image = NULL;
	}
	srf->snooper.crtc = NULL;

	user_srf->base.shareable = false;
	user_srf->base.tfile = NULL;

	/**
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_base_object_init(tfile, &user_srf->base,
				   req->shareable, VMW_RES_SURFACE,
				   &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		return ret;
	}

	rep->sid = user_srf->base.hash.key;
	if (rep->sid == SVGA3D_INVALID_ID)
		DRM_ERROR("Created bad Surface ID.\n");

	vmw_resource_unreference(&res);
	return 0;
out_err1:
	kfree(srf->offsets);
out_no_offsets:
	kfree(srf->sizes);
out_err0:
	kfree(user_srf);
	return ret;
}

int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_surface_reference_arg *arg =
		(union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, req->sid);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		return -EINVAL;
	}

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;

	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a surface.\n");
		goto out_no_reference;
	}

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
		rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, srf->sizes,
				   srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		ret = -EFAULT;
	}
out_bad_resource:
out_no_reference:
	ttm_base_object_unref(&base);

	return ret;
}

int vmw_surface_check(struct vmw_private *dev_priv,
		      struct ttm_object_file *tfile,
		      uint32_t handle, int *id)
{
	struct ttm_base_object *base;
	struct vmw_user_surface *user_srf;
	int ret = -EPERM;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL))
		return -EINVAL;

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_surface;

	user_srf = container_of(base, struct vmw_user_surface, base);
	*id = user_srf->srf.res.id;
	ret = 0;

out_bad_surface:
	/**
	 * FIXME: May deadlock here when called from the
	 * command parsing code.
	 */

	ttm_base_object_unref(&base);
	return ret;
}

/**
 * Buffer management.
 */
static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
				  unsigned long num_pages)
{
	static size_t bo_user_size = ~0;

	size_t page_array_size =
		(num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;

	if (unlikely(bo_user_size == ~0)) {
		bo_user_size = glob->ttm_bo_extra_size +
			ttm_round_pot(sizeof(struct vmw_dma_buffer));
	}

	return bo_user_size + page_array_size;
}
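
/*
 * Illustration only (assumes a 64-bit build with 4 KiB pages): a 1 MiB
 * buffer object has num_pages = 256, so its page array accounts for
 * 256 * 8 bytes rounded up to PAGE_SIZE, i.e. 4096 bytes, on top of the
 * one-time bo_user_size covering the TTM bookkeeping plus
 * struct vmw_dma_buffer.
 */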

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_bo);
}

int vmw_dmabuf_init(struct vmw_private *dev_priv,
		    struct vmw_dma_buffer *vmw_bo,
		    size_t size, struct ttm_placement *placement,
		    bool interruptible,
		    void (*bo_free) (struct ttm_buffer_object *bo))
{
	struct ttm_bo_device *bdev = &dev_priv->bdev;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	size_t acc_size;
	int ret;

	BUG_ON(!bo_free);

	acc_size =
		vmw_dmabuf_acc_size(bdev->glob,
				    (size + PAGE_SIZE - 1) >> PAGE_SHIFT);

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0)) {
		/* we must free the bo here as
		 * ttm_buffer_object_init does so as well */
		bo_free(&vmw_bo->base);
		return ret;
	}

	memset(vmw_bo, 0, sizeof(*vmw_bo));

	INIT_LIST_HEAD(&vmw_bo->validate_list);

	ret = ttm_bo_init(bdev, &vmw_bo->base, size,
			  ttm_bo_type_device, placement,
			  0, 0, interruptible,
			  NULL, acc_size, bo_free);
	return ret;
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
	struct ttm_bo_global *glob = bo->glob;

	ttm_mem_global_free(glob->mem_glob, bo->acc_size);
	kfree(vmw_user_bo);
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base = *p_base;
	struct ttm_buffer_object *bo;

	*p_base = NULL;

	if (unlikely(base == NULL))
		return;

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	bo = &vmw_user_bo->dma.base;
	ttm_bo_unref(&bo);
}

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	union drm_vmw_alloc_dmabuf_arg *arg =
		(union drm_vmw_alloc_dmabuf_arg *)data;
	struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_buffer_object *tmp;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
	if (unlikely(vmw_user_bo == NULL))
		return -ENOMEM;

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0)) {
		kfree(vmw_user_bo);
		return ret;
	}

	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
			      &vmw_vram_sys_placement, true,
			      &vmw_user_dmabuf_destroy);
	if (unlikely(ret != 0))
		goto out_no_dmabuf;

	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
				   &vmw_user_bo->base,
				   false,
				   ttm_buffer_type,
				   &vmw_user_dmabuf_release, NULL);
	if (unlikely(ret != 0))
		goto out_no_base_object;
	else {
		rep->handle = vmw_user_bo->base.hash.key;
		rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
		rep->cur_gmr_id = vmw_user_bo->base.hash.key;
		rep->cur_gmr_offset = 0;
	}

out_no_base_object:
	ttm_bo_unref(&tmp);
out_no_dmabuf:
	ttm_read_unlock(&vmaster->lock);

	return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct drm_vmw_unref_dmabuf_arg *arg =
		(struct drm_vmw_unref_dmabuf_arg *)data;

	return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
					 arg->handle,
					 TTM_REF_USAGE);
}

uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
				  uint32_t cur_validate_node)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	if (likely(vmw_bo->on_validate_list))
		return vmw_bo->cur_validate_node;

	vmw_bo->cur_validate_node = cur_validate_node;
	vmw_bo->on_validate_list = true;

	return cur_validate_node;
}

void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
{
	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

	vmw_bo->on_validate_list = false;
}

int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
			   uint32_t handle, struct vmw_dma_buffer **out)
{
	struct vmw_user_dma_buffer *vmw_user_bo;
	struct ttm_base_object *base;

	base = ttm_base_object_lookup(tfile, handle);
	if (unlikely(base == NULL)) {
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -ESRCH;
	}

	if (unlikely(base->object_type != ttm_buffer_type)) {
		ttm_base_object_unref(&base);
		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
		       (unsigned long)handle);
		return -EINVAL;
	}

	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
	ttm_base_object_unref(&base);
	*out = &vmw_user_bo->dma;

	return 0;
}
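
/*
 * Illustration only: a caller resolving a user handle typically looks like
 * the hypothetical sketch below. On success the DMA buffer comes back with
 * an extra TTM reference that must be dropped with ttm_bo_unref() when the
 * caller is done with it.
 *
 *	struct vmw_dma_buffer *dma_buf;
 *	struct ttm_buffer_object *bo;
 *	int ret;
 *
 *	ret = vmw_user_dmabuf_lookup(tfile, handle, &dma_buf);
 *	if (unlikely(ret != 0))
 *		return ret;
 *
 *	... use dma_buf ...
 *
 *	bo = &dma_buf->base;
 *	ttm_bo_unref(&bo);
 */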

/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_stream *stream;
	int ret;

	DRM_INFO("%s: unref\n", __func__);
	stream = container_of(res, struct vmw_stream, res);

	ret = vmw_overlay_unref(dev_priv, stream->stream_id);
	WARN_ON(ret != 0);
}

static int vmw_stream_init(struct vmw_private *dev_priv,
			   struct vmw_stream *stream,
			   void (*res_free) (struct vmw_resource *res))
{
	struct vmw_resource *res = &stream->res;
	int ret;

	ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
				VMW_RES_STREAM, false, res_free, NULL);

	if (unlikely(ret != 0)) {
		if (res_free == NULL)
			kfree(stream);
		else
			res_free(&stream->res);
		return ret;
	}

	ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
	if (ret) {
		vmw_resource_unreference(&res);
		return ret;
	}

	DRM_INFO("%s: claimed\n", __func__);

	vmw_resource_activate(&stream->res, vmw_stream_destroy);
	return 0;
}

/**
 * User-space stream management:
 */

static void vmw_user_stream_free(struct vmw_resource *res)
{
	struct vmw_user_stream *stream =
		container_of(res, struct vmw_user_stream, stream.res);

	kfree(stream);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */
static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_stream *stream =
		container_of(base, struct vmw_user_stream, base);
	struct vmw_resource *res = &stream->stream.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_resource *res;
	struct vmw_user_stream *stream;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret = 0;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto out;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EINVAL;
		goto out;
	}

	ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
			   struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	int ret;

	if (unlikely(stream == NULL))
		return -ENOMEM;

	res = &stream->stream.res;
	stream->base.shareable = false;
	stream->base.tfile = NULL;

	ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
	if (unlikely(ret != 0))
		return ret;

	tmp = vmw_resource_reference(res);
	ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
				   &vmw_user_stream_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		goto out_err;
	}

	arg->stream_id = res->id;
out_err:
	vmw_resource_unreference(&res);
	return ret;
}

int vmw_user_stream_lookup(struct vmw_private *dev_priv,
			   struct ttm_object_file *tfile,
			   uint32_t *inout_id, struct vmw_resource **out)
{
	struct vmw_user_stream *stream;
	struct vmw_resource *res;
	int ret;

	res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
	if (unlikely(res == NULL))
		return -EINVAL;

	if (res->res_free != &vmw_user_stream_free) {
		ret = -EINVAL;
		goto err_ref;
	}

	stream = container_of(res, struct vmw_user_stream, stream.res);
	if (stream->base.tfile != tfile) {
		ret = -EPERM;
		goto err_ref;
	}

	*inout_id = stream->stream.stream_id;
	*out = res;
	return 0;

err_ref:
	vmw_resource_unreference(&res);
	return ret;
}