vmwgfx_surface.c

/**************************************************************************
 *
 * Copyright © 2009-2012 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include "vmwgfx_drv.h"
#include "vmwgfx_resource_priv.h"
#include <ttm/ttm_placement.h>
#include "svga3d_surfacedefs.h"
/**
 * struct vmw_user_surface - User-space visible surface resource
 *
 * @base:          The TTM base object handling user-space visibility.
 * @srf:           The surface metadata.
 * @size:          TTM accounting size for the surface.
 * @backup_handle: User-space handle of the backup buffer, if any.
 */
struct vmw_user_surface {
	struct ttm_base_object base;
	struct vmw_surface srf;
	uint32_t size;
	uint32_t backup_handle;
};
/**
 * struct vmw_surface_offset - Backing store mip level offset info
 *
 * @face:      Surface face.
 * @mip:       Mip level.
 * @bo_offset: Offset into backing store of this mip level.
 *
 */
struct vmw_surface_offset {
	uint32_t face;
	uint32_t mip;
	uint32_t bo_offset;
};
static void vmw_user_surface_free(struct vmw_resource *res);
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base);
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf);
static int vmw_legacy_srf_create(struct vmw_resource *res);
static int vmw_legacy_srf_destroy(struct vmw_resource *res);

static const struct vmw_user_resource_conv user_surface_conv = {
	.object_type = VMW_RES_SURFACE,
	.base_obj_to_res = vmw_user_surface_base_to_res,
	.res_free = vmw_user_surface_free
};

const struct vmw_user_resource_conv *user_surface_converter =
	&user_surface_conv;

static uint64_t vmw_user_surface_size;

static const struct vmw_res_func vmw_legacy_surface_func = {
	.res_type = vmw_res_surface,
	.needs_backup = false,
	.may_evict = true,
	.type_name = "legacy surfaces",
	.backup_placement = &vmw_srf_placement,
	.create = &vmw_legacy_srf_create,
	.destroy = &vmw_legacy_srf_destroy,
	.bind = &vmw_legacy_srf_bind,
	.unbind = &vmw_legacy_srf_unbind
};
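
/*
 * Legacy surface lifecycle, as driven by the generic resource code:
 * vmw_legacy_srf_create() allocates a surface id and emits a surface
 * define command, vmw_legacy_srf_bind() DMAs dirty backup-buffer
 * contents to the device surface, vmw_legacy_srf_unbind() optionally
 * reads the contents back on eviction, and vmw_legacy_srf_destroy()
 * emits a destroy command and releases the id.
 */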
/**
 * struct vmw_surface_dma - SVGA3D DMA command
 */
struct vmw_surface_dma {
	SVGA3dCmdHeader header;
	SVGA3dCmdSurfaceDMA body;
	SVGA3dCopyBox cb;
	SVGA3dCmdSurfaceDMASuffix suffix;
};

/**
 * struct vmw_surface_define - SVGA3D Surface Define command
 */
struct vmw_surface_define {
	SVGA3dCmdHeader header;
	SVGA3dCmdDefineSurface body;
};

/**
 * struct vmw_surface_destroy - SVGA3D Surface Destroy command
 */
struct vmw_surface_destroy {
	SVGA3dCmdHeader header;
	SVGA3dCmdDestroySurface body;
};
/**
 * vmw_surface_dma_size - Compute fifo size for a dma command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface dma command for backup or
 * restoration of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_dma_size(const struct vmw_surface *srf)
{
	return srf->num_sizes * sizeof(struct vmw_surface_dma);
}
/**
 * vmw_surface_define_size - Compute fifo size for a surface define command.
 *
 * @srf: Pointer to a struct vmw_surface
 *
 * Computes the required size for a surface define command for the definition
 * of the surface represented by @srf.
 */
static inline uint32_t vmw_surface_define_size(const struct vmw_surface *srf)
{
	return sizeof(struct vmw_surface_define) + srf->num_sizes *
		sizeof(SVGA3dSize);
}
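
/*
 * Example: a cube map with 6 faces of 10 mip levels each has
 * num_sizes == 60, so its define command occupies
 * sizeof(struct vmw_surface_define) + 60 * sizeof(SVGA3dSize) bytes of
 * fifo space: one SVGA3dSize per mip image after the fixed body.
 */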
/**
 * vmw_surface_destroy_size - Compute fifo size for a surface destroy command.
 *
 * Computes the required size for a surface destroy command for the destruction
 * of a hw surface.
 */
static inline uint32_t vmw_surface_destroy_size(void)
{
	return sizeof(struct vmw_surface_destroy);
}
/**
 * vmw_surface_destroy_encode - Encode a surface_destroy command.
 *
 * @id: The surface id
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_destroy_encode(uint32_t id,
				       void *cmd_space)
{
	struct vmw_surface_destroy *cmd = (struct vmw_surface_destroy *)
		cmd_space;

	cmd->header.id = SVGA_3D_CMD_SURFACE_DESTROY;
	cmd->header.size = sizeof(cmd->body);
	cmd->body.sid = id;
}
/**
 * vmw_surface_define_encode - Encode a surface_define command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 */
static void vmw_surface_define_encode(const struct vmw_surface *srf,
				      void *cmd_space)
{
	struct vmw_surface_define *cmd = (struct vmw_surface_define *)
		cmd_space;
	struct drm_vmw_size *src_size;
	SVGA3dSize *cmd_size;
	uint32_t cmd_len;
	int i;

	cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

	cmd->header.id = SVGA_3D_CMD_SURFACE_DEFINE;
	cmd->header.size = cmd_len;
	cmd->body.sid = srf->res.id;
	cmd->body.surfaceFlags = srf->flags;
	cmd->body.format = cpu_to_le32(srf->format);
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		cmd->body.face[i].numMipLevels = srf->mip_levels[i];

	cmd += 1;
	cmd_size = (SVGA3dSize *) cmd;
	src_size = srf->sizes;

	for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
		cmd_size->width = src_size->width;
		cmd_size->height = src_size->height;
		cmd_size->depth = src_size->depth;
	}
}
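
/*
 * Note that the command header's size field above counts only the
 * payload: the fixed body plus the SVGA3dSize array laid out directly
 * after it, not the header itself.
 */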
/**
 * vmw_surface_dma_encode - Encode a surface_dma command.
 *
 * @srf: Pointer to a struct vmw_surface object.
 * @cmd_space: Pointer to memory area in which the commands should be encoded.
 * @ptr: Pointer to an SVGAGuestPtr indicating where the surface contents
 * should be placed or read from.
 * @to_surface: Boolean whether to DMA to the surface or from the surface.
 */
static void vmw_surface_dma_encode(struct vmw_surface *srf,
				   void *cmd_space,
				   const SVGAGuestPtr *ptr,
				   bool to_surface)
{
	uint32_t i;
	struct vmw_surface_dma *cmd = (struct vmw_surface_dma *)cmd_space;
	const struct svga3d_surface_desc *desc =
		svga3dsurface_get_desc(srf->format);

	for (i = 0; i < srf->num_sizes; ++i) {
		SVGA3dCmdHeader *header = &cmd->header;
		SVGA3dCmdSurfaceDMA *body = &cmd->body;
		SVGA3dCopyBox *cb = &cmd->cb;
		SVGA3dCmdSurfaceDMASuffix *suffix = &cmd->suffix;
		const struct vmw_surface_offset *cur_offset = &srf->offsets[i];
		const struct drm_vmw_size *cur_size = &srf->sizes[i];

		header->id = SVGA_3D_CMD_SURFACE_DMA;
		header->size = sizeof(*body) + sizeof(*cb) + sizeof(*suffix);

		body->guest.ptr = *ptr;
		body->guest.ptr.offset += cur_offset->bo_offset;
		body->guest.pitch = svga3dsurface_calculate_pitch(desc,
								  cur_size);
		body->host.sid = srf->res.id;
		body->host.face = cur_offset->face;
		body->host.mipmap = cur_offset->mip;
		body->transfer = ((to_surface) ? SVGA3D_WRITE_HOST_VRAM :
				  SVGA3D_READ_HOST_VRAM);
		cb->x = 0;
		cb->y = 0;
		cb->z = 0;
		cb->srcx = 0;
		cb->srcy = 0;
		cb->srcz = 0;
		cb->w = cur_size->width;
		cb->h = cur_size->height;
		cb->d = cur_size->depth;

		suffix->suffixSize = sizeof(*suffix);
		suffix->maximumOffset =
			svga3dsurface_get_image_buffer_size(desc, cur_size,
							    body->guest.pitch);
		suffix->flags.discard = 0;
		suffix->flags.unsynchronized = 0;
		suffix->flags.reserved = 0;
		++cmd;
	}
}
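
/*
 * Each iteration above emits one complete DMA command (header, body,
 * copy box and suffix) for a single face/mip image; the suffix's
 * maximumOffset bounds the guest memory the device may access for
 * that image.
 */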
/**
 * vmw_hw_surface_destroy - destroy a Device surface
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 *
 * Destroys the device surface associated with a struct vmw_surface, if
 * any, and adjusts accounting and resource count accordingly.
 */
static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	void *cmd;

	if (res->id != -1) {

		cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size());
		if (unlikely(cmd == NULL)) {
			DRM_ERROR("Failed reserving FIFO space for surface "
				  "destruction.\n");
			return;
		}

		vmw_surface_destroy_encode(res->id, cmd);
		vmw_fifo_commit(dev_priv, vmw_surface_destroy_size());

		/*
		 * TODO: Use used_memory_size_atomic, or a separate lock,
		 * to avoid taking dev_priv::cmdbuf_mutex in
		 * the destroy path.
		 */

		mutex_lock(&dev_priv->cmdbuf_mutex);
		dev_priv->used_memory_size -= res->backup_size;
		mutex_unlock(&dev_priv->cmdbuf_mutex);
	}
	vmw_3d_resource_dec(dev_priv, false);
}
/**
 * vmw_legacy_srf_create - Create a device surface as part of the
 * resource validation process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 *
 * If the surface doesn't have a hw id, allocate one and encode a surface
 * define command; otherwise do nothing.
 *
 * Returns -EBUSY if there wasn't sufficient device resources to
 * complete the validation. Retry after freeing up resources.
 *
 * May return other errors if the kernel is out of guest resources.
 */
static int vmw_legacy_srf_create(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	struct vmw_surface *srf;
	uint32_t submit_size;
	uint8_t *cmd;
	int ret;

	if (likely(res->id != -1))
		return 0;

	srf = vmw_res_to_srf(res);
	if (unlikely(dev_priv->used_memory_size + res->backup_size >=
		     dev_priv->memory_size))
		return -EBUSY;

	/*
	 * Alloc id for the resource.
	 */

	ret = vmw_resource_alloc_id(res);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed to allocate a surface id.\n");
		goto out_no_id;
	}

	if (unlikely(res->id >= SVGA3D_MAX_SURFACE_IDS)) {
		ret = -EBUSY;
		goto out_no_fifo;
	}

	/*
	 * Encode surface define commands.
	 */

	submit_size = vmw_surface_define_size(srf);
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "creation.\n");
		ret = -ENOMEM;
		goto out_no_fifo;
	}

	vmw_surface_define_encode(srf, cmd);
	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size += res->backup_size;
	return 0;

out_no_fifo:
	vmw_resource_release_id(res);
out_no_id:
	return ret;
}
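
/*
 * Note that creation only defines the surface on the device; its
 * contents are supplied later by vmw_legacy_srf_bind() from the backup
 * buffer during validation.
 */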
/**
 * vmw_legacy_srf_dma - Copy backup data to or from a legacy surface.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 * @bind: Boolean whether to DMA to the surface.
 *
 * Transfer backup data to or from a legacy surface as part of the
 * validation process.
 * May return other errors if the kernel is out of guest resources.
 * The backup buffer will be fenced or idle upon successful completion,
 * and if the surface needs persistent backup storage, the backup buffer
 * will also be returned reserved iff @bind is true.
 */
static int vmw_legacy_srf_dma(struct vmw_resource *res,
			      struct ttm_validate_buffer *val_buf,
			      bool bind)
{
	SVGAGuestPtr ptr;
	struct vmw_fence_obj *fence;
	uint32_t submit_size;
	struct vmw_surface *srf = vmw_res_to_srf(res);
	uint8_t *cmd;
	struct vmw_private *dev_priv = res->dev_priv;

	BUG_ON(val_buf->bo == NULL);

	submit_size = vmw_surface_dma_size(srf);
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "DMA.\n");
		return -ENOMEM;
	}
	vmw_bo_get_guest_ptr(val_buf->bo, &ptr);
	vmw_surface_dma_encode(srf, cmd, &ptr, bind);

	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Create a fence object and fence the backup buffer.
	 */

	(void) vmw_execbuf_fence_commands(NULL, dev_priv,
					  &fence, NULL);

	vmw_fence_single_bo(val_buf->bo, fence);

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	return 0;
}
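
/*
 * Fencing the backup buffer above ensures TTM won't move or release it
 * while the asynchronous DMA to or from the device surface is still in
 * flight.
 */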
/**
 * vmw_legacy_srf_bind - Perform a legacy surface bind as part of the
 * surface validation process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 *
 * This function will copy backup data to the surface if the
 * backup buffer is dirty.
 */
static int vmw_legacy_srf_bind(struct vmw_resource *res,
			       struct ttm_validate_buffer *val_buf)
{
	if (!res->backup_dirty)
		return 0;

	return vmw_legacy_srf_dma(res, val_buf, true);
}
/**
 * vmw_legacy_srf_unbind - Perform a legacy surface unbind as part of the
 * surface eviction process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 * @readback: Whether the surface contents need to be copied back to the
 * backup buffer.
 * @val_buf: Pointer to a struct ttm_validate_buffer containing
 * information about the backup buffer.
 *
 * This function will copy backup data from the surface.
 */
static int vmw_legacy_srf_unbind(struct vmw_resource *res,
				 bool readback,
				 struct ttm_validate_buffer *val_buf)
{
	if (unlikely(readback))
		return vmw_legacy_srf_dma(res, val_buf, false);
	return 0;
}
/**
 * vmw_legacy_srf_destroy - Destroy a device surface as part of a
 * resource eviction process.
 *
 * @res: Pointer to a struct vmw_resource embedded in a struct
 * vmw_surface.
 */
static int vmw_legacy_srf_destroy(struct vmw_resource *res)
{
	struct vmw_private *dev_priv = res->dev_priv;
	uint32_t submit_size;
	uint8_t *cmd;

	BUG_ON(res->id == -1);

	/*
	 * Encode the surface destroy command.
	 */

	submit_size = vmw_surface_destroy_size();
	cmd = vmw_fifo_reserve(dev_priv, submit_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving FIFO space for surface "
			  "eviction.\n");
		return -ENOMEM;
	}

	vmw_surface_destroy_encode(res->id, cmd);
	vmw_fifo_commit(dev_priv, submit_size);

	/*
	 * Surface memory usage accounting.
	 */

	dev_priv->used_memory_size -= res->backup_size;

	/*
	 * Release the surface ID.
	 */

	vmw_resource_release_id(res);

	return 0;
}
/**
 * vmw_surface_init - initialize a struct vmw_surface
 *
 * @dev_priv: Pointer to a device private struct.
 * @srf: Pointer to the struct vmw_surface to initialize.
 * @res_free: Pointer to a resource destructor used to free
 * the object.
 */
static int vmw_surface_init(struct vmw_private *dev_priv,
			    struct vmw_surface *srf,
			    void (*res_free) (struct vmw_resource *res))
{
	int ret;
	struct vmw_resource *res = &srf->res;

	BUG_ON(res_free == NULL);
	(void) vmw_3d_resource_inc(dev_priv, false);
	ret = vmw_resource_init(dev_priv, res, true, res_free,
				&vmw_legacy_surface_func);

	if (unlikely(ret != 0)) {
		vmw_3d_resource_dec(dev_priv, false);
		res_free(res);
		return ret;
	}

	/*
	 * The surface won't be visible to hardware until a
	 * surface validate.
	 */

	vmw_resource_activate(res, vmw_hw_surface_destroy);
	return ret;
}
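
/*
 * vmw_resource_activate() registers vmw_hw_surface_destroy() as the
 * hardware destroy callback, so the device surface is torn down when
 * the last reference to the resource is dropped.
 */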
/**
 * vmw_user_surface_base_to_res - TTM base object to resource converter for
 * user visible surfaces
 *
 * @base: Pointer to a TTM base object
 *
 * Returns the struct vmw_resource embedded in a struct vmw_surface
 * for the user-visible object identified by the TTM base object @base.
 */
static struct vmw_resource *
vmw_user_surface_base_to_res(struct ttm_base_object *base)
{
	return &(container_of(base, struct vmw_user_surface, base)->srf.res);
}
/**
 * vmw_user_surface_free - User visible surface resource destructor
 *
 * @res: A struct vmw_resource embedded in a struct vmw_surface.
 */
static void vmw_user_surface_free(struct vmw_resource *res)
{
	struct vmw_surface *srf = vmw_res_to_srf(res);
	struct vmw_user_surface *user_srf =
		container_of(srf, struct vmw_user_surface, srf);
	struct vmw_private *dev_priv = srf->res.dev_priv;
	uint32_t size = user_srf->size;

	kfree(srf->offsets);
	kfree(srf->sizes);
	kfree(srf->snooper.image);
	ttm_base_object_kfree(user_srf, base);
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
}
/**
 * vmw_user_surface_base_release - User visible surface TTM base object
 * destructor
 *
 * @p_base: Pointer to a pointer to a TTM base object
 * embedded in a struct vmw_user_surface.
 *
 * Drops the base object's reference on its resource, and the
 * pointer pointed to by *p_base is set to NULL.
 */
static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
	struct ttm_base_object *base = *p_base;
	struct vmw_user_surface *user_srf =
		container_of(base, struct vmw_user_surface, base);
	struct vmw_resource *res = &user_srf->srf.res;

	*p_base = NULL;
	vmw_resource_unreference(&res);
}
/**
 * vmw_surface_destroy_ioctl - Ioctl function implementing
 * the user surface destroy functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

	return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}
/**
 * vmw_surface_define_ioctl - Ioctl function implementing
 * the user surface define functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_user_surface *user_srf;
	struct vmw_surface *srf;
	struct vmw_resource *res;
	struct vmw_resource *tmp;
	union drm_vmw_surface_create_arg *arg =
	    (union drm_vmw_surface_create_arg *)data;
	struct drm_vmw_surface_create_req *req = &arg->req;
	struct drm_vmw_surface_arg *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct drm_vmw_size __user *user_sizes;
	int ret;
	int i, j;
	uint32_t cur_bo_offset;
	struct drm_vmw_size *cur_size;
	struct vmw_surface_offset *cur_offset;
	uint32_t num_sizes;
	uint32_t size;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	const struct svga3d_surface_desc *desc;

	if (unlikely(vmw_user_surface_size == 0))
		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) +
			128;

	num_sizes = 0;
	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
		num_sizes += req->mip_levels[i];

	/*
	 * Reject a zero mip-level count as well: the code below assumes
	 * at least one size entry exists (srf->base_size = *srf->sizes).
	 */
	if (num_sizes > DRM_VMW_MAX_SURFACE_FACES *
	    DRM_VMW_MAX_MIP_LEVELS || num_sizes == 0)
		return -EINVAL;

	size = vmw_user_surface_size + 128 +
		ttm_round_pot(num_sizes * sizeof(struct drm_vmw_size)) +
		ttm_round_pot(num_sizes * sizeof(struct vmw_surface_offset));

	desc = svga3dsurface_get_desc(req->format);
	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) {
		DRM_ERROR("Invalid surface format for surface creation.\n");
		return -EINVAL;
	}

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
				   size, false, true);
	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS)
			DRM_ERROR("Out of graphics memory for surface"
				  " creation.\n");
		goto out_unlock;
	}

	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL);
	if (unlikely(user_srf == NULL)) {
		ret = -ENOMEM;
		goto out_no_user_srf;
	}

	srf = &user_srf->srf;
	res = &srf->res;

	srf->flags = req->flags;
	srf->format = req->format;
	srf->scanout = req->scanout;

	memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
	srf->num_sizes = num_sizes;
	user_srf->size = size;

	srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
	if (unlikely(srf->sizes == NULL)) {
		ret = -ENOMEM;
		goto out_no_sizes;
	}
	srf->offsets = kmalloc(srf->num_sizes * sizeof(*srf->offsets),
			       GFP_KERNEL);
	/* Check the offsets allocation, not srf->sizes again. */
	if (unlikely(srf->offsets == NULL)) {
		ret = -ENOMEM;
		goto out_no_offsets;
	}

	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    req->size_addr;

	ret = copy_from_user(srf->sizes, user_sizes,
			     srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		ret = -EFAULT;
		goto out_no_copy;
	}

	srf->base_size = *srf->sizes;
	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE;
	srf->multisample_count = 1;

	cur_bo_offset = 0;
	cur_offset = srf->offsets;
	cur_size = srf->sizes;

	for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
		for (j = 0; j < srf->mip_levels[i]; ++j) {
			uint32_t stride = svga3dsurface_calculate_pitch
				(desc, cur_size);

			cur_offset->face = i;
			cur_offset->mip = j;
			cur_offset->bo_offset = cur_bo_offset;
			cur_bo_offset += svga3dsurface_get_image_buffer_size
				(desc, cur_size, stride);
			++cur_offset;
			++cur_size;
		}
	}
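
	/*
	 * At this point the mip images have been laid out contiguously
	 * in the backing store in face-major, mip-minor order, and
	 * cur_bo_offset is the total backing store size.
	 */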
	res->backup_size = cur_bo_offset;

	if (srf->scanout &&
	    srf->num_sizes == 1 &&
	    srf->sizes[0].width == 64 &&
	    srf->sizes[0].height == 64 &&
	    srf->format == SVGA3D_A8R8G8B8) {

		srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
		/* clear the image */
		if (srf->snooper.image) {
			memset(srf->snooper.image, 0x00, 64 * 64 * 4);
		} else {
			DRM_ERROR("Failed to allocate cursor_image\n");
			ret = -ENOMEM;
			goto out_no_copy;
		}
	} else {
		srf->snooper.image = NULL;
	}
	srf->snooper.crtc = NULL;

	user_srf->base.shareable = false;
	user_srf->base.tfile = NULL;

	/*
	 * From this point, the generic resource management functions
	 * destroy the object on failure.
	 */

	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
	if (unlikely(ret != 0))
		goto out_unlock;

	tmp = vmw_resource_reference(&srf->res);
	ret = ttm_base_object_init(tfile, &user_srf->base,
				   req->shareable, VMW_RES_SURFACE,
				   &vmw_user_surface_base_release, NULL);

	if (unlikely(ret != 0)) {
		vmw_resource_unreference(&tmp);
		vmw_resource_unreference(&res);
		goto out_unlock;
	}

	rep->sid = user_srf->base.hash.key;
	vmw_resource_unreference(&res);

	ttm_read_unlock(&vmaster->lock);
	return 0;
out_no_copy:
	kfree(srf->offsets);
out_no_offsets:
	kfree(srf->sizes);
out_no_sizes:
	ttm_base_object_kfree(user_srf, base);
out_no_user_srf:
	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}
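
/*
 * Example user-space call sequence (a sketch, not part of this file):
 * the define ioctl above is reached through the DRM_VMW_CREATE_SURFACE
 * command, using the same union declared in vmwgfx_drm.h.
 *
 *	union drm_vmw_surface_create_arg arg;
 *	struct drm_vmw_size size = { .width = 64, .height = 64, .depth = 1 };
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.req.format = SVGA3D_A8R8G8B8;
 *	arg.req.mip_levels[0] = 1;
 *	arg.req.size_addr = (unsigned long) &size;
 *	ret = drmCommandWriteRead(fd, DRM_VMW_CREATE_SURFACE,
 *				  &arg, sizeof(arg));
 *
 * On success, arg.rep.sid holds the new surface handle.
 */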
/**
 * vmw_surface_reference_ioctl - Ioctl function implementing
 * the user surface reference functionality.
 *
 * @dev: Pointer to a struct drm_device.
 * @data: Pointer to data copied from / to user-space.
 * @file_priv: Pointer to a drm file private structure.
 */
int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	union drm_vmw_surface_reference_arg *arg =
	    (union drm_vmw_surface_reference_arg *)data;
	struct drm_vmw_surface_arg *req = &arg->req;
	struct drm_vmw_surface_create_req *rep = &arg->rep;
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_surface *srf;
	struct vmw_user_surface *user_srf;
	struct drm_vmw_size __user *user_sizes;
	struct ttm_base_object *base;
	int ret = -EINVAL;

	base = ttm_base_object_lookup(tfile, req->sid);
	if (unlikely(base == NULL)) {
		DRM_ERROR("Could not find surface to reference.\n");
		return -EINVAL;
	}

	if (unlikely(base->object_type != VMW_RES_SURFACE))
		goto out_bad_resource;

	user_srf = container_of(base, struct vmw_user_surface, base);
	srf = &user_srf->srf;

	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not add a reference to a surface.\n");
		goto out_no_reference;
	}

	rep->flags = srf->flags;
	rep->format = srf->format;
	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
	user_sizes = (struct drm_vmw_size __user *)(unsigned long)
	    rep->size_addr;

	if (user_sizes)
		ret = copy_to_user(user_sizes, srf->sizes,
				   srf->num_sizes * sizeof(*srf->sizes));
	if (unlikely(ret != 0)) {
		DRM_ERROR("copy_to_user failed %p %u\n",
			  user_sizes, srf->num_sizes);
		ret = -EFAULT;
	}
out_bad_resource:
out_no_reference:
	ttm_base_object_unref(&base);

	return ret;
}