vmwgfx_execbuf.c

/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_placement.h"
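
/*
 * Per-command verifier functions. Each is handed the device, the
 * software context built up while checking the command stream, and the
 * command header; returning 0 lets the command through to the device.
 * vmw_cmd_invalid rejects commands user-space may not submit directly,
 * and vmw_cmd_ok accepts commands that need no further checking.
 */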
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
			   struct vmw_sw_context *sw_context,
			   SVGA3dCmdHeader *header)
{
	return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
		      struct vmw_sw_context *sw_context,
		      SVGA3dCmdHeader *header)
{
	return 0;
}
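
/*
 * vmw_resource_to_validate_list - Add a resource to the validation list
 * exactly once. The caller's reference is handed over to the list; if
 * the resource is already listed, or the list is full, the duplicate
 * reference is dropped instead.
 */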
static int vmw_resource_to_validate_list(struct vmw_sw_context *sw_context,
					 struct vmw_resource **p_res)
{
	int ret = 0;
	struct vmw_resource *res = *p_res;

	if (!res->on_validate_list) {
		if (sw_context->num_ref_resources >= VMWGFX_MAX_VALIDATIONS) {
			DRM_ERROR("Too many resources referenced in "
				  "command stream.\n");
			ret = -ENOMEM;
			goto out;
		}
		sw_context->resources[sw_context->num_ref_resources++] = res;
		res->on_validate_list = true;
		return 0;
	}

out:
	vmw_resource_unreference(p_res);
	return ret;
}
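
/*
 * vmw_cmd_cid_check - Verify that the context id of a command refers to
 * a context the caller may use. A one-entry cache (last_cid) avoids the
 * lookup when consecutive commands target the same context.
 */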
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_resource *ctx;

	struct vmw_cid_cmd {
		SVGA3dCmdHeader header;
		__le32 cid;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_cid_cmd, header);
	if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
		return 0;

	ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid,
				&ctx);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use context %u\n",
			  (unsigned) cmd->cid);
		return ret;
	}

	sw_context->last_cid = cmd->cid;
	sw_context->cid_valid = true;

	return vmw_resource_to_validate_list(sw_context, &ctx);
}
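
/*
 * vmw_cmd_sid_check - Verify that a surface id in the command stream
 * refers to a surface the caller may use, and patch the stream in place
 * with the device surface id. The last translation is cached.
 */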
static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     uint32_t *sid)
{
	struct vmw_surface *srf;
	int ret;
	struct vmw_resource *res;

	if (*sid == SVGA3D_INVALID_ID)
		return 0;

	if (likely((sw_context->sid_valid &&
		    *sid == sw_context->last_sid))) {
		*sid = sw_context->sid_translation;
		return 0;
	}

	ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
					     *sid, &srf);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use surface 0x%08x "
			  "address 0x%08lx\n",
			  (unsigned int) *sid,
			  (unsigned long) sid);
		return ret;
	}

	sw_context->last_sid = *sid;
	sw_context->sid_valid = true;
	sw_context->sid_translation = srf->res.id;
	*sid = sw_context->sid_translation;

	res = &srf->res;
	return vmw_resource_to_validate_list(sw_context, &res);
}

static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
					   struct vmw_sw_context *sw_context,
					   SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetRenderTarget body;
	} *cmd;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
	return ret;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
				      struct vmw_sw_context *sw_context,
				      SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceCopy body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
				     struct vmw_sw_context *sw_context,
				     SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceStretchBlt body;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
	if (unlikely(ret != 0))
		return ret;
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
					 struct vmw_sw_context *sw_context,
					 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdBlitSurfaceToScreen body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	return vmw_cmd_sid_check(dev_priv, sw_context,
				 &cmd->body.srcImage.sid);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
				 struct vmw_sw_context *sw_context,
				 SVGA3dCmdHeader *header)
{
	struct vmw_sid_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdPresent body;
	} *cmd;

	cmd = container_of(header, struct vmw_sid_cmd, header);
	return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
}
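
/*
 * vmw_translate_guest_ptr - Look up the DMA buffer a guest pointer's
 * gmrId handle refers to, record a relocation so the pointer can later
 * be patched with the buffer's final placement, and put the buffer on
 * the validation list (only once per buffer). On success, a reference
 * to the buffer is returned in @vmw_bo_p.
 */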
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
				   struct vmw_sw_context *sw_context,
				   SVGAGuestPtr *ptr,
				   struct vmw_dma_buffer **vmw_bo_p)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	uint32_t handle = ptr->gmrId;
	struct vmw_relocation *reloc;
	uint32_t cur_validate_node;
	struct ttm_validate_buffer *val_buf;
	int ret;

	ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Could not find or use GMR region.\n");
		return -EINVAL;
	}
	bo = &vmw_bo->base;

	if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
		DRM_ERROR("Max number relocations per submission"
			  " exceeded\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc = &sw_context->relocs[sw_context->cur_reloc++];
	reloc->location = ptr;

	cur_validate_node = vmw_dmabuf_validate_node(bo,
						     sw_context->cur_val_buf);
	if (unlikely(cur_validate_node >= VMWGFX_MAX_VALIDATIONS)) {
		DRM_ERROR("Max number of DMA buffers per submission"
			  " exceeded.\n");
		ret = -EINVAL;
		goto out_no_reloc;
	}

	reloc->index = cur_validate_node;
	if (unlikely(cur_validate_node == sw_context->cur_val_buf)) {
		val_buf = &sw_context->val_bufs[cur_validate_node];
		val_buf->bo = ttm_bo_reference(bo);
		val_buf->usage = TTM_USAGE_READWRITE;
		val_buf->new_sync_obj_arg = (void *) DRM_VMW_FENCE_FLAG_EXEC;
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		++sw_context->cur_val_buf;
	}
	*vmw_bo_p = vmw_bo;
	return 0;

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	*vmw_bo_p = NULL;
	return ret;
}

static int vmw_cmd_end_query(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdEndQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}

static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
			      struct vmw_sw_context *sw_context,
			      SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo;
	struct vmw_query_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdWaitForQuery q;
	} *cmd;
	int ret;

	cmd = container_of(header, struct vmw_query_cmd, header);
	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->q.guestResult,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	vmw_dmabuf_unreference(&vmw_bo);
	return 0;
}
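
/*
 * vmw_cmd_dma - Verify a surface DMA command: translate the guest
 * pointer, look up the host surface, patch the stream with the device
 * surface id and let the kms code snoop cursor surface contents.
 */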
static int vmw_cmd_dma(struct vmw_private *dev_priv,
		       struct vmw_sw_context *sw_context,
		       SVGA3dCmdHeader *header)
{
	struct vmw_dma_buffer *vmw_bo = NULL;
	struct ttm_buffer_object *bo;
	struct vmw_surface *srf = NULL;
	struct vmw_dma_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSurfaceDMA dma;
	} *cmd;
	int ret;
	struct vmw_resource *res;

	cmd = container_of(header, struct vmw_dma_cmd, header);
	ret = vmw_translate_guest_ptr(dev_priv, sw_context,
				      &cmd->dma.guest.ptr,
				      &vmw_bo);
	if (unlikely(ret != 0))
		return ret;

	bo = &vmw_bo->base;
	ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
					     cmd->dma.host.sid, &srf);
	if (ret) {
		DRM_ERROR("could not find surface\n");
		goto out_no_reloc;
	}

	/*
	 * Patch command stream with device SID.
	 */
	cmd->dma.host.sid = srf->res.id;
	vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);

	vmw_dmabuf_unreference(&vmw_bo);

	res = &srf->res;
	return vmw_resource_to_validate_list(sw_context, &res);

out_no_reloc:
	vmw_dmabuf_unreference(&vmw_bo);
	return ret;
}
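
/*
 * vmw_cmd_draw - Verify a draw-primitives command. The declared numbers
 * of vertex declarations and primitive ranges are bounded by what
 * actually fits in the command body, and every surface id referenced by
 * a vertex or index array is checked.
 */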
static int vmw_cmd_draw(struct vmw_private *dev_priv,
			struct vmw_sw_context *sw_context,
			SVGA3dCmdHeader *header)
{
	struct vmw_draw_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdDrawPrimitives body;
	} *cmd;
	SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
		(unsigned long)header + sizeof(*cmd));
	SVGA3dPrimitiveRange *range;
	uint32_t i;
	uint32_t maxnum;
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	cmd = container_of(header, struct vmw_draw_cmd, header);
	maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

	if (unlikely(cmd->body.numVertexDecls > maxnum)) {
		DRM_ERROR("Illegal number of vertex declarations.\n");
		return -EINVAL;
	}

	for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&decl->array.surfaceId);
		if (unlikely(ret != 0))
			return ret;
	}

	maxnum = (header->size - sizeof(cmd->body) -
		  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
	if (unlikely(cmd->body.numRanges > maxnum)) {
		DRM_ERROR("Illegal number of index ranges.\n");
		return -EINVAL;
	}

	range = (SVGA3dPrimitiveRange *) decl;
	for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&range->indexArray.surfaceId);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}
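
/*
 * vmw_cmd_tex_state - Walk the variable-length list of texture states
 * that follows the command body, checking the surface id of every
 * SVGA3D_TS_BIND_TEXTURE entry.
 */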
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     SVGA3dCmdHeader *header)
{
	struct vmw_tex_state_cmd {
		SVGA3dCmdHeader header;
		SVGA3dCmdSetTextureState state;
	};

	SVGA3dTextureState *last_state = (SVGA3dTextureState *)
	  ((unsigned long) header + header->size + sizeof(header));
	SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
		((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
	int ret;

	ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		return ret;

	for (; cur_state < last_state; ++cur_state) {
		if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
			continue;

		ret = vmw_cmd_sid_check(dev_priv, sw_context,
					&cur_state->value);
		if (unlikely(ret != 0))
			return ret;
	}

	return 0;
}

typedef int (*vmw_cmd_func) (struct vmw_private *,
			     struct vmw_sw_context *,
			     SVGA3dCmdHeader *);

#define VMW_CMD_DEF(cmd, func) \
	[cmd - SVGA_3D_CMD_BASE] = func

static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
		    &vmw_cmd_set_render_target_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
	VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
	VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
	VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
	VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
	VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
	VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
		    &vmw_cmd_blt_surf_screen_check)
};
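
/*
 * vmw_cmd_check - Check a single command. The 2D SVGA_CMD_UPDATE
 * command is passed through with its fixed size; for 3D commands the
 * size is decoded from the header, checked against the remaining
 * stream, and the command is dispatched to the verifier registered in
 * vmw_cmd_funcs. On return, *size holds the number of bytes consumed.
 */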
static int vmw_cmd_check(struct vmw_private *dev_priv,
			 struct vmw_sw_context *sw_context,
			 void *buf, uint32_t *size)
{
	uint32_t cmd_id;
	uint32_t size_remaining = *size;
	SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
	int ret;

	cmd_id = ((uint32_t *)buf)[0];
	if (cmd_id == SVGA_CMD_UPDATE) {
		*size = 5 << 2;
		return 0;
	}

	cmd_id = le32_to_cpu(header->id);
	*size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);

	cmd_id -= SVGA_3D_CMD_BASE;
	if (unlikely(*size > size_remaining))
		goto out_err;

	if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
		goto out_err;

	ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
	if (unlikely(ret != 0))
		goto out_err;

	return 0;
out_err:
	DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
		  cmd_id + SVGA_3D_CMD_BASE);
	return -EINVAL;
}
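
/*
 * vmw_cmd_check_all - Run vmw_cmd_check on every command in the stream,
 * advancing by the size each command reports until the buffer is
 * exhausted.
 */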
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
			     struct vmw_sw_context *sw_context,
			     void *buf,
			     uint32_t size)
{
	int32_t cur_size = size;
	int ret;

	while (cur_size > 0) {
		size = cur_size;
		ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
		if (unlikely(ret != 0))
			return ret;
		buf = (void *)((unsigned long) buf + size);
		cur_size -= size;
	}

	if (unlikely(cur_size != 0)) {
		DRM_ERROR("Command verifier out of sync.\n");
		return -EINVAL;
	}

	return 0;
}

static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
	sw_context->cur_reloc = 0;
}
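
/*
 * vmw_apply_relocations - Patch each recorded guest pointer with the
 * final placement of its buffer: an offset into the VRAM-backed
 * SVGA_GMR_FRAMEBUFFER region, or the id of the GMR the buffer was
 * bound to.
 */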
static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
	uint32_t i;
	struct vmw_relocation *reloc;
	struct ttm_validate_buffer *validate;
	struct ttm_buffer_object *bo;

	for (i = 0; i < sw_context->cur_reloc; ++i) {
		reloc = &sw_context->relocs[i];
		validate = &sw_context->val_bufs[reloc->index];
		bo = validate->bo;
		if (bo->mem.mem_type == TTM_PL_VRAM) {
			reloc->location->offset += bo->offset;
			reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
		} else
			reloc->location->gmrId = bo->mem.start;
	}
	vmw_free_relocations(sw_context);
}

static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
	struct ttm_validate_buffer *entry, *next;
	uint32_t i = sw_context->num_ref_resources;

	/*
	 * Drop references to DMA buffers held during command submission.
	 */
	list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
				 head) {
		list_del(&entry->head);
		vmw_dmabuf_validate_clear(entry->bo);
		ttm_bo_unref(&entry->bo);
		sw_context->cur_val_buf--;
	}
	BUG_ON(sw_context->cur_val_buf != 0);

	/*
	 * Drop references to resources held during command submission.
	 */
	while (i-- > 0) {
		sw_context->resources[i]->on_validate_list = false;
		vmw_resource_unreference(&sw_context->resources[i]);
	}
}

static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
				      struct ttm_buffer_object *bo)
{
	int ret;

	/**
	 * Put BO in VRAM if there is space, otherwise as a GMR.
	 * If there is no space in VRAM and GMR ids are all used up,
	 * start evicting GMRs to make room. If the DMA buffer can't be
	 * used as a GMR, this will return -ENOMEM.
	 */

	ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
	if (likely(ret == 0 || ret == -ERESTARTSYS))
		return ret;

	/**
	 * If that failed, try VRAM again, this time evicting
	 * previous contents.
	 */

	DRM_INFO("Falling through to VRAM.\n");
	ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
	return ret;
}

static int vmw_validate_buffers(struct vmw_private *dev_priv,
				struct vmw_sw_context *sw_context)
{
	struct ttm_validate_buffer *entry;
	int ret;

	list_for_each_entry(entry, &sw_context->validate_nodes, head) {
		ret = vmw_validate_single_buffer(dev_priv, entry->bo);
		if (unlikely(ret != 0))
			return ret;
	}
	return 0;
}
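
/*
 * vmw_resize_cmd_bounce - Grow the bounce buffer used to copy the
 * command stream in from user-space. The size grows in page-aligned
 * steps of roughly 1.5x; old contents need not be preserved, so the
 * buffer is simply freed and reallocated rather than copied.
 */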
static int vmw_resize_cmd_bounce(struct vmw_sw_context *sw_context,
				 uint32_t size)
{
	if (likely(sw_context->cmd_bounce_size >= size))
		return 0;

	if (sw_context->cmd_bounce_size == 0)
		sw_context->cmd_bounce_size = VMWGFX_CMD_BOUNCE_INIT_SIZE;

	while (sw_context->cmd_bounce_size < size) {
		sw_context->cmd_bounce_size =
			PAGE_ALIGN(sw_context->cmd_bounce_size +
				   (sw_context->cmd_bounce_size >> 1));
	}

	if (sw_context->cmd_bounce != NULL)
		vfree(sw_context->cmd_bounce);

	sw_context->cmd_bounce = vmalloc(sw_context->cmd_bounce_size);

	if (sw_context->cmd_bounce == NULL) {
		DRM_ERROR("Failed to allocate command bounce buffer.\n");
		sw_context->cmd_bounce_size = 0;
		return -ENOMEM;
	}

	return 0;
}

/**
 * vmw_execbuf_fence_commands - create and submit a command stream fence
 *
 * Creates a fence object and submits a command stream marker.
 * If this fails for some reason, we sync the fifo and set *p_fence to
 * NULL. It is then safe to fence buffers with a NULL pointer.
 *
 * If @p_handle is not NULL @file_priv must also not be NULL. Creates
 * a userspace handle if @p_handle is not NULL, otherwise not.
 */
int vmw_execbuf_fence_commands(struct drm_file *file_priv,
			       struct vmw_private *dev_priv,
			       struct vmw_fence_obj **p_fence,
			       uint32_t *p_handle)
{
	uint32_t sequence;
	int ret;
	bool synced = false;

	/* p_handle implies file_priv. */
	BUG_ON(p_handle != NULL && file_priv == NULL);

	ret = vmw_fifo_send_fence(dev_priv, &sequence);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Fence submission error. Syncing.\n");
		synced = true;
	}

	if (p_handle != NULL)
		ret = vmw_user_fence_create(file_priv, dev_priv->fman,
					    sequence,
					    DRM_VMW_FENCE_FLAG_EXEC,
					    p_fence, p_handle);
	else
		ret = vmw_fence_create(dev_priv->fman, sequence,
				       DRM_VMW_FENCE_FLAG_EXEC,
				       p_fence);

	if (unlikely(ret != 0 && !synced)) {
		(void) vmw_fallback_wait(dev_priv, false, false,
					 sequence, false,
					 VMW_FENCE_WAIT_TIMEOUT);
		*p_fence = NULL;
	}

	return 0;
}
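
/*
 * vmw_execbuf_process - Validate and submit a command stream.
 *
 * The stream is copied in from user-space (or, for in-kernel callers,
 * used in place), verified command by command, and all referenced
 * buffers are reserved and validated. Relocations are then applied,
 * the stream is copied into fifo space, and a fence is created to
 * track completion. The fence is optionally reported back to
 * user-space in @user_fence_rep.
 */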
int vmw_execbuf_process(struct drm_file *file_priv,
			struct vmw_private *dev_priv,
			void __user *user_commands,
			void *kernel_commands,
			uint32_t command_size,
			uint64_t throttle_us,
			struct drm_vmw_fence_rep __user *user_fence_rep)
{
	struct vmw_sw_context *sw_context = &dev_priv->ctx;
	struct drm_vmw_fence_rep fence_rep;
	struct vmw_fence_obj *fence;
	uint32_t handle;
	void *cmd;
	int ret;

	ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
	if (unlikely(ret != 0))
		return -ERESTARTSYS;

	if (kernel_commands == NULL) {
		sw_context->kernel = false;

		ret = vmw_resize_cmd_bounce(sw_context, command_size);
		if (unlikely(ret != 0))
			goto out_unlock;

		ret = copy_from_user(sw_context->cmd_bounce,
				     user_commands, command_size);
		if (unlikely(ret != 0)) {
			ret = -EFAULT;
			DRM_ERROR("Failed copying commands.\n");
			goto out_unlock;
		}
		kernel_commands = sw_context->cmd_bounce;
	} else
		sw_context->kernel = true;

	sw_context->tfile = vmw_fpriv(file_priv)->tfile;
	sw_context->cid_valid = false;
	sw_context->sid_valid = false;
	sw_context->cur_reloc = 0;
	sw_context->cur_val_buf = 0;
	sw_context->num_ref_resources = 0;

	INIT_LIST_HEAD(&sw_context->validate_nodes);

	ret = vmw_cmd_check_all(dev_priv, sw_context, kernel_commands,
				command_size);
	if (unlikely(ret != 0))
		goto out_err;

	ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
	if (unlikely(ret != 0))
		goto out_err;

	ret = vmw_validate_buffers(dev_priv, sw_context);
	if (unlikely(ret != 0))
		goto out_err;

	vmw_apply_relocations(sw_context);

	if (throttle_us) {
		ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.marker_queue,
				   throttle_us);
		if (unlikely(ret != 0))
			goto out_throttle;
	}

	cmd = vmw_fifo_reserve(dev_priv, command_size);
	if (unlikely(cmd == NULL)) {
		DRM_ERROR("Failed reserving fifo space for commands.\n");
		ret = -ENOMEM;
		goto out_throttle;
	}

	memcpy(cmd, kernel_commands, command_size);
	vmw_fifo_commit(dev_priv, command_size);

	ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
					 &fence,
					 (user_fence_rep) ? &handle : NULL);
	/*
	 * This error is harmless, because if fence submission fails,
	 * vmw_fifo_send_fence will sync. The error will be propagated to
	 * user-space in @fence_rep
	 */

	if (ret != 0)
		DRM_ERROR("Fence submission error. Syncing.\n");

	ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
				    (void *) fence);

	vmw_clear_validations(sw_context);

	if (user_fence_rep) {
		fence_rep.error = ret;
		fence_rep.handle = handle;
		fence_rep.seqno = fence->seqno;
		vmw_update_seqno(dev_priv, &dev_priv->fifo);
		fence_rep.passed_seqno = dev_priv->last_read_seqno;

		/*
		 * copy_to_user errors will be detected by user space not
		 * seeing fence_rep::error filled in. Typically
		 * user-space would have pre-set that member to -EFAULT.
		 */
		ret = copy_to_user(user_fence_rep, &fence_rep,
				   sizeof(fence_rep));

		/*
		 * User-space lost the fence object. We need to sync
		 * and unreference the handle.
		 */
		if (unlikely(ret != 0) && (fence_rep.error == 0)) {
			BUG_ON(fence == NULL);

			ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
						  handle, TTM_REF_USAGE);
			DRM_ERROR("Fence copy error. Syncing.\n");
			(void) vmw_fence_obj_wait(fence,
						  fence->signal_mask,
						  false, false,
						  VMW_FENCE_WAIT_TIMEOUT);
		}
	}

	if (likely(fence != NULL))
		vmw_fence_obj_unreference(&fence);

	mutex_unlock(&dev_priv->cmdbuf_mutex);
	return 0;

out_err:
	vmw_free_relocations(sw_context);
out_throttle:
	ttm_eu_backoff_reservation(&sw_context->validate_nodes);
	vmw_clear_validations(sw_context);
out_unlock:
	mutex_unlock(&dev_priv->cmdbuf_mutex);
	return ret;
}
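
/*
 * vmw_execbuf_ioctl - Entry point for the execbuf ioctl. Checks the
 * argument version, takes the master read lock and hands the
 * user-space command stream to vmw_execbuf_process.
 */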
int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
	struct vmw_master *vmaster = vmw_master(file_priv->master);
	int ret;

	/*
	 * This will allow us to extend the ioctl argument while
	 * maintaining backwards compatibility:
	 * We take different code paths depending on the value of
	 * arg->version.
	 */
	if (unlikely(arg->version != DRM_VMW_EXECBUF_VERSION)) {
		DRM_ERROR("Incorrect execbuf version.\n");
		DRM_ERROR("You're running outdated experimental "
			  "vmwgfx user-space drivers.\n");
		return -EINVAL;
	}

	ret = ttm_read_lock(&vmaster->lock, true);
	if (unlikely(ret != 0))
		return ret;

	ret = vmw_execbuf_process(file_priv, dev_priv,
				  (void __user *)(unsigned long)arg->commands,
				  NULL, arg->command_size, arg->throttle_us,
				  (void __user *)(unsigned long)arg->fence_rep);

	if (unlikely(ret != 0))
		goto out_unlock;

	vmw_kms_cursor_post_execbuf(dev_priv);

out_unlock:
	ttm_read_unlock(&vmaster->lock);
	return ret;
}