vmwgfx_execbuf.c

/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_reg.h"
#include "ttm/ttm_bo_api.h"
#include "ttm/ttm_placement.h"
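
/*
 * Per-command verifier callbacks, run on each SVGA3D command before it
 * is committed to the device FIFO. vmw_cmd_invalid flags commands that
 * user-space may not submit directly; vmw_cmd_ok passes commands that
 * need no further checking.
 */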
static int vmw_cmd_invalid(struct vmw_private *dev_priv,
                           struct vmw_sw_context *sw_context,
                           SVGA3dCmdHeader *header)
{
        return capable(CAP_SYS_ADMIN) ? : -EINVAL;
}

static int vmw_cmd_ok(struct vmw_private *dev_priv,
                      struct vmw_sw_context *sw_context,
                      SVGA3dCmdHeader *header)
{
        return 0;
}
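
/*
 * vmw_cmd_cid_check - Verify that a command's context id refers to a
 * valid context. The most recently validated cid is cached in the
 * software context so that back-to-back commands against the same
 * context skip the lookup.
 */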
static int vmw_cmd_cid_check(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_cid_cmd {
                SVGA3dCmdHeader header;
                __le32 cid;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_cid_cmd, header);
        if (likely(sw_context->cid_valid && cmd->cid == sw_context->last_cid))
                return 0;

        ret = vmw_context_check(dev_priv, sw_context->tfile, cmd->cid);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use context %u\n",
                          (unsigned) cmd->cid);
                return ret;
        }

        sw_context->last_cid = cmd->cid;
        sw_context->cid_valid = true;
        return 0;
}
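
/*
 * vmw_cmd_sid_check - Verify a surface id and patch it in place with
 * the device surface id. The last translation is cached, so repeated
 * references to the same user-space sid reuse sid_translation instead
 * of performing another lookup.
 */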
static int vmw_cmd_sid_check(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             uint32_t *sid)
{
        if (*sid == SVGA3D_INVALID_ID)
                return 0;

        if (unlikely((!sw_context->sid_valid ||
                      *sid != sw_context->last_sid))) {
                int real_id;
                int ret = vmw_surface_check(dev_priv, sw_context->tfile,
                                            *sid, &real_id);

                if (unlikely(ret != 0)) {
                        DRM_ERROR("Could not find or use surface 0x%08x "
                                  "address 0x%08lx\n",
                                  (unsigned int) *sid,
                                  (unsigned long) sid);
                        return ret;
                }

                sw_context->last_sid = *sid;
                sw_context->sid_valid = true;
                *sid = real_id;
                sw_context->sid_translation = real_id;
        } else
                *sid = sw_context->sid_translation;

        return 0;
}
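
/*
 * Thin wrappers that locate the surface id(s) embedded in the body of
 * a specific command type and run them through vmw_cmd_sid_check().
 */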
static int vmw_cmd_set_render_target_check(struct vmw_private *dev_priv,
                                           struct vmw_sw_context *sw_context,
                                           SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetRenderTarget body;
        } *cmd;
        int ret;

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.target.sid);
        return ret;
}

static int vmw_cmd_surface_copy_check(struct vmw_private *dev_priv,
                                      struct vmw_sw_context *sw_context,
                                      SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceCopy body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
        if (unlikely(ret != 0))
                return ret;
        return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}

static int vmw_cmd_stretch_blt_check(struct vmw_private *dev_priv,
                                     struct vmw_sw_context *sw_context,
                                     SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceStretchBlt body;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        ret = vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.src.sid);
        if (unlikely(ret != 0))
                return ret;
        return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.dest.sid);
}

static int vmw_cmd_blt_surf_screen_check(struct vmw_private *dev_priv,
                                         struct vmw_sw_context *sw_context,
                                         SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdBlitSurfaceToScreen body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.srcImage.sid);
}

static int vmw_cmd_present_check(struct vmw_private *dev_priv,
                                 struct vmw_sw_context *sw_context,
                                 SVGA3dCmdHeader *header)
{
        struct vmw_sid_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdPresent body;
        } *cmd;

        cmd = container_of(header, struct vmw_sid_cmd, header);
        return vmw_cmd_sid_check(dev_priv, sw_context, &cmd->body.sid);
}
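
/*
 * vmw_translate_guest_ptr - Look up the DMA buffer backing a guest
 * pointer, queue a relocation so that the GMR id can be patched in
 * after validation, and add the buffer to the validation list (once
 * per buffer). On success a reference is returned in @vmw_bo_p and
 * must be released by the caller.
 */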
static int vmw_translate_guest_ptr(struct vmw_private *dev_priv,
                                   struct vmw_sw_context *sw_context,
                                   SVGAGuestPtr *ptr,
                                   struct vmw_dma_buffer **vmw_bo_p)
{
        struct vmw_dma_buffer *vmw_bo = NULL;
        struct ttm_buffer_object *bo;
        uint32_t handle = ptr->gmrId;
        struct vmw_relocation *reloc;
        uint32_t cur_validate_node;
        struct ttm_validate_buffer *val_buf;
        int ret;

        ret = vmw_user_dmabuf_lookup(sw_context->tfile, handle, &vmw_bo);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not find or use GMR region.\n");
                return -EINVAL;
        }
        bo = &vmw_bo->base;

        if (unlikely(sw_context->cur_reloc >= VMWGFX_MAX_RELOCATIONS)) {
                DRM_ERROR("Max number relocations per submission"
                          " exceeded\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        reloc = &sw_context->relocs[sw_context->cur_reloc++];
        reloc->location = ptr;

        cur_validate_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);
        if (unlikely(cur_validate_node >= VMWGFX_MAX_GMRS)) {
                DRM_ERROR("Max number of DMA buffers per submission"
                          " exceeded.\n");
                ret = -EINVAL;
                goto out_no_reloc;
        }

        reloc->index = cur_validate_node;
        if (unlikely(cur_validate_node == sw_context->cur_val_buf)) {
                val_buf = &sw_context->val_bufs[cur_validate_node];
                val_buf->bo = ttm_bo_reference(bo);
                val_buf->new_sync_obj_arg = (void *) dev_priv;
                list_add_tail(&val_buf->head, &sw_context->validate_nodes);
                ++sw_context->cur_val_buf;
        }
        *vmw_bo_p = vmw_bo;
        return 0;

out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
        /* Clear the caller's pointer, not the local parameter copy. */
        *vmw_bo_p = NULL;
        return ret;
}
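
/*
 * Query commands carry both a context id and a guest pointer to the
 * result structure; the former is checked and the latter translated.
 * The buffer reference taken by vmw_translate_guest_ptr() is dropped
 * again once the relocation has been queued.
 */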
static int vmw_cmd_end_query(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdEndQuery q;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_query_cmd, header);
        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->q.guestResult,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        vmw_dmabuf_unreference(&vmw_bo);
        return 0;
}

static int vmw_cmd_wait_query(struct vmw_private *dev_priv,
                              struct vmw_sw_context *sw_context,
                              SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo;
        struct vmw_query_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdWaitForQuery q;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_query_cmd, header);
        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->q.guestResult,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        vmw_dmabuf_unreference(&vmw_bo);
        return 0;
}
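
/*
 * vmw_cmd_dma - Verify a surface DMA command: translate the guest
 * pointer, look up the host surface, and patch the command stream
 * with the device surface id. Cursor surfaces are snooped so that
 * the kms code can track cursor image updates.
 */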
static int vmw_cmd_dma(struct vmw_private *dev_priv,
                       struct vmw_sw_context *sw_context,
                       SVGA3dCmdHeader *header)
{
        struct vmw_dma_buffer *vmw_bo = NULL;
        struct ttm_buffer_object *bo;
        struct vmw_surface *srf = NULL;
        struct vmw_dma_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSurfaceDMA dma;
        } *cmd;
        int ret;

        cmd = container_of(header, struct vmw_dma_cmd, header);
        ret = vmw_translate_guest_ptr(dev_priv, sw_context,
                                      &cmd->dma.guest.ptr,
                                      &vmw_bo);
        if (unlikely(ret != 0))
                return ret;

        bo = &vmw_bo->base;
        ret = vmw_user_surface_lookup_handle(dev_priv, sw_context->tfile,
                                             cmd->dma.host.sid, &srf);
        if (ret) {
                DRM_ERROR("could not find surface\n");
                goto out_no_reloc;
        }

        /**
         * Patch command stream with device SID.
         */
        cmd->dma.host.sid = srf->res.id;
        vmw_kms_cursor_snoop(srf, sw_context->tfile, bo, header);

        /**
         * FIXME: May deadlock here when called from the
         * command parsing code.
         */
        vmw_surface_unreference(&srf);

out_no_reloc:
        vmw_dmabuf_unreference(&vmw_bo);
        return ret;
}
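
/*
 * vmw_cmd_draw - Verify a draw-primitives command. The vertex
 * declarations and primitive ranges trail the fixed-size body, so
 * their counts are bounded against header->size before each embedded
 * surface id is checked.
 */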
static int vmw_cmd_draw(struct vmw_private *dev_priv,
                        struct vmw_sw_context *sw_context,
                        SVGA3dCmdHeader *header)
{
        struct vmw_draw_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdDrawPrimitives body;
        } *cmd;
        SVGA3dVertexDecl *decl = (SVGA3dVertexDecl *)(
                (unsigned long)header + sizeof(*cmd));
        SVGA3dPrimitiveRange *range;
        uint32_t i;
        uint32_t maxnum;
        int ret;

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        cmd = container_of(header, struct vmw_draw_cmd, header);
        maxnum = (header->size - sizeof(cmd->body)) / sizeof(*decl);

        if (unlikely(cmd->body.numVertexDecls > maxnum)) {
                DRM_ERROR("Illegal number of vertex declarations.\n");
                return -EINVAL;
        }

        for (i = 0; i < cmd->body.numVertexDecls; ++i, ++decl) {
                ret = vmw_cmd_sid_check(dev_priv, sw_context,
                                        &decl->array.surfaceId);
                if (unlikely(ret != 0))
                        return ret;
        }

        maxnum = (header->size - sizeof(cmd->body) -
                  cmd->body.numVertexDecls * sizeof(*decl)) / sizeof(*range);
        if (unlikely(cmd->body.numRanges > maxnum)) {
                DRM_ERROR("Illegal number of index ranges.\n");
                return -EINVAL;
        }

        range = (SVGA3dPrimitiveRange *) decl;
        for (i = 0; i < cmd->body.numRanges; ++i, ++range) {
                ret = vmw_cmd_sid_check(dev_priv, sw_context,
                                        &range->indexArray.surfaceId);
                if (unlikely(ret != 0))
                        return ret;
        }
        return 0;
}
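
/*
 * vmw_cmd_tex_state - Verify a set-texture-state command. The
 * variable-length state array follows the fixed body; only
 * SVGA3D_TS_BIND_TEXTURE entries carry a surface id that needs
 * checking.
 */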
static int vmw_cmd_tex_state(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             SVGA3dCmdHeader *header)
{
        struct vmw_tex_state_cmd {
                SVGA3dCmdHeader header;
                SVGA3dCmdSetTextureState state;
        };
        /* header->size excludes the header itself, hence sizeof(*header). */
        SVGA3dTextureState *last_state = (SVGA3dTextureState *)
                ((unsigned long) header + header->size + sizeof(*header));
        SVGA3dTextureState *cur_state = (SVGA3dTextureState *)
                ((unsigned long) header + sizeof(struct vmw_tex_state_cmd));
        int ret;

        ret = vmw_cmd_cid_check(dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                return ret;

        for (; cur_state < last_state; ++cur_state) {
                if (likely(cur_state->name != SVGA3D_TS_BIND_TEXTURE))
                        continue;

                ret = vmw_cmd_sid_check(dev_priv, sw_context,
                                        &cur_state->value);
                if (unlikely(ret != 0))
                        return ret;
        }
        return 0;
}
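
/*
 * Dispatch table mapping each SVGA3D command id, offset by
 * SVGA_3D_CMD_BASE, to its verifier. Surface and context define /
 * destroy are rejected here; those operations go through their own
 * ioctls instead of the command stream.
 */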
typedef int (*vmw_cmd_func) (struct vmw_private *,
                             struct vmw_sw_context *,
                             SVGA3dCmdHeader *);

#define VMW_CMD_DEF(cmd, func) \
        [cmd - SVGA_3D_CMD_BASE] = func

static vmw_cmd_func vmw_cmd_funcs[SVGA_3D_CMD_MAX] = {
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DEFINE, &vmw_cmd_invalid),
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DESTROY, &vmw_cmd_invalid),
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_COPY, &vmw_cmd_surface_copy_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_STRETCHBLT, &vmw_cmd_stretch_blt_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SURFACE_DMA, &vmw_cmd_dma),
        VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DEFINE, &vmw_cmd_invalid),
        VMW_CMD_DEF(SVGA_3D_CMD_CONTEXT_DESTROY, &vmw_cmd_invalid),
        VMW_CMD_DEF(SVGA_3D_CMD_SETTRANSFORM, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETZRANGE, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERSTATE, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETRENDERTARGET,
                    &vmw_cmd_set_render_target_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETTEXTURESTATE, &vmw_cmd_tex_state),
        VMW_CMD_DEF(SVGA_3D_CMD_SETMATERIAL, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTDATA, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETLIGHTENABLED, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETVIEWPORT, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SETCLIPPLANE, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_CLEAR, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_PRESENT, &vmw_cmd_present_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DEFINE, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SHADER_DESTROY, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_SET_SHADER_CONST, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_DRAW_PRIMITIVES, &vmw_cmd_draw),
        VMW_CMD_DEF(SVGA_3D_CMD_SETSCISSORRECT, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_BEGIN_QUERY, &vmw_cmd_cid_check),
        VMW_CMD_DEF(SVGA_3D_CMD_END_QUERY, &vmw_cmd_end_query),
        VMW_CMD_DEF(SVGA_3D_CMD_WAIT_FOR_QUERY, &vmw_cmd_wait_query),
        VMW_CMD_DEF(SVGA_3D_CMD_PRESENT_READBACK, &vmw_cmd_ok),
        VMW_CMD_DEF(SVGA_3D_CMD_BLIT_SURFACE_TO_SCREEN,
                    &vmw_cmd_blt_surf_screen_check)
};
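
/*
 * vmw_cmd_check - Verify a single command and report its total size
 * in bytes through @size. Plain 2D SVGA_CMD_UPDATE commands pass
 * through with their fixed size of five 32-bit words; everything else
 * must be a known SVGA3D command whose declared size fits in the
 * remaining buffer.
 */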
static int vmw_cmd_check(struct vmw_private *dev_priv,
                         struct vmw_sw_context *sw_context,
                         void *buf, uint32_t *size)
{
        uint32_t cmd_id;
        uint32_t size_remaining = *size;
        SVGA3dCmdHeader *header = (SVGA3dCmdHeader *) buf;
        int ret;

        cmd_id = ((uint32_t *)buf)[0];
        if (cmd_id == SVGA_CMD_UPDATE) {
                *size = 5 << 2;
                return 0;
        }

        cmd_id = le32_to_cpu(header->id);
        *size = le32_to_cpu(header->size) + sizeof(SVGA3dCmdHeader);

        cmd_id -= SVGA_3D_CMD_BASE;
        if (unlikely(*size > size_remaining))
                goto out_err;

        if (unlikely(cmd_id >= SVGA_3D_CMD_MAX - SVGA_3D_CMD_BASE))
                goto out_err;

        ret = vmw_cmd_funcs[cmd_id](dev_priv, sw_context, header);
        if (unlikely(ret != 0))
                goto out_err;

        return 0;
out_err:
        DRM_ERROR("Illegal / Invalid SVGA3D command: %d\n",
                  cmd_id + SVGA_3D_CMD_BASE);
        return -EINVAL;
}
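
/*
 * vmw_cmd_check_all - Walk the submitted command buffer and verify
 * each command in turn, advancing by the size reported by
 * vmw_cmd_check().
 */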
static int vmw_cmd_check_all(struct vmw_private *dev_priv,
                             struct vmw_sw_context *sw_context,
                             void *buf, uint32_t size)
{
        int32_t cur_size = size;
        int ret;

        while (cur_size > 0) {
                size = cur_size;
                ret = vmw_cmd_check(dev_priv, sw_context, buf, &size);
                if (unlikely(ret != 0))
                        return ret;
                buf = (void *)((unsigned long) buf + size);
                cur_size -= size;
        }

        if (unlikely(cur_size != 0)) {
                DRM_ERROR("Command verifier out of sync.\n");
                return -EINVAL;
        }

        return 0;
}
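
/*
 * Relocation handling: while commands are verified, guest-pointer
 * locations are recorded in sw_context->relocs. After the buffers
 * have been validated and placed, vmw_apply_relocations() patches
 * each recorded location with the final GMR id, or with the
 * framebuffer GMR plus an offset for VRAM placements.
 */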
static void vmw_free_relocations(struct vmw_sw_context *sw_context)
{
        sw_context->cur_reloc = 0;
}

static void vmw_apply_relocations(struct vmw_sw_context *sw_context)
{
        uint32_t i;
        struct vmw_relocation *reloc;
        struct ttm_validate_buffer *validate;
        struct ttm_buffer_object *bo;

        for (i = 0; i < sw_context->cur_reloc; ++i) {
                reloc = &sw_context->relocs[i];
                validate = &sw_context->val_bufs[reloc->index];
                bo = validate->bo;
                if (bo->mem.mem_type == TTM_PL_VRAM) {
                        reloc->location->offset += bo->offset;
                        reloc->location->gmrId = SVGA_GMR_FRAMEBUFFER;
                } else
                        reloc->location->gmrId = bo->mem.start;
        }
        vmw_free_relocations(sw_context);
}
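
/*
 * vmw_clear_validations - Empty the validation list and drop the
 * buffer references taken when the list was built.
 */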
static void vmw_clear_validations(struct vmw_sw_context *sw_context)
{
        struct ttm_validate_buffer *entry, *next;

        list_for_each_entry_safe(entry, next, &sw_context->validate_nodes,
                                 head) {
                list_del(&entry->head);
                vmw_dmabuf_validate_clear(entry->bo);
                ttm_bo_unref(&entry->bo);
                sw_context->cur_val_buf--;
        }
        BUG_ON(sw_context->cur_val_buf != 0);
}

static int vmw_validate_single_buffer(struct vmw_private *dev_priv,
                                      struct ttm_buffer_object *bo)
{
        int ret;

        /**
         * Put BO in VRAM if there is space, otherwise as a GMR.
         * If there is no space in VRAM and GMR ids are all used up,
         * start evicting GMRs to make room. If the DMA buffer can't be
         * used as a GMR, this will return -ENOMEM.
         */

        ret = ttm_bo_validate(bo, &vmw_vram_gmr_placement, true, false, false);
        if (likely(ret == 0 || ret == -ERESTARTSYS))
                return ret;

        /**
         * If that failed, try VRAM again, this time evicting
         * previous contents.
         */

        DRM_INFO("Falling through to VRAM.\n");
        ret = ttm_bo_validate(bo, &vmw_vram_placement, true, false, false);
        return ret;
}

static int vmw_validate_buffers(struct vmw_private *dev_priv,
                                struct vmw_sw_context *sw_context)
{
        struct ttm_validate_buffer *entry;
        int ret;

        list_for_each_entry(entry, &sw_context->validate_nodes, head) {
                ret = vmw_validate_single_buffer(dev_priv, entry->bo);
                if (unlikely(ret != 0))
                        return ret;
        }
        return 0;
}
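
/*
 * vmw_execbuf_ioctl - Main entry point for command submission.
 * Copies the user command buffer into reserved FIFO space, verifies
 * and patches it, validates and fences all referenced buffers, and
 * reports the fence sequence back to user-space through the supplied
 * fence_rep pointer.
 */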
int vmw_execbuf_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_vmw_execbuf_arg *arg = (struct drm_vmw_execbuf_arg *)data;
        struct drm_vmw_fence_rep fence_rep;
        struct drm_vmw_fence_rep __user *user_fence_rep;
        int ret;
        void *user_cmd;
        void *cmd;
        uint32_t sequence;
        struct vmw_sw_context *sw_context = &dev_priv->ctx;
        struct vmw_master *vmaster = vmw_master(file_priv->master);

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                return ret;

        ret = mutex_lock_interruptible(&dev_priv->cmdbuf_mutex);
        if (unlikely(ret != 0)) {
                ret = -ERESTARTSYS;
                goto out_no_cmd_mutex;
        }

        cmd = vmw_fifo_reserve(dev_priv, arg->command_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving fifo space for commands.\n");
                ret = -ENOMEM;
                goto out_unlock;
        }

        user_cmd = (void __user *)(unsigned long)arg->commands;
        ret = copy_from_user(cmd, user_cmd, arg->command_size);
        if (unlikely(ret != 0)) {
                ret = -EFAULT;
                DRM_ERROR("Failed copying commands.\n");
                goto out_commit;
        }

        sw_context->tfile = vmw_fpriv(file_priv)->tfile;
        sw_context->cid_valid = false;
        sw_context->sid_valid = false;
        sw_context->cur_reloc = 0;
        sw_context->cur_val_buf = 0;

        INIT_LIST_HEAD(&sw_context->validate_nodes);

        ret = vmw_cmd_check_all(dev_priv, sw_context, cmd, arg->command_size);
        if (unlikely(ret != 0))
                goto out_err;
        ret = ttm_eu_reserve_buffers(&sw_context->validate_nodes);
        if (unlikely(ret != 0))
                goto out_err;

        ret = vmw_validate_buffers(dev_priv, sw_context);
        if (unlikely(ret != 0))
                goto out_err;

        vmw_apply_relocations(sw_context);

        if (arg->throttle_us) {
                ret = vmw_wait_lag(dev_priv, &dev_priv->fifo.fence_queue,
                                   arg->throttle_us);
                if (unlikely(ret != 0))
                        goto out_err;
        }

        vmw_fifo_commit(dev_priv, arg->command_size);

        ret = vmw_fifo_send_fence(dev_priv, &sequence);

        ttm_eu_fence_buffer_objects(&sw_context->validate_nodes,
                                    (void *)(unsigned long) sequence);
        vmw_clear_validations(sw_context);
        mutex_unlock(&dev_priv->cmdbuf_mutex);

        /*
         * This error is harmless, because if fence submission fails,
         * vmw_fifo_send_fence will sync.
         */

        if (ret != 0)
                DRM_ERROR("Fence submission error. Syncing.\n");

        fence_rep.error = ret;
        fence_rep.fence_seq = (uint64_t) sequence;
        fence_rep.pad64 = 0;

        user_fence_rep = (struct drm_vmw_fence_rep __user *)
            (unsigned long)arg->fence_rep;

        /*
         * copy_to_user errors will be detected by user space not
         * seeing fence_rep::error filled in.
         */

        ret = copy_to_user(user_fence_rep, &fence_rep, sizeof(fence_rep));

        vmw_kms_cursor_post_execbuf(dev_priv);
        ttm_read_unlock(&vmaster->lock);
        return 0;
out_err:
        vmw_free_relocations(sw_context);
        ttm_eu_backoff_reservation(&sw_context->validate_nodes);
        vmw_clear_validations(sw_context);
out_commit:
        vmw_fifo_commit(dev_priv, 0);
out_unlock:
        mutex_unlock(&dev_priv->cmdbuf_mutex);
out_no_cmd_mutex:
        ttm_read_unlock(&vmaster->lock);
        return ret;
}