vmwgfx_resource.c

/**************************************************************************
 *
 * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include "vmwgfx_drv.h"
#include "vmwgfx_drm.h"
#include "ttm/ttm_object.h"
#include "ttm/ttm_placement.h"
#include "drmP.h"

#define VMW_RES_CONTEXT ttm_driver_type0
#define VMW_RES_SURFACE ttm_driver_type1
#define VMW_RES_STREAM ttm_driver_type2

/* XXX: This isn't a real hardware flag, but just a hack for the kernel to
 * know about primary surfaces. Find a better way to accomplish this.
 */
#define SVGA3D_SURFACE_HINT_SCANOUT (1 << 9)

struct vmw_user_context {
        struct ttm_base_object base;
        struct vmw_resource res;
};

struct vmw_user_surface {
        struct ttm_base_object base;
        struct vmw_surface srf;
};

struct vmw_user_dma_buffer {
        struct ttm_base_object base;
        struct vmw_dma_buffer dma;
};

struct vmw_bo_user_rep {
        uint32_t handle;
        uint64_t map_handle;
};

struct vmw_stream {
        struct vmw_resource res;
        uint32_t stream_id;
};

struct vmw_user_stream {
        struct ttm_base_object base;
        struct vmw_stream stream;
};

static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
        return container_of(bo, struct vmw_dma_buffer, base);
}

static inline struct vmw_user_dma_buffer *
vmw_user_dma_buffer(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        return container_of(vmw_bo, struct vmw_user_dma_buffer, dma);
}

struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
{
        kref_get(&res->kref);
        return res;
}

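/**
 * vmw_resource_release - destroy a resource once its last reference is gone.
 *
 * Invoked through kref_put() from vmw_resource_unreference() with the
 * resource_lock held for writing. The id is removed from the idr under
 * the lock; the lock is then dropped around the hw_destroy and res_free
 * callbacks and reacquired before returning to the caller.
 */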
static void vmw_resource_release(struct kref *kref)
{
        struct vmw_resource *res =
            container_of(kref, struct vmw_resource, kref);
        struct vmw_private *dev_priv = res->dev_priv;

        idr_remove(res->idr, res->id);
        write_unlock(&dev_priv->resource_lock);

        if (likely(res->hw_destroy != NULL))
                res->hw_destroy(res);

        if (res->res_free != NULL)
                res->res_free(res);
        else
                kfree(res);

        write_lock(&dev_priv->resource_lock);
}

void vmw_resource_unreference(struct vmw_resource **p_res)
{
        struct vmw_resource *res = *p_res;
        struct vmw_private *dev_priv = res->dev_priv;

        *p_res = NULL;
        write_lock(&dev_priv->resource_lock);
        kref_put(&res->kref, vmw_resource_release);
        write_unlock(&dev_priv->resource_lock);
}

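/**
 * vmw_resource_init - initialize a resource and enter it into an idr.
 *
 * Allocates an id for @res in @idr, retrying while idr_get_new_above()
 * reports that a new pre-allocation is needed. The resource starts out
 * unavailable; it does not become visible to vmw_resource_lookup()
 * until vmw_resource_activate() is called.
 */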
static int vmw_resource_init(struct vmw_private *dev_priv,
                             struct vmw_resource *res,
                             struct idr *idr,
                             enum ttm_object_type obj_type,
                             void (*res_free) (struct vmw_resource *res))
{
        int ret;

        kref_init(&res->kref);
        res->hw_destroy = NULL;
        res->res_free = res_free;
        res->res_type = obj_type;
        res->idr = idr;
        res->avail = false;
        res->dev_priv = dev_priv;

        do {
                if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
                        return -ENOMEM;

                write_lock(&dev_priv->resource_lock);
                ret = idr_get_new_above(idr, res, 1, &res->id);
                write_unlock(&dev_priv->resource_lock);

        } while (ret == -EAGAIN);

        return ret;
}

/**
 * vmw_resource_activate
 *
 * @res: Pointer to the newly created resource
 * @hw_destroy: Destroy function. NULL if none.
 *
 * Activate a resource after the hardware has been made aware of it.
 * Set the destroy function to @hw_destroy. Typically this frees the
 * resource and destroys the hardware resources associated with it.
 * Activation means that vmw_resource_lookup will be able to find the
 * resource.
 */
static void vmw_resource_activate(struct vmw_resource *res,
                                  void (*hw_destroy) (struct vmw_resource *))
{
        struct vmw_private *dev_priv = res->dev_priv;

        write_lock(&dev_priv->resource_lock);
        res->avail = true;
        res->hw_destroy = hw_destroy;
        write_unlock(&dev_priv->resource_lock);
}

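/**
 * vmw_resource_lookup - look up and reference an activated resource.
 *
 * Returns a pointer to the resource with an extra reference held, or
 * NULL if no resource with the given id exists in @idr or if it has
 * not yet been activated.
 */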
struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
                                         struct idr *idr, int id)
{
        struct vmw_resource *res;

        read_lock(&dev_priv->resource_lock);
        res = idr_find(idr, id);
        if (res && res->avail)
                kref_get(&res->kref);
        else
                res = NULL;
        read_unlock(&dev_priv->resource_lock);

        if (unlikely(res == NULL))
                return NULL;

        return res;
}

/**
 * Context management:
 */

static void vmw_hw_context_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroyContext body;
        } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for context "
                          "destruction.\n");
                return;
        }

        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
        cmd->header.size = cpu_to_le32(sizeof(cmd->body));
        cmd->body.cid = cpu_to_le32(res->id);
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
}

static int vmw_context_init(struct vmw_private *dev_priv,
                            struct vmw_resource *res,
                            void (*res_free) (struct vmw_resource *res))
{
        int ret;

        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineContext body;
        } *cmd;

        ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
                                VMW_RES_CONTEXT, res_free);

        if (unlikely(ret != 0)) {
                if (res_free == NULL)
                        kfree(res);
                else
                        res_free(res);
                return ret;
        }

        cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed.\n");
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
        cmd->header.size = cpu_to_le32(sizeof(cmd->body));
        cmd->body.cid = cpu_to_le32(res->id);
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
        vmw_resource_activate(res, vmw_hw_context_destroy);
        return 0;
}

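/**
 * vmw_context_alloc - allocate a kernel-side context resource.
 *
 * Convenience wrapper around vmw_context_init() for contexts that are
 * not tied to a user-space base object. Returns NULL on allocation or
 * initialization failure.
 */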
struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
{
        struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
        int ret;

        if (unlikely(res == NULL))
                return NULL;

        ret = vmw_context_init(dev_priv, res, NULL);
        return (ret == 0) ? res : NULL;
}

/**
 * User-space context management:
 */

static void vmw_user_context_free(struct vmw_resource *res)
{
        struct vmw_user_context *ctx =
            container_of(res, struct vmw_user_context, res);

        kfree(ctx);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_context_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_context *ctx =
            container_of(base, struct vmw_user_context, base);
        struct vmw_resource *res = &ctx->res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_resource *res;
        struct vmw_user_context *ctx;
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret = 0;

        res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_context_free) {
                ret = -EINVAL;
                goto out;
        }

        ctx = container_of(res, struct vmw_user_context, res);
        if (ctx->base.tfile != tfile && !ctx->base.shareable) {
                ret = -EPERM;
                goto out;
        }

        ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
out:
        vmw_resource_unreference(&res);
        return ret;
}

int vmw_context_define_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;

        if (unlikely(ctx == NULL))
                return -ENOMEM;

        res = &ctx->res;
        ctx->base.shareable = false;
        ctx->base.tfile = NULL;

        ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
        if (unlikely(ret != 0))
                return ret;

        tmp = vmw_resource_reference(&ctx->res);
        ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
                                   &vmw_user_context_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        arg->cid = res->id;
out_err:
        vmw_resource_unreference(&res);
        return ret;
}

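/**
 * vmw_context_check - check that a context id is valid and accessible.
 *
 * Returns 0 if the context exists, is activated and may be used by
 * @tfile, -EPERM if it belongs to another file and is not shareable,
 * and -EINVAL if no such context exists. No reference is taken.
 */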
int vmw_context_check(struct vmw_private *dev_priv,
                      struct ttm_object_file *tfile,
                      int id)
{
        struct vmw_resource *res;
        int ret = 0;

        read_lock(&dev_priv->resource_lock);
        res = idr_find(&dev_priv->context_idr, id);
        if (res && res->avail) {
                struct vmw_user_context *ctx =
                    container_of(res, struct vmw_user_context, res);
                if (ctx->base.tfile != tfile && !ctx->base.shareable)
                        ret = -EPERM;
        } else
                ret = -EINVAL;
        read_unlock(&dev_priv->resource_lock);

        return ret;
}

/**
 * Surface management.
 */

static void vmw_hw_surface_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDestroySurface body;
        } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));

        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Failed reserving FIFO space for surface "
                          "destruction.\n");
                return;
        }

        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
        cmd->header.size = cpu_to_le32(sizeof(cmd->body));
        cmd->body.sid = cpu_to_le32(res->id);
        vmw_fifo_commit(dev_priv, sizeof(*cmd));
}

void vmw_surface_res_free(struct vmw_resource *res)
{
        struct vmw_surface *srf = container_of(res, struct vmw_surface, res);

        kfree(srf->sizes);
        kfree(srf->snooper.image);
        kfree(srf);
}

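/**
 * vmw_surface_init - register a surface with the device.
 *
 * Emits an SVGA_3D_CMD_SURFACE_DEFINE command: a fixed-size body with
 * the surface id, flags, format and per-face mip level counts, followed
 * by one SVGA3dSize entry per mip level, copied from @srf->sizes.
 */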
int vmw_surface_init(struct vmw_private *dev_priv,
                     struct vmw_surface *srf,
                     void (*res_free) (struct vmw_resource *res))
{
        int ret;
        struct {
                SVGA3dCmdHeader header;
                SVGA3dCmdDefineSurface body;
        } *cmd;
        SVGA3dSize *cmd_size;
        struct vmw_resource *res = &srf->res;
        struct drm_vmw_size *src_size;
        size_t submit_size;
        uint32_t cmd_len;
        int i;

        BUG_ON(res_free == NULL);
        ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
                                VMW_RES_SURFACE, res_free);

        if (unlikely(ret != 0)) {
                res_free(res);
                return ret;
        }

        submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
        cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);

        cmd = vmw_fifo_reserve(dev_priv, submit_size);
        if (unlikely(cmd == NULL)) {
                DRM_ERROR("Fifo reserve failed for create surface.\n");
                vmw_resource_unreference(&res);
                return -ENOMEM;
        }

        cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
        cmd->header.size = cpu_to_le32(cmd_len);
        cmd->body.sid = cpu_to_le32(res->id);
        cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
        cmd->body.format = cpu_to_le32(srf->format);
        for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
                cmd->body.face[i].numMipLevels =
                    cpu_to_le32(srf->mip_levels[i]);
        }

        cmd += 1;
        cmd_size = (SVGA3dSize *) cmd;
        src_size = srf->sizes;

        for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
                cmd_size->width = cpu_to_le32(src_size->width);
                cmd_size->height = cpu_to_le32(src_size->height);
                cmd_size->depth = cpu_to_le32(src_size->depth);
        }

        vmw_fifo_commit(dev_priv, submit_size);
        vmw_resource_activate(res, vmw_hw_surface_destroy);
        return 0;
}

static void vmw_user_surface_free(struct vmw_resource *res)
{
        struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
        struct vmw_user_surface *user_srf =
            container_of(srf, struct vmw_user_surface, srf);

        kfree(srf->sizes);
        kfree(srf->snooper.image);
        kfree(user_srf);
}

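/**
 * vmw_user_surface_lookup_handle - look up a surface by user-space handle.
 *
 * Translates a ttm base-object handle into a referenced vmw_surface.
 * Fails with -EINVAL if the handle does not name a surface, or if the
 * underlying resource is not yet activated or is not a user surface.
 */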
int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
                                   struct ttm_object_file *tfile,
                                   uint32_t handle, struct vmw_surface **out)
{
        struct vmw_resource *res;
        struct vmw_surface *srf;
        struct vmw_user_surface *user_srf;
        struct ttm_base_object *base;
        int ret = -EINVAL;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL))
                return -EINVAL;

        if (unlikely(base->object_type != VMW_RES_SURFACE))
                goto out_bad_resource;

        user_srf = container_of(base, struct vmw_user_surface, base);
        srf = &user_srf->srf;
        res = &srf->res;

        read_lock(&dev_priv->resource_lock);

        if (!res->avail || res->res_free != &vmw_user_surface_free) {
                read_unlock(&dev_priv->resource_lock);
                goto out_bad_resource;
        }

        kref_get(&res->kref);
        read_unlock(&dev_priv->resource_lock);

        *out = srf;
        ret = 0;

out_bad_resource:
        ttm_base_object_unref(&base);

        return ret;
}

static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_surface *user_srf =
            container_of(base, struct vmw_user_surface, base);
        struct vmw_resource *res = &user_srf->srf.res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;

        return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
}

int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_surface *user_srf =
            kmalloc(sizeof(*user_srf), GFP_KERNEL);
        struct vmw_surface *srf;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        union drm_vmw_surface_create_arg *arg =
            (union drm_vmw_surface_create_arg *)data;
        struct drm_vmw_surface_create_req *req = &arg->req;
        struct drm_vmw_surface_arg *rep = &arg->rep;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct drm_vmw_size __user *user_sizes;
        int ret;
        int i;

        if (unlikely(user_srf == NULL))
                return -ENOMEM;

        srf = &user_srf->srf;
        res = &srf->res;

        srf->flags = req->flags;
        srf->format = req->format;
        memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
        srf->num_sizes = 0;
        for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
                srf->num_sizes += srf->mip_levels[i];

        if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
            DRM_VMW_MAX_MIP_LEVELS) {
                ret = -EINVAL;
                goto out_err0;
        }

        srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
        if (unlikely(srf->sizes == NULL)) {
                ret = -ENOMEM;
                goto out_err0;
        }

        user_sizes = (struct drm_vmw_size __user *)(unsigned long)
            req->size_addr;

        ret = copy_from_user(srf->sizes, user_sizes,
                             srf->num_sizes * sizeof(*srf->sizes));
        if (unlikely(ret != 0)) {
                /* copy_from_user() returns the number of bytes left to
                 * copy, not an errno, so convert it here. */
                ret = -EFAULT;
                goto out_err1;
        }

        if (srf->flags & SVGA3D_SURFACE_HINT_SCANOUT) {
                /* we should not send this flag down to hardware since
                 * it's not an official one
                 */
                srf->flags &= ~SVGA3D_SURFACE_HINT_SCANOUT;
                srf->scanout = true;
        } else if (req->scanout)
                srf->scanout = true;
        else
                srf->scanout = false;

        if (srf->scanout &&
            srf->num_sizes == 1 &&
            srf->sizes[0].width == 64 &&
            srf->sizes[0].height == 64 &&
            srf->format == SVGA3D_A8R8G8B8) {

                srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
                /* clear the image */
                if (srf->snooper.image) {
                        memset(srf->snooper.image, 0x00, 64 * 64 * 4);
                } else {
                        DRM_ERROR("Failed to allocate cursor_image\n");
                        ret = -ENOMEM;
                        goto out_err1;
                }
        } else {
                srf->snooper.image = NULL;
        }
        srf->snooper.crtc = NULL;

        user_srf->base.shareable = false;
        user_srf->base.tfile = NULL;

        /**
         * From this point, the generic resource management functions
         * destroy the object on failure.
         */

        ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
        if (unlikely(ret != 0))
                return ret;

        tmp = vmw_resource_reference(&srf->res);
        ret = ttm_base_object_init(tfile, &user_srf->base,
                                   req->shareable, VMW_RES_SURFACE,
                                   &vmw_user_surface_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                vmw_resource_unreference(&res);
                return ret;
        }

        rep->sid = user_srf->base.hash.key;
        if (rep->sid == SVGA3D_INVALID_ID)
                DRM_ERROR("Created bad Surface ID.\n");

        vmw_resource_unreference(&res);
        return 0;
out_err1:
        kfree(srf->sizes);
out_err0:
        kfree(user_srf);
        return ret;
}

int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        union drm_vmw_surface_reference_arg *arg =
            (union drm_vmw_surface_reference_arg *)data;
        struct drm_vmw_surface_arg *req = &arg->req;
        struct drm_vmw_surface_create_req *rep = &arg->rep;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_surface *srf;
        struct vmw_user_surface *user_srf;
        struct drm_vmw_size __user *user_sizes;
        struct ttm_base_object *base;
        int ret = -EINVAL;

        base = ttm_base_object_lookup(tfile, req->sid);
        if (unlikely(base == NULL)) {
                DRM_ERROR("Could not find surface to reference.\n");
                return -EINVAL;
        }

        if (unlikely(base->object_type != VMW_RES_SURFACE))
                goto out_bad_resource;

        user_srf = container_of(base, struct vmw_user_surface, base);
        srf = &user_srf->srf;

        ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not add a reference to a surface.\n");
                goto out_no_reference;
        }

        rep->flags = srf->flags;
        rep->format = srf->format;
        memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
        user_sizes = (struct drm_vmw_size __user *)(unsigned long)
            rep->size_addr;

        if (user_sizes)
                ret = copy_to_user(user_sizes, srf->sizes,
                                   srf->num_sizes * sizeof(*srf->sizes));
        if (unlikely(ret != 0)) {
                DRM_ERROR("copy_to_user failed %p %u\n",
                          user_sizes, srf->num_sizes);
                /* copy_to_user() returns the number of bytes not
                 * copied; turn that into an errno for the caller. */
                ret = -EFAULT;
        }
out_bad_resource:
out_no_reference:
        ttm_base_object_unref(&base);

        return ret;
}

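/**
 * vmw_surface_check - resolve a surface handle to a resource id.
 *
 * Used by the command parsing code to translate user-space surface
 * handles. Returns 0 and stores the resource id in @id on success,
 * -EINVAL if the handle is unknown and -EPERM if the handle does not
 * name a surface.
 */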
int vmw_surface_check(struct vmw_private *dev_priv,
                      struct ttm_object_file *tfile,
                      uint32_t handle, int *id)
{
        struct ttm_base_object *base;
        struct vmw_user_surface *user_srf;

        int ret = -EPERM;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL))
                return -EINVAL;

        if (unlikely(base->object_type != VMW_RES_SURFACE))
                goto out_bad_surface;

        user_srf = container_of(base, struct vmw_user_surface, base);
        *id = user_srf->srf.res.id;
        ret = 0;

out_bad_surface:
        /**
         * FIXME: May deadlock here when called from the
         * command parsing code.
         */

        ttm_base_object_unref(&base);
        return ret;
}

/**
 * Buffer management.
 */

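/**
 * vmw_dmabuf_acc_size - compute the TTM memory-accounting size of a
 * buffer object: the fixed per-bo overhead, computed once and cached
 * in a static variable, plus the page array rounded up to whole pages.
 */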
static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
                                  unsigned long num_pages)
{
        static size_t bo_user_size = ~0;

        size_t page_array_size =
            (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;

        if (unlikely(bo_user_size == ~0)) {
                bo_user_size = glob->ttm_bo_extra_size +
                    ttm_round_pot(sizeof(struct vmw_dma_buffer));
        }

        return bo_user_size + page_array_size;
}

void vmw_dmabuf_gmr_unbind(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
        struct ttm_bo_global *glob = bo->glob;
        struct vmw_private *dev_priv =
            container_of(bo->bdev, struct vmw_private, bdev);

        if (vmw_bo->gmr_bound) {
                vmw_gmr_unbind(dev_priv, vmw_bo->gmr_id);
                spin_lock(&glob->lru_lock);
                ida_remove(&dev_priv->gmr_ida, vmw_bo->gmr_id);
                spin_unlock(&glob->lru_lock);
                vmw_bo->gmr_bound = false;
        }
}

void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
        struct ttm_bo_global *glob = bo->glob;

        vmw_dmabuf_gmr_unbind(bo);
        ttm_mem_global_free(glob->mem_glob, bo->acc_size);
        kfree(vmw_bo);
}

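/**
 * vmw_dmabuf_init - initialize a vmw_dma_buffer as a TTM buffer object.
 *
 * Accounts the buffer with the TTM memory global before handing it to
 * ttm_bo_init(). @bo_free is the destructor TTM will call when the last
 * reference on the buffer object is dropped; it is also called here on
 * early failure.
 */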
int vmw_dmabuf_init(struct vmw_private *dev_priv,
                    struct vmw_dma_buffer *vmw_bo,
                    size_t size, struct ttm_placement *placement,
                    bool interruptible,
                    void (*bo_free) (struct ttm_buffer_object *bo))
{
        struct ttm_bo_device *bdev = &dev_priv->bdev;
        struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
        size_t acc_size;
        int ret;

        BUG_ON(!bo_free);

        acc_size =
            vmw_dmabuf_acc_size(bdev->glob,
                                (size + PAGE_SIZE - 1) >> PAGE_SHIFT);

        ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
        if (unlikely(ret != 0)) {
                /* we must free the bo here, as ttm_bo_init
                 * does so as well on failure */
                bo_free(&vmw_bo->base);
                return ret;
        }

        memset(vmw_bo, 0, sizeof(*vmw_bo));

        INIT_LIST_HEAD(&vmw_bo->gmr_lru);
        INIT_LIST_HEAD(&vmw_bo->validate_list);
        vmw_bo->gmr_id = 0;
        vmw_bo->gmr_bound = false;

        ret = ttm_bo_init(bdev, &vmw_bo->base, size,
                          ttm_bo_type_device, placement,
                          0, 0, interruptible,
                          NULL, acc_size, bo_free);
        return ret;
}

static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
        struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
        struct ttm_bo_global *glob = bo->glob;

        vmw_dmabuf_gmr_unbind(bo);
        ttm_mem_global_free(glob->mem_glob, bo->acc_size);
        kfree(vmw_user_bo);
}

static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base = *p_base;
        struct ttm_buffer_object *bo;

        *p_base = NULL;

        if (unlikely(base == NULL))
                return;

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
        bo = &vmw_user_bo->dma.base;
        ttm_bo_unref(&bo);
}

int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        union drm_vmw_alloc_dmabuf_arg *arg =
            (union drm_vmw_alloc_dmabuf_arg *)data;
        struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
        struct drm_vmw_dmabuf_rep *rep = &arg->rep;
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_buffer_object *tmp;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
        if (unlikely(vmw_user_bo == NULL))
                return -ENOMEM;

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0)) {
                kfree(vmw_user_bo);
                return ret;
        }

        ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
                              &vmw_vram_sys_placement, true,
                              &vmw_user_dmabuf_destroy);
        if (unlikely(ret != 0)) {
                /* vmw_dmabuf_init has already destroyed the bo;
                 * don't leak the ttm read lock on the way out. */
                ttm_read_unlock(&vmaster->lock);
                return ret;
        }

        tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
        ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
                                   &vmw_user_bo->base,
                                   false,
                                   ttm_buffer_type,
                                   &vmw_user_dmabuf_release, NULL);
        if (unlikely(ret != 0)) {
                struct ttm_buffer_object *bo = &vmw_user_bo->dma.base;

                /* Drop both the local reference and the initial
                 * ttm_bo_init() reference so the buffer object is
                 * destroyed; ttm_bo_unref() clears its argument. */
                ttm_bo_unref(&tmp);
                ttm_bo_unref(&bo);
        } else {
                rep->handle = vmw_user_bo->base.hash.key;
                rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
                rep->cur_gmr_id = vmw_user_bo->base.hash.key;
                rep->cur_gmr_offset = 0;

                /* On success the base object holds the remaining
                 * reference on the buffer object. */
                ttm_bo_unref(&tmp);
        }

        ttm_read_unlock(&vmaster->lock);
        return ret;
}

int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct drm_vmw_unref_dmabuf_arg *arg =
            (struct drm_vmw_unref_dmabuf_arg *)data;

        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         arg->handle,
                                         TTM_REF_USAGE);
}

uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
                                  uint32_t cur_validate_node)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        if (likely(vmw_bo->on_validate_list))
                return vmw_bo->cur_validate_node;

        vmw_bo->cur_validate_node = cur_validate_node;
        vmw_bo->on_validate_list = true;

        return cur_validate_node;
}

void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        vmw_bo->on_validate_list = false;
}

uint32_t vmw_dmabuf_gmr(struct ttm_buffer_object *bo)
{
        struct vmw_dma_buffer *vmw_bo;

        if (bo->mem.mem_type == TTM_PL_VRAM)
                return SVGA_GMR_FRAMEBUFFER;

        vmw_bo = vmw_dma_buffer(bo);

        return (vmw_bo->gmr_bound) ? vmw_bo->gmr_id : SVGA_GMR_NULL;
}

void vmw_dmabuf_set_gmr(struct ttm_buffer_object *bo, uint32_t id)
{
        struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);

        vmw_bo->gmr_bound = true;
        vmw_bo->gmr_id = id;
}

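/**
 * vmw_user_dmabuf_lookup - translate a user-space handle into a
 * referenced vmw_dma_buffer.
 *
 * A reference is taken on the underlying ttm buffer object before the
 * base object is unreferenced, so the buffer stays alive for the
 * caller, who is responsible for dropping that reference.
 */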
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
                           uint32_t handle, struct vmw_dma_buffer **out)
{
        struct vmw_user_dma_buffer *vmw_user_bo;
        struct ttm_base_object *base;

        base = ttm_base_object_lookup(tfile, handle);
        if (unlikely(base == NULL)) {
                printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return -ESRCH;
        }

        if (unlikely(base->object_type != ttm_buffer_type)) {
                ttm_base_object_unref(&base);
                printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return -EINVAL;
        }

        vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
        (void)ttm_bo_reference(&vmw_user_bo->dma.base);
        ttm_base_object_unref(&base);
        *out = &vmw_user_bo->dma;

        return 0;
}

/**
 * TODO: Implement a gmr id eviction mechanism. Currently we just fail
 * when we're out of ids, causing GMR space to be allocated
 * out of VRAM.
 */

int vmw_gmr_id_alloc(struct vmw_private *dev_priv, uint32_t *p_id)
{
        struct ttm_bo_global *glob = dev_priv->bdev.glob;
        int id;
        int ret;

        do {
                if (unlikely(ida_pre_get(&dev_priv->gmr_ida, GFP_KERNEL) == 0))
                        return -ENOMEM;

                spin_lock(&glob->lru_lock);
                ret = ida_get_new(&dev_priv->gmr_ida, &id);
                spin_unlock(&glob->lru_lock);
        } while (ret == -EAGAIN);

        if (unlikely(ret != 0))
                return ret;

        if (unlikely(id >= dev_priv->max_gmr_ids)) {
                spin_lock(&glob->lru_lock);
                ida_remove(&dev_priv->gmr_ida, id);
                spin_unlock(&glob->lru_lock);
                return -EBUSY;
        }

        *p_id = (uint32_t) id;
        return 0;
}

/*
 * Stream management
 */

static void vmw_stream_destroy(struct vmw_resource *res)
{
        struct vmw_private *dev_priv = res->dev_priv;
        struct vmw_stream *stream;
        int ret;

        DRM_INFO("%s: unref\n", __func__);
        stream = container_of(res, struct vmw_stream, res);

        ret = vmw_overlay_unref(dev_priv, stream->stream_id);
        WARN_ON(ret != 0);
}

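/**
 * vmw_stream_init - initialize a stream resource and claim an overlay.
 *
 * Registers @stream in the stream idr and claims an overlay stream id
 * from the overlay code. On failure the resource is torn down again
 * via @res_free, or kfree() if @res_free is NULL.
 */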
static int vmw_stream_init(struct vmw_private *dev_priv,
                           struct vmw_stream *stream,
                           void (*res_free) (struct vmw_resource *res))
{
        struct vmw_resource *res = &stream->res;
        int ret;

        ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
                                VMW_RES_STREAM, res_free);

        if (unlikely(ret != 0)) {
                if (res_free == NULL)
                        kfree(stream);
                else
                        res_free(&stream->res);
                return ret;
        }

        ret = vmw_overlay_claim(dev_priv, &stream->stream_id);
        if (ret) {
                vmw_resource_unreference(&res);
                return ret;
        }

        DRM_INFO("%s: claimed\n", __func__);

        vmw_resource_activate(&stream->res, vmw_stream_destroy);
        return 0;
}

/**
 * User-space stream management:
 */

static void vmw_user_stream_free(struct vmw_resource *res)
{
        struct vmw_user_stream *stream =
            container_of(res, struct vmw_user_stream, stream.res);

        kfree(stream);
}

/**
 * This function is called when user space has no more references on the
 * base object. It releases the base-object's reference on the resource object.
 */

static void vmw_user_stream_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_stream *stream =
            container_of(base, struct vmw_user_stream, base);
        struct vmw_resource *res = &stream->stream.res;

        *p_base = NULL;
        vmw_resource_unreference(&res);
}

int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_resource *res;
        struct vmw_user_stream *stream;
        struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret = 0;

        res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr,
                                  arg->stream_id);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_stream_free) {
                ret = -EINVAL;
                goto out;
        }

        stream = container_of(res, struct vmw_user_stream, stream.res);
        if (stream->base.tfile != tfile) {
                ret = -EINVAL;
                goto out;
        }

        ttm_ref_object_base_unref(tfile, stream->base.hash.key, TTM_REF_USAGE);
out:
        vmw_resource_unreference(&res);
        return ret;
}

int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
                           struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;

        if (unlikely(stream == NULL))
                return -ENOMEM;

        res = &stream->stream.res;
        stream->base.shareable = false;
        stream->base.tfile = NULL;

        ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
        if (unlikely(ret != 0))
                return ret;

        tmp = vmw_resource_reference(res);
        ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
                                   &vmw_user_stream_base_release, NULL);

        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        arg->stream_id = res->id;
out_err:
        vmw_resource_unreference(&res);
        return ret;
}

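/**
 * vmw_user_stream_lookup - resolve a user-space stream handle.
 *
 * On success, *inout_id is replaced by the overlay stream id and a
 * referenced resource is returned in @out; the caller is responsible
 * for unreferencing it.
 */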
int vmw_user_stream_lookup(struct vmw_private *dev_priv,
                           struct ttm_object_file *tfile,
                           uint32_t *inout_id, struct vmw_resource **out)
{
        struct vmw_user_stream *stream;
        struct vmw_resource *res;
        int ret;

        res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
        if (unlikely(res == NULL))
                return -EINVAL;

        if (res->res_free != &vmw_user_stream_free) {
                ret = -EINVAL;
                goto err_ref;
        }

        stream = container_of(res, struct vmw_user_stream, stream.res);
        if (stream->base.tfile != tfile) {
                ret = -EPERM;
                goto err_ref;
        }

        *inout_id = stream->stream.stream_id;
        *out = res;
        return 0;
err_ref:
        vmw_resource_unreference(&res);
        return ret;
}