/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"

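/* convert a kernel error code from the fault path into a VM_FAULT_* code. */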
static unsigned int convert_to_vm_err_msg(int msg)
{
        unsigned int out_msg;

        switch (msg) {
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
                out_msg = VM_FAULT_NOPAGE;
                break;

        case -ENOMEM:
                out_msg = VM_FAULT_OOM;
                break;

        default:
                out_msg = VM_FAULT_SIGBUS;
                break;
        }

        return out_msg;
}

static int check_gem_flags(unsigned int flags)
{
        if (flags & ~(EXYNOS_BO_MASK)) {
                DRM_ERROR("invalid flags.\n");
                return -EINVAL;
        }

        return 0;
}

static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
                                 struct vm_area_struct *vma)
{
        DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);

        /* non-cachable as default. */
        if (obj->flags & EXYNOS_BO_CACHABLE)
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        else if (obj->flags & EXYNOS_BO_WC)
                vma->vm_page_prot =
                        pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        else
                vma->vm_page_prot =
                        pgprot_noncached(vm_get_page_prot(vma->vm_flags));
}

static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
        /* TODO */

        return roundup(size, PAGE_SIZE);
}

static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
                                  struct vm_area_struct *vma,
                                  unsigned long f_vaddr,
                                  pgoff_t page_offset)
{
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
        struct scatterlist *sgl;
        unsigned long pfn;
        int i;

        if (!buf->sgt)
                return -EINTR;

        if (page_offset >= (buf->size >> PAGE_SHIFT)) {
                DRM_ERROR("invalid page offset\n");
                return -EINVAL;
        }

        sgl = buf->sgt->sgl;
        for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
                if (page_offset < (sgl->length >> PAGE_SHIFT))
                        break;
                page_offset -= (sgl->length >> PAGE_SHIFT);
        }

        pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;

        return vm_insert_mixed(vma, f_vaddr, pfn);
}

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
                                        struct drm_file *file_priv,
                                        unsigned int *handle)
{
        int ret;

        /*
         * allocate an id in the idr table where the obj is registered,
         * and the handle holds the id that user space can see.
         */
        ret = drm_gem_handle_create(file_priv, obj, handle);
        if (ret)
                return ret;

        DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

        /* drop reference from allocate - handle holds it now. */
        drm_gem_object_unreference_unlocked(obj);

        return 0;
}

void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
        struct drm_gem_object *obj;
        struct exynos_drm_gem_buf *buf;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        obj = &exynos_gem_obj->base;
        buf = exynos_gem_obj->buffer;

        DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));

        /*
         * do not release memory region from exporter.
         *
         * the region will be released by exporter
         * once dmabuf's refcount becomes 0.
         */
        if (obj->import_attach)
                goto out;

        exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);

out:
        exynos_drm_fini_buf(obj->dev, buf);
        exynos_gem_obj->buffer = NULL;

        if (obj->map_list.map)
                drm_gem_free_mmap_offset(obj);

        /* release file pointer to gem object. */
        drm_gem_object_release(obj);

        kfree(exynos_gem_obj);
        exynos_gem_obj = NULL;
}

unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
                                      unsigned int gem_handle,
                                      struct drm_file *file_priv)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return 0;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        drm_gem_object_unreference_unlocked(obj);

        return exynos_gem_obj->buffer->size;
}

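/*
 * allocate an exynos gem object and initialize the embedded drm_gem_object
 * (including its backing shmem file) for the given size.
 */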
struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
                                               unsigned long size)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
        int ret;

        exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
        if (!exynos_gem_obj) {
                DRM_ERROR("failed to allocate exynos gem object\n");
                return NULL;
        }

        exynos_gem_obj->size = size;
        obj = &exynos_gem_obj->base;

        ret = drm_gem_object_init(dev, obj, size);
        if (ret < 0) {
                DRM_ERROR("failed to initialize gem object\n");
                kfree(exynos_gem_obj);
                return NULL;
        }

        DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);

        return exynos_gem_obj;
}

struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
                                                 unsigned int flags,
                                                 unsigned long size)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct exynos_drm_gem_buf *buf;
        int ret;

        if (!size) {
                DRM_ERROR("invalid size.\n");
                return ERR_PTR(-EINVAL);
        }

        size = roundup_gem_size(size, flags);
        DRM_DEBUG_KMS("%s\n", __FILE__);

        ret = check_gem_flags(flags);
        if (ret)
                return ERR_PTR(ret);

        buf = exynos_drm_init_buf(dev, size);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        exynos_gem_obj = exynos_drm_gem_init(dev, size);
        if (!exynos_gem_obj) {
                ret = -ENOMEM;
                goto err_fini_buf;
        }

        exynos_gem_obj->buffer = buf;

        /* set memory type and cache attribute from user side. */
        exynos_gem_obj->flags = flags;

        ret = exynos_drm_alloc_buf(dev, buf, flags);
        if (ret < 0) {
                drm_gem_object_release(&exynos_gem_obj->base);
                goto err_fini_buf;
        }

        return exynos_gem_obj;

err_fini_buf:
        exynos_drm_fini_buf(dev, buf);
        return ERR_PTR(ret);
}

int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct drm_exynos_gem_create *args = data;
        struct exynos_drm_gem_obj *exynos_gem_obj;
        int ret;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
        if (IS_ERR(exynos_gem_obj))
                return PTR_ERR(exynos_gem_obj);

        ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
                        &args->handle);
        if (ret) {
                exynos_drm_gem_destroy(exynos_gem_obj);
                return ret;
        }

        return 0;
}

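/*
 * look up the gem object for @gem_handle and return a pointer to its buffer's
 * dma address. the reference taken by the lookup is kept and is dropped later
 * by exynos_drm_gem_put_dma_addr().
 */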
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
                                        unsigned int gem_handle,
                                        struct drm_file *filp)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, filp, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return ERR_PTR(-EINVAL);
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        return &exynos_gem_obj->buffer->dma_addr;
}

void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
                                 unsigned int gem_handle,
                                 struct drm_file *filp)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, filp, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        drm_gem_object_unreference_unlocked(obj);

        /*
         * decrease obj->refcount one more time because we have already
         * increased it in exynos_drm_gem_get_dma_addr().
         */
        drm_gem_object_unreference_unlocked(obj);
}

int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv)
{
        struct drm_exynos_gem_map_off *args = data;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
                        args->handle, (unsigned long)args->offset);

        if (!(dev->driver->driver_features & DRIVER_GEM)) {
                DRM_ERROR("does not support GEM.\n");
                return -ENODEV;
        }

        return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
                        &args->offset);
}

static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
                                                 struct file *filp)
{
        struct drm_file *file_priv;

        /* find current process's drm_file from filelist. */
        list_for_each_entry(file_priv, &drm_dev->filelist, lhead)
                if (file_priv->filp == filp)
                        return file_priv;

        WARN_ON(1);

        return ERR_PTR(-EFAULT);
}

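/*
 * mmap handler reached through the temporary fops installed by
 * exynos_drm_gem_mmap_ioctl(): restore filp->f_op and filp->private_data,
 * then map the whole buffer into the vma.
 */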
static int exynos_drm_gem_mmap_buffer(struct file *filp,
                                      struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = filp->private_data;
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct drm_device *drm_dev = obj->dev;
        struct exynos_drm_gem_buf *buffer;
        struct drm_file *file_priv;
        unsigned long vm_size;
        int ret;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data = obj;
        vma->vm_ops = drm_dev->driver->gem_vm_ops;

        /* restore it to driver's fops. */
        filp->f_op = fops_get(drm_dev->driver->fops);

        file_priv = exynos_drm_find_drm_file(drm_dev, filp);
        if (IS_ERR(file_priv))
                return PTR_ERR(file_priv);

        /* restore it to drm_file. */
        filp->private_data = file_priv;

        update_vm_cache_attr(exynos_gem_obj, vma);

        vm_size = vma->vm_end - vma->vm_start;

        /*
         * the buffer describes physically contiguous memory allocated
         * by a user request or at framebuffer creation.
         */
        buffer = exynos_gem_obj->buffer;

        /* check if user-requested size is valid. */
        if (vm_size > buffer->size)
                return -EINVAL;

        ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
                                buffer->dma_addr, buffer->size,
                                &buffer->dma_attrs);
        if (ret < 0) {
                DRM_ERROR("failed to mmap.\n");
                return ret;
        }

        /*
         * take a reference to this mapping of the object. the reference
         * is dropped by the corresponding vm_close call.
         */
        drm_gem_object_reference(obj);

        drm_vm_open_locked(drm_dev, vma);

        return 0;
}

static const struct file_operations exynos_drm_gem_fops = {
        .mmap = exynos_drm_gem_mmap_buffer,
};

int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_exynos_gem_mmap *args = data;
        struct drm_gem_object *obj;
        unsigned int addr;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        if (!(dev->driver->driver_features & DRIVER_GEM)) {
                DRM_ERROR("does not support GEM.\n");
                return -ENODEV;
        }

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return -EINVAL;
        }

        /*
         * We have to use the gem object and its fops for the specific
         * mmapper, but vm_mmap() can deliver only filp. So we have to
         * change filp->f_op and filp->private_data temporarily, then
         * restore them again. It is important to hold the lock until the
         * settings are restored, to prevent others from misusing
         * filp->f_op or filp->private_data.
         */
        mutex_lock(&dev->struct_mutex);

        /*
         * Set the specific mmapper's fops. It will be restored by
         * exynos_drm_gem_mmap_buffer() to dev->driver->fops.
         * This is used to call the specific mapper temporarily.
         */
        file_priv->filp->f_op = &exynos_drm_gem_fops;

        /*
         * Set the gem object to private_data so that the specific mmapper
         * can get the gem object. It will be restored by
         * exynos_drm_gem_mmap_buffer() to the drm_file.
         */
        file_priv->filp->private_data = obj;

        addr = vm_mmap(file_priv->filp, 0, args->size,
                        PROT_READ | PROT_WRITE, MAP_SHARED, 0);

        drm_gem_object_unreference(obj);

        if (IS_ERR((void *)addr)) {
                /* check filp->f_op, filp->private_data are restored */
                if (file_priv->filp->f_op == &exynos_drm_gem_fops) {
                        file_priv->filp->f_op = fops_get(dev->driver->fops);
                        file_priv->filp->private_data = file_priv;
                }
                mutex_unlock(&dev->struct_mutex);
                return PTR_ERR((void *)addr);
        }

        mutex_unlock(&dev->struct_mutex);

        args->mapped = addr;

        DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);

        return 0;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_exynos_gem_info *args = data;
        struct drm_gem_object *obj;

        mutex_lock(&dev->struct_mutex);

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        args->flags = exynos_gem_obj->flags;
        args->size = exynos_gem_obj->size;

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

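/*
 * take a private copy of a userspace vma: call the mapping's open hook and
 * grab a reference to the backing file so the copy stays usable after the
 * original vma has gone away.
 */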
struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
{
        struct vm_area_struct *vma_copy;

        vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
        if (!vma_copy)
                return NULL;

        if (vma->vm_ops && vma->vm_ops->open)
                vma->vm_ops->open(vma);

        if (vma->vm_file)
                get_file(vma->vm_file);

        memcpy(vma_copy, vma, sizeof(*vma));

        vma_copy->vm_mm = NULL;
        vma_copy->vm_next = NULL;
        vma_copy->vm_prev = NULL;

        return vma_copy;
}

void exynos_gem_put_vma(struct vm_area_struct *vma)
{
        if (!vma)
                return;

        if (vma->vm_ops && vma->vm_ops->close)
                vma->vm_ops->close(vma);

        if (vma->vm_file)
                fput(vma->vm_file);

        kfree(vma);
}

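/*
 * pin the @npages user pages starting at @start: VM_PFNMAP (I/O) regions are
 * translated page by page with follow_pfn(), normal memory is pinned with
 * get_user_pages().
 */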
int exynos_gem_get_pages_from_userptr(unsigned long start,
                                      unsigned int npages,
                                      struct page **pages,
                                      struct vm_area_struct *vma)
{
        int get_npages;

        /* the memory region mmaped with VM_PFNMAP. */
        if (vma_is_io(vma)) {
                unsigned int i;

                for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
                        unsigned long pfn;
                        int ret = follow_pfn(vma, start, &pfn);
                        if (ret)
                                return ret;

                        pages[i] = pfn_to_page(pfn);
                }

                if (i != npages) {
                        DRM_ERROR("failed to get user_pages.\n");
                        return -EINVAL;
                }

                return 0;
        }

        get_npages = get_user_pages(current, current->mm, start,
                                        npages, 1, 1, pages, NULL);
        get_npages = max(get_npages, 0);
        if (get_npages != npages) {
                DRM_ERROR("failed to get user_pages.\n");
                while (get_npages)
                        put_page(pages[--get_npages]);
                return -EFAULT;
        }

        return 0;
}

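/*
 * release pages pinned by exynos_gem_get_pages_from_userptr(): pages from
 * get_user_pages() are marked dirty and put; VM_PFNMAP regions hold no page
 * references and need nothing here.
 */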
void exynos_gem_put_pages_to_userptr(struct page **pages,
                                     unsigned int npages,
                                     struct vm_area_struct *vma)
{
        if (!vma_is_io(vma)) {
                unsigned int i;

                for (i = 0; i < npages; i++) {
                        set_page_dirty_lock(pages[i]);

                        /*
                         * undo the reference we took when populating
                         * the table.
                         */
                        put_page(pages[i]);
                }
        }
}

int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
                                struct sg_table *sgt,
                                enum dma_data_direction dir)
{
        int nents;

        mutex_lock(&drm_dev->struct_mutex);

        nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
        if (!nents) {
                DRM_ERROR("failed to map sgl with dma.\n");
                mutex_unlock(&drm_dev->struct_mutex);
                return nents;
        }

        mutex_unlock(&drm_dev->struct_mutex);

        return 0;
}

void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
                                   struct sg_table *sgt,
                                   enum dma_data_direction dir)
{
        dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}

int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
        DRM_DEBUG_KMS("%s\n", __FILE__);

        return 0;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct exynos_drm_gem_buf *buf;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        exynos_gem_obj = to_exynos_gem_obj(obj);
        buf = exynos_gem_obj->buffer;

        if (obj->import_attach)
                drm_prime_gem_destroy(obj, buf->sgt);

        exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
                               struct drm_device *dev,
                               struct drm_mode_create_dumb *args)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        int ret;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        /*
         * allocate memory to be used for framebuffer.
         * - this callback would be called by user application
         *      with DRM_IOCTL_MODE_CREATE_DUMB command.
         */

        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;

        exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
        if (IS_ERR(exynos_gem_obj))
                return PTR_ERR(exynos_gem_obj);

        ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
                        &args->handle);
        if (ret) {
                exynos_drm_gem_destroy(exynos_gem_obj);
                return ret;
        }

        return 0;
}

int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
                                   struct drm_device *dev, uint32_t handle,
                                   uint64_t *offset)
{
        struct drm_gem_object *obj;
        int ret = 0;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        mutex_lock(&dev->struct_mutex);

        /*
         * get offset of memory allocated for drm framebuffer.
         * - this callback would be called by user application
         *      with DRM_IOCTL_MODE_MAP_DUMB command.
         */

        obj = drm_gem_object_lookup(dev, file_priv, handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                ret = -EINVAL;
                goto unlock;
        }

        if (!obj->map_list.map) {
                ret = drm_gem_create_mmap_offset(obj);
                if (ret)
                        goto out;
        }

        *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
        DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
        drm_gem_object_unreference(obj);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
                                struct drm_device *dev,
                                unsigned int handle)
{
        int ret;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        /*
         * obj->refcount and obj->handle_count are decreased, and
         * if both of them are 0 then exynos_drm_gem_free_object()
         * will be called by the callback to release resources.
         */
        ret = drm_gem_handle_delete(file_priv, handle);
        if (ret < 0) {
                DRM_ERROR("failed to delete drm_gem_handle.\n");
                return ret;
        }

        return 0;
}

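/*
 * page fault handler for a mapped gem buffer: map the faulting page with
 * exynos_drm_gem_map_buf() and translate the result into a VM_FAULT_* code.
 */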
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_device *dev = obj->dev;
        unsigned long f_vaddr;
        pgoff_t page_offset;
        int ret;

        page_offset = ((unsigned long)vmf->virtual_address -
                        vma->vm_start) >> PAGE_SHIFT;
        f_vaddr = (unsigned long)vmf->virtual_address;

        mutex_lock(&dev->struct_mutex);

        ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
        if (ret < 0)
                DRM_ERROR("failed to map a buffer with user.\n");

        mutex_unlock(&dev->struct_mutex);

        return convert_to_vm_err_msg(ret);
}

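/*
 * mmap handler for the drm device node: let drm_gem_mmap() set up the vma,
 * then switch it to VM_MIXEDMAP and apply the object's cache attributes so
 * faults are served by exynos_drm_gem_fault().
 */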
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
        int ret;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        /* set vm_area_struct. */
        ret = drm_gem_mmap(filp, vma);
        if (ret < 0) {
                DRM_ERROR("failed to mmap.\n");
                return ret;
        }

        obj = vma->vm_private_data;
        exynos_gem_obj = to_exynos_gem_obj(obj);

        ret = check_gem_flags(exynos_gem_obj->flags);
        if (ret) {
                drm_gem_vm_close(vma);
                drm_gem_free_mmap_offset(obj);
                return ret;
        }

        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;

        update_vm_cache_attr(exynos_gem_obj, vma);

        return ret;
}