exynos_drm_gem.c

/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"

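/*
 * convert_to_vm_err_msg - translate a kernel error code returned while
 * handling a page fault into the corresponding VM_FAULT_* code that the
 * fault handler must hand back to the mm layer.
 */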
static unsigned int convert_to_vm_err_msg(int msg)
{
        unsigned int out_msg;

        switch (msg) {
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
                out_msg = VM_FAULT_NOPAGE;
                break;

        case -ENOMEM:
                out_msg = VM_FAULT_OOM;
                break;

        default:
                out_msg = VM_FAULT_SIGBUS;
                break;
        }

        return out_msg;
}

static int check_gem_flags(unsigned int flags)
{
        if (flags & ~(EXYNOS_BO_MASK)) {
                DRM_ERROR("invalid flags.\n");
                return -EINVAL;
        }

        return 0;
}

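/*
 * update_vm_cache_attr - derive vma->vm_page_prot from the buffer's
 * EXYNOS_BO_* flags: cached, write-combined, or non-cached.
 */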
static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
                                        struct vm_area_struct *vma)
{
        DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);

        /* non-cachable by default. */
        if (obj->flags & EXYNOS_BO_CACHABLE)
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        else if (obj->flags & EXYNOS_BO_WC)
                vma->vm_page_prot =
                        pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        else
                vma->vm_page_prot =
                        pgprot_noncached(vm_get_page_prot(vma->vm_flags));
}

static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
        /* TODO */

        return roundup(size, PAGE_SIZE);
}

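/*
 * exynos_drm_gem_map_buf - walk the buffer's scatter-gather table to find
 * the entry covering the faulting page, then insert that page's pfn into
 * the user's vma at the faulting address.
 */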
static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
                                        struct vm_area_struct *vma,
                                        unsigned long f_vaddr,
                                        pgoff_t page_offset)
{
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
        struct scatterlist *sgl;
        unsigned long pfn;
        int i;

        if (!buf->sgt)
                return -EINTR;

        if (page_offset >= (buf->size >> PAGE_SHIFT)) {
                DRM_ERROR("invalid page offset\n");
                return -EINVAL;
        }

        sgl = buf->sgt->sgl;
        for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
                if (page_offset < (sgl->length >> PAGE_SHIFT))
                        break;
                page_offset -= (sgl->length >> PAGE_SHIFT);
        }

        pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;

        return vm_insert_mixed(vma, f_vaddr, pfn);
}

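/*
 * exynos_drm_gem_handle_create - register the gem object with the file's
 * idr table and hand the resulting handle back to the caller.
 */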
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
                                        struct drm_file *file_priv,
                                        unsigned int *handle)
{
        int ret;

        /*
         * allocate an id from the idr table where the obj is registered;
         * the handle holds the id that user space can see.
         */
        ret = drm_gem_handle_create(file_priv, obj, handle);
        if (ret)
                return ret;

        DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

        /* drop reference from allocate - handle holds it now. */
        drm_gem_object_unreference_unlocked(obj);

        return 0;
}

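/*
 * exynos_drm_gem_destroy - free the buffer backing a gem object (unless it
 * was imported through dma-buf), drop its mmap offset and release the
 * object itself.
 */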
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
        struct drm_gem_object *obj;
        struct exynos_drm_gem_buf *buf;

        obj = &exynos_gem_obj->base;
        buf = exynos_gem_obj->buffer;

        DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));

        /*
         * do not release memory region from exporter.
         *
         * the region will be released by exporter
         * once dmabuf's refcount becomes 0.
         */
        if (obj->import_attach)
                goto out;

        exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);

out:
        exynos_drm_fini_buf(obj->dev, buf);
        exynos_gem_obj->buffer = NULL;

        if (obj->map_list.map)
                drm_gem_free_mmap_offset(obj);

        /* release file pointer to gem object. */
        drm_gem_object_release(obj);

        kfree(exynos_gem_obj);
        exynos_gem_obj = NULL;
}

unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
                                                unsigned int gem_handle,
                                                struct drm_file *file_priv)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return 0;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        drm_gem_object_unreference_unlocked(obj);

        return exynos_gem_obj->buffer->size;
}

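/*
 * exynos_drm_gem_init - allocate the driver-private gem wrapper and
 * initialize the embedded drm_gem_object for the given size.
 */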
struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
                                                unsigned long size)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
        int ret;

        exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
        if (!exynos_gem_obj) {
                DRM_ERROR("failed to allocate exynos gem object\n");
                return NULL;
        }

        exynos_gem_obj->size = size;
        obj = &exynos_gem_obj->base;

        ret = drm_gem_object_init(dev, obj, size);
        if (ret < 0) {
                DRM_ERROR("failed to initialize gem object\n");
                kfree(exynos_gem_obj);
                return NULL;
        }

        DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);

        return exynos_gem_obj;
}

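/*
 * exynos_drm_gem_create - allocate a buffer of the requested size and
 * memory type and wrap it in a new gem object; the error paths unwind
 * the gem object and the buffer in reverse order of creation.
 */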
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
                                                unsigned int flags,
                                                unsigned long size)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct exynos_drm_gem_buf *buf;
        int ret;

        if (!size) {
                DRM_ERROR("invalid size.\n");
                return ERR_PTR(-EINVAL);
        }

        size = roundup_gem_size(size, flags);

        ret = check_gem_flags(flags);
        if (ret)
                return ERR_PTR(ret);

        buf = exynos_drm_init_buf(dev, size);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        exynos_gem_obj = exynos_drm_gem_init(dev, size);
        if (!exynos_gem_obj) {
                ret = -ENOMEM;
                goto err_fini_buf;
        }

        exynos_gem_obj->buffer = buf;

        /* set memory type and cache attribute from user side. */
        exynos_gem_obj->flags = flags;

        ret = exynos_drm_alloc_buf(dev, buf, flags);
        if (ret < 0)
                goto err_gem_fini;

        return exynos_gem_obj;

err_gem_fini:
        drm_gem_object_release(&exynos_gem_obj->base);
        kfree(exynos_gem_obj);
err_fini_buf:
        exynos_drm_fini_buf(dev, buf);
        return ERR_PTR(ret);
}

int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct drm_exynos_gem_create *args = data;
        struct exynos_drm_gem_obj *exynos_gem_obj;
        int ret;

        exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
        if (IS_ERR(exynos_gem_obj))
                return PTR_ERR(exynos_gem_obj);

        ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
                        &args->handle);
        if (ret) {
                exynos_drm_gem_destroy(exynos_gem_obj);
                return ret;
        }

        return 0;
}

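/*
 * A minimal user-space sketch of the create path above (hypothetical
 * snippet; drmIoctl() comes from libdrm, the request struct and ioctl
 * number from this driver's uapi header):
 *
 *      struct drm_exynos_gem_create req = {
 *              .size = length,
 *              .flags = EXYNOS_BO_CONTIG | EXYNOS_BO_WC,
 *      };
 *      if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req) == 0)
 *              handle = req.handle;
 */

/*
 * exynos_drm_gem_get_dma_addr - look up a gem object by handle and return
 * a pointer to its buffer's dma address. The lookup reference is kept on
 * purpose so the buffer stays pinned until exynos_drm_gem_put_dma_addr()
 * drops it.
 */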
dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
                                        unsigned int gem_handle,
                                        struct drm_file *filp)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, filp, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return ERR_PTR(-EINVAL);
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        return &exynos_gem_obj->buffer->dma_addr;
}

void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
                                        unsigned int gem_handle,
                                        struct drm_file *filp)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, filp, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        drm_gem_object_unreference_unlocked(obj);

        /*
         * decrease obj->refcount one more time because we have already
         * increased it at exynos_drm_gem_get_dma_addr().
         */
        drm_gem_object_unreference_unlocked(obj);
}

int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv)
{
        struct drm_exynos_gem_map_off *args = data;

        DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
                        args->handle, (unsigned long)args->offset);

        if (!(dev->driver->driver_features & DRIVER_GEM)) {
                DRM_ERROR("does not support GEM.\n");
                return -ENODEV;
        }

        return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
                        &args->offset);
}

static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
                                                 struct file *filp)
{
        struct drm_file *file_priv;

        /* find current process's drm_file from filelist. */
        list_for_each_entry(file_priv, &drm_dev->filelist, lhead)
                if (file_priv->filp == filp)
                        return file_priv;

        WARN_ON(1);

        return ERR_PTR(-EFAULT);
}

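/*
 * exynos_drm_gem_mmap_buffer - fops->mmap handler installed temporarily by
 * exynos_drm_gem_mmap_ioctl(). It restores filp->f_op and private_data to
 * the driver defaults, then maps the whole contiguous buffer into the vma
 * with dma_mmap_attrs().
 */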
static int exynos_drm_gem_mmap_buffer(struct file *filp,
                                      struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = filp->private_data;
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct drm_device *drm_dev = obj->dev;
        struct exynos_drm_gem_buf *buffer;
        struct drm_file *file_priv;
        unsigned long vm_size;
        int ret;

        vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data = obj;
        vma->vm_ops = drm_dev->driver->gem_vm_ops;

        /* restore it to driver's fops. */
        filp->f_op = fops_get(drm_dev->driver->fops);

        file_priv = exynos_drm_find_drm_file(drm_dev, filp);
        if (IS_ERR(file_priv))
                return PTR_ERR(file_priv);

        /* restore it to drm_file. */
        filp->private_data = file_priv;

        update_vm_cache_attr(exynos_gem_obj, vma);

        vm_size = vma->vm_end - vma->vm_start;

        /*
         * a buffer contains information to physically contiguous memory
         * allocated by user request or at framebuffer creation.
         */
        buffer = exynos_gem_obj->buffer;

        /* check if user-requested size is valid. */
        if (vm_size > buffer->size)
                return -EINVAL;

        ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
                                buffer->dma_addr, buffer->size,
                                &buffer->dma_attrs);
        if (ret < 0) {
                DRM_ERROR("failed to mmap.\n");
                return ret;
        }

        /*
         * take a reference to this mapping of the object. The reference
         * is dropped by the corresponding vm_close call.
         */
        drm_gem_object_reference(obj);

        drm_vm_open_locked(drm_dev, vma);

        return 0;
}

static const struct file_operations exynos_drm_gem_fops = {
        .mmap = exynos_drm_gem_mmap_buffer,
};

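/*
 * exynos_drm_gem_mmap_ioctl - map a gem buffer into user space through
 * vm_mmap(). Since vm_mmap() can only deliver a filp, the file's f_op and
 * private_data are redirected to exynos_drm_gem_fops and the gem object
 * under dev->struct_mutex, and restored by exynos_drm_gem_mmap_buffer().
 *
 * A user-space sketch (hypothetical snippet; drmIoctl() is from libdrm):
 *
 *      struct drm_exynos_gem_mmap req = { .handle = handle, .size = length };
 *      if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_MMAP, &req) == 0)
 *              vaddr = (void *)(unsigned long)req.mapped;
 */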
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_exynos_gem_mmap *args = data;
        struct drm_gem_object *obj;
        unsigned long addr;

        if (!(dev->driver->driver_features & DRIVER_GEM)) {
                DRM_ERROR("does not support GEM.\n");
                return -ENODEV;
        }

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return -EINVAL;
        }

        /*
         * We have to use the gem object and its fops for the specific
         * mmapper, but vm_mmap() can deliver only filp. So we have to
         * change filp->f_op and filp->private_data temporarily, then
         * restore them again. It is important to hold the lock until the
         * settings are restored, to prevent others from misusing
         * filp->f_op or filp->private_data.
         */
        mutex_lock(&dev->struct_mutex);

        /*
         * Set the specific mmapper's fops; it will be restored by
         * exynos_drm_gem_mmap_buffer to dev->driver->fops.
         * This is used to call the specific mapper temporarily.
         */
        file_priv->filp->f_op = &exynos_drm_gem_fops;

        /*
         * Set the gem object to private_data so that the specific mmapper
         * can get the gem object; it will be restored by
         * exynos_drm_gem_mmap_buffer to drm_file.
         */
        file_priv->filp->private_data = obj;

        addr = vm_mmap(file_priv->filp, 0, args->size,
                        PROT_READ | PROT_WRITE, MAP_SHARED, 0);

        drm_gem_object_unreference(obj);

        if (IS_ERR_VALUE(addr)) {
                /* check filp->f_op, filp->private_data are restored */
                if (file_priv->filp->f_op == &exynos_drm_gem_fops) {
                        file_priv->filp->f_op = fops_get(dev->driver->fops);
                        file_priv->filp->private_data = file_priv;
                }
                mutex_unlock(&dev->struct_mutex);
                return (int)addr;
        }

        mutex_unlock(&dev->struct_mutex);

        args->mapped = addr;

        DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);

        return 0;
}

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_exynos_gem_info *args = data;
        struct drm_gem_object *obj;

        mutex_lock(&dev->struct_mutex);

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        args->flags = exynos_gem_obj->flags;
        args->size = exynos_gem_obj->size;

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

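/*
 * exynos_gem_get_vma - duplicate a vm_area_struct for later use with
 * userptr buffers; takes the vma's open/file references so the copy stays
 * valid after the original mapping goes away.
 */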
struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
{
        struct vm_area_struct *vma_copy;

        vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
        if (!vma_copy)
                return NULL;

        if (vma->vm_ops && vma->vm_ops->open)
                vma->vm_ops->open(vma);

        if (vma->vm_file)
                get_file(vma->vm_file);

        memcpy(vma_copy, vma, sizeof(*vma));

        vma_copy->vm_mm = NULL;
        vma_copy->vm_next = NULL;
        vma_copy->vm_prev = NULL;

        return vma_copy;
}

void exynos_gem_put_vma(struct vm_area_struct *vma)
{
        if (!vma)
                return;

        if (vma->vm_ops && vma->vm_ops->close)
                vma->vm_ops->close(vma);

        if (vma->vm_file)
                fput(vma->vm_file);

        kfree(vma);
}

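/*
 * exynos_gem_get_pages_from_userptr - pin the pages backing a user address
 * range: for VM_PFNMAP (io) mappings the pfns are resolved page by page
 * with follow_pfn(), otherwise get_user_pages() pins them and any partial
 * pin is rolled back on failure.
 */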
int exynos_gem_get_pages_from_userptr(unsigned long start,
                                      unsigned int npages,
                                      struct page **pages,
                                      struct vm_area_struct *vma)
{
        int get_npages;

        /* the memory region mmaped with VM_PFNMAP. */
        if (vma_is_io(vma)) {
                unsigned int i;

                for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
                        unsigned long pfn;
                        int ret = follow_pfn(vma, start, &pfn);
                        if (ret)
                                return ret;

                        pages[i] = pfn_to_page(pfn);
                }

                if (i != npages) {
                        DRM_ERROR("failed to get user_pages.\n");
                        return -EINVAL;
                }

                return 0;
        }

        get_npages = get_user_pages(current, current->mm, start,
                                        npages, 1, 1, pages, NULL);
        get_npages = max(get_npages, 0);
        if (get_npages != npages) {
                DRM_ERROR("failed to get user_pages.\n");
                while (get_npages)
                        put_page(pages[--get_npages]);
                return -EFAULT;
        }

        return 0;
}

void exynos_gem_put_pages_to_userptr(struct page **pages,
                                     unsigned int npages,
                                     struct vm_area_struct *vma)
{
        if (!vma_is_io(vma)) {
                unsigned int i;

                for (i = 0; i < npages; i++) {
                        set_page_dirty_lock(pages[i]);

                        /*
                         * undo the reference we took when populating
                         * the table.
                         */
                        put_page(pages[i]);
                }
        }
}

int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
                                struct sg_table *sgt,
                                enum dma_data_direction dir)
{
        int nents;

        mutex_lock(&drm_dev->struct_mutex);

        nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
        if (!nents) {
                DRM_ERROR("failed to map sgl with dma.\n");
                mutex_unlock(&drm_dev->struct_mutex);
                /* dma_map_sg() returns 0 on failure; propagate a real error. */
                return -EIO;
        }

        mutex_unlock(&drm_dev->struct_mutex);
        return 0;
}

void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
                                   struct sg_table *sgt,
                                   enum dma_data_direction dir)
{
        dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}

int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
        return 0;
}

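/*
 * exynos_drm_gem_free_object - gem core callback invoked when the last
 * reference to the object is dropped; tears down prime state for imported
 * buffers before destroying the object.
 */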
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct exynos_drm_gem_buf *buf;

        exynos_gem_obj = to_exynos_gem_obj(obj);
        buf = exynos_gem_obj->buffer;

        if (obj->import_attach)
                drm_prime_gem_destroy(obj, buf->sgt);

        exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
                               struct drm_device *dev,
                               struct drm_mode_create_dumb *args)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        int ret;

        /*
         * allocate memory to be used for framebuffer.
         * - this callback would be called by user application
         *      with DRM_IOCTL_MODE_CREATE_DUMB command.
         */
        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = args->pitch * args->height;

        exynos_gem_obj = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG |
                                                EXYNOS_BO_WC, args->size);
        if (IS_ERR(exynos_gem_obj))
                return PTR_ERR(exynos_gem_obj);

        ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
                        &args->handle);
        if (ret) {
                exynos_drm_gem_destroy(exynos_gem_obj);
                return ret;
        }

        return 0;
}

int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
                                   struct drm_device *dev, uint32_t handle,
                                   uint64_t *offset)
{
        struct drm_gem_object *obj;
        int ret = 0;

        mutex_lock(&dev->struct_mutex);

        /*
         * get offset of memory allocated for drm framebuffer.
         * - this callback would be called by user application
         *      with DRM_IOCTL_MODE_MAP_DUMB command.
         */
        obj = drm_gem_object_lookup(dev, file_priv, handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                ret = -EINVAL;
                goto unlock;
        }

        if (!obj->map_list.map) {
                ret = drm_gem_create_mmap_offset(obj);
                if (ret)
                        goto out;
        }

        *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
        DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
        drm_gem_object_unreference(obj);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
                                struct drm_device *dev,
                                unsigned int handle)
{
        int ret;

        /*
         * obj->refcount and obj->handle_count are decreased and
         * if both of them are 0 then exynos_drm_gem_free_object()
         * would be called by callback to release resources.
         */
        ret = drm_gem_handle_delete(file_priv, handle);
        if (ret < 0) {
                DRM_ERROR("failed to delete drm_gem_handle.\n");
                return ret;
        }

        return 0;
}

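/*
 * exynos_drm_gem_fault - page fault handler for mapped gem buffers; maps
 * the faulting page under dev->struct_mutex and converts the result into
 * a VM_FAULT_* code.
 */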
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_device *dev = obj->dev;
        unsigned long f_vaddr;
        pgoff_t page_offset;
        int ret;

        page_offset = ((unsigned long)vmf->virtual_address -
                        vma->vm_start) >> PAGE_SHIFT;
        f_vaddr = (unsigned long)vmf->virtual_address;

        mutex_lock(&dev->struct_mutex);

        ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
        if (ret < 0)
                DRM_ERROR("failed to map a buffer with user.\n");

        mutex_unlock(&dev->struct_mutex);

        return convert_to_vm_err_msg(ret);
}

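/*
 * exynos_drm_gem_mmap - fops->mmap entry point; lets the gem core set up
 * the vma, then forces VM_MIXEDMAP (pages are inserted individually on
 * fault) and applies the buffer's cache attributes.
 */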
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
        int ret;

        /* set vm_area_struct. */
        ret = drm_gem_mmap(filp, vma);
        if (ret < 0) {
                DRM_ERROR("failed to mmap.\n");
                return ret;
        }

        obj = vma->vm_private_data;
        exynos_gem_obj = to_exynos_gem_obj(obj);

        ret = check_gem_flags(exynos_gem_obj->flags);
        if (ret) {
                drm_gem_vm_close(vma);
                drm_gem_free_mmap_offset(obj);
                return ret;
        }

        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;

        update_vm_cache_attr(exynos_gem_obj, vma);

        return ret;
}