/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 */

#include <drm/drmP.h>
#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"

static unsigned int convert_to_vm_err_msg(int msg)
{
	unsigned int out_msg;

	switch (msg) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		out_msg = VM_FAULT_NOPAGE;
		break;

	case -ENOMEM:
		out_msg = VM_FAULT_OOM;
		break;

	default:
		out_msg = VM_FAULT_SIGBUS;
		break;
	}

	return out_msg;
}

static int check_gem_flags(unsigned int flags)
{
	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid flags.\n");
		return -EINVAL;
	}

	return 0;
}

static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
					struct vm_area_struct *vma)
{
	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);

	/* non-cacheable by default. */
	if (obj->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (obj->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
}

static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
	/* TODO */

	return roundup(size, PAGE_SIZE);
}

static int exynos_drm_gem_map_buf(struct drm_gem_object *obj,
					struct vm_area_struct *vma,
					unsigned long f_vaddr,
					pgoff_t page_offset)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
	struct scatterlist *sgl;
	unsigned long pfn;
	int i;

	if (!buf->sgt)
		return -EINTR;

	if (page_offset >= (buf->size >> PAGE_SHIFT)) {
		DRM_ERROR("invalid page offset\n");
		return -EINVAL;
	}

	sgl = buf->sgt->sgl;
	for_each_sg(buf->sgt->sgl, sgl, buf->sgt->nents, i) {
		if (page_offset < (sgl->length >> PAGE_SHIFT))
			break;
		page_offset -= (sgl->length >> PAGE_SHIFT);
	}

	pfn = __phys_to_pfn(sg_phys(sgl)) + page_offset;

	return vm_insert_mixed(vma, f_vaddr, pfn);
}
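
/*
 * Worked example for the scatterlist walk above (illustrative only,
 * not part of this driver): with two sg entries of 16K and 32K and a
 * faulting page_offset of 5, the loop subtracts the first entry's four
 * pages and stops at the second entry with page_offset == 1, so the
 * pfn inserted is that entry's start pfn plus one page.
 */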

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * allocate an id in the idr table where the obj is registered;
	 * the handle holds the id that user space can see.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}
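
/*
 * Call sketch (illustrative only): for a freshly created object whose
 * single reference came from allocation, a successful call transfers
 * that reference to the handle, so the caller must not drop it again.
 *
 *	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base,
 *						file_priv, &handle);
 *	if (ret)
 *		exynos_drm_gem_destroy(exynos_gem_obj);
 */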

void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
	struct drm_gem_object *obj;
	struct exynos_drm_gem_buf *buf;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	obj = &exynos_gem_obj->base;
	buf = exynos_gem_obj->buffer;

	DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));

	/*
	 * do not release the memory region from the exporter.
	 *
	 * the region will be released by the exporter
	 * once the dmabuf's refcount becomes 0.
	 */
	if (obj->import_attach)
		goto out;

	exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);

out:
	exynos_drm_fini_buf(obj->dev, buf);
	exynos_gem_obj->buffer = NULL;

	if (obj->map_list.map)
		drm_gem_free_mmap_offset(obj);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem_obj);
	exynos_gem_obj = NULL;
}

struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
						unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
	if (!exynos_gem_obj) {
		DRM_ERROR("failed to allocate exynos gem object\n");
		return NULL;
	}

	exynos_gem_obj->size = size;
	obj = &exynos_gem_obj->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem_obj);
		return NULL;
	}

	DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);

	return exynos_gem_obj;
}

struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
						unsigned int flags,
						unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;
	int ret;

	if (!size) {
		DRM_ERROR("invalid size.\n");
		return ERR_PTR(-EINVAL);
	}

	size = roundup_gem_size(size, flags);
	DRM_DEBUG_KMS("%s\n", __FILE__);

	ret = check_gem_flags(flags);
	if (ret)
		return ERR_PTR(ret);

	buf = exynos_drm_init_buf(dev, size);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	exynos_gem_obj = exynos_drm_gem_init(dev, size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_fini_buf;
	}

	exynos_gem_obj->buffer = buf;

	/* set memory type and cache attribute from user side. */
	exynos_gem_obj->flags = flags;

	ret = exynos_drm_alloc_buf(dev, buf, flags);
	if (ret < 0) {
		drm_gem_object_release(&exynos_gem_obj->base);
		goto err_fini_buf;
	}

	return exynos_gem_obj;

err_fini_buf:
	exynos_drm_fini_buf(dev, buf);
	return ERR_PTR(ret);
}
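
/*
 * In-kernel usage sketch (illustrative only, not from this file):
 * allocate a physically contiguous, write-combined buffer.
 *
 *	struct exynos_drm_gem_obj *gem;
 *
 *	gem = exynos_drm_gem_create(dev, EXYNOS_BO_CONTIG | EXYNOS_BO_WC,
 *					roundup(len, PAGE_SIZE));
 *	if (IS_ERR(gem))
 *		return PTR_ERR(gem);
 */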

int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}
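
/*
 * Userspace sketch (illustrative only, assuming an open DRM fd and the
 * uapi definitions from this tree's exynos_drm.h):
 *
 *	struct drm_exynos_gem_create req = {
 *		.size = len,
 *		.flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_CACHABLE,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req))
 *		return -errno;
 *	req.handle now names the buffer for later ioctls.
 */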

dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	return &exynos_gem_obj->buffer->dma_addr;
}

void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *filp)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, filp, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	drm_gem_object_unreference_unlocked(obj);

	/*
	 * decrease obj->refcount one more time because we have already
	 * increased it at exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
}
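
/*
 * Pairing sketch (illustrative only): a consumer holds the DMA address
 * between a matched get/put; the second unreference above balances the
 * lookup reference taken in exynos_drm_gem_get_dma_addr().
 *
 *	dma_addr_t *addr;
 *
 *	addr = exynos_drm_gem_get_dma_addr(dev, handle, filp);
 *	if (IS_ERR(addr))
 *		return PTR_ERR(addr);
 *	(program the hardware with *addr)
 *	exynos_drm_gem_put_dma_addr(dev, handle, filp);
 */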

int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv)
{
	struct drm_exynos_gem_map_off *args = data;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
			args->handle, (unsigned long)args->offset);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
			&args->offset);
}

static struct drm_file *exynos_drm_find_drm_file(struct drm_device *drm_dev,
						struct file *filp)
{
	struct drm_file *file_priv;

	/* find current process's drm_file from filelist. */
	list_for_each_entry(file_priv, &drm_dev->filelist, lhead)
		if (file_priv->filp == filp)
			return file_priv;

	WARN_ON(1);

	return ERR_PTR(-EFAULT);
}

static int exynos_drm_gem_mmap_buffer(struct file *filp,
				      struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = filp->private_data;
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct drm_device *drm_dev = obj->dev;
	struct exynos_drm_gem_buf *buffer;
	struct drm_file *file_priv;
	unsigned long vm_size;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	vma->vm_flags |= VM_IO | VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = obj;
	vma->vm_ops = drm_dev->driver->gem_vm_ops;

	/* restore it to driver's fops. */
	filp->f_op = fops_get(drm_dev->driver->fops);

	file_priv = exynos_drm_find_drm_file(drm_dev, filp);
	if (IS_ERR(file_priv))
		return PTR_ERR(file_priv);

	/* restore it to drm_file. */
	filp->private_data = file_priv;

	update_vm_cache_attr(exynos_gem_obj, vma);

	vm_size = vma->vm_end - vma->vm_start;

	/*
	 * a buffer contains information about physically contiguous memory
	 * allocated by user request or at framebuffer creation.
	 */
	buffer = exynos_gem_obj->buffer;

	/* check if user-requested size is valid. */
	if (vm_size > buffer->size)
		return -EINVAL;

	ret = dma_mmap_attrs(drm_dev->dev, vma, buffer->pages,
				buffer->dma_addr, buffer->size,
				&buffer->dma_attrs);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	/*
	 * take a reference to this mapping of the object; it is dropped
	 * by the corresponding vm_close call.
	 */
	drm_gem_object_reference(obj);

	drm_vm_open_locked(drm_dev, vma);

	return 0;
}

static const struct file_operations exynos_drm_gem_fops = {
	.mmap = exynos_drm_gem_mmap_buffer,
};

int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_exynos_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned int addr;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}

	/*
	 * We have to use the gem object and its fops for the buffer-specific
	 * mmapper, but vm_mmap() can deliver only filp. So we have to change
	 * filp->f_op and filp->private_data temporarily, then restore them.
	 * It is important to hold the lock until the settings are restored,
	 * to prevent others from misusing filp->f_op or filp->private_data.
	 */
	mutex_lock(&dev->struct_mutex);

	/*
	 * Set the specific mmapper's fops; exynos_drm_gem_mmap_buffer()
	 * restores them to dev->driver->fops. This is used to call the
	 * specific mapper temporarily.
	 */
	file_priv->filp->f_op = &exynos_drm_gem_fops;

	/*
	 * Store the gem object in private_data so that the specific mmapper
	 * can get at it; exynos_drm_gem_mmap_buffer() restores it to the
	 * drm_file.
	 */
	file_priv->filp->private_data = obj;

	addr = vm_mmap(file_priv->filp, 0, args->size,
			PROT_READ | PROT_WRITE, MAP_SHARED, 0);

	drm_gem_object_unreference(obj);

	if (IS_ERR((void *)addr)) {
		/* check that filp->f_op and filp->private_data are restored */
		if (file_priv->filp->f_op == &exynos_drm_gem_fops) {
			file_priv->filp->f_op = fops_get(dev->driver->fops);
			file_priv->filp->private_data = file_priv;
		}
		mutex_unlock(&dev->struct_mutex);
		return PTR_ERR((void *)addr);
	}

	mutex_unlock(&dev->struct_mutex);

	args->mapped = addr;

	DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);

	return 0;
}
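
/*
 * Userspace sketch (illustrative only, assuming an open DRM fd and the
 * uapi struct drm_exynos_gem_mmap from this tree):
 *
 *	struct drm_exynos_gem_mmap req = {
 *		.handle = handle,
 *		.size = len,
 *	};
 *
 *	if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_MMAP, &req))
 *		return -errno;
 *	ptr = (void *)(unsigned long)req.mapped;
 */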

int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	args->flags = exynos_gem_obj->flags;
	args->size = exynos_gem_obj->size;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}

struct vm_area_struct *exynos_gem_get_vma(struct vm_area_struct *vma)
{
	struct vm_area_struct *vma_copy;

	vma_copy = kmalloc(sizeof(*vma_copy), GFP_KERNEL);
	if (!vma_copy)
		return NULL;

	if (vma->vm_ops && vma->vm_ops->open)
		vma->vm_ops->open(vma);

	if (vma->vm_file)
		get_file(vma->vm_file);

	memcpy(vma_copy, vma, sizeof(*vma));

	vma_copy->vm_mm = NULL;
	vma_copy->vm_next = NULL;
	vma_copy->vm_prev = NULL;

	return vma_copy;
}

void exynos_gem_put_vma(struct vm_area_struct *vma)
{
	if (!vma)
		return;

	if (vma->vm_ops && vma->vm_ops->close)
		vma->vm_ops->close(vma);

	if (vma->vm_file)
		fput(vma->vm_file);

	kfree(vma);
}

int exynos_gem_get_pages_from_userptr(unsigned long start,
						unsigned int npages,
						struct page **pages,
						struct vm_area_struct *vma)
{
	int get_npages;

	/* the memory region was mmapped with VM_PFNMAP. */
	if (vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < npages; ++i, start += PAGE_SIZE) {
			unsigned long pfn;
			int ret = follow_pfn(vma, start, &pfn);
			if (ret)
				return ret;

			pages[i] = pfn_to_page(pfn);
		}

		if (i != npages) {
			DRM_ERROR("failed to get user_pages.\n");
			return -EINVAL;
		}

		return 0;
	}

	get_npages = get_user_pages(current, current->mm, start,
					npages, 1, 1, pages, NULL);
	get_npages = max(get_npages, 0);
	if (get_npages != npages) {
		DRM_ERROR("failed to get user_pages.\n");
		while (get_npages)
			put_page(pages[--get_npages]);
		return -EFAULT;
	}

	return 0;
}

void exynos_gem_put_pages_to_userptr(struct page **pages,
					unsigned int npages,
					struct vm_area_struct *vma)
{
	if (!vma_is_io(vma)) {
		unsigned int i;

		for (i = 0; i < npages; i++) {
			set_page_dirty_lock(pages[i]);

			/*
			 * undo the reference we took when populating
			 * the table.
			 */
			put_page(pages[i]);
		}
	}
}
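
/*
 * Usage sketch (illustrative only): the two helpers above bracket
 * access to a userptr region; pages pinned by the get side must be
 * released by the put side once the device is done with them.
 *
 *	ret = exynos_gem_get_pages_from_userptr(start, npages, pages, vma);
 *	if (ret < 0)
 *		return ret;
 *	(build an sg table from pages and map it for DMA)
 *	exynos_gem_put_pages_to_userptr(pages, npages, vma);
 */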

int exynos_gem_map_sgt_with_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	int nents;

	mutex_lock(&drm_dev->struct_mutex);

	nents = dma_map_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
	if (!nents) {
		DRM_ERROR("failed to map sgl with dma.\n");
		mutex_unlock(&drm_dev->struct_mutex);
		/* dma_map_sg() returns 0 on failure; report a real error */
		return -EIO;
	}

	mutex_unlock(&drm_dev->struct_mutex);
	return 0;
}

void exynos_gem_unmap_sgt_from_dma(struct drm_device *drm_dev,
				struct sg_table *sgt,
				enum dma_data_direction dir)
{
	dma_unmap_sg(drm_dev->dev, sgt->sgl, sgt->nents, dir);
}
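
/*
 * Usage sketch (illustrative only): map for the device, run the
 * transfer, then unmap with the same direction.
 *
 *	ret = exynos_gem_map_sgt_with_dma(drm_dev, sgt, DMA_TO_DEVICE);
 *	if (ret < 0)
 *		return ret;
 *	(kick off the DMA transfer and wait for completion)
 *	exynos_gem_unmap_sgt_from_dma(drm_dev, sgt, DMA_TO_DEVICE);
 */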

int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
	DRM_DEBUG_KMS("%s\n", __FILE__);

	return 0;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_gem_obj = to_exynos_gem_obj(obj);
	buf = exynos_gem_obj->buffer;

	if (obj->import_attach)
		drm_prime_gem_destroy(obj, buf->sgt);

	exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * allocate memory to be used for the framebuffer.
	 * - this callback would be called by a user application
	 *	with the DRM_IOCTL_MODE_CREATE_DUMB command.
	 */
	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}
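
/*
 * Userspace sketch (illustrative only): the generic dumb-buffer flow
 * that reaches the callbacks above and below.
 *
 *	struct drm_mode_create_dumb creq = {
 *		.width = 1280, .height = 720, .bpp = 32,
 *	};
 *	struct drm_mode_map_dumb mreq = { 0 };
 *
 *	drmIoctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &creq);
 *	mreq.handle = creq.handle;
 *	drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);
 *	map = mmap(0, creq.size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *			fd, mreq.offset);
 */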

int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	mutex_lock(&dev->struct_mutex);

	/*
	 * get the offset of memory allocated for a drm framebuffer.
	 * - this callback would be called by a user application
	 *	with the DRM_IOCTL_MODE_MAP_DUMB command.
	 */
	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto unlock;
	}

	if (!obj->map_list.map) {
		ret = drm_gem_create_mmap_offset(obj);
		if (ret)
			goto out;
	}

	*offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}

int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
				struct drm_device *dev,
				unsigned int handle)
{
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * obj->refcount and obj->handle_count are decreased and
	 * if both of them are 0 then exynos_drm_gem_free_object()
	 * would be called by a callback to release resources.
	 */
	ret = drm_gem_handle_delete(file_priv, handle);
	if (ret < 0) {
		DRM_ERROR("failed to delete drm_gem_handle.\n");
		return ret;
	}

	return 0;
}

int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	unsigned long f_vaddr;
	pgoff_t page_offset;
	int ret;

	page_offset = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;
	f_vaddr = (unsigned long)vmf->virtual_address;

	mutex_lock(&dev->struct_mutex);

	ret = exynos_drm_gem_map_buf(obj, vma, f_vaddr, page_offset);
	if (ret < 0)
		DRM_ERROR("failed to map a buffer with user.\n");

	mutex_unlock(&dev->struct_mutex);

	return convert_to_vm_err_msg(ret);
}

int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;
	exynos_gem_obj = to_exynos_gem_obj(obj);

	ret = check_gem_flags(exynos_gem_obj->flags);
	if (ret) {
		drm_gem_vm_close(vma);
		drm_gem_free_mmap_offset(obj);
		return ret;
	}

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	update_vm_cache_attr(exynos_gem_obj, vma);

	return ret;
}