exynos_drm_gem.c
/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <drm/drmP.h>
#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
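
/*
 * Translate an error code returned on the fault path into the VM_FAULT_*
 * code that the mm core expects from a fault handler.
 */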
static unsigned int convert_to_vm_err_msg(int msg)
{
	unsigned int out_msg;

	switch (msg) {
	case 0:
	case -ERESTARTSYS:
	case -EINTR:
		out_msg = VM_FAULT_NOPAGE;
		break;
	case -ENOMEM:
		out_msg = VM_FAULT_OOM;
		break;
	default:
		out_msg = VM_FAULT_SIGBUS;
		break;
	}

	return out_msg;
}
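
/* Reject any flag bits outside of the EXYNOS_BO_* mask. */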
static int check_gem_flags(unsigned int flags)
{
	if (flags & ~(EXYNOS_BO_MASK)) {
		DRM_ERROR("invalid flags.\n");
		return -EINVAL;
	}

	return 0;
}
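
/*
 * Apply the buffer object's cache attribute (cacheable, write-combined or
 * non-cached) to the page protection bits of a userspace mapping.
 */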
static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
					struct vm_area_struct *vma)
{
	DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);

	/* non-cacheable by default. */
	if (obj->flags & EXYNOS_BO_CACHABLE)
		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
	else if (obj->flags & EXYNOS_BO_WC)
		vma->vm_page_prot =
			pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
	else
		vma->vm_page_prot =
			pgprot_noncached(vm_get_page_prot(vma->vm_flags));
}
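
/*
 * Round the requested size up to an allocation-friendly boundary:
 * contiguous buffers are aligned to SECTION_SIZE (a 1MiB section on ARM)
 * or to 64KiB depending on their size, while non-contiguous buffers only
 * need page alignment.
 */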
static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
	if (!IS_NONCONTIG_BUFFER(flags)) {
		if (size >= SZ_1M)
			return roundup(size, SECTION_SIZE);
		else if (size >= SZ_64K)
			return roundup(size, SZ_64K);
		else
			goto out;
	}

out:
	return roundup(size, PAGE_SIZE);
}
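
/*
 * Allocate one page at a time until the object size is covered and return
 * the page array; on failure every page allocated so far is released.
 */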
struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
					gfp_t gfpmask)
{
	struct page *p, **pages;
	int i, npages;

	npages = obj->size >> PAGE_SHIFT;

	pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	for (i = 0; i < npages; i++) {
		/* alloc_page() returns NULL on failure, not an ERR_PTR. */
		p = alloc_page(gfpmask);
		if (!p)
			goto fail;
		pages[i] = p;
	}

	return pages;

fail:
	/* free pages[i - 1] down to and including pages[0]. */
	while (i--)
		__free_page(pages[i]);

	drm_free_large(pages);
	return ERR_PTR(-ENOMEM);
}
static void exynos_gem_put_pages(struct drm_gem_object *obj,
					struct page **pages)
{
	int npages;

	npages = obj->size >> PAGE_SHIFT;

	while (--npages >= 0)
		__free_page(pages[npages]);

	drm_free_large(pages);
}
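
/*
 * Resolve the pfn backing the faulting address, either from the page array
 * of a non-contiguous buffer or by offsetting into the contiguous DMA
 * region, and insert it into the userspace mapping.
 */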
static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
					struct vm_area_struct *vma,
					unsigned long f_vaddr,
					pgoff_t page_offset)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
	unsigned long pfn;

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		if (!buf->pages)
			return -EINTR;

		pfn = page_to_pfn(buf->pages[page_offset++]);
	} else
		pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;

	return vm_insert_mixed(vma, f_vaddr, pfn);
}
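
/*
 * Back a non-contiguous buffer: allocate the individual pages and describe
 * them with a scatter-gather table so devices can still address the buffer.
 */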
static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
	struct scatterlist *sgl;
	struct page **pages;
	unsigned int npages, i = 0;
	int ret;

	if (buf->pages) {
		DRM_DEBUG_KMS("already allocated.\n");
		return -EINVAL;
	}

	pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE);
	if (IS_ERR(pages)) {
		DRM_ERROR("failed to get pages.\n");
		return PTR_ERR(pages);
	}

	npages = obj->size >> PAGE_SHIFT;
	buf->page_size = PAGE_SIZE;

	buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!buf->sgt) {
		DRM_ERROR("failed to allocate sg table.\n");
		ret = -ENOMEM;
		goto err;
	}

	ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
	if (ret < 0) {
		DRM_ERROR("failed to initialize sg table.\n");
		ret = -EFAULT;
		goto err1;
	}

	sgl = buf->sgt->sgl;

	/* add all pages to the sg list. */
	while (i < npages) {
		sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
		sg_dma_address(sgl) = page_to_phys(pages[i]);
		i++;
		sgl = sg_next(sgl);
	}

	/* TODO: handle the UNCACHED type here. */

	buf->pages = pages;

	return ret;
err1:
	kfree(buf->sgt);
	buf->sgt = NULL;
err:
	exynos_gem_put_pages(obj, pages);
	return ret;
}
static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;

	/*
	 * if the buffer type is EXYNOS_BO_NONCONTIG then release all the
	 * pages allocated at the gem fault handler.
	 */
	sg_free_table(buf->sgt);
	kfree(buf->sgt);
	buf->sgt = NULL;

	exynos_gem_put_pages(obj, buf->pages);
	buf->pages = NULL;

	/* TODO: handle the UNCACHED type here. */
}
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
					struct drm_file *file_priv,
					unsigned int *handle)
{
	int ret;

	/*
	 * allocate an id from the idr table where the object is registered;
	 * the handle holds the id that userspace sees.
	 */
	ret = drm_gem_handle_create(file_priv, obj, handle);
	if (ret)
		return ret;

	DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

	/* drop reference from allocate - handle holds it now. */
	drm_gem_object_unreference_unlocked(obj);

	return 0;
}
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
	struct drm_gem_object *obj;
	struct exynos_drm_gem_buf *buf;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	obj = &exynos_gem_obj->base;
	buf = exynos_gem_obj->buffer;

	DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));

	if (!buf->pages)
		return;

	/*
	 * do not release the memory region from the exporter.
	 *
	 * the region will be released by the exporter
	 * once dmabuf's refcount becomes 0.
	 */
	if (obj->import_attach)
		goto out;

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
		exynos_drm_gem_put_pages(obj);
	else
		exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);

out:
	exynos_drm_fini_buf(obj->dev, buf);
	exynos_gem_obj->buffer = NULL;

	if (obj->map_list.map)
		drm_gem_free_mmap_offset(obj);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	kfree(exynos_gem_obj);
	exynos_gem_obj = NULL;
}
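
/*
 * Allocate the driver-private gem object and initialize the base GEM object
 * with a backing shmem file of the given size.
 */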
struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
						unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
	if (!exynos_gem_obj) {
		DRM_ERROR("failed to allocate exynos gem object\n");
		return NULL;
	}

	exynos_gem_obj->size = size;
	obj = &exynos_gem_obj->base;

	ret = drm_gem_object_init(dev, obj, size);
	if (ret < 0) {
		DRM_ERROR("failed to initialize gem object\n");
		kfree(exynos_gem_obj);
		return NULL;
	}

	/* print the pointer with %p instead of casting it to unsigned int. */
	DRM_DEBUG_KMS("created file object = %p\n", obj->filp);

	return exynos_gem_obj;
}
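
/*
 * Create a gem object of the requested size and memory type: non-contiguous
 * buffers are backed page by page, contiguous ones through the buffer
 * allocator in exynos_drm_buf.c.
 */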
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
						unsigned int flags,
						unsigned long size)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (!size) {
		DRM_ERROR("invalid size.\n");
		return ERR_PTR(-EINVAL);
	}

	size = roundup_gem_size(size, flags);

	ret = check_gem_flags(flags);
	if (ret)
		return ERR_PTR(ret);

	buf = exynos_drm_init_buf(dev, size);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	exynos_gem_obj = exynos_drm_gem_init(dev, size);
	if (!exynos_gem_obj) {
		ret = -ENOMEM;
		goto err_fini_buf;
	}

	exynos_gem_obj->buffer = buf;

	/* set memory type and cache attribute from user side. */
	exynos_gem_obj->flags = flags;

	/*
	 * allocate all pages as the desired size if the user wants
	 * physically non-contiguous memory.
	 */
	if (flags & EXYNOS_BO_NONCONTIG) {
		ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
		if (ret < 0) {
			drm_gem_object_release(&exynos_gem_obj->base);
			goto err_fini_buf;
		}
	} else {
		ret = exynos_drm_alloc_buf(dev, buf, flags);
		if (ret < 0) {
			drm_gem_object_release(&exynos_gem_obj->base);
			goto err_fini_buf;
		}
	}

	return exynos_gem_obj;

err_fini_buf:
	exynos_drm_fini_buf(dev, buf);
	return ERR_PTR(ret);
}
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct drm_exynos_gem_create *args = data;
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}
void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return ERR_PTR(-EINVAL);
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		DRM_DEBUG_KMS("NONCONTIG type is not supported.\n");
		drm_gem_object_unreference_unlocked(obj);

		/* TODO */
		return ERR_PTR(-EINVAL);
	}

	return &exynos_gem_obj->buffer->dma_addr;
}
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
					unsigned int gem_handle,
					struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		DRM_DEBUG_KMS("NONCONTIG type is not supported.\n");
		drm_gem_object_unreference_unlocked(obj);

		/* TODO */
		return;
	}

	drm_gem_object_unreference_unlocked(obj);

	/*
	 * decrease obj->refcount one more time because we have already
	 * increased it at exynos_drm_gem_get_dma_addr().
	 */
	drm_gem_object_unreference_unlocked(obj);
}
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *file_priv)
{
	struct drm_exynos_gem_map_off *args = data;

	DRM_DEBUG_KMS("%s\n", __FILE__);
	DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
			args->handle, (unsigned long)args->offset);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
			&args->offset);
}
static int exynos_drm_gem_mmap_buffer(struct file *filp,
				      struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = filp->private_data;
	struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
	struct exynos_drm_gem_buf *buffer;
	unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	vma->vm_flags |= (VM_IO | VM_RESERVED);

	update_vm_cache_attr(exynos_gem_obj, vma);

	vm_size = usize = vma->vm_end - vma->vm_start;

	/*
	 * the buffer holds information about the memory allocated
	 * by user request or at framebuffer creation.
	 */
	buffer = exynos_gem_obj->buffer;

	/* check if the user-requested size is valid. */
	if (vm_size > buffer->size)
		return -EINVAL;

	if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
		int i = 0;

		if (!buffer->pages)
			return -EINVAL;

		vma->vm_flags |= VM_MIXEDMAP;

		do {
			ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
			if (ret) {
				DRM_ERROR("failed to remap user space.\n");
				return ret;
			}

			uaddr += PAGE_SIZE;
			usize -= PAGE_SIZE;
		} while (usize > 0);
	} else {
		/*
		 * get the page frame number of the physical memory to be
		 * mapped to user space.
		 */
		pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
				PAGE_SHIFT;

		DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);

		if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
					vma->vm_page_prot)) {
			DRM_ERROR("failed to remap pfn range.\n");
			return -EAGAIN;
		}
	}

	return 0;
}
static const struct file_operations exynos_drm_gem_fops = {
	.mmap = exynos_drm_gem_mmap_buffer,
};
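
/*
 * The mmap ioctl points the gem object's shmem file at the private
 * file_operations above, so that the vm_mmap() call below lands in
 * exynos_drm_gem_mmap_buffer() and maps the buffer directly, rather than
 * going through the fault-driven drm_gem_mmap() path.
 */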
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
			      struct drm_file *file_priv)
{
	struct drm_exynos_gem_mmap *args = data;
	struct drm_gem_object *obj;
	unsigned long addr;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (!(dev->driver->driver_features & DRIVER_GEM)) {
		DRM_ERROR("does not support GEM.\n");
		return -ENODEV;
	}

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		return -EINVAL;
	}

	obj->filp->f_op = &exynos_drm_gem_fops;
	obj->filp->private_data = obj;

	/* vm_mmap() returns an unsigned long, so keep the full width. */
	addr = vm_mmap(obj->filp, 0, args->size,
			PROT_READ | PROT_WRITE, MAP_SHARED, 0);

	drm_gem_object_unreference_unlocked(obj);

	if (IS_ERR((void *)addr))
		return PTR_ERR((void *)addr);

	args->mapped = addr;

	DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);

	return 0;
}
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *file_priv)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_exynos_gem_info *args = data;
	struct drm_gem_object *obj;

	mutex_lock(&dev->struct_mutex);

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		mutex_unlock(&dev->struct_mutex);
		return -EINVAL;
	}

	exynos_gem_obj = to_exynos_gem_obj(obj);

	args->flags = exynos_gem_obj->flags;
	args->size = exynos_gem_obj->size;

	drm_gem_object_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	return 0;
}
int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
	DRM_DEBUG_KMS("%s\n", __FILE__);

	return 0;
}
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct exynos_drm_gem_buf *buf;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	exynos_gem_obj = to_exynos_gem_obj(obj);
	buf = exynos_gem_obj->buffer;

	if (obj->import_attach)
		drm_prime_gem_destroy(obj, buf->sgt);

	exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
			       struct drm_device *dev,
			       struct drm_mode_create_dumb *args)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * allocate memory to be used for the framebuffer.
	 * - this callback is called by a user application
	 * with the DRM_IOCTL_MODE_CREATE_DUMB command.
	 */
	args->pitch = args->width * ((args->bpp + 7) / 8);
	args->size = args->pitch * args->height;

	exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
	if (IS_ERR(exynos_gem_obj))
		return PTR_ERR(exynos_gem_obj);

	ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
			&args->handle);
	if (ret) {
		exynos_drm_gem_destroy(exynos_gem_obj);
		return ret;
	}

	return 0;
}
int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
				   struct drm_device *dev, uint32_t handle,
				   uint64_t *offset)
{
	struct drm_gem_object *obj;
	int ret = 0;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	mutex_lock(&dev->struct_mutex);

	/*
	 * get the offset of the memory allocated for a drm framebuffer.
	 * - this callback is called by a user application
	 * with the DRM_IOCTL_MODE_MAP_DUMB command.
	 */
	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object.\n");
		ret = -EINVAL;
		goto unlock;
	}

	if (!obj->map_list.map) {
		ret = drm_gem_create_mmap_offset(obj);
		if (ret)
			goto out;
	}

	*offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
	DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
	drm_gem_object_unreference(obj);
unlock:
	mutex_unlock(&dev->struct_mutex);
	return ret;
}
int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
				struct drm_device *dev,
				unsigned int handle)
{
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/*
	 * obj->refcount and obj->handle_count are decreased and,
	 * if both of them reach 0, exynos_drm_gem_free_object()
	 * is called by the callback to release the resources.
	 */
	ret = drm_gem_handle_delete(file_priv, handle);
	if (ret < 0) {
		DRM_ERROR("failed to delete drm_gem_handle.\n");
		return ret;
	}

	return 0;
}
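
/*
 * Page fault handler for mappings created through drm_gem_mmap(): compute
 * the page offset of the faulting address within the mapping and map the
 * corresponding page under struct_mutex.
 */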
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct drm_gem_object *obj = vma->vm_private_data;
	struct drm_device *dev = obj->dev;
	unsigned long f_vaddr;
	pgoff_t page_offset;
	int ret;

	page_offset = ((unsigned long)vmf->virtual_address -
			vma->vm_start) >> PAGE_SHIFT;
	f_vaddr = (unsigned long)vmf->virtual_address;

	mutex_lock(&dev->struct_mutex);

	ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
	if (ret < 0)
		DRM_ERROR("failed to map pages.\n");

	mutex_unlock(&dev->struct_mutex);

	return convert_to_vm_err_msg(ret);
}
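
/*
 * Driver mmap entry point: let drm_gem_mmap() set up the vma, then switch
 * the mapping to VM_MIXEDMAP so the fault handler can insert both normal
 * pages and pfns, and apply the object's cache attribute.
 */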
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct exynos_drm_gem_obj *exynos_gem_obj;
	struct drm_gem_object *obj;
	int ret;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	/* set vm_area_struct. */
	ret = drm_gem_mmap(filp, vma);
	if (ret < 0) {
		DRM_ERROR("failed to mmap.\n");
		return ret;
	}

	obj = vma->vm_private_data;
	exynos_gem_obj = to_exynos_gem_obj(obj);

	ret = check_gem_flags(exynos_gem_obj->flags);
	if (ret) {
		drm_gem_vm_close(vma);
		drm_gem_free_mmap_offset(obj);
		return ret;
	}

	vma->vm_flags &= ~VM_PFNMAP;
	vma->vm_flags |= VM_MIXEDMAP;

	update_vm_cache_attr(exynos_gem_obj, vma);

	return ret;
}