exynos_drm_gem.c

/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"

#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"

static unsigned int convert_to_vm_err_msg(int msg)
{
        unsigned int out_msg;

        switch (msg) {
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
                out_msg = VM_FAULT_NOPAGE;
                break;

        case -ENOMEM:
                out_msg = VM_FAULT_OOM;
                break;

        default:
                out_msg = VM_FAULT_SIGBUS;
                break;
        }

        return out_msg;
}

static int check_gem_flags(unsigned int flags)
{
        if (flags & ~(EXYNOS_BO_MASK)) {
                DRM_ERROR("invalid flags.\n");
                return -EINVAL;
        }

        return 0;
}

static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
                                        struct vm_area_struct *vma)
{
        DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);

        /* non-cacheable as default. */
        if (obj->flags & EXYNOS_BO_CACHABLE)
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        else if (obj->flags & EXYNOS_BO_WC)
                vma->vm_page_prot =
                        pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        else
                vma->vm_page_prot =
                        pgprot_noncached(vm_get_page_prot(vma->vm_flags));
}

static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
        if (!IS_NONCONTIG_BUFFER(flags)) {
                if (size >= SZ_1M)
                        return roundup(size, SECTION_SIZE);
                else if (size >= SZ_64K)
                        return roundup(size, SZ_64K);
        }

        return roundup(size, PAGE_SIZE);
}

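/*
 * Worked example (illustrative, assuming ARM's 1 MiB SECTION_SIZE): a
 * contiguous 1.5 MiB request rounds up to 2 MiB, a contiguous 80 KiB
 * request rounds up to 128 KiB, and a non-contiguous request only rounds
 * up to the next page, e.g. 5000 bytes -> 8192 with 4 KiB pages.
 */
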
struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
                                                gfp_t gfpmask)
{
        struct page *p, **pages;
        int i, npages;

        npages = obj->size >> PAGE_SHIFT;

        pages = drm_malloc_ab(npages, sizeof(struct page *));
        if (pages == NULL)
                return ERR_PTR(-ENOMEM);

        for (i = 0; i < npages; i++) {
                /* alloc_page() returns NULL on failure, not an ERR_PTR. */
                p = alloc_page(gfpmask);
                if (!p)
                        goto fail;
                pages[i] = p;
        }

        return pages;

fail:
        while (i--)
                __free_page(pages[i]);

        drm_free_large(pages);
        return ERR_PTR(-ENOMEM);
}

static void exynos_gem_put_pages(struct drm_gem_object *obj,
                                        struct page **pages,
                                        bool dirty, bool accessed)
{
        int i, npages;

        npages = obj->size >> PAGE_SHIFT;

        for (i = 0; i < npages; i++) {
                if (dirty)
                        set_page_dirty(pages[i]);

                if (accessed)
                        mark_page_accessed(pages[i]);

                /* Undo the reference we took when populating the table */
                page_cache_release(pages[i]);
        }

        drm_free_large(pages);
}

static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
                                        struct vm_area_struct *vma,
                                        unsigned long f_vaddr,
                                        pgoff_t page_offset)
{
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
        unsigned long pfn;

        if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
                if (!buf->pages)
                        return -EINTR;

                pfn = page_to_pfn(buf->pages[page_offset++]);
        } else
                pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;

        return vm_insert_mixed(vma, f_vaddr, pfn);
}

static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
{
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
        struct scatterlist *sgl;
        struct page **pages;
        unsigned int npages, i = 0;
        int ret;

        if (buf->pages) {
                DRM_DEBUG_KMS("already allocated.\n");
                return -EINVAL;
        }

        pages = exynos_gem_get_pages(obj, GFP_HIGHUSER_MOVABLE);
        if (IS_ERR(pages)) {
                DRM_ERROR("failed to get pages.\n");
                return PTR_ERR(pages);
        }

        npages = obj->size >> PAGE_SHIFT;
        buf->page_size = PAGE_SIZE;

        buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!buf->sgt) {
                DRM_ERROR("failed to allocate sg table.\n");
                ret = -ENOMEM;
                goto err;
        }

        ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
        if (ret < 0) {
                DRM_ERROR("failed to initialize sg table.\n");
                ret = -EFAULT;
                goto err1;
        }

        sgl = buf->sgt->sgl;

        /* add all pages to the sg list. */
        while (i < npages) {
                sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
                sg_dma_address(sgl) = page_to_phys(pages[i]);
                i++;
                sgl = sg_next(sgl);
        }

        /* add some codes for UNCACHED type here. TODO */

        buf->pages = pages;

        return ret;
err1:
        kfree(buf->sgt);
        buf->sgt = NULL;
err:
        exynos_gem_put_pages(obj, pages, true, false);
        return ret;
}

static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
{
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;

        /*
         * if buffer type is EXYNOS_BO_NONCONTIG then release all pages
         * allocated at gem fault handler.
         */
        sg_free_table(buf->sgt);
        kfree(buf->sgt);
        buf->sgt = NULL;

        exynos_gem_put_pages(obj, buf->pages, true, false);
        buf->pages = NULL;

        /* add some codes for UNCACHED type here. TODO */
}

static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
                                        struct drm_file *file_priv,
                                        unsigned int *handle)
{
        int ret;

        /*
         * allocate an id from the idr table where the obj is registered;
         * the handle holds the id that user space sees.
         */
        ret = drm_gem_handle_create(file_priv, obj, handle);
        if (ret)
                return ret;

        DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

        /* drop reference from allocate - handle holds it now. */
        drm_gem_object_unreference_unlocked(obj);

        return 0;
}

void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
        struct drm_gem_object *obj;
        struct exynos_drm_gem_buf *buf;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        obj = &exynos_gem_obj->base;
        buf = exynos_gem_obj->buffer;

        DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));

        if (!buf->pages)
                return;

        /*
         * do not release memory region from exporter.
         *
         * the region will be released by exporter
         * once dmabuf's refcount becomes 0.
         */
        if (obj->import_attach)
                goto out;

        if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
                exynos_drm_gem_put_pages(obj);
        else
                exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);

out:
        exynos_drm_fini_buf(obj->dev, buf);
        exynos_gem_obj->buffer = NULL;

        if (obj->map_list.map)
                drm_gem_free_mmap_offset(obj);

        /* release file pointer to gem object. */
        drm_gem_object_release(obj);

        kfree(exynos_gem_obj);
}

struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
                                                unsigned long size)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
        int ret;

        exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
        if (!exynos_gem_obj) {
                DRM_ERROR("failed to allocate exynos gem object\n");
                return NULL;
        }

        exynos_gem_obj->size = size;
        obj = &exynos_gem_obj->base;

        ret = drm_gem_object_init(dev, obj, size);
        if (ret < 0) {
                DRM_ERROR("failed to initialize gem object\n");
                kfree(exynos_gem_obj);
                return NULL;
        }

        DRM_DEBUG_KMS("created file object = %p\n", obj->filp);

        return exynos_gem_obj;
}

struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
                                                unsigned int flags,
                                                unsigned long size)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct exynos_drm_gem_buf *buf;
        int ret;

        if (!size) {
                DRM_ERROR("invalid size.\n");
                return ERR_PTR(-EINVAL);
        }

        size = roundup_gem_size(size, flags);
        DRM_DEBUG_KMS("%s\n", __FILE__);

        ret = check_gem_flags(flags);
        if (ret)
                return ERR_PTR(ret);

        buf = exynos_drm_init_buf(dev, size);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        exynos_gem_obj = exynos_drm_gem_init(dev, size);
        if (!exynos_gem_obj) {
                ret = -ENOMEM;
                goto err_fini_buf;
        }

        exynos_gem_obj->buffer = buf;

        /* set memory type and cache attribute from user side. */
        exynos_gem_obj->flags = flags;

        /*
         * allocate all pages for the desired size if user wants
         * physically non-contiguous memory.
         */
        if (flags & EXYNOS_BO_NONCONTIG) {
                ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
                if (ret < 0) {
                        drm_gem_object_release(&exynos_gem_obj->base);
                        goto err_fini_buf;
                }
        } else {
                ret = exynos_drm_alloc_buf(dev, buf, flags);
                if (ret < 0) {
                        drm_gem_object_release(&exynos_gem_obj->base);
                        goto err_fini_buf;
                }
        }

        return exynos_gem_obj;

err_fini_buf:
        exynos_drm_fini_buf(dev, buf);

        return ERR_PTR(ret);
}

int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct drm_exynos_gem_create *args = data;
        struct exynos_drm_gem_obj *exynos_gem_obj;
        int ret;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
        if (IS_ERR(exynos_gem_obj))
                return PTR_ERR(exynos_gem_obj);

        ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
                        &args->handle);
        if (ret) {
                exynos_drm_gem_destroy(exynos_gem_obj);
                return ret;
        }

        return 0;
}

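/*
 * Example (user space, illustrative): a minimal sketch of driving the
 * create ioctl above through libdrm. The descriptor, size, and flag
 * choices are assumptions; error handling is elided.
 *
 *      struct drm_exynos_gem_create req = {
 *              .size = 4096,
 *              .flags = EXYNOS_BO_NONCONTIG | EXYNOS_BO_CACHABLE,
 *      };
 *
 *      if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req) == 0)
 *              printf("gem handle = %u\n", req.handle);
 */
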
void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
                                        unsigned int gem_handle,
                                        struct drm_file *file_priv)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return ERR_PTR(-EINVAL);
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
                DRM_DEBUG_KMS("not support NONCONTIG type.\n");
                drm_gem_object_unreference_unlocked(obj);

                /* TODO */
                return ERR_PTR(-EINVAL);
        }

        return &exynos_gem_obj->buffer->dma_addr;
}

void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
                                        unsigned int gem_handle,
                                        struct drm_file *file_priv)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
                DRM_DEBUG_KMS("not support NONCONTIG type.\n");
                drm_gem_object_unreference_unlocked(obj);

                /* TODO */
                return;
        }

        drm_gem_object_unreference_unlocked(obj);

        /*
         * decrease obj->refcount one more time because we have already
         * increased it at exynos_drm_gem_get_dma_addr().
         */
        drm_gem_object_unreference_unlocked(obj);
}

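/*
 * Example (kernel side, illustrative): sketch of the intended pairing of
 * the two helpers above; drm_dev, handle, and file_priv are assumptions.
 * exynos_drm_gem_get_dma_addr() leaves the object referenced, and the
 * matching exynos_drm_gem_put_dma_addr() call drops that reference.
 *
 *      dma_addr_t *addr;
 *
 *      addr = exynos_drm_gem_get_dma_addr(drm_dev, handle, file_priv);
 *      if (IS_ERR(addr))
 *              return PTR_ERR(addr);
 *
 *      ... program the device with *addr ...
 *
 *      exynos_drm_gem_put_dma_addr(drm_dev, handle, file_priv);
 */
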
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv)
{
        struct drm_exynos_gem_map_off *args = data;

        DRM_DEBUG_KMS("%s\n", __FILE__);
        DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
                        args->handle, (unsigned long)args->offset);

        if (!(dev->driver->driver_features & DRIVER_GEM)) {
                DRM_ERROR("does not support GEM.\n");
                return -ENODEV;
        }

        return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
                        &args->offset);
}

static int exynos_drm_gem_mmap_buffer(struct file *filp,
                                      struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = filp->private_data;
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct exynos_drm_gem_buf *buffer;
        unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
        int ret;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        vma->vm_flags |= (VM_IO | VM_RESERVED);

        update_vm_cache_attr(exynos_gem_obj, vma);

        vm_size = usize = vma->vm_end - vma->vm_start;

        /*
         * a buffer contains information about physically contiguous memory
         * allocated by user request or at framebuffer creation.
         */
        buffer = exynos_gem_obj->buffer;

        /* check if user-requested size is valid. */
        if (vm_size > buffer->size)
                return -EINVAL;

        if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
                int i = 0;

                if (!buffer->pages)
                        return -EINVAL;

                vma->vm_flags |= VM_MIXEDMAP;

                do {
                        ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
                        if (ret) {
                                DRM_ERROR("failed to remap user space.\n");
                                return ret;
                        }

                        uaddr += PAGE_SIZE;
                        usize -= PAGE_SIZE;
                } while (usize > 0);
        } else {
                /*
                 * get page frame number of physical memory to be mapped
                 * to user space.
                 */
                pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
                                PAGE_SHIFT;

                DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);

                if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
                                        vma->vm_page_prot)) {
                        DRM_ERROR("failed to remap pfn range.\n");
                        return -EAGAIN;
                }
        }

        return 0;
}

static const struct file_operations exynos_drm_gem_fops = {
        .mmap = exynos_drm_gem_mmap_buffer,
};

int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_exynos_gem_mmap *args = data;
        struct drm_gem_object *obj;
        unsigned long addr;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        if (!(dev->driver->driver_features & DRIVER_GEM)) {
                DRM_ERROR("does not support GEM.\n");
                return -ENODEV;
        }

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return -EINVAL;
        }

        obj->filp->f_op = &exynos_drm_gem_fops;
        obj->filp->private_data = obj;

        addr = vm_mmap(obj->filp, 0, args->size,
                        PROT_READ | PROT_WRITE, MAP_SHARED, 0);

        drm_gem_object_unreference_unlocked(obj);

        if (IS_ERR((void *)addr))
                return PTR_ERR((void *)addr);

        args->mapped = addr;

        DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);

        return 0;
}

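/*
 * Example (user space, illustrative): sketch of mapping a buffer with the
 * mmap ioctl above, assuming a handle and size from a prior
 * DRM_IOCTL_EXYNOS_GEM_CREATE call; error handling is elided.
 *
 *      struct drm_exynos_gem_mmap req = {
 *              .handle = handle,
 *              .size = size,
 *      };
 *
 *      if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_MMAP, &req) == 0)
 *              memset((void *)(unsigned long)req.mapped, 0, size);
 */
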
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_exynos_gem_info *args = data;
        struct drm_gem_object *obj;

        mutex_lock(&dev->struct_mutex);

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        args->flags = exynos_gem_obj->flags;
        args->size = exynos_gem_obj->size;

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}

int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
        DRM_DEBUG_KMS("%s\n", __FILE__);

        return 0;
}

void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct exynos_drm_gem_buf *buf;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        exynos_gem_obj = to_exynos_gem_obj(obj);
        buf = exynos_gem_obj->buffer;

        if (obj->import_attach)
                drm_prime_gem_destroy(obj, buf->sgt);

        exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}

int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
                               struct drm_device *dev,
                               struct drm_mode_create_dumb *args)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        int ret;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        /*
         * allocate memory to be used for framebuffer.
         * - this callback would be called by user application
         *      with DRM_IOCTL_MODE_CREATE_DUMB command.
         */
        args->pitch = args->width * ((args->bpp + 7) / 8);
        args->size = PAGE_ALIGN(args->pitch * args->height);

        exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
        if (IS_ERR(exynos_gem_obj))
                return PTR_ERR(exynos_gem_obj);

        ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
                        &args->handle);
        if (ret) {
                exynos_drm_gem_destroy(exynos_gem_obj);
                return ret;
        }

        return 0;
}

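/*
 * Worked example (illustrative): for a 1280x720 XRGB8888 dumb buffer
 * (bpp = 32), pitch = 1280 * ((32 + 7) / 8) = 5120 bytes and
 * size = PAGE_ALIGN(5120 * 720) = 3686400 bytes, which is already a
 * multiple of the 4 KiB page size.
 */
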
int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
                                   struct drm_device *dev, uint32_t handle,
                                   uint64_t *offset)
{
        struct drm_gem_object *obj;
        int ret = 0;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        mutex_lock(&dev->struct_mutex);

        /*
         * get offset of memory allocated for drm framebuffer.
         * - this callback would be called by user application
         *      with DRM_IOCTL_MODE_MAP_DUMB command.
         */
        obj = drm_gem_object_lookup(dev, file_priv, handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                ret = -EINVAL;
                goto unlock;
        }

        if (!obj->map_list.map) {
                ret = drm_gem_create_mmap_offset(obj);
                if (ret)
                        goto out;
        }

        *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
        DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
        drm_gem_object_unreference(obj);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}

int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
                                struct drm_device *dev,
                                unsigned int handle)
{
        int ret;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        /*
         * obj->refcount and obj->handle_count are decreased and
         * if both of them reach 0 then exynos_drm_gem_free_object()
         * would be called by callback to release resources.
         */
        ret = drm_gem_handle_delete(file_priv, handle);
        if (ret < 0) {
                DRM_ERROR("failed to delete drm_gem_handle.\n");
                return ret;
        }

        return 0;
}

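/*
 * Fault handler for mappings set up through drm_gem_mmap() below: it
 * resolves one faulting page at a time. NONCONTIG buffers insert the
 * backing page for the faulting offset; contiguous buffers derive the
 * pfn directly from the buffer's dma_addr.
 */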
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_device *dev = obj->dev;
        unsigned long f_vaddr;
        pgoff_t page_offset;
        int ret;

        page_offset = ((unsigned long)vmf->virtual_address -
                        vma->vm_start) >> PAGE_SHIFT;
        f_vaddr = (unsigned long)vmf->virtual_address;

        mutex_lock(&dev->struct_mutex);

        ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
        if (ret < 0)
                DRM_ERROR("failed to map pages.\n");

        mutex_unlock(&dev->struct_mutex);

        return convert_to_vm_err_msg(ret);
}

int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
        int ret;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        /* set vm_area_struct. */
        ret = drm_gem_mmap(filp, vma);
        if (ret < 0) {
                DRM_ERROR("failed to mmap.\n");
                return ret;
        }

        obj = vma->vm_private_data;
        exynos_gem_obj = to_exynos_gem_obj(obj);

        ret = check_gem_flags(exynos_gem_obj->flags);
        if (ret) {
                drm_gem_vm_close(vma);
                drm_gem_free_mmap_offset(obj);
                return ret;
        }

        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;

        update_vm_cache_attr(exynos_gem_obj, vma);

        return ret;
}