/* exynos_drm_gem.c
 *
 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
 * Author: Inki Dae <inki.dae@samsung.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "drmP.h"
#include "drm.h"

#include <linux/shmem_fs.h>
#include <drm/exynos_drm.h>

#include "exynos_drm_drv.h"
#include "exynos_drm_gem.h"
#include "exynos_drm_buf.h"
static unsigned int convert_to_vm_err_msg(int msg)
{
        unsigned int out_msg;

        switch (msg) {
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
                out_msg = VM_FAULT_NOPAGE;
                break;

        case -ENOMEM:
                out_msg = VM_FAULT_OOM;
                break;

        default:
                out_msg = VM_FAULT_SIGBUS;
                break;
        }

        return out_msg;
}
static int check_gem_flags(unsigned int flags)
{
        if (flags & ~(EXYNOS_BO_MASK)) {
                DRM_ERROR("invalid flags.\n");
                return -EINVAL;
        }

        return 0;
}
static void update_vm_cache_attr(struct exynos_drm_gem_obj *obj,
                                        struct vm_area_struct *vma)
{
        DRM_DEBUG_KMS("flags = 0x%x\n", obj->flags);

        /* non-cacheable by default. */
        if (obj->flags & EXYNOS_BO_CACHABLE)
                vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
        else if (obj->flags & EXYNOS_BO_WC)
                vma->vm_page_prot =
                        pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
        else
                vma->vm_page_prot =
                        pgprot_noncached(vm_get_page_prot(vma->vm_flags));
}
static unsigned long roundup_gem_size(unsigned long size, unsigned int flags)
{
        if (!IS_NONCONTIG_BUFFER(flags)) {
                if (size >= SZ_1M)
                        return roundup(size, SECTION_SIZE);
                else if (size >= SZ_64K)
                        return roundup(size, SZ_64K);
                else
                        goto out;
        }

out:
        return roundup(size, PAGE_SIZE);
}
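
/*
 * Worked example (SECTION_SIZE is 1 MiB on ARM without LPAE): a contiguous
 * request of 1.5 MiB rounds up to 2 MiB, a contiguous 100 KiB request
 * rounds up to 128 KiB (the next SZ_64K multiple), while a noncontiguous
 * 10 KiB request only rounds up to whole pages, i.e. 12 KiB with 4 KiB
 * pages.
 */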
struct page **exynos_gem_get_pages(struct drm_gem_object *obj,
                                                gfp_t gfpmask)
{
        struct inode *inode;
        struct address_space *mapping;
        struct page *p, **pages;
        int i, npages;

        /* This is the shared memory object that backs the GEM resource */
        inode = obj->filp->f_path.dentry->d_inode;
        mapping = inode->i_mapping;

        npages = obj->size >> PAGE_SHIFT;

        pages = drm_malloc_ab(npages, sizeof(struct page *));
        if (pages == NULL)
                return ERR_PTR(-ENOMEM);

        gfpmask |= mapping_gfp_mask(mapping);

        for (i = 0; i < npages; i++) {
                p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
                if (IS_ERR(p))
                        goto fail;
                pages[i] = p;
        }

        return pages;

fail:
        while (i--)
                page_cache_release(pages[i]);

        drm_free_large(pages);
        return ERR_PTR(PTR_ERR(p));
}
static void exynos_gem_put_pages(struct drm_gem_object *obj,
                                        struct page **pages,
                                        bool dirty, bool accessed)
{
        int i, npages;

        npages = obj->size >> PAGE_SHIFT;

        for (i = 0; i < npages; i++) {
                if (dirty)
                        set_page_dirty(pages[i]);

                if (accessed)
                        mark_page_accessed(pages[i]);

                /* Undo the reference we took when populating the table */
                page_cache_release(pages[i]);
        }

        drm_free_large(pages);
}
static int exynos_drm_gem_map_pages(struct drm_gem_object *obj,
                                        struct vm_area_struct *vma,
                                        unsigned long f_vaddr,
                                        pgoff_t page_offset)
{
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
        unsigned long pfn;

        if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
                if (!buf->pages)
                        return -EINTR;

                pfn = page_to_pfn(buf->pages[page_offset++]);
        } else
                pfn = (buf->dma_addr >> PAGE_SHIFT) + page_offset;

        return vm_insert_mixed(vma, f_vaddr, pfn);
}
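
/*
 * Illustrative arithmetic for the contiguous branch above: with a
 * hypothetical buf->dma_addr of 0x40000000 and a fault at page_offset 3,
 * pfn = (0x40000000 >> 12) + 3 = 0x40003, i.e. physical address
 * 0x40003000 with 4 KiB pages.
 */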
static int exynos_drm_gem_get_pages(struct drm_gem_object *obj)
{
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;
        struct scatterlist *sgl;
        struct page **pages;
        unsigned int npages, i = 0;
        int ret;

        if (buf->pages) {
                DRM_DEBUG_KMS("already allocated.\n");
                return -EINVAL;
        }

        pages = exynos_gem_get_pages(obj, GFP_KERNEL);
        if (IS_ERR(pages)) {
                DRM_ERROR("failed to get pages.\n");
                return PTR_ERR(pages);
        }

        npages = obj->size >> PAGE_SHIFT;
        buf->page_size = PAGE_SIZE;

        buf->sgt = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
        if (!buf->sgt) {
                DRM_ERROR("failed to allocate sg table.\n");
                ret = -ENOMEM;
                goto err;
        }

        ret = sg_alloc_table(buf->sgt, npages, GFP_KERNEL);
        if (ret < 0) {
                DRM_ERROR("failed to initialize sg table.\n");
                ret = -EFAULT;
                goto err1;
        }

        sgl = buf->sgt->sgl;

        /* set all pages to sg list. */
        while (i < npages) {
                sg_set_page(sgl, pages[i], PAGE_SIZE, 0);
                sg_dma_address(sgl) = page_to_phys(pages[i]);
                i++;
                sgl = sg_next(sgl);
        }

        /* TODO: handle the UNCACHED type here. */

        buf->pages = pages;

        return ret;

err1:
        kfree(buf->sgt);
        buf->sgt = NULL;
err:
        exynos_gem_put_pages(obj, pages, true, false);
        return ret;
}
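
/*
 * Once populated, buf->sgt describes every backing page; a device driver
 * could, for example, map the list for DMA (illustrative sketch, not done
 * here):
 *
 *        nents = dma_map_sg(dev, buf->sgt->sgl, npages, DMA_TO_DEVICE);
 */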
static void exynos_drm_gem_put_pages(struct drm_gem_object *obj)
{
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct exynos_drm_gem_buf *buf = exynos_gem_obj->buffer;

        /*
         * if the buffer type is EXYNOS_BO_NONCONTIG then release all pages
         * allocated at gem fault handler.
         */
        sg_free_table(buf->sgt);
        kfree(buf->sgt);
        buf->sgt = NULL;

        exynos_gem_put_pages(obj, buf->pages, true, false);
        buf->pages = NULL;

        /* TODO: handle the UNCACHED type here. */
}
static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
                                        struct drm_file *file_priv,
                                        unsigned int *handle)
{
        int ret;

        /*
         * allocate an id in the idr table where the obj is registered;
         * the handle holds that id so userspace can name the object.
         */
        ret = drm_gem_handle_create(file_priv, obj, handle);
        if (ret)
                return ret;

        DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);

        /* drop reference from allocate - handle holds it now. */
        drm_gem_object_unreference_unlocked(obj);

        return 0;
}
void exynos_drm_gem_destroy(struct exynos_drm_gem_obj *exynos_gem_obj)
{
        struct drm_gem_object *obj;
        struct exynos_drm_gem_buf *buf;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        obj = &exynos_gem_obj->base;
        buf = exynos_gem_obj->buffer;

        DRM_DEBUG_KMS("handle count = %d\n", atomic_read(&obj->handle_count));

        if (!buf->pages)
                return;

        if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG)
                exynos_drm_gem_put_pages(obj);
        else
                exynos_drm_free_buf(obj->dev, exynos_gem_obj->flags, buf);

        exynos_drm_fini_buf(obj->dev, buf);
        exynos_gem_obj->buffer = NULL;

        if (obj->map_list.map)
                drm_gem_free_mmap_offset(obj);

        /* release file pointer to gem object. */
        drm_gem_object_release(obj);

        kfree(exynos_gem_obj);
}
struct exynos_drm_gem_obj *exynos_drm_gem_init(struct drm_device *dev,
                                                      unsigned long size)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
        int ret;

        exynos_gem_obj = kzalloc(sizeof(*exynos_gem_obj), GFP_KERNEL);
        if (!exynos_gem_obj) {
                DRM_ERROR("failed to allocate exynos gem object\n");
                return NULL;
        }
        exynos_gem_obj->size = size;

        obj = &exynos_gem_obj->base;

        ret = drm_gem_object_init(dev, obj, size);
        if (ret < 0) {
                DRM_ERROR("failed to initialize gem object\n");
                kfree(exynos_gem_obj);
                return NULL;
        }

        DRM_DEBUG_KMS("created file object = %p\n", obj->filp);

        return exynos_gem_obj;
}
struct exynos_drm_gem_obj *exynos_drm_gem_create(struct drm_device *dev,
                                                unsigned int flags,
                                                unsigned long size)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct exynos_drm_gem_buf *buf;
        int ret;

        if (!size) {
                DRM_ERROR("invalid size.\n");
                return ERR_PTR(-EINVAL);
        }

        size = roundup_gem_size(size, flags);
        DRM_DEBUG_KMS("%s\n", __FILE__);

        ret = check_gem_flags(flags);
        if (ret)
                return ERR_PTR(ret);

        buf = exynos_drm_init_buf(dev, size);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        exynos_gem_obj = exynos_drm_gem_init(dev, size);
        if (!exynos_gem_obj) {
                ret = -ENOMEM;
                goto err_fini_buf;
        }

        exynos_gem_obj->buffer = buf;

        /* set memory type and cache attribute from user side. */
        exynos_gem_obj->flags = flags;

        /*
         * allocate all pages up front if userspace asked for
         * physically non-contiguous memory.
         */
        if (flags & EXYNOS_BO_NONCONTIG) {
                ret = exynos_drm_gem_get_pages(&exynos_gem_obj->base);
                if (ret < 0) {
                        drm_gem_object_release(&exynos_gem_obj->base);
                        goto err_fini_buf;
                }
        } else {
                ret = exynos_drm_alloc_buf(dev, buf, flags);
                if (ret < 0) {
                        drm_gem_object_release(&exynos_gem_obj->base);
                        goto err_fini_buf;
                }
        }

        return exynos_gem_obj;

err_fini_buf:
        exynos_drm_fini_buf(dev, buf);
        return ERR_PTR(ret);
}
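
/*
 * Illustrative in-kernel call (hypothetical caller, not in this file):
 * allocating a 1 MiB contiguous write-combined buffer would look like:
 *
 *        exynos_gem_obj = exynos_drm_gem_create(dev,
 *                                        EXYNOS_BO_CONTIG | EXYNOS_BO_WC,
 *                                        SZ_1M);
 *        if (IS_ERR(exynos_gem_obj))
 *                return PTR_ERR(exynos_gem_obj);
 */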
int exynos_drm_gem_create_ioctl(struct drm_device *dev, void *data,
                                struct drm_file *file_priv)
{
        struct drm_exynos_gem_create *args = data;
        struct exynos_drm_gem_obj *exynos_gem_obj;
        int ret;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
        if (IS_ERR(exynos_gem_obj))
                return PTR_ERR(exynos_gem_obj);

        ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
                        &args->handle);
        if (ret) {
                exynos_drm_gem_destroy(exynos_gem_obj);
                return ret;
        }

        return 0;
}
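
/*
 * Userspace view (illustrative sketch, assuming libdrm's drmIoctl): this
 * ioctl backs DRM_IOCTL_EXYNOS_GEM_CREATE, so a client might request a
 * page-sized noncontiguous buffer with:
 *
 *        struct drm_exynos_gem_create req = {
 *                .size = 4096,
 *                .flags = EXYNOS_BO_NONCONTIG,
 *        };
 *
 *        if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_CREATE, &req) == 0)
 *                bo_handle = req.handle;
 *
 * On success, req.handle names the new buffer object.
 */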
void *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
                                        unsigned int gem_handle,
                                        struct drm_file *file_priv)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return ERR_PTR(-EINVAL);
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
                DRM_DEBUG_KMS("not support NONCONTIG type.\n");
                drm_gem_object_unreference_unlocked(obj);

                /* TODO */
                return ERR_PTR(-EINVAL);
        }

        /*
         * on success the reference taken by the lookup above is kept;
         * it is dropped again by exynos_drm_gem_put_dma_addr().
         */
        return &exynos_gem_obj->buffer->dma_addr;
}
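
/*
 * Illustrative kernel-side pairing (hypothetical caller and helper, not in
 * this file):
 *
 *        dma_addr_t *addr;
 *
 *        addr = exynos_drm_gem_get_dma_addr(drm_dev, handle, file_priv);
 *        if (IS_ERR(addr))
 *                return PTR_ERR(addr);
 *
 *        program_device(*addr);        (hypothetical helper)
 *
 *        exynos_drm_gem_put_dma_addr(drm_dev, handle, file_priv);
 */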
void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
                                        unsigned int gem_handle,
                                        struct drm_file *file_priv)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;

        obj = drm_gem_object_lookup(dev, file_priv, gem_handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
                DRM_DEBUG_KMS("not support NONCONTIG type.\n");
                drm_gem_object_unreference_unlocked(obj);

                /* TODO */
                return;
        }

        drm_gem_object_unreference_unlocked(obj);

        /*
         * decrease obj->refcount one more time because we have already
         * increased it at exynos_drm_gem_get_dma_addr().
         */
        drm_gem_object_unreference_unlocked(obj);
}
int exynos_drm_gem_map_offset_ioctl(struct drm_device *dev, void *data,
                                    struct drm_file *file_priv)
{
        struct drm_exynos_gem_map_off *args = data;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        DRM_DEBUG_KMS("handle = 0x%x, offset = 0x%lx\n",
                        args->handle, (unsigned long)args->offset);

        if (!(dev->driver->driver_features & DRIVER_GEM)) {
                DRM_ERROR("does not support GEM.\n");
                return -ENODEV;
        }

        return exynos_drm_gem_dumb_map_offset(file_priv, dev, args->handle,
                        &args->offset);
}
static int exynos_drm_gem_mmap_buffer(struct file *filp,
                                      struct vm_area_struct *vma)
{
        struct drm_gem_object *obj = filp->private_data;
        struct exynos_drm_gem_obj *exynos_gem_obj = to_exynos_gem_obj(obj);
        struct exynos_drm_gem_buf *buffer;
        unsigned long pfn, vm_size, usize, uaddr = vma->vm_start;
        int ret;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        vma->vm_flags |= (VM_IO | VM_RESERVED);

        update_vm_cache_attr(exynos_gem_obj, vma);

        vm_size = usize = vma->vm_end - vma->vm_start;

        /*
         * the buffer points at the physically contiguous memory allocated
         * by user request or at framebuffer creation.
         */
        buffer = exynos_gem_obj->buffer;

        /* check if user-requested size is valid. */
        if (vm_size > buffer->size)
                return -EINVAL;

        if (exynos_gem_obj->flags & EXYNOS_BO_NONCONTIG) {
                int i = 0;

                if (!buffer->pages)
                        return -EINVAL;

                vma->vm_flags |= VM_MIXEDMAP;

                do {
                        ret = vm_insert_page(vma, uaddr, buffer->pages[i++]);
                        if (ret) {
                                DRM_ERROR("failed to remap user space.\n");
                                return ret;
                        }

                        uaddr += PAGE_SIZE;
                        usize -= PAGE_SIZE;
                } while (usize > 0);
        } else {
                /*
                 * get the page frame number of the physical memory to be
                 * mapped to user space.
                 */
                pfn = ((unsigned long)exynos_gem_obj->buffer->dma_addr) >>
                                                                PAGE_SHIFT;

                DRM_DEBUG_KMS("pfn = 0x%lx\n", pfn);

                if (remap_pfn_range(vma, vma->vm_start, pfn, vm_size,
                                    vma->vm_page_prot)) {
                        DRM_ERROR("failed to remap pfn range.\n");
                        return -EAGAIN;
                }
        }

        return 0;
}

static const struct file_operations exynos_drm_gem_fops = {
        .mmap = exynos_drm_gem_mmap_buffer,
};
int exynos_drm_gem_mmap_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_exynos_gem_mmap *args = data;
        struct drm_gem_object *obj;
        unsigned long addr;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        if (!(dev->driver->driver_features & DRIVER_GEM)) {
                DRM_ERROR("does not support GEM.\n");
                return -ENODEV;
        }

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                return -EINVAL;
        }

        obj->filp->f_op = &exynos_drm_gem_fops;
        obj->filp->private_data = obj;

        addr = vm_mmap(obj->filp, 0, args->size,
                        PROT_READ | PROT_WRITE, MAP_SHARED, 0);

        drm_gem_object_unreference_unlocked(obj);

        if (IS_ERR((void *)addr))
                return PTR_ERR((void *)addr);

        args->mapped = addr;

        DRM_DEBUG_KMS("mapped = 0x%lx\n", (unsigned long)args->mapped);

        return 0;
}
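
/*
 * Userspace view (illustrative sketch): DRM_IOCTL_EXYNOS_GEM_MMAP maps the
 * whole object into the client and returns the user address in 'mapped':
 *
 *        struct drm_exynos_gem_mmap mreq = { 0 };
 *
 *        mreq.handle = bo_handle;
 *        mreq.size = 4096;
 *        if (drmIoctl(fd, DRM_IOCTL_EXYNOS_GEM_MMAP, &mreq) == 0)
 *                memset((void *)(unsigned long)mreq.mapped, 0, 4096);
 */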
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_exynos_gem_info *args = data;
        struct drm_gem_object *obj;

        mutex_lock(&dev->struct_mutex);

        obj = drm_gem_object_lookup(dev, file_priv, args->handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                mutex_unlock(&dev->struct_mutex);
                return -EINVAL;
        }

        exynos_gem_obj = to_exynos_gem_obj(obj);

        args->flags = exynos_gem_obj->flags;
        args->size = exynos_gem_obj->size;

        drm_gem_object_unreference(obj);
        mutex_unlock(&dev->struct_mutex);

        return 0;
}
int exynos_drm_gem_init_object(struct drm_gem_object *obj)
{
        DRM_DEBUG_KMS("%s\n", __FILE__);

        return 0;
}
void exynos_drm_gem_free_object(struct drm_gem_object *obj)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct exynos_drm_gem_buf *buf;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        exynos_gem_obj = to_exynos_gem_obj(obj);
        buf = exynos_gem_obj->buffer;

        if (obj->import_attach)
                drm_prime_gem_destroy(obj, buf->sgt);

        exynos_drm_gem_destroy(to_exynos_gem_obj(obj));
}
int exynos_drm_gem_dumb_create(struct drm_file *file_priv,
                               struct drm_device *dev,
                               struct drm_mode_create_dumb *args)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        int ret;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        /*
         * allocate memory to be used for framebuffer.
         * - this callback is invoked by user applications through the
         *   DRM_IOCTL_MODE_CREATE_DUMB command.
         */
        args->pitch = args->width * args->bpp >> 3;
        args->size = PAGE_ALIGN(args->pitch * args->height);

        exynos_gem_obj = exynos_drm_gem_create(dev, args->flags, args->size);
        if (IS_ERR(exynos_gem_obj))
                return PTR_ERR(exynos_gem_obj);

        ret = exynos_drm_gem_handle_create(&exynos_gem_obj->base, file_priv,
                        &args->handle);
        if (ret) {
                exynos_drm_gem_destroy(exynos_gem_obj);
                return ret;
        }

        return 0;
}
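
/*
 * Worked example of the pitch/size math above: a 1024x768, 32 bpp dumb
 * buffer gives pitch = 1024 * 32 >> 3 = 4096 bytes and
 * size = PAGE_ALIGN(4096 * 768) = 3145728 bytes (already page aligned).
 */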
int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
                                   struct drm_device *dev, uint32_t handle,
                                   uint64_t *offset)
{
        struct drm_gem_object *obj;
        int ret = 0;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        mutex_lock(&dev->struct_mutex);

        /*
         * get offset of memory allocated for drm framebuffer.
         * - this callback is invoked by user applications through the
         *   DRM_IOCTL_MODE_MAP_DUMB command.
         */
        obj = drm_gem_object_lookup(dev, file_priv, handle);
        if (!obj) {
                DRM_ERROR("failed to lookup gem object.\n");
                ret = -EINVAL;
                goto unlock;
        }

        if (!obj->map_list.map) {
                ret = drm_gem_create_mmap_offset(obj);
                if (ret)
                        goto out;
        }

        *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
        DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);

out:
        drm_gem_object_unreference(obj);
unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
}
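
/*
 * Userspace then hands the returned fake offset to mmap() on the DRM fd
 * (illustrative sketch using the generic dumb-buffer ioctl):
 *
 *        struct drm_mode_map_dumb mreq = { .handle = bo_handle };
 *
 *        drmIoctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &mreq);
 *        ptr = mmap(NULL, bo_size, PROT_READ | PROT_WRITE, MAP_SHARED,
 *                   fd, mreq.offset);
 */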
int exynos_drm_gem_dumb_destroy(struct drm_file *file_priv,
                                struct drm_device *dev,
                                unsigned int handle)
{
        int ret;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        /*
         * obj->refcount and obj->handle_count are decreased and
         * if both of them reach 0 then exynos_drm_gem_free_object()
         * is called via the release callback to free resources.
         */
        ret = drm_gem_handle_delete(file_priv, handle);
        if (ret < 0) {
                DRM_ERROR("failed to delete drm_gem_handle.\n");
                return ret;
        }

        return 0;
}
int exynos_drm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct drm_gem_object *obj = vma->vm_private_data;
        struct drm_device *dev = obj->dev;
        unsigned long f_vaddr;
        pgoff_t page_offset;
        int ret;

        page_offset = ((unsigned long)vmf->virtual_address -
                        vma->vm_start) >> PAGE_SHIFT;
        f_vaddr = (unsigned long)vmf->virtual_address;

        mutex_lock(&dev->struct_mutex);

        ret = exynos_drm_gem_map_pages(obj, vma, f_vaddr, page_offset);
        if (ret < 0)
                DRM_ERROR("failed to map pages.\n");

        mutex_unlock(&dev->struct_mutex);

        return convert_to_vm_err_msg(ret);
}
int exynos_drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct exynos_drm_gem_obj *exynos_gem_obj;
        struct drm_gem_object *obj;
        int ret;

        DRM_DEBUG_KMS("%s\n", __FILE__);

        /* set vm_area_struct. */
        ret = drm_gem_mmap(filp, vma);
        if (ret < 0) {
                DRM_ERROR("failed to mmap.\n");
                return ret;
        }

        obj = vma->vm_private_data;
        exynos_gem_obj = to_exynos_gem_obj(obj);

        ret = check_gem_flags(exynos_gem_obj->flags);
        if (ret) {
                drm_gem_vm_close(vma);
                drm_gem_free_mmap_offset(obj);
                return ret;
        }

        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_flags |= VM_MIXEDMAP;

        update_vm_cache_attr(exynos_gem_obj, vma);

        return ret;
}