armada_gem.c

/*
 * Copyright (C) 2012 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/shmem_fs.h>
#include <drm/drmP.h>
#include "armada_drm.h"
#include "armada_gem.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"
static int armada_gem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct armada_gem_object *obj = drm_to_armada_gem(vma->vm_private_data);
	unsigned long addr = (unsigned long)vmf->virtual_address;
	unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
	int ret;

	pfn += (addr - vma->vm_start) >> PAGE_SHIFT;
	ret = vm_insert_pfn(vma, addr, pfn);

	switch (ret) {
	case 0:
	case -EBUSY:
		return VM_FAULT_NOPAGE;
	case -ENOMEM:
		return VM_FAULT_OOM;
	default:
		return VM_FAULT_SIGBUS;
	}
}

const struct vm_operations_struct armada_gem_vm_ops = {
	.fault	= armada_gem_vm_fault,
	.open	= drm_gem_vm_open,
	.close	= drm_gem_vm_close,
};

static size_t roundup_gem_size(size_t size)
{
	return roundup(size, PAGE_SIZE);
}
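
/*
 * Release a GEM object together with whichever backing it owns: a directly
 * allocated page group, a node in the driver's linear memory pool, or an
 * imported dma-buf attachment.
 */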
/* dev->struct_mutex is held here */
void armada_gem_free_object(struct drm_gem_object *obj)
{
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);

	DRM_DEBUG_DRIVER("release obj %p\n", dobj);

	drm_gem_free_mmap_offset(&dobj->obj);

	if (dobj->page) {
		/* page backed memory */
		unsigned int order = get_order(dobj->obj.size);
		__free_pages(dobj->page, order);
	} else if (dobj->linear) {
		/* linear backed memory */
		drm_mm_remove_node(dobj->linear);
		kfree(dobj->linear);
		if (dobj->addr)
			iounmap(dobj->addr);
	}

	if (dobj->obj.import_attach) {
		/* We only ever display imported data */
		dma_buf_unmap_attachment(dobj->obj.import_attach, dobj->sgt,
					 DMA_TO_DEVICE);
		drm_prime_gem_destroy(&dobj->obj, NULL);
	}

	drm_gem_object_release(&dobj->obj);

	kfree(dobj);
}
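
/*
 * Give a private (unbacked) object physically contiguous memory.  Small
 * allocations come straight from the page allocator; anything larger is
 * carved out of the driver's linear memory pool and cleared through a
 * temporary write-combining mapping.
 */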
int
armada_gem_linear_back(struct drm_device *dev, struct armada_gem_object *obj)
{
	struct armada_private *priv = dev->dev_private;
	size_t size = obj->obj.size;

	if (obj->page || obj->linear)
		return 0;

	/*
	 * If it is a small allocation (typically cursor, which will
	 * be 32x64 or 64x32 ARGB pixels) try to get it from the system.
	 * Framebuffers will never be this small (our minimum size for
	 * framebuffers is larger than this anyway.)  Such objects are
	 * only accessed by the CPU so we don't need any special handling
	 * here.
	 */
	if (size <= 8192) {
		unsigned int order = get_order(size);
		struct page *p = alloc_pages(GFP_KERNEL, order);

		if (p) {
			obj->addr = page_address(p);
			obj->phys_addr = page_to_phys(p);
			obj->page = p;

			memset(obj->addr, 0, PAGE_ALIGN(size));
		}
	}

	/*
	 * We could grab something from CMA if it's enabled, but that
	 * involves building in a problem:
	 *
	 * CMA's interface uses dma_alloc_coherent(), which provides us
	 * with a CPU virtual address and a device address.
	 *
	 * The CPU virtual address may be either an address in the kernel
	 * direct mapped region (for example, as it would be on x86) or
	 * it may be remapped into another part of kernel memory space
	 * (eg, as it would be on ARM.)  This means virt_to_phys() on the
	 * returned virtual address is invalid depending on the architecture
	 * implementation.
	 *
	 * The device address may also not be a physical address; it may
	 * be that there is some kind of remapping between the device and
	 * system RAM, which makes the use of the device address also
	 * unsafe to re-use as a physical address.
	 *
	 * This makes DRM usage of dma_alloc_coherent() in a generic way
	 * at best very questionable and unsafe.
	 */

	/* Otherwise, grab it from our linear allocation */
	if (!obj->page) {
		struct drm_mm_node *node;
		unsigned align = min_t(unsigned, size, SZ_2M);
		void __iomem *ptr;
		int ret;

		node = kzalloc(sizeof(*node), GFP_KERNEL);
		if (!node)
			return -ENOSPC;

		mutex_lock(&dev->struct_mutex);
		ret = drm_mm_insert_node(&priv->linear, node, size, align,
					 DRM_MM_SEARCH_DEFAULT);
		mutex_unlock(&dev->struct_mutex);
		if (ret) {
			kfree(node);
			return ret;
		}

		obj->linear = node;

		/* Ensure that the memory we're returning is cleared. */
		ptr = ioremap_wc(obj->linear->start, size);
		if (!ptr) {
			mutex_lock(&dev->struct_mutex);
			drm_mm_remove_node(obj->linear);
			mutex_unlock(&dev->struct_mutex);
			kfree(obj->linear);
			obj->linear = NULL;
			return -ENOMEM;
		}

		memset_io(ptr, 0, size);
		iounmap(ptr);

		obj->phys_addr = obj->linear->start;
		obj->dev_addr = obj->linear->start;
	}

	DRM_DEBUG_DRIVER("obj %p phys %#x dev %#x\n",
			 obj, obj->phys_addr, obj->dev_addr);

	return 0;
}

void *
armada_gem_map_object(struct drm_device *dev, struct armada_gem_object *dobj)
{
	/* only linear objects need to be ioremap'd */
	if (!dobj->addr && dobj->linear)
		dobj->addr = ioremap_wc(dobj->phys_addr, dobj->obj.size);
	return dobj->addr;
}
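
/*
 * Allocate a GEM object with no shmem backing.  Such objects start out with
 * no memory at all; dev_addr is initialised to DMA_ERROR_CODE so an object
 * which has not yet been given linear backing or an imported dma-buf mapping
 * can be recognised.
 */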
struct armada_gem_object *
armada_gem_alloc_private_object(struct drm_device *dev, size_t size)
{
	struct armada_gem_object *obj;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	drm_gem_private_object_init(dev, &obj->obj, size);
	obj->dev_addr = DMA_ERROR_CODE;

	DRM_DEBUG_DRIVER("alloc private obj %p size %zu\n", obj, size);

	return obj;
}
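
/*
 * Allocate a shmem-backed GEM object.  The backing pages can be mapped into
 * userspace via the GEM mmap ioctl and exported through prime; the mapping's
 * GFP mask is relaxed to GFP_HIGHUSER | __GFP_RECLAIMABLE so the pages may
 * live in highmem and remain reclaimable.
 */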
struct armada_gem_object *armada_gem_alloc_object(struct drm_device *dev,
	size_t size)
{
	struct armada_gem_object *obj;
	struct address_space *mapping;

	size = roundup_gem_size(size);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return NULL;

	if (drm_gem_object_init(dev, &obj->obj, size)) {
		kfree(obj);
		return NULL;
	}

	obj->dev_addr = DMA_ERROR_CODE;

	mapping = obj->obj.filp->f_path.dentry->d_inode->i_mapping;
	mapping_set_gfp_mask(mapping, GFP_HIGHUSER | __GFP_RECLAIMABLE);

	DRM_DEBUG_DRIVER("alloc obj %p size %zu\n", obj, size);

	return obj;
}

/* Dumb alloc support */
int armada_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
	struct drm_mode_create_dumb *args)
{
	struct armada_gem_object *dobj;
	u32 handle;
	size_t size;
	int ret;

	args->pitch = armada_pitch(args->width, args->bpp);
	args->size = size = args->pitch * args->height;

	dobj = armada_gem_alloc_private_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = armada_gem_linear_back(dev, dobj);
	if (ret)
		goto err;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_unreference_unlocked(&dobj->obj);
	return ret;
}
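
/*
 * armada_gem_dumb_map_offset() backs the DRM "map dumb" ioctl: it creates
 * (or reuses) the fake mmap offset for the object and hands it back so
 * userspace can mmap() the buffer through the DRM device node.  Imported
 * (prime) objects are refused since we have no pages of our own to map.
 *
 * Roughly, the intended userspace sequence looks like this (an illustrative
 * sketch of the generic dumb-buffer API, not text taken from this driver):
 *
 *	struct drm_mode_create_dumb create = {
 *		.width = width, .height = height, .bpp = 32,
 *	};
 *	ioctl(fd, DRM_IOCTL_MODE_CREATE_DUMB, &create);
 *
 *	struct drm_mode_map_dumb map = { .handle = create.handle };
 *	ioctl(fd, DRM_IOCTL_MODE_MAP_DUMB, &map);
 *
 *	void *fb = mmap(NULL, create.size, PROT_READ | PROT_WRITE,
 *			MAP_SHARED, fd, map.offset);
 */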
int armada_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
	uint32_t handle, uint64_t *offset)
{
	struct armada_gem_object *obj;
	int ret = 0;

	mutex_lock(&dev->struct_mutex);
	obj = armada_gem_object_lookup(dev, file, handle);
	if (!obj) {
		DRM_ERROR("failed to lookup gem object\n");
		ret = -EINVAL;
		goto err_unlock;
	}

	/* Don't allow imported objects to be mapped */
	if (obj->obj.import_attach) {
		ret = -EINVAL;
		goto err_unlock;
	}

	ret = drm_gem_create_mmap_offset(&obj->obj);
	if (ret == 0) {
		*offset = drm_vma_node_offset_addr(&obj->obj.vma_node);
		DRM_DEBUG_DRIVER("handle %#x offset %llx\n", handle, *offset);
	}

	drm_gem_object_unreference(&obj->obj);
 err_unlock:
	mutex_unlock(&dev->struct_mutex);

	return ret;
}

int armada_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
	uint32_t handle)
{
	return drm_gem_handle_delete(file, handle);
}

/* Private driver gem ioctls */
int armada_gem_create_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_create *args = data;
	struct armada_gem_object *dobj;
	size_t size;
	u32 handle;
	int ret;

	if (args->size == 0)
		return -ENOMEM;

	size = args->size;

	dobj = armada_gem_alloc_object(dev, size);
	if (dobj == NULL)
		return -ENOMEM;

	ret = drm_gem_handle_create(file, &dobj->obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;

	/* drop reference from allocate - handle holds it now */
	DRM_DEBUG_DRIVER("obj %p size %zu handle %#x\n", dobj, size, handle);
 err:
	drm_gem_object_unreference_unlocked(&dobj->obj);
	return ret;
}

/* Map a shmem-backed object into process memory space */
int armada_gem_mmap_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_mmap *args = data;
	struct armada_gem_object *dobj;
	unsigned long addr;

	dobj = armada_gem_object_lookup(dev, file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	if (!dobj->obj.filp) {
		drm_gem_object_unreference(&dobj->obj);
		return -EINVAL;
	}

	addr = vm_mmap(dobj->obj.filp, 0, args->size, PROT_READ | PROT_WRITE,
		       MAP_SHARED, args->offset);
	drm_gem_object_unreference(&dobj->obj);
	if (IS_ERR_VALUE(addr))
		return addr;

	args->addr = addr;

	return 0;
}
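
/*
 * Write userspace data into a kernel-mapped object (pwrite).  The object
 * must already have a kernel mapping (page- or linear-backed); the data is
 * copied in with copy_from_user() and, on success, the object's optional
 * update callback is invoked so its consumer can react to the new contents.
 */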
int armada_gem_pwrite_ioctl(struct drm_device *dev, void *data,
	struct drm_file *file)
{
	struct drm_armada_gem_pwrite *args = data;
	struct armada_gem_object *dobj;
	char __user *ptr;
	int ret;

	DRM_DEBUG_DRIVER("handle %u off %u size %u ptr 0x%llx\n",
		args->handle, args->offset, args->size, args->ptr);

	if (args->size == 0)
		return 0;

	ptr = (char __user *)(uintptr_t)args->ptr;

	if (!access_ok(VERIFY_READ, ptr, args->size))
		return -EFAULT;

	ret = fault_in_multipages_readable(ptr, args->size);
	if (ret)
		return ret;

	dobj = armada_gem_object_lookup(dev, file, args->handle);
	if (dobj == NULL)
		return -ENOENT;

	/* Must be a kernel-mapped object */
	if (!dobj->addr)
		return -EINVAL;

	if (args->offset > dobj->obj.size ||
	    args->size > dobj->obj.size - args->offset) {
		DRM_ERROR("invalid size: object size %u\n", dobj->obj.size);
		ret = -EINVAL;
		goto unref;
	}

	if (copy_from_user(dobj->addr + args->offset, ptr, args->size)) {
		ret = -EFAULT;
	} else if (dobj->update) {
		dobj->update(dobj->update_data);
		ret = 0;
	}

 unref:
	drm_gem_object_unreference_unlocked(&dobj->obj);
	return ret;
}

/* Prime support */
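/*
 * Build a scatter/gather table for an exported object.  Three backing types
 * are handled here: shmem objects have their pages read in and DMA-mapped
 * page by page, page-backed objects map their single contiguous page group,
 * and linear-backed objects have no struct pages at all, so the DMA address
 * and length are filled in directly from the reserved region.
 */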
struct sg_table *
armada_gem_prime_map_dma_buf(struct dma_buf_attachment *attach,
	enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	struct scatterlist *sg;
	struct sg_table *sgt;
	int i, num;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		return NULL;

	if (dobj->obj.filp) {
		struct address_space *mapping;
		gfp_t gfp;
		int count;

		count = dobj->obj.size / PAGE_SIZE;
		if (sg_alloc_table(sgt, count, GFP_KERNEL))
			goto free_sgt;

		mapping = file_inode(dobj->obj.filp)->i_mapping;
		gfp = mapping_gfp_mask(mapping);

		for_each_sg(sgt->sgl, sg, count, i) {
			struct page *page;

			page = shmem_read_mapping_page_gfp(mapping, i, gfp);
			if (IS_ERR(page)) {
				num = i;
				goto release;
			}

			sg_set_page(sg, page, PAGE_SIZE, 0);
		}

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0) {
			num = sgt->nents;
			goto release;
		}
	} else if (dobj->page) {
		/* Single contiguous page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;

		sg_set_page(sgt->sgl, dobj->page, dobj->obj.size, 0);

		if (dma_map_sg(attach->dev, sgt->sgl, sgt->nents, dir) == 0)
			goto free_table;
	} else if (dobj->linear) {
		/* Single contiguous physical region - no struct page */
		if (sg_alloc_table(sgt, 1, GFP_KERNEL))
			goto free_sgt;

		sg_dma_address(sgt->sgl) = dobj->dev_addr;
		sg_dma_len(sgt->sgl) = dobj->obj.size;
	} else {
		goto free_sgt;
	}
	return sgt;

 release:
	for_each_sg(sgt->sgl, sg, num, i)
		page_cache_release(sg_page(sg));
 free_table:
	sg_free_table(sgt);
 free_sgt:
	kfree(sgt);
	return NULL;
}

static void armada_gem_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
	struct sg_table *sgt, enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attach->dmabuf->priv;
	struct armada_gem_object *dobj = drm_to_armada_gem(obj);
	int i;

	if (!dobj->linear)
		dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);

	if (dobj->obj.filp) {
		struct scatterlist *sg;
		for_each_sg(sgt->sgl, sg, sgt->nents, i)
			page_cache_release(sg_page(sg));
	}

	sg_free_table(sgt);
	kfree(sgt);
}

static void *armada_gem_dmabuf_no_kmap(struct dma_buf *buf, unsigned long n)
{
	return NULL;
}

static void
armada_gem_dmabuf_no_kunmap(struct dma_buf *buf, unsigned long n, void *addr)
{
}

static int
armada_gem_dmabuf_mmap(struct dma_buf *buf, struct vm_area_struct *vma)
{
	return -EINVAL;
}

static const struct dma_buf_ops armada_gem_prime_dmabuf_ops = {
	.map_dma_buf	= armada_gem_prime_map_dma_buf,
	.unmap_dma_buf	= armada_gem_prime_unmap_dma_buf,
	.release	= drm_gem_dmabuf_release,
	.kmap_atomic	= armada_gem_dmabuf_no_kmap,
	.kunmap_atomic	= armada_gem_dmabuf_no_kunmap,
	.kmap		= armada_gem_dmabuf_no_kmap,
	.kunmap		= armada_gem_dmabuf_no_kunmap,
	.mmap		= armada_gem_dmabuf_mmap,
};

struct dma_buf *
armada_gem_prime_export(struct drm_device *dev, struct drm_gem_object *obj,
	int flags)
{
	return dma_buf_export(obj, &armada_gem_prime_dmabuf_ops, obj->size,
			      O_RDWR);
}
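
/*
 * Import a dma-buf.  If the buffer is one of our own exports for this
 * device, we simply take another reference on the underlying GEM object.
 * Otherwise we attach to the dma-buf but defer mapping it until
 * armada_gem_map_import() is called, since mapping for DMA at import time
 * is not always appropriate.
 */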
struct drm_gem_object *
armada_gem_prime_import(struct drm_device *dev, struct dma_buf *buf)
{
	struct dma_buf_attachment *attach;
	struct armada_gem_object *dobj;

	if (buf->ops == &armada_gem_prime_dmabuf_ops) {
		struct drm_gem_object *obj = buf->priv;
		if (obj->dev == dev) {
			/*
			 * Importing our own dmabuf(s) increases the
			 * refcount on the gem object itself.
			 */
			drm_gem_object_reference(obj);
			dma_buf_put(buf);
			return obj;
		}
	}

	attach = dma_buf_attach(buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	dobj = armada_gem_alloc_private_object(dev, buf->size);
	if (!dobj) {
		dma_buf_detach(buf, attach);
		return ERR_PTR(-ENOMEM);
	}

	dobj->obj.import_attach = attach;

	/*
	 * Don't call dma_buf_map_attachment() here - it maps the
	 * scatterlist immediately for DMA, and this is not always
	 * an appropriate thing to do.
	 */
	return &dobj->obj;
}
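
/*
 * Map a previously imported dma-buf for DMA.  Scanout needs a single
 * physically contiguous region, so the mapping is rejected unless the
 * returned scatterlist has exactly one entry covering the whole object;
 * its DMA address then becomes the object's dev_addr.
 */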
int armada_gem_map_import(struct armada_gem_object *dobj)
{
	int ret;

	dobj->sgt = dma_buf_map_attachment(dobj->obj.import_attach,
					  DMA_TO_DEVICE);
	if (!dobj->sgt) {
		DRM_ERROR("dma_buf_map_attachment() returned NULL\n");
		return -EINVAL;
	}
	if (IS_ERR(dobj->sgt)) {
		ret = PTR_ERR(dobj->sgt);
		dobj->sgt = NULL;
		DRM_ERROR("dma_buf_map_attachment() error: %d\n", ret);
		return ret;
	}
	if (dobj->sgt->nents > 1) {
		DRM_ERROR("dma_buf_map_attachment() returned an (unsupported) scattered list\n");
		return -EINVAL;
	}
	if (sg_dma_len(dobj->sgt->sgl) < dobj->obj.size) {
		DRM_ERROR("dma_buf_map_attachment() returned a small buffer\n");
		return -EINVAL;
	}
	dobj->dev_addr = sg_dma_address(dobj->sgt->sgl);
	return 0;
}