videobuf2-dma-contig.c

/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>
struct vb2_dc_conf {
        struct device                   *dev;
};

struct vb2_dc_buf {
        struct device                   *dev;
        void                            *vaddr;
        unsigned long                   size;
        dma_addr_t                      dma_addr;
        enum dma_data_direction         dma_dir;
        struct sg_table                 *dma_sgt;

        /* MMAP related */
        struct vb2_vmarea_handler       handler;
        atomic_t                        refcount;
        struct sg_table                 *sgt_base;

        /* USERPTR related */
        struct vm_area_struct           *vma;

        /* DMABUF related */
        struct dma_buf_attachment       *db_attach;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/
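/*
 * Invoke @cb on every page backing the scatterlist. The walk uses
 * orig_nents, i.e. the CPU view of the table, independent of how many
 * entries an IOMMU may have coalesced the DMA mapping into.
 */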
static void vb2_dc_sgt_foreach_page(struct sg_table *sgt,
        void (*cb)(struct page *pg))
{
        struct scatterlist *s;
        unsigned int i;

        for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
                struct page *page = sg_page(s);
                unsigned int n_pages = PAGE_ALIGN(s->offset + s->length)
                        >> PAGE_SHIFT;
                unsigned int j;

                for (j = 0; j < n_pages; ++j, ++page)
                        cb(page);
        }
}
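/*
 * Return the size of the physically contiguous run at the start of a
 * DMA-mapped scatterlist: walk the mapped entries and stop at the first
 * gap in the bus addresses.
 */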
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
        struct scatterlist *s;
        dma_addr_t expected = sg_dma_address(sgt->sgl);
        unsigned int i;
        unsigned long size = 0;

        for_each_sg(sgt->sgl, s, sgt->nents, i) {
                if (sg_dma_address(s) != expected)
                        break;
                expected = sg_dma_address(s) + sg_dma_len(s);
                size += sg_dma_len(s);
        }
        return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/
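/*
 * The "cookie" of a dma-contig buffer is a pointer to its bus address.
 * Drivers typically reach it through the vb2_dma_contig_plane_dma_addr()
 * helper from <media/videobuf2-dma-contig.h>, which dereferences the
 * pointer returned here.
 */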
static void *vb2_dc_cookie(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return &buf->dma_addr;
}

static void *vb2_dc_vaddr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        return atomic_read(&buf->refcount);
}
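/*
 * Cache synchronization around DMA. Only USERPTR buffers are synced here:
 * MMAP buffers have no dma_sgt (they come from dma_alloc_coherent() and
 * need no sync), and imported DMABUF buffers (db_attach set) are left to
 * their exporter.
 */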
static void vb2_dc_prepare(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (!sgt || buf->db_attach)
                return;

        dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        /* DMABUF exporter will flush the cache for us */
        if (!sgt || buf->db_attach)
                return;

        dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/
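/*
 * Drop one reference; on the last one free the exported sg table (if any),
 * return the coherent memory to the DMA API and release the device
 * reference taken in vb2_dc_alloc().
 */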
static void vb2_dc_put(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;

        if (!atomic_dec_and_test(&buf->refcount))
                return;

        if (buf->sgt_base) {
                sg_free_table(buf->sgt_base);
                kfree(buf->sgt_base);
        }
        dma_free_coherent(buf->dev, buf->size, buf->vaddr, buf->dma_addr);
        put_device(buf->dev);
        kfree(buf);
}
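/*
 * MMAP allocation: a single page-aligned dma_alloc_coherent() chunk, with
 * a vmarea handler wired up so that the last munmap() of the buffer drops
 * its reference via vb2_dc_put().
 */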
static void *vb2_dc_alloc(void *alloc_ctx, unsigned long size)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct device *dev = conf->dev;
        struct vb2_dc_buf *buf;

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        /* align image size to PAGE_SIZE */
        size = PAGE_ALIGN(size);

        buf->vaddr = dma_alloc_coherent(dev, size, &buf->dma_addr, GFP_KERNEL);
        if (!buf->vaddr) {
                dev_err(dev, "dma_alloc_coherent of size %ld failed\n", size);
                kfree(buf);
                return ERR_PTR(-ENOMEM);
        }

        /* Prevent the device from being released while the buffer is used */
        buf->dev = get_device(dev);
        buf->size = size;

        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_dc_put;
        buf->handler.arg = buf;

        atomic_inc(&buf->refcount);

        return buf;
}
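/*
 * Map the whole buffer into userspace with dma_mmap_coherent() and install
 * vb2_common_vm_ops, so the mapping itself holds a reference on the buffer
 * through the vmarea handler.
 */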
static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_dc_buf *buf = buf_priv;
        int ret;

        if (!buf) {
                printk(KERN_ERR "No buffer to map\n");
                return -EINVAL;
        }

        /*
         * dma_mmap_* uses vm_pgoff as in-buffer offset, but we want to
         * map whole buffer
         */
        vma->vm_pgoff = 0;

        ret = dma_mmap_coherent(buf->dev, vma, buf->vaddr,
                buf->dma_addr, buf->size);
        if (ret) {
                pr_err("Remapping memory failed, error: %d\n", ret);
                return ret;
        }

        vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
        vma->vm_private_data = &buf->handler;
        vma->vm_ops = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %ld\n",
                __func__, (unsigned long)buf->dma_addr, vma->vm_start,
                buf->size);

        return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
        struct sg_table sgt;
        enum dma_data_direction dir;
};
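/*
 * Give each importer its own copy of the base sg table, since the same
 * scatterlist cannot be DMA-mapped for several devices at once;
 * dir == DMA_NONE marks an attachment as not yet mapped.
 */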
static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
        struct dma_buf_attachment *dbuf_attach)
{
        struct vb2_dc_attachment *attach;
        unsigned int i;
        struct scatterlist *rd, *wr;
        struct sg_table *sgt;
        struct vb2_dc_buf *buf = dbuf->priv;
        int ret;

        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
        if (!attach)
                return -ENOMEM;

        sgt = &attach->sgt;
        /* Copy the buf->sgt_base scatter list to the attachment, as we can't
         * map the same scatter list to multiple attachments at the same time.
         */
        ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
        if (ret) {
                kfree(attach);
                return -ENOMEM;
        }

        rd = buf->sgt_base->sgl;
        wr = sgt->sgl;
        for (i = 0; i < sgt->orig_nents; ++i) {
                sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
                rd = sg_next(rd);
                wr = sg_next(wr);
        }

        attach->dir = DMA_NONE;
        dbuf_attach->priv = attach;

        return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
        struct dma_buf_attachment *db_attach)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        struct sg_table *sgt;

        if (!attach)
                return;

        sgt = &attach->sgt;

        /* release the scatterlist cache */
        if (attach->dir != DMA_NONE)
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dir);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
}
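/*
 * DMA-map the attachment's sg table copy for the importer, caching the
 * result: a repeated map in the same direction returns the cached table,
 * while a map in a new direction unmaps the old one first.
 */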
static struct sg_table *vb2_dc_dmabuf_ops_map(
        struct dma_buf_attachment *db_attach, enum dma_data_direction dir)
{
        struct vb2_dc_attachment *attach = db_attach->priv;
        /* stealing dmabuf mutex to serialize map/unmap operations */
        struct mutex *lock = &db_attach->dmabuf->lock;
        struct sg_table *sgt;
        int ret;

        mutex_lock(lock);

        sgt = &attach->sgt;
        /* return previously mapped sg table */
        if (attach->dir == dir) {
                mutex_unlock(lock);
                return sgt;
        }

        /* release any previous cache */
        if (attach->dir != DMA_NONE) {
                dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
                        attach->dir);
                attach->dir = DMA_NONE;
        }

        /* mapping to the client with new direction */
        ret = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents, dir);
        if (ret <= 0) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);
                return ERR_PTR(-EIO);
        }

        attach->dir = dir;

        mutex_unlock(lock);

        return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
        struct sg_table *sgt, enum dma_data_direction dir)
{
        /* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
        /* drop reference obtained in vb2_dc_get_dmabuf */
        vb2_dc_put(dbuf->priv);
}

static void *vb2_dc_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
        struct vb2_dc_buf *buf = dbuf->priv;

        return buf->vaddr + pgnum * PAGE_SIZE;
}

static void *vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
        struct vb2_dc_buf *buf = dbuf->priv;

        return buf->vaddr;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
        struct vm_area_struct *vma)
{
        return vb2_dc_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dc_dmabuf_ops = {
        .attach         = vb2_dc_dmabuf_ops_attach,
        .detach         = vb2_dc_dmabuf_ops_detach,
        .map_dma_buf    = vb2_dc_dmabuf_ops_map,
        .unmap_dma_buf  = vb2_dc_dmabuf_ops_unmap,
        .kmap           = vb2_dc_dmabuf_ops_kmap,
        .kmap_atomic    = vb2_dc_dmabuf_ops_kmap,
        .vmap           = vb2_dc_dmabuf_ops_vmap,
        .mmap           = vb2_dc_dmabuf_ops_mmap,
        .release        = vb2_dc_dmabuf_ops_release,
};
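/*
 * Build an sg table describing the coherent allocation. dma_get_sgtable()
 * is used so that the pages in the table are the ones the DMA API actually
 * backed the buffer with.
 */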
static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
        int ret;
        struct sg_table *sgt;

        sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                dev_err(buf->dev, "failed to alloc sg table\n");
                return NULL;
        }

        ret = dma_get_sgtable(buf->dev, sgt, buf->vaddr, buf->dma_addr,
                buf->size);
        if (ret < 0) {
                dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
                kfree(sgt);
                return NULL;
        }

        return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct dma_buf *dbuf;

        if (!buf->sgt_base)
                buf->sgt_base = vb2_dc_get_base_sgt(buf);

        if (WARN_ON(!buf->sgt_base))
                return NULL;

        dbuf = dma_buf_export(buf, &vb2_dc_dmabuf_ops, buf->size, 0);
        if (IS_ERR(dbuf))
                return NULL;

        /* dmabuf keeps reference to vb2 buffer */
        atomic_inc(&buf->refcount);

        return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static inline int vma_is_io(struct vm_area_struct *vma)
{
        return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}
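/*
 * Collect the pages behind a user mapping. For VM_IO/VM_PFNMAP areas the
 * pfns are resolved with follow_pfn() and no page reference is taken; for
 * ordinary mappings the pages are pinned with get_user_pages() and must be
 * released with put_page() later.
 */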
static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
        int n_pages, struct vm_area_struct *vma, int write)
{
        if (vma_is_io(vma)) {
                unsigned int i;

                for (i = 0; i < n_pages; ++i, start += PAGE_SIZE) {
                        unsigned long pfn;
                        int ret = follow_pfn(vma, start, &pfn);

                        if (ret) {
                                pr_err("no page for address %lu\n", start);
                                return ret;
                        }
                        pages[i] = pfn_to_page(pfn);
                }
        } else {
                int n;

                n = get_user_pages(current, current->mm, start & PAGE_MASK,
                        n_pages, write, 1, pages, NULL);
                /* negative error means that no page was pinned */
                n = max(n, 0);
                if (n != n_pages) {
                        pr_err("got only %d of %d user pages\n", n, n_pages);
                        while (n)
                                put_page(pages[--n]);
                        return -EFAULT;
                }
        }

        return 0;
}

static void vb2_dc_put_dirty_page(struct page *page)
{
        set_page_dirty_lock(page);
        put_page(page);
}

static void vb2_dc_put_userptr(void *buf_priv)
{
        struct vb2_dc_buf *buf = buf_priv;
        struct sg_table *sgt = buf->dma_sgt;

        dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
        if (!vma_is_io(buf->vma))
                vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
        sg_free_table(sgt);
        kfree(sgt);
        vb2_put_vma(buf->vma);
        kfree(buf);
}
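/*
 * USERPTR setup: pin the user pages, build an sg table from them, map it
 * for DMA, then verify that the resulting bus addresses form a single
 * contiguous chunk covering the whole buffer; otherwise bail out, since
 * this allocator can only hand one dma_addr to the device.
 */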
static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
        unsigned long size, int write)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct vb2_dc_buf *buf;
        unsigned long start;
        unsigned long end;
        unsigned long offset;
        struct page **pages;
        int n_pages;
        int ret = 0;
        struct vm_area_struct *vma;
        struct sg_table *sgt;
        unsigned long contig_size;
        unsigned long dma_align = dma_get_cache_alignment();

        /* Only cache-aligned DMA transfers are reliable */
        if (!IS_ALIGNED(vaddr | size, dma_align)) {
                pr_debug("user data must be aligned to %lu bytes\n", dma_align);
                return ERR_PTR(-EINVAL);
        }

        if (!size) {
                pr_debug("size is zero\n");
                return ERR_PTR(-EINVAL);
        }

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = conf->dev;
        buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;

        start = vaddr & PAGE_MASK;
        offset = vaddr & ~PAGE_MASK;
        end = PAGE_ALIGN(vaddr + size);
        n_pages = (end - start) >> PAGE_SHIFT;

        pages = kmalloc(n_pages * sizeof(pages[0]), GFP_KERNEL);
        if (!pages) {
                ret = -ENOMEM;
                pr_err("failed to allocate pages table\n");
                goto fail_buf;
        }

        /* current->mm->mmap_sem is taken by videobuf2 core */
        vma = find_vma(current->mm, vaddr);
        if (!vma) {
                pr_err("no vma for address %lu\n", vaddr);
                ret = -EFAULT;
                goto fail_pages;
        }

        if (vma->vm_end < vaddr + size) {
                pr_err("vma at %lu is too small for %lu bytes\n", vaddr, size);
                ret = -EFAULT;
                goto fail_pages;
        }

        buf->vma = vb2_get_vma(vma);
        if (!buf->vma) {
                pr_err("failed to copy vma\n");
                ret = -ENOMEM;
                goto fail_pages;
        }

        /* extract page list from userspace mapping */
        ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
        if (ret) {
                pr_err("failed to get user pages\n");
                goto fail_vma;
        }

        sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
        if (!sgt) {
                pr_err("failed to allocate sg table\n");
                ret = -ENOMEM;
                goto fail_get_user_pages;
        }

        ret = sg_alloc_table_from_pages(sgt, pages, n_pages,
                offset, size, GFP_KERNEL);
        if (ret) {
                pr_err("failed to initialize sg table\n");
                goto fail_sgt;
        }

        /* pages are no longer needed */
        kfree(pages);
        pages = NULL;

        sgt->nents = dma_map_sg(buf->dev, sgt->sgl, sgt->orig_nents,
                buf->dma_dir);
        if (sgt->nents <= 0) {
                pr_err("failed to map scatterlist\n");
                ret = -EIO;
                goto fail_sgt_init;
        }

        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < size) {
                pr_err("contiguous mapping is too small %lu/%lu\n",
                        contig_size, size);
                ret = -EFAULT;
                goto fail_map_sg;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->size = size;
        buf->dma_sgt = sgt;

        return buf;

fail_map_sg:
        dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);

fail_sgt_init:
        if (!vma_is_io(buf->vma))
                vb2_dc_sgt_foreach_page(sgt, put_page);
        sg_free_table(sgt);

fail_sgt:
        kfree(sgt);

fail_get_user_pages:
        if (pages && !vma_is_io(buf->vma))
                while (n_pages)
                        put_page(pages[--n_pages]);

fail_vma:
        vb2_put_vma(buf->vma);

fail_pages:
        kfree(pages); /* kfree is NULL-proof */

fail_buf:
        kfree(buf);

        return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/
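/*
 * Pin an imported dma-buf: map the attachment and check that the exporter
 * really provided a contiguous chunk large enough for the whole buffer.
 */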
static int vb2_dc_map_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt;
        unsigned long contig_size;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to pin a non-attached buffer\n");
                return -EINVAL;
        }

        if (WARN_ON(buf->dma_sgt)) {
                pr_err("dmabuf buffer is already pinned\n");
                return 0;
        }

        /* get the associated scatterlist for this buffer */
        sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
        if (IS_ERR_OR_NULL(sgt)) {
                pr_err("Error getting dmabuf scatterlist\n");
                return -EINVAL;
        }

        /* checking if dmabuf is big enough to store contiguous chunk */
        contig_size = vb2_dc_get_contiguous_size(sgt);
        if (contig_size < buf->size) {
                pr_err("contiguous chunk is too small %lu/%lu b\n",
                        contig_size, buf->size);
                dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);
                return -EFAULT;
        }

        buf->dma_addr = sg_dma_address(sgt->sgl);
        buf->dma_sgt = sgt;

        return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;
        struct sg_table *sgt = buf->dma_sgt;

        if (WARN_ON(!buf->db_attach)) {
                pr_err("trying to unpin a non-attached buffer\n");
                return;
        }

        if (WARN_ON(!sgt)) {
                pr_err("dmabuf buffer is already unpinned\n");
                return;
        }

        dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

        buf->dma_addr = 0;
        buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
        struct vb2_dc_buf *buf = mem_priv;

        /* if vb2 works correctly you should never detach a mapped buffer */
        if (WARN_ON(buf->dma_addr))
                vb2_dc_unmap_dmabuf(buf);

        /* detach this attachment */
        dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
        kfree(buf);
}
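/*
 * Import a dma-buf: attach the consumer device to it now, but defer the
 * actual mapping until vb2_dc_map_dmabuf() is called by the vb2 core.
 */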
static void *vb2_dc_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
        unsigned long size, int write)
{
        struct vb2_dc_conf *conf = alloc_ctx;
        struct vb2_dc_buf *buf;
        struct dma_buf_attachment *dba;

        if (dbuf->size < size)
                return ERR_PTR(-EFAULT);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dev = conf->dev;
        /* create attachment for the dmabuf with the user device */
        dba = dma_buf_attach(dbuf, buf->dev);
        if (IS_ERR(dba)) {
                pr_err("failed to attach dmabuf\n");
                kfree(buf);
                return dba;
        }

        buf->dma_dir = write ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
        buf->size = size;
        buf->db_attach = dba;

        return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
        .alloc          = vb2_dc_alloc,
        .put            = vb2_dc_put,
        .get_dmabuf     = vb2_dc_get_dmabuf,
        .cookie         = vb2_dc_cookie,
        .vaddr          = vb2_dc_vaddr,
        .mmap           = vb2_dc_mmap,
        .get_userptr    = vb2_dc_get_userptr,
        .put_userptr    = vb2_dc_put_userptr,
        .prepare        = vb2_dc_prepare,
        .finish         = vb2_dc_finish,
        .map_dmabuf     = vb2_dc_map_dmabuf,
        .unmap_dmabuf   = vb2_dc_unmap_dmabuf,
        .attach_dmabuf  = vb2_dc_attach_dmabuf,
        .detach_dmabuf  = vb2_dc_detach_dmabuf,
        .num_users      = vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);

void *vb2_dma_contig_init_ctx(struct device *dev)
{
        struct vb2_dc_conf *conf;

        conf = kzalloc(sizeof *conf, GFP_KERNEL);
        if (!conf)
                return ERR_PTR(-ENOMEM);

        conf->dev = dev;

        return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_init_ctx);
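/*
 * A minimal usage sketch (illustrative, not part of this file): a driver
 * typically creates the allocator context at probe time, plugs the memops
 * into its vb2_queue, and frees the context on remove. "priv" and "q" are
 * placeholders for driver-specific structures.
 *
 *	priv->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev);
 *	if (IS_ERR(priv->alloc_ctx))
 *		return PTR_ERR(priv->alloc_ctx);
 *
 *	q->mem_ops = &vb2_dma_contig_memops;
 *	ret = vb2_queue_init(q);
 *
 *	// in the driver's queue_setup callback:
 *	alloc_ctxs[0] = priv->alloc_ctx;
 *
 *	// on remove:
 *	vb2_dma_contig_cleanup_ctx(priv->alloc_ctx);
 */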
void vb2_dma_contig_cleanup_ctx(void *alloc_ctx)
{
        kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_cleanup_ctx);

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");