@@ -423,6 +423,39 @@ static inline int vma_is_io(struct vm_area_struct *vma)
 	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
 }
 
+static int vb2_dc_get_user_pfn(unsigned long start, int n_pages,
+	struct vm_area_struct *vma, unsigned long *res)
+{
+	unsigned long pfn, start_pfn, prev_pfn;
+	unsigned int i;
+	int ret;
+
+	if (!vma_is_io(vma))
+		return -EFAULT;
+
+	ret = follow_pfn(vma, start, &pfn);
+	if (ret)
+		return ret;
+
+	start_pfn = pfn;
+	start += PAGE_SIZE;
+
+	for (i = 1; i < n_pages; ++i, start += PAGE_SIZE) {
+		prev_pfn = pfn;
+		ret = follow_pfn(vma, start, &pfn);
+
+		if (ret) {
+			pr_err("no page for address %lu\n", start);
+			return ret;
+		}
+		if (pfn != prev_pfn + 1)
+			return -EINVAL;
+	}
+
+	*res = start_pfn;
+	return 0;
+}
+
 static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
 	int n_pages, struct vm_area_struct *vma, int write)
 {
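
The loop above is the kernel-side contiguity walk: follow_pfn() resolves each page of the VM_PFNMAP mapping, and the buffer is rejected unless the PFNs are strictly consecutive. For readers who want to reproduce the same check from userspace, here is a hedged sketch (not part of the patch) that reads PFNs for a mapped range out of /proc/self/pagemap and verifies they are physically contiguous. Pagemap stores one 64-bit entry per virtual page, with the PFN in bits 0-54 and a present flag in bit 63; recent kernels report zero PFNs without CAP_SYS_ADMIN.

#include <fcntl.h>
#include <stdint.h>
#include <sys/types.h>
#include <unistd.h>

#define PM_PFN_MASK ((UINT64_C(1) << 55) - 1)	/* bits 0-54: PFN */
#define PM_PRESENT  (UINT64_C(1) << 63)		/* bit 63: page present */

/* Returns 1 if the n_pages starting at 'start' map consecutive PFNs,
 * 0 if not, -1 on error. Mirrors the vb2_dc_get_user_pfn() walk. */
static int range_is_pfn_contiguous(unsigned long start, int n_pages)
{
	long psize = sysconf(_SC_PAGESIZE);
	int fd = open("/proc/self/pagemap", O_RDONLY);
	uint64_t entry, prev_pfn = 0;
	int i, ret = 1;

	if (fd < 0)
		return -1;

	for (i = 0; i < n_pages; i++) {
		/* one 8-byte entry per virtual page */
		off_t off = (off_t)(start / psize + i) * sizeof(entry);

		if (pread(fd, &entry, sizeof(entry), off) != sizeof(entry) ||
		    !(entry & PM_PRESENT)) {
			ret = -1;
			break;
		}
		if (i > 0 && (entry & PM_PFN_MASK) != prev_pfn + 1) {
			ret = 0;	/* hole: not physically contiguous */
			break;
		}
		prev_pfn = entry & PM_PFN_MASK;
	}
	close(fd);
	return ret;
}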
@@ -433,6 +466,9 @@ static int vb2_dc_get_user_pages(unsigned long start, struct page **pages,
 			unsigned long pfn;
 			int ret = follow_pfn(vma, start, &pfn);
 
+			if (!ret && !pfn_valid(pfn))
+				return -EINVAL;
+
 			if (ret) {
 				pr_err("no page for address %lu\n", start);
 				return ret;
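
Note the added guard only tests pfn when follow_pfn() succeeded: pfn is left uninitialized on failure, so an unconditional pfn_valid(pfn) would read garbage. The guard itself matters because, in context the hunk does not show, this loop presumably goes on to fill pages[i] via pfn_to_page(), which blindly indexes the memmap and must never see a PFN with no struct page behind it (a device aperture, for instance). A minimal kernel-style sketch of the safe pattern:

	/* sketch only: validate before touching the memmap */
	ret = follow_pfn(vma, start, &pfn);
	if (ret)
		return ret;		/* pfn is undefined on failure */
	if (!pfn_valid(pfn))
		return -EINVAL;		/* no struct page behind this pfn */
	page = pfn_to_page(pfn);	/* now safe */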
@@ -468,16 +504,49 @@ static void vb2_dc_put_userptr(void *buf_priv)
 	struct vb2_dc_buf *buf = buf_priv;
 	struct sg_table *sgt = buf->dma_sgt;
 
-	dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
-	if (!vma_is_io(buf->vma))
-		vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
+	if (sgt) {
+		dma_unmap_sg(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
+		if (!vma_is_io(buf->vma))
+			vb2_dc_sgt_foreach_page(sgt, vb2_dc_put_dirty_page);
 
-	sg_free_table(sgt);
-	kfree(sgt);
+		sg_free_table(sgt);
+		kfree(sgt);
+	}
 	vb2_put_vma(buf->vma);
 	kfree(buf);
 }
 
+/*
+ * For some kinds of reserved memory there might be no struct page available,
+ * so all that can be done to support such 'pages' is to try to convert the
+ * pfn to a dma address, or as a last resort to assume that
+ * dma address == physical address (as was assumed in earlier versions
+ * of videobuf2-dma-contig).
+ */
+
+#ifdef __arch_pfn_to_dma
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+	return (dma_addr_t)__arch_pfn_to_dma(dev, pfn);
+}
+#elif defined(__pfn_to_bus)
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+	return (dma_addr_t)__pfn_to_bus(pfn);
+}
+#elif defined(__pfn_to_phys)
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+	return (dma_addr_t)__pfn_to_phys(pfn);
+}
+#else
+static inline dma_addr_t vb2_dc_pfn_to_dma(struct device *dev, unsigned long pfn)
+{
+	/* really, we cannot do anything better at this point */
+	return (dma_addr_t)(pfn) << PAGE_SHIFT;
+}
+#endif
+
 static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	unsigned long size, int write)
 {
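
The #else branch of the chain above is the pure arithmetic fallback: with no arch helper available, the DMA address is taken to equal the physical address, i.e. the PFN shifted left by PAGE_SHIFT. A standalone userspace illustration of that arithmetic (PFN value and page size are hypothetical, not from the patch):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

int main(void)
{
	unsigned long pfn = 0x8f600;	/* hypothetical PFN from follow_pfn() */
	unsigned long long dma = (unsigned long long)pfn << PAGE_SHIFT;

	/* prints: pfn 0x8f600 -> assumed dma/phys 0x8f600000 */
	printf("pfn 0x%lx -> assumed dma/phys 0x%llx\n", pfn, dma);
	return 0;
}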
@@ -548,6 +617,14 @@ static void *vb2_dc_get_userptr(void *alloc_ctx, unsigned long vaddr,
 	/* extract page list from userspace mapping */
 	ret = vb2_dc_get_user_pages(start, pages, n_pages, vma, write);
 	if (ret) {
+		unsigned long pfn;
+		if (vb2_dc_get_user_pfn(start, n_pages, vma, &pfn) == 0) {
+			buf->dma_addr = vb2_dc_pfn_to_dma(buf->dev, pfn);
+			buf->size = size;
+			kfree(pages);
+			return buf;
+		}
+
 		pr_err("failed to get user pages\n");
 		goto fail_vma;
 	}
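
Taken together, the fallback lets a driver accept a USERPTR buffer that points at PFN-mapped, struct-page-less memory, such as a mmap()ed framebuffer, provided it is physically contiguous. A hypothetical userspace sketch of that scenario follows; the device paths, buffer type, size, and error handling are illustrative assumptions, not part of the patch:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/videodev2.h>

int main(void)
{
	size_t len = 4 * 1024 * 1024;		/* hypothetical buffer size */
	int fb = open("/dev/fb0", O_RDWR);	/* PFN-mapped (VM_IO) memory */
	int vfd = open("/dev/video0", O_RDWR);
	void *mem;
	struct v4l2_requestbuffers req;
	struct v4l2_buffer buf;

	if (fb < 0 || vfd < 0)
		return 1;

	/* this mapping has no struct pages; before the patch QBUF failed */
	mem = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fb, 0);
	if (mem == MAP_FAILED)
		return 1;

	memset(&req, 0, sizeof(req));
	req.count = 1;
	req.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	req.memory = V4L2_MEMORY_USERPTR;
	if (ioctl(vfd, VIDIOC_REQBUFS, &req) < 0)
		return 1;

	memset(&buf, 0, sizeof(buf));
	buf.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
	buf.memory = V4L2_MEMORY_USERPTR;
	buf.index = 0;
	buf.m.userptr = (unsigned long)mem;
	buf.length = len;

	/* exercises vb2_dc_get_userptr() -> vb2_dc_get_user_pfn() fallback */
	if (ioctl(vfd, VIDIOC_QBUF, &buf) < 0)
		perror("VIDIOC_QBUF");
	return 0;
}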