@@ -611,16 +611,22 @@ int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
 {
 	int ret = -ENXIO;
 #ifdef CONFIG_MMU
+	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 	unsigned long pfn = dma_to_pfn(dev, dma_addr);
+	unsigned long off = vma->vm_pgoff;
+
 	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);

 	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
 		return ret;

-	ret = remap_pfn_range(vma, vma->vm_start,
-			      pfn + vma->vm_pgoff,
-			      vma->vm_end - vma->vm_start,
-			      vma->vm_page_prot);
+	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
+		ret = remap_pfn_range(vma, vma->vm_start,
+				      pfn + off,
+				      vma->vm_end - vma->vm_start,
+				      vma->vm_page_prot);
+	}
 #endif /* CONFIG_MMU */

 	return ret;