@@ -54,13 +54,24 @@ static pgprot_t drm_io_prot(uint32_t map_type, struct vm_area_struct *vma)
 		pgprot_val(tmp) |= _PAGE_NO_CACHE;
 	if (map_type == _DRM_REGISTERS)
 		pgprot_val(tmp) |= _PAGE_GUARDED;
-#endif
-#if defined(__ia64__)
+#elif defined(__ia64__)
 	if (efi_range_is_wc(vma->vm_start, vma->vm_end -
 			    vma->vm_start))
 		tmp = pgprot_writecombine(tmp);
 	else
 		tmp = pgprot_noncached(tmp);
+#elif defined(__sparc__)
+	tmp = pgprot_noncached(tmp);
+#endif
+	return tmp;
+}
+
+static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
+{
+	pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
+
+#if defined(__powerpc__) && defined(CONFIG_NOT_COHERENT_CACHE)
+	pgprot_val(tmp) |= _PAGE_NO_CACHE;
 #endif
 	return tmp;
 }
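
The hunk above splits page-protection selection into two helpers: drm_io_prot() for register and framebuffer mappings, which must bypass the CPU cache (or use write-combining where the ia64 EFI memory map allows it), and the new drm_dma_prot() for DMA'd memory, which stays cached except on non-cache-coherent PowerPC. As a rough sketch of that decision logic, here is a minimal userspace model; the enum values and helper names below are illustrative stand-ins, not the kernel's pgprot_t API:

    #include <stdio.h>

    /* Stand-in names; the kernel uses pgprot_t and per-arch page flags. */
    enum map_type { MAP_REGISTERS, MAP_FRAME_BUFFER, MAP_CONSISTENT };
    enum page_prot { PROT_CACHED, PROT_UNCACHED, PROT_WRITE_COMBINED };

    /* Models drm_io_prot(): IO mappings never use normal cached pages. */
    static enum page_prot io_prot(enum map_type type, int range_is_wc)
    {
        if (type == MAP_FRAME_BUFFER && range_is_wc)
            return PROT_WRITE_COMBINED;  /* ia64 EFI write-combine range */
        return PROT_UNCACHED;            /* registers; sparc, powerpc, etc. */
    }

    /* Models drm_dma_prot(): DMA memory stays cached on coherent CPUs. */
    static enum page_prot dma_prot(int cache_coherent)
    {
        return cache_coherent ? PROT_CACHED : PROT_UNCACHED;
    }

    int main(void)
    {
        printf("registers   -> %d\n", io_prot(MAP_REGISTERS, 0));
        printf("framebuffer -> %d\n", io_prot(MAP_FRAME_BUFFER, 1));
        printf("consistent  -> %d\n", dma_prot(1));
        return 0;
    }
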
@@ -603,9 +614,6 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 		offset = dev->driver->get_reg_ofs(dev);
 		vma->vm_flags |= VM_IO;	/* not in core dump */
 		vma->vm_page_prot = drm_io_prot(map->type, vma);
-#ifdef __sparc__
-		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
-#endif
 		if (io_remap_pfn_range(vma, vma->vm_start,
 				       (map->offset + offset) >> PAGE_SHIFT,
 				       vma->vm_end - vma->vm_start,
@@ -624,6 +632,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 				    page_to_pfn(virt_to_page(map->handle)),
 				    vma->vm_end - vma->vm_start, vma->vm_page_prot))
 			return -EAGAIN;
+		vma->vm_page_prot = drm_dma_prot(map->type, vma);
 		/* fall through to _DRM_SHM */
 	case _DRM_SHM:
 		vma->vm_ops = &drm_vm_shm_ops;
@@ -631,6 +640,7 @@ static int drm_mmap_locked(struct file *filp, struct vm_area_struct *vma)
 		/* Don't let this area swap.  Change when
 		   DRM_KERNEL advisory is supported. */
 		vma->vm_flags |= VM_RESERVED;
+		vma->vm_page_prot = drm_dma_prot(map->type, vma);
 		break;
 	case _DRM_SCATTER_GATHER:
 		vma->vm_ops = &drm_vm_sg_ops;
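
Taken together, the drm_mmap_locked() hunks drop the sparc-only pgprot_noncached() call from the mmap path (drm_io_prot() now covers sparc) and apply drm_dma_prot() to both DMA-backed map types: the _DRM_CONSISTENT case sets it after remapping and then falls through, while the _DRM_SHM case sets it just before break. A minimal userspace model of that fall-through, again with made-up names, might look like:

    #include <stdio.h>

    /* Made-up map types modelling the fall-through in drm_mmap_locked(). */
    enum map_type { MAP_CONSISTENT, MAP_SHM, MAP_OTHER };

    static const char *mmap_prot(enum map_type type)
    {
        const char *prot = "default";

        switch (type) {
        case MAP_CONSISTENT:
            prot = "dma";   /* hunk 3: set after remap_pfn_range() */
            /* fall through to MAP_SHM */
        case MAP_SHM:
            prot = "dma";   /* hunk 4: set before break */
            break;
        default:
            break;
        }
        return prot;
    }

    int main(void)
    {
        printf("consistent -> %s\n", mmap_prot(MAP_CONSISTENT));
        printf("shm        -> %s\n", mmap_prot(MAP_SHM));
        printf("other      -> %s\n", mmap_prot(MAP_OTHER));
        return 0;
    }
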