@@ -669,29 +669,28 @@ static void free_pfn_range(u64 paddr, unsigned long size)
  *
  * If the vma has a linear pfn mapping for the entire range, we get the prot
  * from pte and reserve the entire vma range with single reserve_pfn_range call.
- * Otherwise, we reserve the entire vma range, my ging through the PTEs page
- * by page to get physical address and protection.
  */
 int track_pfn_vma_copy(struct vm_area_struct *vma)
 {
-	int retval = 0;
-	unsigned long i, j;
 	resource_size_t paddr;
 	unsigned long prot;
-	unsigned long vma_start = vma->vm_start;
-	unsigned long vma_end = vma->vm_end;
-	unsigned long vma_size = vma_end - vma_start;
+	unsigned long vma_size = vma->vm_end - vma->vm_start;
 	pgprot_t pgprot;
 
 	if (!pat_enabled)
 		return 0;
 
+	/*
+	 * For now, only handle remap_pfn_range() vmas where
+	 * is_linear_pfn_mapping() == TRUE. Handling of
+	 * vm_insert_pfn() is TBD.
+	 */
 	if (is_linear_pfn_mapping(vma)) {
 		/*
 		 * reserve the whole chunk covered by vma. We need the
 		 * starting address and protection from pte.
 		 */
-		if (follow_phys(vma, vma_start, 0, &prot, &paddr)) {
+		if (follow_phys(vma, vma->vm_start, 0, &prot, &paddr)) {
			WARN_ON_ONCE(1);
			return -EINVAL;
		}
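For reference, the predicate that gates all three tracking paths lives in include/linux/mm.h. Around the kernels this patch applies to it is roughly the sketch below, not verbatim source; VM_PFN_AT_MMAP is set by remap_pfn_range() only when the call covers the entire vma:

/* Rough sketch of the gating predicate used above */
static inline int is_linear_pfn_mapping(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & VM_PFN_AT_MMAP);
}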
@@ -699,28 +698,7 @@ int track_pfn_vma_copy(struct vm_area_struct *vma)
 		return reserve_pfn_range(paddr, vma_size, &pgprot, 1);
 	}
 
-	/* reserve entire vma page by page, using pfn and prot from pte */
-	for (i = 0; i < vma_size; i += PAGE_SIZE) {
-		if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
-			continue;
-
-		pgprot = __pgprot(prot);
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, &pgprot, 1);
-		if (retval)
-			goto cleanup_ret;
-	}
 	return 0;
-
-cleanup_ret:
-	/* Reserve error: Cleanup partial reservation and return error */
-	for (j = 0; j < i; j += PAGE_SIZE) {
-		if (follow_phys(vma, vma_start + j, 0, &prot, &paddr))
-			continue;
-
-		free_pfn_range(paddr, PAGE_SIZE);
-	}
-
-	return retval;
 }
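A single whole-vma reservation is sufficient here because the hook's only caller, copy_page_range() in mm/memory.c, invokes it once per pfnmap vma at fork time, roughly as in this sketch (surrounding code abridged):

	/* Sketch of the fork-time call site in copy_page_range() */
	if (unlikely(is_pfn_mapping(vma))) {
		/* on failure the caller unwinds the vma via remove_vma() */
		ret = track_pfn_vma_copy(vma);
		if (ret)
			return ret;
	}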
 
 /*
@@ -730,50 +708,28 @@ cleanup_ret:
  * prot is passed in as a parameter for the new mapping. If the vma has a
  * linear pfn mapping for the entire range reserve the entire vma range with
  * single reserve_pfn_range call.
- * Otherwise, we look t the pfn and size and reserve only the specified range
- * page by page.
- *
- * Note that this function can be called with caller trying to map only a
- * subrange/page inside the vma.
  */
 int track_pfn_vma_new(struct vm_area_struct *vma, pgprot_t *prot,
 			unsigned long pfn, unsigned long size)
 {
-	int retval = 0;
-	unsigned long i, j;
-	resource_size_t base_paddr;
 	resource_size_t paddr;
-	unsigned long vma_start = vma->vm_start;
-	unsigned long vma_end = vma->vm_end;
-	unsigned long vma_size = vma_end - vma_start;
+	unsigned long vma_size = vma->vm_end - vma->vm_start;
 
 	if (!pat_enabled)
 		return 0;
 
+	/*
+	 * For now, only handle remap_pfn_range() vmas where
+	 * is_linear_pfn_mapping() == TRUE. Handling of
+	 * vm_insert_pfn() is TBD.
+	 */
 	if (is_linear_pfn_mapping(vma)) {
 		/* reserve the whole chunk starting from vm_pgoff */
 		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
 		return reserve_pfn_range(paddr, vma_size, prot, 0);
 	}
 
-	/* reserve page by page using pfn and size */
-	base_paddr = (resource_size_t)pfn << PAGE_SHIFT;
-	for (i = 0; i < size; i += PAGE_SIZE) {
-		paddr = base_paddr + i;
-		retval = reserve_pfn_range(paddr, PAGE_SIZE, prot, 0);
-		if (retval)
-			goto cleanup_ret;
-	}
 	return 0;
-
-cleanup_ret:
-	/* Reserve error: Cleanup partial reservation and return error */
-	for (j = 0; j < i; j += PAGE_SIZE) {
-		paddr = base_paddr + j;
-		free_pfn_range(paddr, PAGE_SIZE);
-	}
-
-	return retval;
 }
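The vm_pgoff trick above works because remap_pfn_range() stashes the base pfn there for full-vma mappings before calling into this hook. A rough sketch of the relevant mm/memory.c lines (details vary by release):

	/* Sketch: remap_pfn_range() marks full-vma linear pfn mappings */
	if (addr == vma->vm_start && end == vma->vm_end) {
		vma->vm_pgoff = pfn;	/* base pfn recoverable via vm_pgoff */
		vma->vm_flags |= VM_PFN_AT_MMAP;
	} else if (is_cow_mapping(vma->vm_flags))
		return -EINVAL;

	vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;

	err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size));
	if (err)
		return -EINVAL;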
 
 /*
@@ -784,39 +740,23 @@ cleanup_ret:
 void untrack_pfn_vma(struct vm_area_struct *vma, unsigned long pfn,
 			unsigned long size)
 {
-	unsigned long i;
 	resource_size_t paddr;
-	unsigned long prot;
-	unsigned long vma_start = vma->vm_start;
-	unsigned long vma_end = vma->vm_end;
-	unsigned long vma_size = vma_end - vma_start;
+	unsigned long vma_size = vma->vm_end - vma->vm_start;
 
 	if (!pat_enabled)
 		return;
 
+	/*
+	 * For now, only handle remap_pfn_range() vmas where
+	 * is_linear_pfn_mapping() == TRUE. Handling of
+	 * vm_insert_pfn() is TBD.
+	 */
 	if (is_linear_pfn_mapping(vma)) {
 		/* free the whole chunk starting from vm_pgoff */
 		paddr = (resource_size_t)vma->vm_pgoff << PAGE_SHIFT;
 		free_pfn_range(paddr, vma_size);
 		return;
 	}
-
-	if (size != 0 && size != vma_size) {
-		/* free page by page, using pfn and size */
-		paddr = (resource_size_t)pfn << PAGE_SHIFT;
-		for (i = 0; i < size; i += PAGE_SIZE) {
-			paddr = paddr + i;
-			free_pfn_range(paddr, PAGE_SIZE);
-		}
-	} else {
-		/* free entire vma, page by page, using the pfn from pte */
-		for (i = 0; i < vma_size; i += PAGE_SIZE) {
-			if (follow_phys(vma, vma_start + i, 0, &prot, &paddr))
-				continue;
-
-			free_pfn_range(paddr, PAGE_SIZE);
-		}
-	}
 }
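The unmap side mirrors the reserve side: unmap_vmas() in mm/memory.c calls this hook once per pfnmap vma with pfn == 0 and size == 0, which now frees the whole linear chunk in one free_pfn_range() call. Roughly:

	/* Sketch of the unmap-time call site in unmap_vmas() */
	if (unlikely(is_pfn_mapping(vma)))
		untrack_pfn_vma(vma, 0, 0);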
 
 /*
 pgprot_t pgprot_writecombine(pgprot_t prot)
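Taken together, a driver only gets this tracking if its mmap handler maps the whole vma in one remap_pfn_range() call, so that is_linear_pfn_mapping() holds. A minimal hypothetical example; mydrv_phys_base stands in for a real device address and is not part of this patch:

/*
 * Hypothetical driver mmap: a single full-vma remap_pfn_range() call
 * makes is_linear_pfn_mapping() true, so the PAT memtype is reserved
 * and freed in one chunk via the hooks above.
 */
static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long size = vma->vm_end - vma->vm_start;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	/* remap_pfn_range() invokes track_pfn_vma_new() internally */
	return remap_pfn_range(vma, vma->vm_start,
			       mydrv_phys_base >> PAGE_SHIFT,
			       size, vma->vm_page_prot);
}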