
ARM: Pass VMA to copy_user_highpage() implementations

Our copy_user_highpage() implementations may require cache maintenance.
Ensure that implementations have all necessary details to perform this
maintenance.

Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Russell King · 15 years ago · commit f00a75c094
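
With the VMA passed down, an implementation can consult per-mapping state (such as vma->vm_flags) when deciding what cache maintenance a copy needs. A minimal, hypothetical sketch of that shape follows; it is not taken from this commit, and the example_copy_user_highpage name and the VM_EXEC check are illustrative only (it would sit next to the other copypage-*.c helpers, built with <linux/mm.h> and <linux/highmem.h>):

/* Hypothetical example, not part of this commit. */
void example_copy_user_highpage(struct page *to, struct page *from,
	unsigned long vaddr, struct vm_area_struct *vma)
{
	void *kto = kmap_atomic(to, KM_USER0);
	void *kfrom = kmap_atomic(from, KM_USER1);

	copy_page(kto, kfrom);

	if (vma->vm_flags & VM_EXEC) {
		/*
		 * Placeholder: an executable mapping may warrant extra
		 * I-cache/D-cache coherency work on the destination.
		 */
	}

	kunmap_atomic(kfrom, KM_USER1);
	kunmap_atomic(kto, KM_USER0);
}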

+ 4 - 3
arch/arm/include/asm/page.h

@@ -117,11 +117,12 @@
 #endif
 
 struct page;
+struct vm_area_struct;
 
 struct cpu_user_fns {
 	void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
 	void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
-			unsigned long vaddr);
+			unsigned long vaddr, struct vm_area_struct *vma);
 };
 
 #ifdef MULTI_USER
@@ -137,7 +138,7 @@ extern struct cpu_user_fns cpu_user;
 
 extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr);
 extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
-			unsigned long vaddr);
+			unsigned long vaddr, struct vm_area_struct *vma);
 #endif
 
 #define clear_user_highpage(page,vaddr)		\
@@ -145,7 +146,7 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
 
 #define __HAVE_ARCH_COPY_USER_HIGHPAGE
 #define copy_user_highpage(to,from,vaddr,vma)	\
-	__cpu_copy_user_highpage(to, from, vaddr)
+	__cpu_copy_user_highpage(to, from, vaddr, vma)
 
 #define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
 extern void copy_page(void *to, const void *from);
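
For reference, when MULTI_USER is set the __cpu_copy_user_highpage() name above resolves through the runtime-selected cpu_user_fns table, so the extra vma argument reaches whichever copypage-*.c implementation was chosen at boot. A condensed, paraphrased sketch of that indirection (not the verbatim header text):

#ifdef MULTI_USER
/* Paraphrased: dispatch through the per-CPU-type function table. */
#define __cpu_clear_user_highpage	cpu_user.cpu_clear_user_highpage
#define __cpu_copy_user_highpage	cpu_user.cpu_copy_user_highpage
#endif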

+ 1 - 1
arch/arm/mm/copypage-feroceon.c

@@ -68,7 +68,7 @@ feroceon_copy_user_page(void *kto, const void *kfrom)
 }
 
 void feroceon_copy_user_highpage(struct page *to, struct page *from,
-	unsigned long vaddr)
+	unsigned long vaddr, struct vm_area_struct *vma)
 {
 	void *kto, *kfrom;
 

+ 1 - 1
arch/arm/mm/copypage-v3.c

@@ -38,7 +38,7 @@ v3_copy_user_page(void *kto, const void *kfrom)
 }
 
 void v3_copy_user_highpage(struct page *to, struct page *from,
-	unsigned long vaddr)
+	unsigned long vaddr, struct vm_area_struct *vma)
 {
 	void *kto, *kfrom;
 

+ 1 - 1
arch/arm/mm/copypage-v4mc.c

@@ -69,7 +69,7 @@ mc_copy_user_page(void *from, void *to)
 }
 
 void v4_mc_copy_user_highpage(struct page *to, struct page *from,
-	unsigned long vaddr)
+	unsigned long vaddr, struct vm_area_struct *vma)
 {
 	void *kto = kmap_atomic(to, KM_USER1);
 

+ 1 - 1
arch/arm/mm/copypage-v4wb.c

@@ -48,7 +48,7 @@ v4wb_copy_user_page(void *kto, const void *kfrom)
 }
 
 void v4wb_copy_user_highpage(struct page *to, struct page *from,
-	unsigned long vaddr)
+	unsigned long vaddr, struct vm_area_struct *vma)
 {
 	void *kto, *kfrom;
 

+ 1 - 1
arch/arm/mm/copypage-v4wt.c

@@ -44,7 +44,7 @@ v4wt_copy_user_page(void *kto, const void *kfrom)
 }
 
 void v4wt_copy_user_highpage(struct page *to, struct page *from,
-	unsigned long vaddr)
+	unsigned long vaddr, struct vm_area_struct *vma)
 {
 	void *kto, *kfrom;
 

+ 2 - 2
arch/arm/mm/copypage-v6.c

@@ -34,7 +34,7 @@ static DEFINE_SPINLOCK(v6_lock);
  * attack the kernel's existing mapping of these pages.
  */
 static void v6_copy_user_highpage_nonaliasing(struct page *to,
-	struct page *from, unsigned long vaddr)
+	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
 {
 	void *kto, *kfrom;
 
@@ -73,7 +73,7 @@ static void discard_old_kernel_data(void *kto)
  * Copy the page, taking account of the cache colour.
  */
 static void v6_copy_user_highpage_aliasing(struct page *to,
-	struct page *from, unsigned long vaddr)
+	struct page *from, unsigned long vaddr, struct vm_area_struct *vma)
 {
 	unsigned int offset = CACHE_COLOUR(vaddr);
 	unsigned long kfrom, kto;
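
Threading the VMA into the aliasing v6 path also opens up uses such as flushing the source page's user-space mapping before it is read through a kernel-side alias. Purely illustrative (this commit only changes the signatures), that could look like:

	/* Illustrative only: push out the user-space view of the source
	 * page before reading it through an alias. */
	flush_cache_page(vma, vaddr, page_to_pfn(from));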

+ 1 - 1
arch/arm/mm/copypage-xsc3.c

@@ -71,7 +71,7 @@ xsc3_mc_copy_user_page(void *kto, const void *kfrom)
 }
 
 void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
-	unsigned long vaddr)
+	unsigned long vaddr, struct vm_area_struct *vma)
 {
 	void *kto, *kfrom;
 

+ 1 - 1
arch/arm/mm/copypage-xscale.c

@@ -91,7 +91,7 @@ mc_copy_user_page(void *from, void *to)
 }
 
 void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
-	unsigned long vaddr)
+	unsigned long vaddr, struct vm_area_struct *vma)
 {
 	void *kto = kmap_atomic(to, KM_USER1);