
sh: Kill off __{copy,clear}_user_page().

Now that copy_to_user_page()/copy_from_user_page() are wired up, we
can drop the old __copy_xxx() implementations. Now that the page
colouring scheme has changed via kmap_coherent(), we can avoid the
flush in these specific helpers.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
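
For context, a minimal sketch (under stated assumptions, not the code in this
tree) of the kmap_coherent() approach the commit message refers to: the target
page is mapped at a kernel address whose cache colour matches the user-space
mapping, so a plain clear or copy touches the same cache lines as userspace,
and the ocbi/ocbwb flushing done by the old __clear_user_page()/
__copy_user_page() helpers is no longer needed. The prototypes and the
clear_user_page_sketch() name below are assumptions for illustration only.

#include <asm/page.h>	/* clear_page(), PAGE_SIZE */

struct page;

/* Assumed prototypes, for illustration only. */
void *kmap_coherent(struct page *page, unsigned long addr);
void kunmap_coherent(void *kvaddr);

/*
 * Hypothetical sketch: clear a page that is mapped to userspace at
 * 'address', without any explicit cache maintenance.
 */
static void clear_user_page_sketch(unsigned long address, struct page *page)
{
	/*
	 * Map the page at a kernel address with the same cache colour
	 * as the user mapping, so no dcache alias can exist.
	 */
	void *vto = kmap_coherent(page, address);

	/* Plain clear; no ocbi/ocbwb needed since there is no alias. */
	clear_page(vto);

	kunmap_coherent(vto);
}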
Paul Mundt, 17 years ago
Commit af39c16bd8
3 changed files with 1 addition and 110 deletions
  1. arch/sh/mm/clear_page.S (+0, -45)
  2. arch/sh/mm/copy_page.S (+0, -61)
  3. include/asm-sh/page.h (+1, -4)

+ 0 - 45
arch/sh/mm/clear_page.S

@@ -150,48 +150,3 @@ ENTRY(__clear_user)
 	.long	8b, .Lbad_clear_user
 	.long	9b, .Lbad_clear_user
 .previous
-
-#if defined(CONFIG_CPU_SH4)
-/*
- * __clear_user_page
- * @to: P3 address (with same color)
- * @orig_to: P1 address
- *
- * void __clear_user_page(void *to, void *orig_to)
- */
-
-/*
- * r0 --- scratch 
- * r4 --- to
- * r5 --- orig_to
- * r6 --- to + PAGE_SIZE
- */
-ENTRY(__clear_user_page)
-	mov.l	.Lpsz,r0
-	mov	r4,r6
-	add	r0,r6
-	mov	#0,r0
-	!
-1:	ocbi	@r5
-	add	#32,r5
-	movca.l	r0,@r4
-	mov	r4,r1
-	add	#32,r4
-	mov.l	r0,@-r4
-	mov.l	r0,@-r4
-	mov.l	r0,@-r4
-	mov.l	r0,@-r4
-	mov.l	r0,@-r4
-	mov.l	r0,@-r4
-	mov.l	r0,@-r4
-	add	#28,r4
-	cmp/eq	r6,r4
-	bf/s	1b
-	 ocbwb	@r1
-	!
-	rts
-	 nop
-.Lpsz:	.long	PAGE_SIZE
-
-#endif
-

+ 0 - 61
arch/sh/mm/copy_page.S

@@ -68,67 +68,6 @@ ENTRY(copy_page_slow)
 	rts
 	 nop
 
-#if defined(CONFIG_CPU_SH4)
-/*
- * __copy_user_page
- * @to: P1 address (with same color)
- * @from: P1 address
- * @orig_to: P1 address
- *
- * void __copy_user_page(void *to, void *from, void *orig_to)
- */
-
-/*
- * r0, r1, r2, r3, r4, r5, r6, r7 --- scratch 
- * r8 --- from + PAGE_SIZE
- * r9 --- orig_to
- * r10 --- to
- * r11 --- from
- */
-ENTRY(__copy_user_page)
-	mov.l	r8,@-r15
-	mov.l	r9,@-r15
-	mov.l	r10,@-r15
-	mov.l	r11,@-r15
-	mov	r4,r10
-	mov	r5,r11
-	mov	r6,r9
-	mov	r5,r8
-	mov.l	.Lpsz,r0
-	add	r0,r8
-	!
-1:	ocbi	@r9
-	add	#32,r9
-	mov.l	@r11+,r0
-	mov.l	@r11+,r1
-	mov.l	@r11+,r2
-	mov.l	@r11+,r3
-	mov.l	@r11+,r4
-	mov.l	@r11+,r5
-	mov.l	@r11+,r6
-	mov.l	@r11+,r7
-	movca.l	r0,@r10
-	mov	r10,r0
-	add	#32,r10
-	mov.l	r7,@-r10
-	mov.l	r6,@-r10
-	mov.l	r5,@-r10
-	mov.l	r4,@-r10
-	mov.l	r3,@-r10
-	mov.l	r2,@-r10
-	mov.l	r1,@-r10
-	ocbwb	@r0
-	cmp/eq	r11,r8
-	bf/s	1b
-	 add	#28,r10
-	!
-	mov.l	@r15+,r11
-	mov.l	@r15+,r10
-	mov.l	@r15+,r9
-	mov.l	@r15+,r8
-	rts
-	 nop
-#endif
 	.align 2
 .Lpsz:	.long	PAGE_SIZE
 /*

+ 1 - 4
include/asm-sh/page.h

@@ -74,10 +74,7 @@ extern void copy_page_nommu(void *to, void *from);
 	(defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB))
 struct page;
 struct vm_area_struct;
-extern void clear_user_page(void *to, unsigned long address, struct page *pg);
-extern void copy_user_page(void *to, void *from, unsigned long address, struct page *pg);
-extern void __clear_user_page(void *to, void *orig_to);
-extern void __copy_user_page(void *to, void *from, void *orig_to);
+extern void clear_user_page(void *to, unsigned long address, struct page *page);
 #ifdef CONFIG_CPU_SH4
 extern void copy_user_highpage(struct page *to, struct page *from,
 			       unsigned long vaddr, struct vm_area_struct *vma);