View source

[PATCH] s390: put sys_call_table into .rodata section and write protect it

Put s390's syscall tables into .rodata section and write protect this
section to prevent misuse of it.  Suggested by Arjan van de Ven
<arjan@infradead.org>.

Cc: Arjan van de Ven <arjan@infradead.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Heiko Carstens 19 years ago
parent
commit
d882b17251

+ 6 - 3
arch/s390/kernel/entry.S

@@ -228,8 +228,9 @@ sysc_do_svc:
 sysc_nr_ok:
 	mvc	SP_ARGS(4,%r15),SP_R7(%r15)
 sysc_do_restart:
+	l	%r8,BASED(.Lsysc_table)
 	tm	__TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
-        l       %r8,sys_call_table-system_call(%r7,%r13) # get system call addr.
+	l	%r8,0(%r7,%r8)	  # get system call addr.
         bnz     BASED(sysc_tracesys)
         basr    %r14,%r8          # call sys_xxxx
         st      %r2,SP_R2(%r15)   # store return value (change R2 on stack)
@@ -330,9 +331,10 @@ sysc_tracesys:
 	basr	%r14,%r1
 	clc	SP_R2(4,%r15),BASED(.Lnr_syscalls)
 	bnl	BASED(sysc_tracenogo)
+	l	%r8,BASED(.Lsysc_table)
 	l	%r7,SP_R2(%r15)        # strace might have changed the 
 	sll	%r7,2                  #  system call
-	l	%r8,sys_call_table-system_call(%r7,%r13)
+	l	%r8,0(%r7,%r8)
 sysc_tracego:
 	lm	%r3,%r6,SP_R3(%r15)
 	l	%r2,SP_ORIG_R2(%r15)
@@ -1009,6 +1011,7 @@ cleanup_io_leave_insn:
 .Ltrace:       .long  syscall_trace
 .Lvfork:       .long  sys_vfork
 .Lschedtail:   .long  schedule_tail
+.Lsysc_table:  .long  sys_call_table
 
 .Lcritical_start:
                .long  __critical_start + 0x80000000
@@ -1017,8 +1020,8 @@ cleanup_io_leave_insn:
 .Lcleanup_critical:
                .long  cleanup_critical
 
+	       .section .rodata, "a"
 #define SYSCALL(esa,esame,emu)	.long esa
 sys_call_table:
 #include "syscalls.S"
 #undef SYSCALL
-

+ 1 - 0
arch/s390/kernel/entry64.S

@@ -991,6 +991,7 @@ cleanup_io_leave_insn:
 .Lcritical_end:
                .quad  __critical_end
 
+	       .section .rodata, "a"
 #define SYSCALL(esa,esame,emu)	.long esame
 sys_call_table:
 #include "syscalls.S"

+ 21 - 14
arch/s390/mm/init.c

@@ -23,6 +23,7 @@
 #include <linux/init.h>
 #include <linux/pagemap.h>
 #include <linux/bootmem.h>
+#include <linux/pfn.h>
 
 #include <asm/processor.h>
 #include <asm/system.h>
@@ -33,6 +34,7 @@
 #include <asm/lowcore.h>
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
+#include <asm/sections.h>
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
@@ -89,17 +91,6 @@ void show_mem(void)
         printk("%d pages swap cached\n",cached);
 }
 
-/* References to section boundaries */
-
-extern unsigned long _text;
-extern unsigned long _etext;
-extern unsigned long _edata;
-extern unsigned long __bss_start;
-extern unsigned long _end;
-
-extern unsigned long __init_begin;
-extern unsigned long __init_end;
-
 extern unsigned long __initdata zholes_size[];
 /*
  * paging_init() sets up the page tables
@@ -116,6 +107,10 @@ void __init paging_init(void)
         unsigned long pfn = 0;
         unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) | _KERNSEG_TABLE;
         static const int ssm_mask = 0x04000000L;
+	unsigned long ro_start_pfn, ro_end_pfn;
+
+	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
+	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
 	/* unmap whole virtual address space */
 	
@@ -143,7 +138,10 @@
                 pg_dir++;
 
                 for (tmp = 0 ; tmp < PTRS_PER_PTE ; tmp++,pg_table++) {
-                        pte = pfn_pte(pfn, PAGE_KERNEL);
+			if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
+				pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
+			else
+				pte = pfn_pte(pfn, PAGE_KERNEL);
                         if (pfn >= max_low_pfn)
                                 pte_clear(&init_mm, 0, &pte);
                         set_pte(pg_table, pte);
@@ -175,6 +173,7 @@
 }
 
 #else /* CONFIG_64BIT */
+
 void __init paging_init(void)
 {
         pgd_t * pg_dir;
@@ -186,13 +185,15 @@
         unsigned long pgdir_k = (__pa(swapper_pg_dir) & PAGE_MASK) |
           _KERN_REGION_TABLE;
 	static const int ssm_mask = 0x04000000L;
-
 	unsigned long zones_size[MAX_NR_ZONES];
 	unsigned long dma_pfn, high_pfn;
+	unsigned long ro_start_pfn, ro_end_pfn;
 
 	memset(zones_size, 0, sizeof(zones_size));
 	dma_pfn = MAX_DMA_ADDRESS >> PAGE_SHIFT;
 	high_pfn = max_low_pfn;
+	ro_start_pfn = PFN_DOWN((unsigned long)&__start_rodata);
+	ro_end_pfn = PFN_UP((unsigned long)&__end_rodata);
 
 	if (dma_pfn > high_pfn)
 		zones_size[ZONE_DMA] = high_pfn;
@@ -231,7 +232,10 @@
                         pmd_populate_kernel(&init_mm, pm_dir, pt_dir);
 	
                         for (k = 0 ; k < PTRS_PER_PTE ; k++,pt_dir++) {
-                                pte = pfn_pte(pfn, PAGE_KERNEL);
+				if (pfn >= ro_start_pfn && pfn < ro_end_pfn)
+					pte = pfn_pte(pfn, __pgprot(_PAGE_RO));
+				else
+					pte = pfn_pte(pfn, PAGE_KERNEL);
                                 if (pfn >= max_low_pfn) {
                                         pte_clear(&init_mm, 0, &pte); 
                                         continue;
@@ -282,6 +286,9 @@
                 reservedpages << (PAGE_SHIFT-10),
                 datasize >>10,
                 initsize >> 10);
+	printk("Write protected kernel read-only data: %#lx - %#lx\n",
+	       (unsigned long)&__start_rodata,
+	       PFN_ALIGN((unsigned long)&__end_rodata) - 1);
 }
 
 void free_initmem(void)