
x86: ignore the sys_getcpu() tcache parameter

Don't use the vgetcpu tcache - it is causing problems for migrating
tasks: they keep seeing the old cached value for up to a jiffy after
the migration, which further increases the cost of the migration.

In the worst case they see completely bogus information from the
tcache: a sys_getcpu() call "invalidates" the cache info by
incrementing both the jiffies _and_ the cpuid words in the cache, and
the following vdso_getcpu() call happens after vdso_jiffies has been
incremented, so the stale entry passes the freshness check and the
bogus cpuid is returned.
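
For illustration only (not part of this commit), a minimal stand-alone
C sketch of that worst case; the struct name getcpu_cache_sketch and
the concrete values are hypothetical, blob[0] stands for the cached
jiffies stamp and blob[1] (simplified here to a plain CPU number) for
the cached cpuid word:

#include <stdio.h>

struct getcpu_cache_sketch { unsigned long blob[2]; };

int main(void)
{
	unsigned long vdso_jiffies = 1000;	/* hypothetical jiffies value */
	struct getcpu_cache_sketch tcache = {
		.blob = { 1000, 3 },	/* filled by an earlier vDSO call on CPU 3 */
	};

	/* Old sys_getcpu() "validation": increment both cached words. */
	tcache.blob[0]++;		/* 1001: no longer a real timestamp */
	tcache.blob[1]++;		/* 4:    no longer a real CPU id    */

	/* A jiffy passes. */
	vdso_jiffies++;			/* 1001 */

	/* Old vDSO fast path: the stamp matches by accident, so the bogus
	 * CPU id is returned as if it were fresh. */
	if (tcache.blob[0] == vdso_jiffies)
		printf("vdso_getcpu would report CPU %lu (bogus)\n",
		       tcache.blob[1]);
	return 0;
}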

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Ulrich Drepper <drepper@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Ingo Molnar authored 17 years ago
commit 4307d1e5ad
2 changed files with 3 additions and 36 deletions
  1. arch/x86/vdso/vgetcpu.c (+2, -17)
  2. kernel/sys.c (+1, -19)

arch/x86/vdso/vgetcpu.c (+2, -17)

@@ -13,32 +13,17 @@
 #include <asm/vgtod.h>
 #include "vextern.h"
 
-long __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
+long __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
 {
 	unsigned int dummy, p;
-	unsigned long j = 0;
 
-	/* Fast cache - only recompute value once per jiffies and avoid
-	   relatively costly rdtscp/cpuid otherwise.
-	   This works because the scheduler usually keeps the process
-	   on the same CPU and this syscall doesn't guarantee its
-	   results anyways.
-	   We do this here because otherwise user space would do it on
-	   its own in a likely inferior way (no access to jiffies).
-	   If you don't like it pass NULL. */
-	if (tcache && tcache->blob[0] == (j = *vdso_jiffies)) {
-		p = tcache->blob[1];
-	} else if (*vdso_vgetcpu_mode == VGETCPU_RDTSCP) {
+	if (*vdso_vgetcpu_mode == VGETCPU_RDTSCP) {
 		/* Load per CPU data from RDTSCP */
 		rdtscp(dummy, dummy, p);
 	} else {
 		/* Load per CPU data from GDT */
 		asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
 	}
-	if (tcache) {
-		tcache->blob[0] = j;
-		tcache->blob[1] = p;
-	}
 	if (cpu)
 		*cpu = p & 0xfff;
 	if (node)
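
As a quick user-space check of this vDSO path, here is a sketch
assuming a glibc that provides sched_getcpu(), which on x86-64 is
normally backed by this getcpu vsyscall/vDSO entry; after the patch it
reflects the current CPU rather than a value cached up to a jiffy
earlier:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void)
{
	int cpu = sched_getcpu();	/* goes through the getcpu fast path */

	if (cpu < 0)
		perror("sched_getcpu");
	else
		printf("running on CPU %d\n", cpu);
	return 0;
}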

kernel/sys.c (+1, -19)

@@ -1750,7 +1750,7 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
 }
 
 asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
-	   		   struct getcpu_cache __user *cache)
+			   struct getcpu_cache __user *unused)
 {
 	int err = 0;
 	int cpu = raw_smp_processor_id();
@@ -1758,24 +1758,6 @@ asmlinkage long sys_getcpu(unsigned __user *cpup, unsigned __user *nodep,
 		err |= put_user(cpu, cpup);
 	if (nodep)
 		err |= put_user(cpu_to_node(cpu), nodep);
-	if (cache) {
-		/*
-		 * The cache is not needed for this implementation,
-		 * but make sure user programs pass something
-		 * valid. vsyscall implementations can instead make
-		 * good use of the cache. Only use t0 and t1 because
-		 * these are available in both 32bit and 64bit ABI (no
-		 * need for a compat_getcpu). 32bit has enough
-		 * padding
-		 */
-		unsigned long t0, t1;
-		get_user(t0, &cache->blob[0]);
-		get_user(t1, &cache->blob[1]);
-		t0++;
-		t1++;
-		put_user(t0, &cache->blob[0]);
-		put_user(t1, &cache->blob[1]);
-	}
 	return err ? -EFAULT : 0;
 }
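
For completeness, the raw syscall can be exercised the same way; since
the cache argument is ignored after this change, passing NULL is the
natural choice. A minimal sketch using syscall(2) and SYS_getcpu:

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	unsigned cpu, node;

	/* The third argument is the (now ignored) getcpu_cache pointer. */
	if (syscall(SYS_getcpu, &cpu, &node, NULL) != 0) {
		perror("getcpu");
		return 1;
	}
	printf("cpu=%u node=%u\n", cpu, node);
	return 0;
}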