@@ -53,7 +53,6 @@ DEFINE_VVAR(int, vgetcpu_mode);
 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
 {
 	.lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock),
-	.sysctl_enabled = 1,
 };
 
 void update_vsyscall_tz(void)
@@ -103,15 +102,6 @@ static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
 	return ret;
 }
 
-static __always_inline long time_syscall(long *t)
-{
-	long secs;
-	asm volatile("syscall"
-		: "=a" (secs)
-		: "0" (__NR_time),"D" (t) : __syscall_clobber);
-	return secs;
-}
-
 static __always_inline void do_vgettimeofday(struct timeval * tv)
 {
 	cycle_t now, base, mask, cycle_delta;
@@ -122,8 +112,7 @@ static __always_inline void do_vgettimeofday(struct timeval * tv)
 		seq = read_seqbegin(&VVAR(vsyscall_gtod_data).lock);
 
 		vread = VVAR(vsyscall_gtod_data).clock.vread;
-		if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled ||
-			     !vread)) {
+		if (unlikely(!vread)) {
 			gettimeofday(tv,NULL);
 			return;
 		}
@@ -165,8 +154,6 @@ time_t __vsyscall(1) vtime(time_t *t)
 {
 	unsigned seq;
 	time_t result;
-	if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled))
-		return time_syscall(t);
 
 	do {
 		seq = read_seqbegin(&VVAR(vsyscall_gtod_data).lock);
@@ -227,22 +214,6 @@ static long __vsyscall(3) venosys_1(void)
 	return -ENOSYS;
 }
 
-#ifdef CONFIG_SYSCTL
-static ctl_table kernel_table2[] = {
-	{ .procname = "vsyscall64",
-	  .data = &vsyscall_gtod_data.sysctl_enabled, .maxlen = sizeof(int),
-	  .mode = 0644,
-	  .proc_handler = proc_dointvec },
-	{}
-};
-
-static ctl_table kernel_root_table2[] = {
-	{ .procname = "kernel", .mode = 0555,
-	  .child = kernel_table2 },
-	{}
-};
-#endif
-
 /* Assume __initcall executes before all user space. Hopefully kmod
    doesn't violate that. We'll find out if it does. */
 static void __cpuinit vsyscall_set_cpu(int cpu)
@@ -301,9 +272,6 @@ static int __init vsyscall_init(void)
 	BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
 	BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
 	BUG_ON((unsigned long) &vgetcpu != VSYSCALL_ADDR(__NR_vgetcpu));
-#ifdef CONFIG_SYSCTL
-	register_sysctl_table(kernel_root_table2);
-#endif
 	on_each_cpu(cpu_vsyscall_init, NULL, 1);
 	/* notifier priority > KVM */
 	hotcpu_notifier(cpu_vsyscall_notifier, 30);