@@ -49,15 +49,8 @@
 	__attribute__ ((unused, __section__(".vsyscall_" #nr))) notrace
 #define __syscall_clobber "r11","cx","memory"
 
-/*
- * vsyscall_gtod_data contains data that is :
- * - readonly from vsyscalls
- * - written by timer interrupt or systcl (/proc/sys/kernel/vsyscall64)
- * Try to keep this structure as small as possible to avoid cache line ping pongs
- */
-int __vgetcpu_mode __section_vgetcpu_mode;
-
-struct vsyscall_gtod_data __vsyscall_gtod_data __section_vsyscall_gtod_data =
+DEFINE_VVAR(int, vgetcpu_mode);
+DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) =
 {
 	.lock = SEQLOCK_UNLOCKED,
 	.sysctl_enabled = 1,
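
The DEFINE_VVAR()/VVAR() pair replaces the one-off per-variable sections
(__section_vgetcpu_mode, __section_vsyscall_gtod_data) with a single
convention for kernel variables that user-mode vsyscall code may read.
A minimal sketch of the idea, assuming the real macros live in the
vvar.h this series introduces:

  /* Sketch only, not the actual vvar.h: the definition macro places
   * the variable in its own named section (gathered by the linker
   * script into the one user-readable vvar area), and the accessor
   * macro marks every vsyscall-side read site. */
  #define DEFINE_VVAR(type, name) \
          type name __attribute__((__section__(".vvar_" #name)))

  #define VVAR(name) (name)  /* the real accessor reads through the
                                fixed user-visible mapping address */
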
@@ -97,7 +90,7 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
  */
 static __always_inline void do_get_tz(struct timezone * tz)
 {
-	*tz = __vsyscall_gtod_data.sys_tz;
+	*tz = VVAR(vsyscall_gtod_data).sys_tz;
 }
 
 static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
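
For context, the gettimeofday() helper whose declaration closes this
hunk is the fallback taken when the fast path is disabled or the
clocksource has no vread; its body sits outside this hunk, so the exact
shape below is assumed, but it issues the real syscall directly:

  static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
  {
          int ret;
          /* direct syscall; rcx/r11 are clobbered by the syscall
             instruction, hence the __syscall_clobber list above */
          asm volatile("syscall"
                  : "=a" (ret)
                  : "0" (__NR_gettimeofday), "D" (tv), "S" (tz)
                  : __syscall_clobber);
          return ret;
  }
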
@@ -126,23 +119,24 @@ static __always_inline void do_vgettimeofday(struct timeval * tv)
 	unsigned long mult, shift, nsec;
 	cycle_t (*vread)(void);
 	do {
-		seq = read_seqbegin(&__vsyscall_gtod_data.lock);
+		seq = read_seqbegin(&VVAR(vsyscall_gtod_data).lock);
 
-		vread = __vsyscall_gtod_data.clock.vread;
-		if (unlikely(!__vsyscall_gtod_data.sysctl_enabled || !vread)) {
+		vread = VVAR(vsyscall_gtod_data).clock.vread;
+		if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled ||
+			     !vread)) {
 			gettimeofday(tv,NULL);
 			return;
 		}
 
 		now = vread();
-		base = __vsyscall_gtod_data.clock.cycle_last;
-		mask = __vsyscall_gtod_data.clock.mask;
-		mult = __vsyscall_gtod_data.clock.mult;
-		shift = __vsyscall_gtod_data.clock.shift;
+		base = VVAR(vsyscall_gtod_data).clock.cycle_last;
+		mask = VVAR(vsyscall_gtod_data).clock.mask;
+		mult = VVAR(vsyscall_gtod_data).clock.mult;
+		shift = VVAR(vsyscall_gtod_data).clock.shift;
 
-		tv->tv_sec = __vsyscall_gtod_data.wall_time_sec;
-		nsec = __vsyscall_gtod_data.wall_time_nsec;
-	} while (read_seqretry(&__vsyscall_gtod_data.lock, seq));
+		tv->tv_sec = VVAR(vsyscall_gtod_data).wall_time_sec;
+		nsec = VVAR(vsyscall_gtod_data).wall_time_nsec;
+	} while (read_seqretry(&VVAR(vsyscall_gtod_data).lock, seq));
 
 	/* calculate interval: */
 	cycle_delta = (now - base) & mask;
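
The base/mask/mult/shift snapshot feeds a division-free
cycles-to-nanoseconds conversion. The tail of do_vgettimeofday() falls
outside this hunk; its assumed shape, to make the arithmetic concrete:

  /* convert to nsecs: the precomputed mult/shift pair avoids a
     division on the hot path; then carry whole seconds and convert
     the remainder to the microseconds struct timeval wants */
  nsec += (cycle_delta * mult) >> shift;
  if (nsec >= NSEC_PER_SEC) {
          tv->tv_sec += 1;
          nsec -= NSEC_PER_SEC;
  }
  tv->tv_usec = nsec / NSEC_PER_USEC;
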
@@ -171,15 +165,15 @@ time_t __vsyscall(1) vtime(time_t *t)
 {
 	unsigned seq;
 	time_t result;
-	if (unlikely(!__vsyscall_gtod_data.sysctl_enabled))
+	if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled))
 		return time_syscall(t);
 
 	do {
-		seq = read_seqbegin(&__vsyscall_gtod_data.lock);
+		seq = read_seqbegin(&VVAR(vsyscall_gtod_data).lock);
 
-		result = __vsyscall_gtod_data.wall_time_sec;
+		result = VVAR(vsyscall_gtod_data).wall_time_sec;
 
-	} while (read_seqretry(&__vsyscall_gtod_data.lock, seq));
+	} while (read_seqretry(&VVAR(vsyscall_gtod_data).lock, seq));
 
 	if (t)
 		*t = result;
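
vtime() above is the plain lockless seqlock read pattern: sample the
sequence count, copy the data out, retry if a writer raced. A
self-contained sketch of the same pattern against a hypothetical
seqlock-protected variable:

  #include <linux/seqlock.h>

  static seqlock_t demo_lock = SEQLOCK_UNLOCKED;  /* hypothetical */
  static time_t demo_sec;                         /* hypothetical */

  static time_t demo_read(void)
  {
          unsigned seq;
          time_t snap;

          do {
                  seq = read_seqbegin(&demo_lock);  /* sample count */
                  snap = demo_sec;                  /* copy data    */
          } while (read_seqretry(&demo_lock, seq)); /* writer raced? */

          return snap;
  }
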
@@ -208,9 +202,9 @@ vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
 	   We do this here because otherwise user space would do it on
 	   its own in a likely inferior way (no access to jiffies).
 	   If you don't like it pass NULL. */
-	if (tcache && tcache->blob[0] == (j = __jiffies)) {
+	if (tcache && tcache->blob[0] == (j = VVAR(jiffies))) {
 		p = tcache->blob[1];
-	} else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
+	} else if (VVAR(vgetcpu_mode) == VGETCPU_RDTSCP) {
 		/* Load per CPU data from RDTSCP */
 		native_read_tscp(&p);
 	} else {
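
The hunk ends inside vgetcpu(). For orientation: whichever of the three
paths produced it (tcache hit, RDTSCP, or the GDT-limit fallback in the
truncated else branch), p packs the CPU number in its low 12 bits and
the NUMA node above them. The unpacking later in the function looks
roughly like this (assumed from the surrounding code, not part of this
diff):

  if (cpu)
          *cpu = p & 0xfff;   /* low 12 bits: CPU number */
  if (node)
          *node = p >> 12;    /* remaining bits: node    */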