Diffstat (limited to 'arch/x86_64/kernel/vsyscall.c')
-rw-r--r--  arch/x86_64/kernel/vsyscall.c  68
1 file changed, 47 insertions(+), 21 deletions(-)
diff --git a/arch/x86_64/kernel/vsyscall.c b/arch/x86_64/kernel/vsyscall.c
index b43c698cf7d3..dc32cef96195 100644
--- a/arch/x86_64/kernel/vsyscall.c
+++ b/arch/x86_64/kernel/vsyscall.c
@@ -45,14 +45,34 @@
#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
#define __syscall_clobber "r11","rcx","memory"
+#define __pa_vsymbol(x) \
+ ({unsigned long v; \
+ extern char __vsyscall_0; \
+ asm("" : "=r" (v) : "0" (x)); \
+ ((v - VSYSCALL_FIRST_PAGE) + __pa_symbol(&__vsyscall_0)); })
+/*
+ * vsyscall_gtod_data contains data that is:
+ * - read-only from vsyscalls
+ * - written by the timer interrupt or sysctl (/proc/sys/kernel/vsyscall64)
+ * Try to keep this structure as small as possible to avoid cache-line ping-pongs.
+ */
struct vsyscall_gtod_data_t {
- seqlock_t lock;
- int sysctl_enabled;
- struct timeval wall_time_tv;
+ seqlock_t lock;
+
+ /* open coded 'struct timespec' */
+ time_t wall_time_sec;
+ u32 wall_time_nsec;
+
+ int sysctl_enabled;
struct timezone sys_tz;
- cycle_t offset_base;
- struct clocksource clock;
+ struct { /* extract of a clocksource struct */
+ cycle_t (*vread)(void);
+ cycle_t cycle_last;
+ cycle_t mask;
+ u32 mult;
+ u32 shift;
+ } clock;
};
int __vgetcpu_mode __section_vgetcpu_mode;
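A note on the new __pa_vsymbol() macro above: vsyscall text and data are linked at the fixmap address (VSYSCALL_FIRST_PAGE), not inside the regular kernel mapping, so __pa_symbol() applied directly to a vsyscall symbol would yield the wrong physical address. The macro rebases the symbol against __vsyscall_0, the first byte of the vsyscall page, and the empty asm only hides the value from gcc so the arithmetic is not folded away (the __va(__pa()) trouble mentioned in the last hunk). A minimal sketch of the same arithmetic, written as a function with illustrative names rather than the kernel's macro:

    /* Sketch only: physical address of a symbol living in the vsyscall page. */
    static unsigned long pa_of_vsyscall_symbol(unsigned long vaddr)
    {
            extern char __vsyscall_0;                           /* start of the vsyscall page */
            unsigned long offset = vaddr - VSYSCALL_FIRST_PAGE; /* offset within that page */

            return __pa_symbol(&__vsyscall_0) + offset;         /* rebase onto the page's physical address */
    }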
@@ -68,9 +88,13 @@ void update_vsyscall(struct timespec *wall_time, struct clocksource *clock)
write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
/* copy vsyscall data */
- vsyscall_gtod_data.clock = *clock;
- vsyscall_gtod_data.wall_time_tv.tv_sec = wall_time->tv_sec;
- vsyscall_gtod_data.wall_time_tv.tv_usec = wall_time->tv_nsec/1000;
+ vsyscall_gtod_data.clock.vread = clock->vread;
+ vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
+ vsyscall_gtod_data.clock.mask = clock->mask;
+ vsyscall_gtod_data.clock.mult = clock->mult;
+ vsyscall_gtod_data.clock.shift = clock->shift;
+ vsyscall_gtod_data.wall_time_sec = wall_time->tv_sec;
+ vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
vsyscall_gtod_data.sys_tz = sys_tz;
write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}
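Note that update_vsyscall() now copies only the five clocksource fields the vsyscall reader actually needs (vread, cycle_last, mask, mult, shift) instead of the whole struct clocksource, which keeps vsyscall_gtod_data small, as the comment in the first hunk asks. The reader pairs with this writer through the usual seqlock retry loop; a minimal sketch, assuming the standard <linux/seqlock.h> API (the real reader is do_vgettimeofday() in the next hunk):

    unsigned seq;

    do {
            seq = read_seqbegin(&__vsyscall_gtod_data.lock);
            /* copy the published fields into locals here ... */
    } while (read_seqretry(&__vsyscall_gtod_data.lock, seq));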
@@ -105,7 +129,8 @@ static __always_inline long time_syscall(long *t)
static __always_inline void do_vgettimeofday(struct timeval * tv)
{
cycle_t now, base, mask, cycle_delta;
- unsigned long seq, mult, shift, nsec_delta;
+ unsigned seq;
+ unsigned long mult, shift, nsec;
cycle_t (*vread)(void);
do {
seq = read_seqbegin(&__vsyscall_gtod_data.lock);
@@ -121,21 +146,20 @@ static __always_inline void do_vgettimeofday(struct timeval * tv)
mult = __vsyscall_gtod_data.clock.mult;
shift = __vsyscall_gtod_data.clock.shift;
- *tv = __vsyscall_gtod_data.wall_time_tv;
-
+ tv->tv_sec = __vsyscall_gtod_data.wall_time_sec;
+ nsec = __vsyscall_gtod_data.wall_time_nsec;
} while (read_seqretry(&__vsyscall_gtod_data.lock, seq));
/* calculate interval: */
cycle_delta = (now - base) & mask;
/* convert to nsecs: */
- nsec_delta = (cycle_delta * mult) >> shift;
+ nsec += (cycle_delta * mult) >> shift;
- /* convert to usecs and add to timespec: */
- tv->tv_usec += nsec_delta / NSEC_PER_USEC;
- while (tv->tv_usec > USEC_PER_SEC) {
+ while (nsec >= NSEC_PER_SEC) {
tv->tv_sec += 1;
- tv->tv_usec -= USEC_PER_SEC;
+ nsec -= NSEC_PER_SEC;
}
+ tv->tv_usec = nsec / NSEC_PER_USEC;
}
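The cycles-to-nanoseconds step above is a multiply-and-shift rather than a division. A worked example with made-up numbers (the kernel derives mult and shift per clocksource; these values are purely illustrative): for a 2.5 GHz TSC each cycle is 0.4 ns, so with shift = 22 a suitable mult is 0.4 * 2^22 ≈ 1677722, and a delta of 1000 cycles converts to (1000 * 1677722) >> 22 ≈ 400 ns. In code form:

    /* Illustrative only: same conversion as the code above, with toy constants. */
    static inline u64 cyc2ns_example(u64 cycle_delta)
    {
            const u32 mult = 1677722;               /* ~0.4 ns per cycle, scaled by 2^22 */
            const u32 shift = 22;

            return (cycle_delta * mult) >> shift;   /* 1000 cycles -> ~400 ns */
    }

The carry loop then folds whole seconds out of nsec, and only the final remainder is divided down to microseconds for tv_usec.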
int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
@@ -151,11 +175,13 @@ int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
* unlikely */
time_t __vsyscall(1) vtime(time_t *t)
{
+ time_t result;
if (unlikely(!__vsyscall_gtod_data.sysctl_enabled))
return time_syscall(t);
- else if (t)
- *t = __vsyscall_gtod_data.wall_time_tv.tv_sec;
- return __vsyscall_gtod_data.wall_time_tv.tv_sec;
+ result = __vsyscall_gtod_data.wall_time_sec;
+ if (t)
+ *t = result;
+ return result;
}
/* Fast way to get current CPU and node.
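A small note on the vtime() change: reading wall_time_sec once into a local removes the double load of the old code, where the value stored through t and the value returned came from two separate reads and could in principle disagree if the timer interrupt updated the field in between. vtime() still takes no seqlock; presumably this is acceptable because the single load of the 64-bit wall_time_sec is naturally aligned and therefore atomic on x86_64, unlike the multi-field copy in vgettimeofday().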
@@ -224,10 +250,10 @@ static int vsyscall_sysctl_change(ctl_table *ctl, int write, struct file * filp,
return ret;
/* gcc has some trouble with __va(__pa()), so just do it this
way. */
- map1 = ioremap(__pa_symbol(&vsysc1), 2);
+ map1 = ioremap(__pa_vsymbol(&vsysc1), 2);
if (!map1)
return -ENOMEM;
- map2 = ioremap(__pa_symbol(&vsysc2), 2);
+ map2 = ioremap(__pa_vsymbol(&vsysc2), 2);
if (!map2) {
ret = -ENOMEM;
goto out;
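This last hunk is the consumer of the new macro: vsysc1 and vsysc2 are labels inside the vsyscall page, so their physical addresses must come from __pa_vsymbol() before the two-byte patch sites can be mapped. The overall shape, as a sketch that assumes only the standard ioremap()/iounmap() pairing (the actual bytes written come later in the function and are not part of this hunk):

    /* Sketch only: map a 2-byte patch site in the vsyscall page, then release it. */
    void __iomem *map = ioremap(__pa_vsymbol(&vsysc1), 2);
    if (!map)
            return -ENOMEM;
    /* ... write the 2-byte instruction through 'map' ... */
    iounmap(map);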