[ltt-dev] [LTTNG PATCH] Add TRACE_CLOCK option to clock_gettime vDSO
Julien Desfossez
julien.desfossez at polymtl.ca
Thu Nov 4 18:57:44 EDT 2010
This option allows the user to fetch the current time in sec.nsec, computed
from the current TSC, using the LTTng infrastructure.
If the TSC is not synchronized, we return an error to the user.
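To illustrate the intended usage, here is a minimal userspace sketch (not
part of this patch). It assumes the libc clock_gettime() wrapper goes
through the vDSO and translates the -EPERM return into errno:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

#ifndef CLOCK_TRACE
#define CLOCK_TRACE 7	/* clockid added by this patch in include/linux/time.h */
#endif

int main(void)
{
	struct timespec ts;

	if (clock_gettime(CLOCK_TRACE, &ts) != 0) {
		/* EPERM is expected when the TSC is not synchronized */
		fprintf(stderr, "CLOCK_TRACE unavailable: %s\n",
			strerror(errno));
		return 1;
	}
	printf("trace clock: %ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
	return 0;
}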
The main change from the previous version of this patch is that the TSC is
now converted to sec.nsec instead of being encoded raw in the timespec
struct. We also check whether the TSC is synchronized and export this
information, along with the CPU frequency, in the vDSO shared structure.
Access to the CPU frequency is now protected by a seqlock.
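For clarity, the conversion boils down to the following sketch
(illustration only, with a hypothetical cycles_to_sec_nsec() helper, not
the vDSO code; the vDSO version works with the pre-scaled cpu_khz and a
CYC2NS_SCALE_FACTOR shift instead of a straight division):

#include <stdint.h>

#define NSEC_PER_SEC	1000000000ULL

/* cpu_khz is the TSC frequency in kHz, i.e. cycles per millisecond */
static void cycles_to_sec_nsec(uint64_t cycles, uint64_t cpu_khz,
			       uint64_t *sec, uint64_t *nsec)
{
	uint64_t hz = cpu_khz * 1000;	/* cycles per second */

	*sec  = cycles / hz;
	/* the remainder is below hz, so this multiply stays within
	 * 64 bits for any realistic CPU frequency */
	*nsec = (cycles % hz) * NSEC_PER_SEC / hz;
}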
The next step is to make this code work even when vDSOs are not available,
and then to find a way to call {get,put}_trace_clock from inside the vDSO
to activate the architecture-specific debug infrastructure if necessary,
but this should probably come in a separate patch.
Signed-off-by: Julien Desfossez <julien.desfossez at polymtl.ca>
---
arch/x86/include/asm/trace-clock.h | 1 +
arch/x86/include/asm/vgtod.h | 2 +
arch/x86/kernel/vsyscall_64.c | 5 ++++
arch/x86/vdso/vclock_gettime.c | 39 ++++++++++++++++++++++++++++++++++++
include/linux/time.h | 1 +
5 files changed, 48 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/trace-clock.h b/arch/x86/include/asm/trace-clock.h
index 01bc2f5..c1fd160 100644
--- a/arch/x86/include/asm/trace-clock.h
+++ b/arch/x86/include/asm/trace-clock.h
@@ -14,6 +14,7 @@
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/atomic.h>
+#include <asm/vgtod.h>
/* Minimum duration of a probe, in cycles */
#define TRACE_CLOCK_MIN_PROBE_DURATION 200
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h
index 3d61e20..7b6f50b 100644
--- a/arch/x86/include/asm/vgtod.h
+++ b/arch/x86/include/asm/vgtod.h
@@ -12,6 +12,8 @@ struct vsyscall_gtod_data {
u32 wall_time_nsec;
int sysctl_enabled;
+ int trace_clock_is_sync;
+ u64 scaled_cpu_khz;
struct timezone sys_tz;
struct { /* extract of a clocksource struct */
cycle_t (*vread)(void);
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index dcbb28c..b5d52ab 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -44,6 +44,8 @@
#include <asm/desc.h>
#include <asm/topology.h>
#include <asm/vgtod.h>
+#include <asm/trace-clock.h>
+#include <asm/timer.h>
#define __vsyscall(nr) \
__attribute__ ((unused, __section__(".vsyscall_" #nr))) notrace
@@ -61,6 +63,7 @@ struct vsyscall_gtod_data __vsyscall_gtod_data __section_vsyscall_gtod_data =
{
.lock = SEQLOCK_UNLOCKED,
.sysctl_enabled = 1,
+ .trace_clock_is_sync = 0,
};
void update_vsyscall_tz(void)
@@ -89,6 +92,8 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm,
vsyscall_gtod_data.wall_time_nsec = wall_time->tv_nsec;
vsyscall_gtod_data.wall_to_monotonic = *wtm;
vsyscall_gtod_data.wall_time_coarse = __current_kernel_time();
+ vsyscall_gtod_data.scaled_cpu_khz = (u64)cpu_khz >> CYC2NS_SCALE_FACTOR;
+ vsyscall_gtod_data.trace_clock_is_sync = _trace_clock_is_sync;
write_sequnlock_irqrestore(&vsyscall_gtod_data.lock, flags);
}
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index ee55754..90f069c 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -22,6 +22,8 @@
#include <asm/hpet.h>
#include <asm/unistd.h>
#include <asm/io.h>
+#include <asm/trace-clock.h>
+#include <asm/timer.h>
#include "vextern.h"
#define gtod vdso_vsyscall_gtod_data
@@ -111,6 +113,37 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts)
return 0;
}
+#ifdef CONFIG_X86
+notrace static noinline int do_trace_clock(struct timespec *ts)
+{
+ cycle_t cycles;
+ unsigned long tmp, freq, seq;
+
+ if (!gtod->trace_clock_is_sync)
+ return -EPERM;
+
+ /* Copy of the version in kernel/tsc.c which we cannot directly access
+ *
+ * Surround the RDTSC by barriers, to make sure it's not
+ * speculated to outside the seqlock critical section and
+ * does not cause time warps:
+ */
+ do {
+ seq = read_seqbegin(&gtod->lock);
+ rdtsc_barrier();
+ cycles = (cycle_t)vget_cycles();
+ rdtsc_barrier();
+ freq = gtod->scaled_cpu_khz;
+ } while (unlikely(read_seqretry(&gtod->lock, seq)));
+
+ tmp = cycles * (USEC_PER_SEC / freq) >> CYC2NS_SCALE_FACTOR;
+ ts->tv_nsec = do_div(tmp, NSEC_PER_SEC);
+ ts->tv_sec = tmp;
+
+ return 0;
+}
+#endif /* CONFIG_X86 */
+
notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
if (likely(gtod->sysctl_enabled))
@@ -127,6 +160,12 @@ notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
return do_realtime_coarse(ts);
case CLOCK_MONOTONIC_COARSE:
return do_monotonic_coarse(ts);
+#ifdef CONFIG_X86
+ case CLOCK_TRACE:
+ return do_trace_clock(ts);
+#endif
+ default:
+ return -EINVAL;
}
return vdso_fallback_gettime(clock, ts);
}
diff --git a/include/linux/time.h b/include/linux/time.h
index 9f15ac7..bf638ff 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -290,6 +290,7 @@ struct itimerval {
#define CLOCK_MONOTONIC_RAW 4
#define CLOCK_REALTIME_COARSE 5
#define CLOCK_MONOTONIC_COARSE 6
+#define CLOCK_TRACE 7
/*
* The IDs of various hardware clocks:
--
1.7.0.4