- /* This code runs in userspace. */
- #define DISABLE_BRANCH_PROFILING
- #include <asm/vgtod.h>
- notrace cycle_t __vsyscall_fn vread_tsc(void)
- {
- 	cycle_t ret;
- 	u64 last;
- 	/*
- 	 * Empirically, a fence (of type that depends on the CPU)
- 	 * before rdtsc is enough to ensure that rdtsc is ordered
- 	 * with respect to loads. The various CPU manuals are unclear
- 	 * as to whether rdtsc can be reordered with later loads,
- 	 * but no one has ever seen it happen.
- 	 */
- 	rdtsc_barrier();
- 	ret = (cycle_t)vget_cycles();
- 	last = VVAR(vsyscall_gtod_data).clock.cycle_last;
- 	if (likely(ret >= last))
- 		return ret;
- 	/*
- 	 * GCC likes to generate cmov here, but this branch is extremely
- 	 * predictable (it's just a function of time and the likely is
- 	 * very likely) and there's a data dependence, so force GCC
- 	 * to generate a branch instead. I don't barrier() because
- 	 * we don't actually need a barrier, and if this function
- 	 * ever gets inlined it will generate worse code.
- 	 */
- 	asm volatile ("");
- 	return last;
- }