vread_tsc_64.c

/* This code runs in userspace. */
#define DISABLE_BRANCH_PROFILING
#include <asm/vgtod.h>

notrace cycle_t __vsyscall_fn vread_tsc(void)
{
        cycle_t ret;
        u64 last;

        /*
         * Empirically, a fence (of type that depends on the CPU)
         * before rdtsc is enough to ensure that rdtsc is ordered
         * with respect to loads. The various CPU manuals are unclear
         * as to whether rdtsc can be reordered with later loads,
         * but no one has ever seen it happen.
         */
        rdtsc_barrier();
        ret = (cycle_t)vget_cycles();

        last = VVAR(vsyscall_gtod_data).clock.cycle_last;

        if (likely(ret >= last))
                return ret;

        /*
         * GCC likes to generate cmov here, but this branch is extremely
         * predictable (it's just a function of time and the likely is
         * very likely) and there's a data dependence, so force GCC
         * to generate a branch instead. I don't use barrier() because
         * we don't actually need a barrier, and if this function
         * ever gets inlined it will generate worse code.
         */
        asm volatile ("");
        return last;
}
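
The fence the first comment refers to is supplied by rdtsc_barrier(). As a rough userspace illustration only (not the kernel's implementation, which patches in LFENCE or MFENCE at boot via its alternatives mechanism depending on the CPU), an ordered TSC read might look like the sketch below; rdtsc_ordered is a hypothetical name, and _mm_lfence()/__rdtsc() are compiler intrinsics standing in for the raw instructions:

#include <stdint.h>
#include <x86intrin.h>

/*
 * Hedged sketch: keep the TSC read from being reordered with
 * earlier loads by issuing a load fence first. The kernel picks
 * LFENCE or MFENCE per-CPU; plain LFENCE is used here for brevity.
 */
static inline uint64_t rdtsc_ordered(void)
{
        _mm_lfence();     /* order rdtsc with respect to prior loads */
        return __rdtsc(); /* GCC/Clang intrinsic emitting RDTSC */
}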
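
The empty asm volatile ("") trick at the tail can also be reproduced outside the kernel. In this minimal standalone sketch (hypothetical function name, not kernel code), the empty asm acts as an optimization barrier that keeps the two returns in separate basic blocks, so the compiler cannot merge them back into a cmov:

#include <stdint.h>

/*
 * Minimal demo of the branch-vs-cmov trick: without the empty asm,
 * GCC may compile this as a cmov, putting a data dependence on
 * 'last' onto the critical path; with it, a highly predictable
 * conditional branch is emitted instead.
 */
uint64_t clamp_to_last(uint64_t ret, uint64_t last)
{
        if (__builtin_expect(ret >= last, 1))
                return ret;
        asm volatile ("");      /* optimization barrier: forbid cmov */
        return last;
}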