paravirt-spinlocks.c

/*
 * Split spinlock implementation out into its own file, so it can be
 * compiled in a FTRACE-compatible way.
 */
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/jump_label.h>

#include <asm/paravirt.h>
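
/*
 * Wrap native_queued_spin_unlock() so PV_CALLEE_SAVE_REGS_THUNK() can
 * generate a __raw_callee_save_* thunk that preserves the registers the
 * C calling convention would otherwise clobber.
 */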
__visible void __native_queued_spin_unlock(struct qspinlock *lock)
{
	native_queued_spin_unlock(lock);
}
PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);
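
/* True while .queued_spin_unlock still points at the native thunk. */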
bool pv_is_native_spin_unlock(void)
{
	return pv_lock_ops.queued_spin_unlock.func ==
		__raw_callee_save___native_queued_spin_unlock;
}
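
/* On bare metal a vCPU is never preempted by a hypervisor. */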
__visible bool __native_vcpu_is_preempted(int cpu)
{
	return false;
}
PV_CALLEE_SAVE_REGS_THUNK(__native_vcpu_is_preempted);
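
/* True while .vcpu_is_preempted still points at the native thunk. */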
bool pv_is_native_vcpu_is_preempted(void)
{
	return pv_lock_ops.vcpu_is_preempted.func ==
		__raw_callee_save___native_vcpu_is_preempted;
}
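
/*
 * Default (native) lock ops.  Hypervisor guests (e.g. KVM, Xen) override
 * these at boot to get paravirtualized queued spinlocks.
 */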
struct pv_lock_ops pv_lock_ops = {
#ifdef CONFIG_SMP
	.queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
	.queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
	.wait = paravirt_nop,
	.kick = paravirt_nop,
	.vcpu_is_preempted = PV_CALLEE_SAVE(__native_vcpu_is_preempted),
#endif /* SMP */
};
EXPORT_SYMBOL(pv_lock_ops);
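
/* Static key, false by default; pv lock init code can flip it on at boot. */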
struct static_key paravirt_ticketlocks_enabled = STATIC_KEY_INIT_FALSE;
EXPORT_SYMBOL(paravirt_ticketlocks_enabled);