sched.c

#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/percpu.h>
#include <linux/irqflags.h>

#include <asm/cpufeature.h>
#include <asm/processor.h>

#ifdef CONFIG_SMP

/* Last APERF/MPERF snapshot taken on this CPU. */
static DEFINE_PER_CPU(struct aperfmperf, old_perf_sched);

static unsigned long scale_aperfmperf(void)
{
        struct aperfmperf val, *old = &__get_cpu_var(old_perf_sched);
        unsigned long ratio, flags;

        /* Snapshot both MSRs back to back, without interruption. */
        local_irq_save(flags);
        get_aperfmperf(&val);
        local_irq_restore(flags);

        /* Ratio of the deltas since the previous snapshot. */
        ratio = calc_aperfmperf_ratio(old, &val);
        *old = val;

        return ratio;
}

unsigned long arch_scale_freq_power(struct sched_domain *sd, int cpu)
{
        /*
         * do aperf/mperf on the cpu level because it includes things
         * like turbo mode, which are relevant to full cores.
         */
        if (boot_cpu_has(X86_FEATURE_APERFMPERF))
                return scale_aperfmperf();

        /*
         * maybe have something cpufreq here
         */

        return default_scale_freq_power(sd, cpu);
}

unsigned long arch_scale_smt_power(struct sched_domain *sd, int cpu)
{
        /*
         * aperf/mperf already includes the smt gain
         */
        if (boot_cpu_has(X86_FEATURE_APERFMPERF))
                return SCHED_LOAD_SCALE;

        return default_scale_smt_power(sd, cpu);
}

#endif /* CONFIG_SMP */
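
For reference, get_aperfmperf() and calc_aperfmperf_ratio() are not defined in this file; they come from <asm/processor.h>. Below is a minimal sketch of how such helpers can be written, assuming the rdmsrl() accessor, the MSR_IA32_APERF/MSR_IA32_MPERF constants, and a two-field struct aperfmperf; treat the details (struct layout, shift width, zero handling) as illustrative rather than the verified header contents.

#include <linux/math64.h>
#include <asm/msr.h>

struct aperfmperf {
        u64 aperf, mperf;
};

/* 2^10 fixed point, so a 1:1 ratio maps to SCHED_LOAD_SCALE (1024). */
#define APERFMPERF_SHIFT 10

static inline void get_aperfmperf(struct aperfmperf *am)
{
        /*
         * While the CPU is in C0, APERF counts cycles at the frequency
         * actually delivered, MPERF at the nominal (TSC) frequency.
         */
        rdmsrl(MSR_IA32_APERF, am->aperf);
        rdmsrl(MSR_IA32_MPERF, am->mperf);
}

static inline unsigned long
calc_aperfmperf_ratio(struct aperfmperf *old, struct aperfmperf *new)
{
        u64 aperf = new->aperf - old->aperf;
        u64 mperf = new->mperf - old->mperf;
        unsigned long ratio = 0;

        /*
         * aperf / (mperf >> 10) ~= (aperf / mperf) << 10, i.e. the
         * delivered/nominal frequency ratio in 2^10 fixed point.
         */
        mperf >>= APERFMPERF_SHIFT;
        if (mperf)
                ratio = div64_u64(aperf, mperf);

        return ratio;
}

With that scaling, arch_scale_freq_power() reports roughly SCHED_LOAD_SCALE (1024) for a CPU running at nominal speed, more under turbo and less when the core is throttled, so the load balancer can weight the CPU's capacity by the frequency it is actually getting.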