hmp.h

/*
 * Copyright (C) 2016 MediaTek Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
 */
/* Heterogeneous multi-processor common utils */
LIST_HEAD(hmp_domains);

/*
 * Use EAS placement while only EAS is enabled, but fall back to HMP
 * when hybrid scheduling is enabled and the system is over-utilized.
 */
static bool sd_overutilized(struct sched_domain *sd);
extern bool sched_smp_initialized;

static inline bool should_hmp(int cpu)
{
#ifdef CONFIG_MTK_SCHED_EAS_POWER_SUPPORT
	struct rq *rq = cpu_rq(cpu);
	struct sched_domain *sd;

	rcu_read_lock();
	sd = rcu_dereference(rq->sd);
	if (sched_feat(ENERGY_AWARE) && sd) {
		/*
		 * EAS is active: switch to HMP only if some sched
		 * domain is over-utilized.
		 */
		for_each_domain(cpu, sd) {
			if (sd_overutilized(sd)) {
				rcu_read_unlock();
				return sched_smp_initialized &&
				       sched_feat(SCHED_HMP);
			}
		}
		rcu_read_unlock();
		return false;
	}
	rcu_read_unlock();
#endif
	return sched_smp_initialized && sched_feat(SCHED_HMP);
}
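
/*
 * Usage sketch (not part of the original file; the caller shape is an
 * assumption): a task-placement hook would typically consult
 * should_hmp() to dispatch between the HMP and EAS paths, e.g.:
 *
 *	if (should_hmp(prev_cpu))
 *		new_cpu = hmp_select_task_rq_fair(sd_flag, p,
 *						  prev_cpu, new_cpu);
 */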
extern int cpu_park(int cpu);

#ifdef CONFIG_SCHED_HMP
/* CPU cluster statistics for task migration control */
#define HMP_GB			(0x1000)
#define HMP_SELECT_RQ		(0x2000)
#define HMP_LB			(0x4000)
#define HMP_MAX_LOAD		(NICE_0_LOAD - 1)
#define __LOAD_AVG_MAX		47742 /* FIXME, maximum possible load avg */

#ifdef CONFIG_SCHED_HMP_PRIO_FILTER
unsigned int hmp_up_prio = NICE_TO_PRIO(CONFIG_SCHED_HMP_PRIO_FILTER_VAL);
#define task_low_priority(prio)	(((prio) >= hmp_up_prio) ? 1 : 0)
#define cfs_nr_dequeuing_low_prio(cpu) \
	cpu_rq(cpu)->cfs.avg.nr_dequeuing_low_prio
#define cfs_reset_nr_dequeuing_low_prio(cpu) \
	(cfs_nr_dequeuing_low_prio(cpu) = 0)
#else
#define task_low_priority(prio)	(0)
#define cfs_reset_nr_dequeuing_low_prio(cpu)
#endif
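
/*
 * Usage sketch (illustrative, not in the original source): with the
 * priority filter enabled, an up-migration check would typically skip
 * tasks whose priority falls at or beyond hmp_up_prio, i.e. tasks too
 * low-priority to be worth pulling to a faster CPU:
 *
 *	if (task_low_priority(p->prio))
 *		return 0;
 */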

/* Schedule entity */
#define se_load(se)		((se)->avg.loadwop_avg)
/* #define se_contrib(se) se->avg.load_avg_contrib */

/* CPU related: load information */
#define cfs_load(cpu)		(cpu_rq(cpu)->cfs.avg.loadwop_avg)
#define cfs_contrib(cpu)	(cpu_rq(cpu)->cfs.avg.loadwop_avg)

/* CPU related: the number of tasks */
#define cfs_nr_normal_prio(cpu)	(cpu_rq(cpu)->cfs.avg.nr_normal_prio)
#define cfs_nr_pending(cpu)	(cpu_rq(cpu)->cfs.avg.nr_pending)
#define cfs_length(cpu)		(cpu_rq(cpu)->cfs.h_nr_running)
#define rq_length(cpu)		(cpu_rq(cpu)->nr_running)
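
/*
 * Usage sketch (hypothetical; src_cpu/dst_cpu and the threshold use are
 * assumptions): the migration heuristics read per-CPU load and queue
 * depth through these accessors, e.g. a source CPU whose CFS load nears
 * HMP_MAX_LOAD while the destination runqueue stays shallow is a
 * natural up-migration candidate:
 *
 *	if (cfs_load(src_cpu) >= HMP_MAX_LOAD && rq_length(dst_cpu) < 2)
 *		force_up = 1;
 */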

inline int hmp_fork_balance(struct task_struct *p, int prev_cpu);
static int hmp_select_task_rq_fair(int sd_flag, struct task_struct *p,
				   int prev_cpu, int new_cpu);
static unsigned int hmp_idle_pull(int this_cpu);
static void hmp_force_up_migration(int this_cpu);
static void hmp_online_cpu(int cpu);
static void hmp_offline_cpu(int cpu);
static inline void
hmp_enqueue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se);
static inline void
hmp_dequeue_entity_load_avg(struct cfs_rq *cfs_rq, struct sched_entity *se);
static inline void hmp_next_up_delay(struct sched_entity *se, int cpu);
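
/*
 * Usage sketch (assumption; the real call sites live in the fair-class
 * scheduler code, not in this header): the periodic balance path would
 * typically drive these hooks, pushing heavy tasks up to faster CPUs
 * and letting an idle fast CPU pull work:
 *
 *	hmp_force_up_migration(this_cpu);
 *	pulled = hmp_idle_pull(this_cpu);
 */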
#else
/* !CONFIG_SCHED_HMP: stub the hooks out so callers need no #ifdefs. */
#define se_load(se) 0
static inline int hmp_fork_balance(struct task_struct *p, int prev_cpu)
{
	return prev_cpu;
}
static inline void hmp_force_up_migration(int this_cpu) {}
static inline int hmp_select_task_rq_fair(int sd_flag, struct task_struct *p,
					  int prev_cpu, int new_cpu)
{
	return new_cpu;
}
static inline void hmp_online_cpu(int cpu) {}
static inline void hmp_offline_cpu(int cpu) {}
static inline int hmp_idle_pull(int this_cpu) { return 0; }
#endif /* CONFIG_SCHED_HMP */