/* sched_plus.h */

/*
 * Copyright (C) 2017 MediaTek Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See http://www.gnu.org/licenses/gpl-2.0.html for more details.
 */
#include "../../drivers/misc/mediatek/base/power/include/mtk_upower.h"
#include "../../drivers/misc/mediatek/include/mt-plat/eas_ctrl.h"

extern unsigned int capacity_margin;
extern int l_plus_cpu;
extern void unthrottle_offline_rt_rqs(struct rq *rq);
extern unsigned long get_cpu_util(int cpu);
extern void init_sched_groups_capacity(int cpu, struct sched_domain *sd);

DECLARE_PER_CPU(struct hmp_domain *, hmp_cpu_domain);
#ifdef CONFIG_SMP
#ifdef CONFIG_ARM64
extern unsigned long arch_scale_get_max_freq(int cpu);
extern unsigned long arch_scale_get_min_freq(int cpu);
#else
static inline unsigned long arch_scale_get_max_freq(int cpu) { return 0; }
static inline unsigned long arch_scale_get_min_freq(int cpu) { return 0; }
#endif
#endif
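/*
 * Presumably a MediaTek-private sched_attr flag bit: user space would set
 * it in sched_attr.sched_flags so that sched_setattr_enhanced() below
 * knows the extended fields are valid.
 */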
#define SCHED_ENHANCED_ATTR 0x40000

int select_task_prefer_cpu(struct task_struct *p, int new_cpu);
int sched_setattr_enhanced(struct task_struct *p,
			   const struct sched_attr *attr);
int task_prefer_little(struct task_struct *p);
int task_prefer_big(struct task_struct *p);
int task_prefer_fit(struct task_struct *p, int cpu);
int task_prefer_match(struct task_struct *p, int cpu);
int task_prefer_match_on_cpu(struct task_struct *p, int src_cpu,
			     int target_cpu);
unsigned long cluster_max_capacity(void);
unsigned long task_uclamped_min_w_ceiling(struct task_struct *p);
unsigned int freq_util(unsigned long util);
#define LB_POLICY_SHIFT	16
#define LB_CPU_MASK	((1 << LB_POLICY_SHIFT) - 1)

#define LB_PREV		(0x0  << LB_POLICY_SHIFT)
#define LB_FORK		(0x1  << LB_POLICY_SHIFT)
#define LB_SMP		(0x2  << LB_POLICY_SHIFT)
#define LB_HMP		(0x4  << LB_POLICY_SHIFT)
#define LB_EAS		(0x8  << LB_POLICY_SHIFT)
#define LB_HINT		(0x10 << LB_POLICY_SHIFT)
#define LB_EAS_AFFINE	(0x18 << LB_POLICY_SHIFT)
#define LB_EAS_LB	(0x28 << LB_POLICY_SHIFT)
#define LB_THERMAL	(0x48 << LB_POLICY_SHIFT)
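/*
 * A placement result packs a policy ID into the bits above
 * LB_POLICY_SHIFT and the chosen CPU into the low bits, so e.g.
 * (LB_EAS | 3) reads as "CPU 3, selected by the EAS path".  A sketch of
 * the decode (variable names are illustrative, not from this file):
 *
 *	int cpu    = result & LB_CPU_MASK;
 *	int policy = result & ~LB_CPU_MASK;
 */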
/* Reasons recorded when a task is migrated between CPUs. */
#define MIGR_LOAD_BALANCE	1
#define MIGR_UP_MIGRATE		2
#define MIGR_DOWN_MIGRATE	3
#define MIGR_IDLE_RUNNING	4
#define MIGR_ROTATION		5
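/*
 * Heavy-task rotation. The constants below presumably mean: once
 * HEAVY_TASK_NUM heavy tasks are contending, a task that has run longer
 * than TASK_ROTATION_THRESHOLD_NS becomes a candidate to be rotated off
 * its CPU so a waiting heavy task gets a turn.
 */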
#define TASK_ROTATION_THRESHOLD_NS	6000000	/* 6 ms */
#define HEAVY_TASK_NUM			4
struct task_rotate_reset_uclamp_work {
	struct work_struct w;
};

extern struct task_rotate_reset_uclamp_work task_rotate_reset_uclamp_works;
extern bool set_uclamp;
extern void task_rotate_work_init(void);
extern void check_for_migration(struct rq *rq, struct task_struct *p);
extern void task_check_for_rotation(struct rq *rq);
extern void set_sched_rotation_enable(bool enable);
/* A CPU is "reserved" while an active load balance is targeting it. */
static inline int is_reserved(int cpu)
{
	struct rq *rq = cpu_rq(cpu);

	return rq->active_balance != 0;
}

/* True if this CPU's original capacity is the system-wide maximum. */
static inline bool is_max_capacity_cpu(int cpu)
{
	return capacity_orig_of(cpu) == SCHED_CAPACITY_SCALE;
}
/* Isolation interface. */
#ifdef CONFIG_HOTPLUG_CPU
extern int sched_isolate_count(const cpumask_t *mask, bool include_offline);
extern int sched_isolate_cpu(int cpu);
extern int sched_deisolate_cpu(int cpu);
extern int sched_deisolate_cpu_unlocked(int cpu);
#else
static inline int sched_isolate_count(const cpumask_t *mask,
				      bool include_offline)
{
	cpumask_t count_mask;

	if (include_offline)
		cpumask_andnot(&count_mask, mask, cpu_online_mask);
	else
		return 0;

	return cpumask_weight(&count_mask);
}

static inline int sched_isolate_cpu(int cpu)
{
	return 0;
}

static inline int sched_deisolate_cpu(int cpu)
{
	return 0;
}

static inline int sched_deisolate_cpu_unlocked(int cpu)
{
	return 0;
}
#endif
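/*
 * Illustrative use only, assuming the usual kernel 0-on-success
 * convention (which the !CONFIG_HOTPLUG_CPU stubs above follow):
 *
 *	if (!sched_isolate_cpu(2)) {
 *		... CPU 2 now receives no newly scheduled tasks ...
 *		sched_deisolate_cpu(2);
 *	}
 */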
/* Isolation request priorities; a lower value presumably takes precedence. */
enum iso_prio_t {ISO_CUSTOMIZE, ISO_TURBO, ISO_SCHED, ISO_UNSET};
extern int set_cpu_isolation(enum iso_prio_t prio, struct cpumask *cpumask_ptr);
extern int unset_cpu_isolation(enum iso_prio_t prio);
extern struct cpumask cpu_all_masks;
extern enum iso_prio_t iso_prio;