cpufreq_interactive.c

/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

#define CONFIG_MODE_AUTO_CHANGE
#define CONFIG_RETENTION_CHANGE

static int active_count;

struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	struct timer_list cpu_slack_timer;
	spinlock_t load_lock; /* protects the next 4 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	unsigned int target_freq;
	unsigned int floor_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	struct rw_semaphore enable_sem;
	int governor_enabled;
	int prev_load;
	int minfreq_boost;
};
static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

/* Hi speed to bump to from lo speed when load burst (default max) */
static unsigned int hispeed_freq;

/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
static unsigned long go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;

/* Sampling down factor to be applied to min_sample_time at max freq */
static unsigned int sampling_down_factor;

/* Target load.  Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
static spinlock_t target_loads_lock;
static unsigned int *target_loads = default_target_loads;
static int ntarget_loads = ARRAY_SIZE(default_target_loads);

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time = DEFAULT_MIN_SAMPLE_TIME;

/*
 * The sample rate of the timer used to increase frequency
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate = DEFAULT_TIMER_RATE;

/* Busy SDF parameters */
#define MIN_BUSY_TIME (100 * USEC_PER_MSEC)

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
	DEFAULT_ABOVE_HISPEED_DELAY };
static spinlock_t above_hispeed_delay_lock;
static unsigned int *above_hispeed_delay = default_above_hispeed_delay;
static int nabove_hispeed_delay = ARRAY_SIZE(default_above_hispeed_delay);

/* Non-zero means indefinite speed boost active */
static int boost_val;
/* Duration of a boost pulse in usecs */
static int boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;

/* End time of boost pulse in ktime converted to usecs */
static u64 boostpulse_endtime;

/*
 * Max additional time to wait in idle, beyond timer_rate, at speeds above
 * minimum before wakeup to reduce speed, or -1 if unnecessary.
 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
static int timer_slack_val = DEFAULT_TIMER_SLACK;

static bool io_is_busy;

#ifdef CONFIG_MODE_AUTO_CHANGE
struct cpufreq_loadinfo {
	unsigned int load;
	unsigned int freq;
	u64 timestamp;
};

static DEFINE_PER_CPU(struct cpufreq_loadinfo, loadinfo);
static spinlock_t mode_lock;

#define MULTI_MODE 2
#define SINGLE_MODE 1
#define NO_MODE 0

static unsigned int mode = 0;
static unsigned int enforced_mode = 0;
static u64 mode_check_timestamp = 0;

#define DEFAULT_MULTI_ENTER_TIME (4 * DEFAULT_TIMER_RATE)
static unsigned long multi_enter_time = DEFAULT_MULTI_ENTER_TIME;
static unsigned long time_in_multi_enter = 0;
static unsigned int multi_enter_load = 4 * DEFAULT_TARGET_LOAD;

#define DEFAULT_MULTI_EXIT_TIME (16 * DEFAULT_TIMER_RATE)
static unsigned long multi_exit_time = DEFAULT_MULTI_EXIT_TIME;
static unsigned long time_in_multi_exit = 0;
static unsigned int multi_exit_load = 4 * DEFAULT_TARGET_LOAD;

#define DEFAULT_SINGLE_ENTER_TIME (8 * DEFAULT_TIMER_RATE)
static unsigned long single_enter_time = DEFAULT_SINGLE_ENTER_TIME;
static unsigned long time_in_single_enter = 0;
static unsigned int single_enter_load = DEFAULT_TARGET_LOAD;

#define DEFAULT_SINGLE_EXIT_TIME (4 * DEFAULT_TIMER_RATE)
static unsigned long single_exit_time = DEFAULT_SINGLE_EXIT_TIME;
static unsigned long time_in_single_exit = 0;
static unsigned int single_exit_load = DEFAULT_TARGET_LOAD;

static unsigned int param_index = 0;
static unsigned int cur_param_index = 0;

#define MAX_PARAM_SET 4 /* ((MULTI_MODE | SINGLE_MODE | NO_MODE) + 1) */
static unsigned int hispeed_freq_set[MAX_PARAM_SET];
static unsigned long go_hispeed_load_set[MAX_PARAM_SET];
static unsigned int *target_loads_set[MAX_PARAM_SET];
static int ntarget_loads_set[MAX_PARAM_SET];
static unsigned long min_sample_time_set[MAX_PARAM_SET];
static unsigned long timer_rate_set[MAX_PARAM_SET];
static unsigned int *above_hispeed_delay_set[MAX_PARAM_SET];
static int nabove_hispeed_delay_set[MAX_PARAM_SET];
static unsigned int sampling_down_factor_set[MAX_PARAM_SET];
#endif /* CONFIG_MODE_AUTO_CHANGE */

#ifdef CONFIG_RETENTION_CHANGE
static void do_toggle_retention(struct work_struct *work);
extern void msm_pm_retention_mode_enable(bool enable);
static struct workqueue_struct *retention_toggle_wq;
static struct work_struct retention_toggle_work;
static int mode_count = 0;
#endif
/*
 * If the max load among the other CPUs is higher than
 * up_threshold_any_cpu_load and if the highest frequency among the other
 * CPUs is higher than up_threshold_any_cpu_freq, then do not let the
 * frequency drop below sync_freq.
 */
static unsigned int up_threshold_any_cpu_load;
static unsigned int sync_freq;
static unsigned int up_threshold_any_cpu_freq;

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event);

#define DYN_DEFER (1)

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};
static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
						  cputime64_t *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = jiffies_to_usecs(cur_wall_time);

	return jiffies_to_usecs(idle_time);
}

#ifdef DYN_DEFER
static inline void timer_set_nondeferrable(struct timer_list *timer)
{
	timer->base =
		((struct tvec_base *)((unsigned long)timer->base &
				      ~TBASE_DEFERRABLE_FLAG));
}

static inline void timer_set_deferrable(struct timer_list *timer)
{
	timer->base =
		((struct tvec_base *)((unsigned long)timer->base |
				      TBASE_DEFERRABLE_FLAG));
}
#endif

static inline cputime64_t get_cpu_idle_time(unsigned int cpu,
					    cputime64_t *wall)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, wall);

	if (idle_time == -1ULL)
		idle_time = get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_is_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
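
/*
 * Note on io_is_busy: when it is 0 (the default here), per-CPU iowait time
 * is added to the idle total above, so time spent waiting on I/O does not
 * count as load.  Writing 1 to the io_is_busy sysfs attribute makes iowait
 * count as busy time instead.
 */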

static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	unsigned long expires;
	unsigned long flags;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(smp_processor_id(),
				  &pcpu->time_in_idle_timestamp);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	expires = jiffies + usecs_to_jiffies(timer_rate);
#ifdef DYN_DEFER
	if (pcpu->target_freq > pcpu->policy->min)
		timer_set_nondeferrable(&pcpu->cpu_timer);
	else
		timer_set_deferrable(&pcpu->cpu_timer);
#endif
	mod_timer_pinned(&pcpu->cpu_timer, expires);

	if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(timer_slack_val);
		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
	}

	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}
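
/*
 * DYN_DEFER note: the sampling timer is kept deferrable while the target
 * frequency is already at the policy minimum, so an idle CPU is not woken
 * just to confirm it should stay at fmin; once the target rises above the
 * minimum the timer becomes non-deferrable so a pending ramp-down is not
 * postponed indefinitely.
 */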

/* The caller shall take enable_sem write semaphore to avoid any timer race.
 * The cpu_timer and cpu_slack_timer must be deactivated when calling this
 * function.
 */
static void cpufreq_interactive_timer_start(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);
	unsigned long flags;

	pcpu->cpu_timer.expires = expires;
	del_timer_sync(&pcpu->cpu_timer);
	add_timer_on(&pcpu->cpu_timer, cpu);
	if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(timer_slack_val);
		pcpu->cpu_slack_timer.expires = expires;
		del_timer_sync(&pcpu->cpu_slack_timer);
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

static unsigned int freq_to_above_hispeed_delay(unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&above_hispeed_delay_lock, flags);

	for (i = 0; i < nabove_hispeed_delay - 1 &&
			freq >= above_hispeed_delay[i+1]; i += 2)
		;

	ret = above_hispeed_delay[i];
	ret = (ret > (1 * USEC_PER_MSEC)) ? (ret - (1 * USEC_PER_MSEC)) : ret;
	spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);

	return ret;
}

static unsigned int freq_to_targetload(unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&target_loads_lock, flags);

	for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2)
		;

	ret = target_loads[i];
	spin_unlock_irqrestore(&target_loads_lock, flags);
	return ret;
}
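
/*
 * Both target_loads and above_hispeed_delay are stored as flat arrays of
 * the form { val0, freq1, val1, freq2, val2, ... }: val0 applies below
 * freq1, val1 from freq1 up to (but not including) freq2, and so on.  As
 * an illustrative example (not a default shipped here), the sysfs string
 * "85 1400000:90 1800000:98" tokenizes to {85, 1400000, 90, 1800000, 98},
 * i.e. a target load of 85 below 1.4 GHz, 90 up to 1.8 GHz and 98 above.
 */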

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
static unsigned int choose_freq(
	struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */
		if (cpufreq_frequency_table_target(
			    pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			    CPUFREQ_RELATION_L, &index))
			break;
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmax - 1, CPUFREQ_RELATION_H,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low.  freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmin + 1, CPUFREQ_RELATION_L,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}
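
/*
 * Illustrative walk-through (numbers assumed, not taken from this file):
 * loadadjfreq is cpu_load * policy->cur in %.kHz, so a CPU at 1000000 kHz
 * measuring 45% load against a flat 90% target load asks the frequency
 * table for loadadjfreq / tl = 45 * 1000000 / 90 = 500000 kHz with
 * CPUFREQ_RELATION_L, then re-evaluates the target load at the frequency
 * returned and repeats until the chosen frequency stops changing (or the
 * freqmin/freqmax bracket collapses).
 */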

static u64 update_load(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	u64 now;
	u64 now_idle;
	unsigned int delta_idle;
	unsigned int delta_time;
	u64 active_time;
#if defined(CONFIG_SEC_PM) || defined(CONFIG_MODE_AUTO_CHANGE)
	unsigned int cur_load = 0;
#endif
#ifdef CONFIG_MODE_AUTO_CHANGE
	struct cpufreq_loadinfo *cur_loadinfo = &per_cpu(loadinfo, cpu);
#endif

	now_idle = get_cpu_idle_time(cpu, &now);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

	if (delta_time <= delta_idle)
		active_time = 0;
	else
		active_time = delta_time - delta_idle;

	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;

#if defined(CONFIG_SEC_PM) || defined(CONFIG_MODE_AUTO_CHANGE)
	cur_load = (unsigned int)(active_time * 100) / delta_time;
#endif
#ifdef CONFIG_SEC_PM
	pcpu->policy->load_at_max = (cur_load * pcpu->policy->cur) /
				    pcpu->policy->cpuinfo.max_freq;
#endif
#ifdef CONFIG_MODE_AUTO_CHANGE
	cur_loadinfo->load = (cur_load * pcpu->policy->cur) /
			     pcpu->policy->cpuinfo.max_freq;
	cur_loadinfo->freq = pcpu->policy->cur;
	cur_loadinfo->timestamp = now;
#endif
	return now;
}

#ifdef CONFIG_MODE_AUTO_CHANGE
static unsigned int check_mode(int cpu, unsigned int cur_mode, u64 now)
{
	int i;
	unsigned int ret = cur_mode, total_load = 0, max_single_load = 0;
	struct cpufreq_loadinfo *cur_loadinfo;

	if (now - mode_check_timestamp < timer_rate - 1000)
		return ret;

	if (now - mode_check_timestamp > timer_rate + 1000)
		mode_check_timestamp = now - timer_rate;

	for_each_online_cpu(i) {
		cur_loadinfo = &per_cpu(loadinfo, i);
		total_load += cur_loadinfo->load;
		if (cur_loadinfo->load > max_single_load)
			max_single_load = cur_loadinfo->load;
	}

	if (!(cur_mode & SINGLE_MODE)) {
		if (max_single_load >= single_enter_load)
			time_in_single_enter += now - mode_check_timestamp;
		else
			time_in_single_enter = 0;

		if (time_in_single_enter >= single_enter_time)
			ret |= SINGLE_MODE;
	}

	if (!(cur_mode & MULTI_MODE)) {
		if (total_load >= multi_enter_load)
			time_in_multi_enter += now - mode_check_timestamp;
		else
			time_in_multi_enter = 0;

		if (time_in_multi_enter >= multi_enter_time)
			ret |= MULTI_MODE;
	}

	if (cur_mode & SINGLE_MODE) {
		if (max_single_load < single_exit_load)
			time_in_single_exit += now - mode_check_timestamp;
		else
			time_in_single_exit = 0;

		if (time_in_single_exit >= single_exit_time)
			ret &= ~SINGLE_MODE;
	}

	if (cur_mode & MULTI_MODE) {
		if (total_load < multi_exit_load)
			time_in_multi_exit += now - mode_check_timestamp;
		else
			time_in_multi_exit = 0;

		if (time_in_multi_exit >= multi_exit_time)
			ret &= ~MULTI_MODE;
	}

	trace_cpufreq_interactive_mode(cpu, total_load,
		time_in_single_enter, time_in_multi_enter,
		time_in_single_exit, time_in_multi_exit, ret);

	if (time_in_single_enter >= single_enter_time)
		time_in_single_enter = 0;
	if (time_in_multi_enter >= multi_enter_time)
		time_in_multi_enter = 0;
	if (time_in_single_exit >= single_exit_time)
		time_in_single_exit = 0;
	if (time_in_multi_exit >= multi_exit_time)
		time_in_multi_exit = 0;

	mode_check_timestamp = now;

	return ret;
}
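
/*
 * Summary of the mode state machine: update_load() records each CPU's load
 * normalized to cpuinfo.max_freq; check_mode() sets SINGLE_MODE once one
 * CPU has stayed at or above single_enter_load for single_enter_time, sets
 * MULTI_MODE once the summed load has stayed at or above multi_enter_load
 * for multi_enter_time, and clears each mode again only after the matching
 * exit load has persisted for the exit time, which is what gives the
 * parameter-set switching its hysteresis.
 */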

static void set_new_param_set(unsigned int index)
{
	unsigned long flags;

	hispeed_freq = hispeed_freq_set[index];
	go_hispeed_load = go_hispeed_load_set[index];

	spin_lock_irqsave(&target_loads_lock, flags);
	target_loads = target_loads_set[index];
	ntarget_loads = ntarget_loads_set[index];
	spin_unlock_irqrestore(&target_loads_lock, flags);

	min_sample_time = min_sample_time_set[index];
	timer_rate = timer_rate_set[index];

	spin_lock_irqsave(&above_hispeed_delay_lock, flags);
	above_hispeed_delay = above_hispeed_delay_set[index];
	nabove_hispeed_delay = nabove_hispeed_delay_set[index];
	spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);

	cur_param_index = index;
}

static void enter_mode(void)
{
#if 1
	set_new_param_set(mode);
#else
	set_new_param_set(1);
#endif
#ifdef CONFIG_RETENTION_CHANGE
	queue_work(retention_toggle_wq, &retention_toggle_work);
#endif
}

static void exit_mode(void)
{
	set_new_param_set(0);
#ifdef CONFIG_RETENTION_CHANGE
	queue_work(retention_toggle_wq, &retention_toggle_work);
#endif
}
#endif

static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;
	bool boosted;
	unsigned long mod_min_sample_time;
	int i, max_load;
	unsigned int max_freq;
	struct cpufreq_interactive_cpuinfo *picpu;
#ifdef CONFIG_MODE_AUTO_CHANGE
	unsigned int new_mode;
#endif

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	if (cpu_is_offline(data))
		goto exit;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);

	if (WARN_ON_ONCE(!delta_time))
		goto rearm;

#ifdef CONFIG_MODE_AUTO_CHANGE
	spin_lock_irqsave(&mode_lock, flags);
	if (enforced_mode)
		new_mode = enforced_mode;
	else
		new_mode = check_mode(data, mode, now);

	if (new_mode != mode) {
		mode = new_mode;
		if (new_mode & MULTI_MODE || new_mode & SINGLE_MODE) {
#ifdef CONFIG_RETENTION_CHANGE
			++mode_count;
#endif
			pr_info("Governor: enter mode 0x%x\n", mode);
			enter_mode();
		} else {
#ifdef CONFIG_RETENTION_CHANGE
			mode_count = 0;
#endif
			pr_info("Governor: exit mode 0x%x\n", mode);
			exit_mode();
		}
	}
	spin_unlock_irqrestore(&mode_lock, flags);
#endif

	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->policy->cur;
	pcpu->prev_load = cpu_load;
	boosted = boost_val || now < boostpulse_endtime;

#ifdef CONFIG_SEC_PM
	pcpu->policy->util = cpu_load;
#endif

	if (cpu_load >= go_hispeed_load || boosted) {
		if (pcpu->policy->cur < hispeed_freq) {
			new_freq = hispeed_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq < hispeed_freq)
				new_freq = hispeed_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);

		if (sync_freq && new_freq < sync_freq) {
			max_load = 0;
			max_freq = 0;

			for_each_online_cpu(i) {
				picpu = &per_cpu(cpuinfo, i);

				if (i == data || picpu->prev_load <
						up_threshold_any_cpu_load)
					continue;

				max_load = max(max_load, picpu->prev_load);
				max_freq = max(max_freq, picpu->target_freq);
			}

			if (max_freq > up_threshold_any_cpu_freq &&
				max_load >= up_threshold_any_cpu_load)
				new_freq = sync_freq;
		}
	}

	if (pcpu->policy->cur >= hispeed_freq &&
	    new_freq > pcpu->policy->cur &&
	    now - pcpu->hispeed_validate_time <
	    freq_to_above_hispeed_delay(pcpu->policy->cur)) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm;
	}

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index))
		goto rearm;

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (sampling_down_factor && pcpu->policy->cur == pcpu->policy->max) {
		mod_min_sample_time = sampling_down_factor;
		pcpu->minfreq_boost = 0;
	} else
		mod_min_sample_time = min_sample_time;

	if (pcpu->minfreq_boost) {
		if (sampling_down_factor &&
		    (pcpu->policy->cur != pcpu->policy->max))
			mod_min_sample_time = 0;
		pcpu->minfreq_boost = 0;
	}

	if (new_freq < pcpu->floor_freq) {
		if (now - pcpu->floor_validate_time < mod_min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off).
	 */
	if (!boosted || new_freq > hispeed_freq) {
		pcpu->floor_freq = new_freq;
		pcpu->floor_validate_time = now;
	}

	if (pcpu->target_freq == new_freq &&
	    pcpu->target_freq <= pcpu->policy->cur) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm_if_notmax;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);

	pcpu->target_freq = new_freq;
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
#ifdef CONFIG_MODE_AUTO_CHANGE
		goto rearm;
#else
		goto exit;
#endif

rearm:
	if (!timer_pending(&pcpu->cpu_timer))
		cpufreq_interactive_timer_resched(pcpu);

exit:
	up_read(&pcpu->enable_sem);
	return;
}

static void cpufreq_interactive_idle_start(void)
{
	int cpu = smp_processor_id();
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	int pending;
	u64 now;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	/* Cancel the timer if cpu is offline */
	if (cpu_is_offline(cpu)) {
		del_timer(&pcpu->cpu_timer);
		del_timer(&pcpu->cpu_slack_timer);
		goto exit;
	}

	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle.  Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending) {
			cpufreq_interactive_timer_resched(pcpu);

			now = ktime_to_us(ktime_get());
			if ((pcpu->policy->cur == pcpu->policy->max) &&
			    (now - pcpu->hispeed_validate_time) >
			    MIN_BUSY_TIME) {
				pcpu->floor_validate_time = now;
			}
		}
	}

exit:
	up_read(&pcpu->enable_sem);
}

static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		cpufreq_interactive_timer_resched(pcpu);
	} else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		del_timer(&pcpu->cpu_slack_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}

	up_read(&pcpu->enable_sem);
}

static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;
			struct cpufreq_interactive_cpuinfo *pjcpu;

			pcpu = &per_cpu(cpuinfo, cpu);
			if (!down_read_trylock(&pcpu->enable_sem))
				continue;
			if (!pcpu->governor_enabled) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			for_each_cpu(j, pcpu->policy->cpus) {
				pjcpu = &per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur) {
				u64 now;

				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
				now = ktime_to_us(ktime_get());
				for_each_cpu(j, pcpu->policy->cpus) {
					pjcpu = &per_cpu(cpuinfo, j);
					pjcpu->hispeed_validate_time = now;
				}
			}
			trace_cpufreq_interactive_setspeed(cpu,
						pcpu->target_freq,
						pcpu->policy->cur);

			up_read(&pcpu->enable_sem);
		}
	}

	return 0;
}

static void cpufreq_interactive_boost(void)
{
	int i;
	int anyboost = 0;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);

		if (pcpu->target_freq < hispeed_freq) {
			pcpu->target_freq = hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->hispeed_validate_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */
		pcpu->floor_freq = hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

	if (anyboost)
		wake_up_process(speedchange_task);
}

static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_cpuinfo *pcpu;
	int cpu;
	unsigned long flags;

	if (val == CPUFREQ_PRECHANGE) {
		pcpu = &per_cpu(cpuinfo, freq->cpu);
		if (!down_read_trylock(&pcpu->enable_sem))
			return 0;
		if (!pcpu->governor_enabled) {
			up_read(&pcpu->enable_sem);
			return 0;
		}

		for_each_cpu(cpu, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, cpu);

			if (cpu != freq->cpu) {
				if (!down_read_trylock(&pjcpu->enable_sem))
					continue;
				if (!pjcpu->governor_enabled) {
					up_read(&pjcpu->enable_sem);
					continue;
				}
			}
			spin_lock_irqsave(&pjcpu->load_lock, flags);
			update_load(cpu);
			spin_unlock_irqrestore(&pjcpu->load_lock, flags);
			if (cpu != freq->cpu)
				up_read(&pjcpu->enable_sem);
		}

		up_read(&pcpu->enable_sem);
	}
	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};

static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
	const char *cp;
	int i;
	int ntokens = 1;
	unsigned int *tokenized_data;
	int err = -EINVAL;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err;

	tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!tokenized_data) {
		err = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
			goto err_kfree;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_kfree;

	*num_tokens = ntokens;
	return tokenized_data;

err_kfree:
	kfree(tokenized_data);
err:
	return ERR_PTR(err);
}
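
/*
 * get_tokenized_data() accepts an odd number of space- or colon-separated
 * unsigned values.  Illustrative example (not a default): writing
 * "80 1500000:95" to target_loads yields the 3-element array
 * {80, 1500000, 95}; an even token count is rejected with -EINVAL.
 */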

static ssize_t show_target_loads(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&target_loads_lock, flags);
#ifdef CONFIG_MODE_AUTO_CHANGE
	for (i = 0; i < ntarget_loads_set[param_index]; i++)
		ret += sprintf(buf + ret, "%u%s",
			       target_loads_set[param_index][i],
			       i & 0x1 ? ":" : " ");
#else
	for (i = 0; i < ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", target_loads[i],
			       i & 0x1 ? ":" : " ");
#endif
	ret = ret - 1;
	ret += sprintf(buf + ret, "\n");
	spin_unlock_irqrestore(&target_loads_lock, flags);
	return ret;
}

static ssize_t store_target_loads(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ntokens;
	unsigned int *new_target_loads = NULL;
	unsigned long flags;
#ifdef CONFIG_MODE_AUTO_CHANGE
	unsigned long flags2;
#endif

	new_target_loads = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_target_loads))
		return PTR_RET(new_target_loads);

#ifdef CONFIG_MODE_AUTO_CHANGE
	spin_lock_irqsave(&mode_lock, flags2);
#endif
	spin_lock_irqsave(&target_loads_lock, flags);
#ifdef CONFIG_MODE_AUTO_CHANGE
	if (target_loads_set[param_index] != default_target_loads)
		kfree(target_loads_set[param_index]);
	target_loads_set[param_index] = new_target_loads;
	ntarget_loads_set[param_index] = ntokens;
	if (cur_param_index == param_index) {
		target_loads = new_target_loads;
		ntarget_loads = ntokens;
	}
#else
	if (target_loads != default_target_loads)
		kfree(target_loads);
	target_loads = new_target_loads;
	ntarget_loads = ntokens;
#endif
	spin_unlock_irqrestore(&target_loads_lock, flags);
#ifdef CONFIG_MODE_AUTO_CHANGE
	spin_unlock_irqrestore(&mode_lock, flags2);
#endif
	return count;
}

static struct global_attr target_loads_attr =
	__ATTR(target_loads, S_IRUGO | S_IWUSR,
		show_target_loads, store_target_loads);

static ssize_t show_above_hispeed_delay(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&above_hispeed_delay_lock, flags);
#ifdef CONFIG_MODE_AUTO_CHANGE
	for (i = 0; i < nabove_hispeed_delay_set[param_index]; i++)
		ret += sprintf(buf + ret, "%u%s",
			       above_hispeed_delay_set[param_index][i],
			       i & 0x1 ? ":" : " ");
#else
	for (i = 0; i < nabove_hispeed_delay; i++)
		ret += sprintf(buf + ret, "%u%s", above_hispeed_delay[i],
			       i & 0x1 ? ":" : " ");
#endif
	ret = ret - 1;
	ret += sprintf(buf + ret, "\n");
	spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);
	return ret;
}

static ssize_t store_above_hispeed_delay(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ntokens, i;
	unsigned int *new_above_hispeed_delay = NULL;
	unsigned long flags;
#ifdef CONFIG_MODE_AUTO_CHANGE
	unsigned long flags2;
#endif

	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_above_hispeed_delay))
		return PTR_RET(new_above_hispeed_delay);

	/* Make sure frequencies are in ascending order. */
	for (i = 3; i < ntokens; i += 2) {
		if (new_above_hispeed_delay[i] <=
		    new_above_hispeed_delay[i - 2]) {
			kfree(new_above_hispeed_delay);
			return -EINVAL;
		}
	}

#ifdef CONFIG_MODE_AUTO_CHANGE
	spin_lock_irqsave(&mode_lock, flags2);
#endif
	spin_lock_irqsave(&above_hispeed_delay_lock, flags);
#ifdef CONFIG_MODE_AUTO_CHANGE
	if (above_hispeed_delay_set[param_index] != default_above_hispeed_delay)
		kfree(above_hispeed_delay_set[param_index]);
	above_hispeed_delay_set[param_index] = new_above_hispeed_delay;
	nabove_hispeed_delay_set[param_index] = ntokens;
	if (cur_param_index == param_index) {
		above_hispeed_delay = new_above_hispeed_delay;
		nabove_hispeed_delay = ntokens;
	}
#else
	if (above_hispeed_delay != default_above_hispeed_delay)
		kfree(above_hispeed_delay);
	above_hispeed_delay = new_above_hispeed_delay;
	nabove_hispeed_delay = ntokens;
#endif
	spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);
#ifdef CONFIG_MODE_AUTO_CHANGE
	spin_unlock_irqrestore(&mode_lock, flags2);
#endif
	return count;
}

static struct global_attr above_hispeed_delay_attr =
	__ATTR(above_hispeed_delay, S_IRUGO | S_IWUSR,
		show_above_hispeed_delay, store_above_hispeed_delay);

static ssize_t show_hispeed_freq(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
#ifdef CONFIG_MODE_AUTO_CHANGE
	return sprintf(buf, "%u\n", hispeed_freq_set[param_index]);
#else
	return sprintf(buf, "%u\n", hispeed_freq);
#endif
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
				  struct attribute *attr, const char *buf,
				  size_t count)
{
	int ret;
	unsigned long val;
#ifdef CONFIG_MODE_AUTO_CHANGE
	unsigned long flags2;
#endif

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
#ifdef CONFIG_MODE_AUTO_CHANGE
	spin_lock_irqsave(&mode_lock, flags2);
	hispeed_freq_set[param_index] = val;
	if (cur_param_index == param_index)
		hispeed_freq = val;
	spin_unlock_irqrestore(&mode_lock, flags2);
#else
	hispeed_freq = val;
#endif
	return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
		show_hispeed_freq, store_hispeed_freq);

static ssize_t show_sampling_down_factor(struct kobject *kobj,
					 struct attribute *attr, char *buf)
{
#ifdef CONFIG_MODE_AUTO_CHANGE
	return sprintf(buf, "%u\n", sampling_down_factor_set[param_index]);
#else
	return sprintf(buf, "%u\n", sampling_down_factor);
#endif
}

static ssize_t store_sampling_down_factor(struct kobject *kobj,
					  struct attribute *attr,
					  const char *buf, size_t count)
{
	int ret;
	unsigned long val;
#ifdef CONFIG_MODE_AUTO_CHANGE
	unsigned long flags2;
#endif

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
#ifdef CONFIG_MODE_AUTO_CHANGE
	spin_lock_irqsave(&mode_lock, flags2);
	sampling_down_factor_set[param_index] = val;
	if (cur_param_index == param_index)
		sampling_down_factor = val;
	spin_unlock_irqrestore(&mode_lock, flags2);
#else
	sampling_down_factor = val;
#endif
	return count;
}

static struct global_attr sampling_down_factor_attr =
	__ATTR(sampling_down_factor, 0644,
		show_sampling_down_factor, store_sampling_down_factor);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
#ifdef CONFIG_MODE_AUTO_CHANGE
	return sprintf(buf, "%lu\n", go_hispeed_load_set[param_index]);
#else
	return sprintf(buf, "%lu\n", go_hispeed_load);
#endif
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
		struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;
#ifdef CONFIG_MODE_AUTO_CHANGE
	unsigned long flags2;
#endif

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
#ifdef CONFIG_MODE_AUTO_CHANGE
	spin_lock_irqsave(&mode_lock, flags2);
	go_hispeed_load_set[param_index] = val;
	if (cur_param_index == param_index)
		go_hispeed_load = val;
	spin_unlock_irqrestore(&mode_lock, flags2);
#else
	go_hispeed_load = val;
#endif
	return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
		show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
#ifdef CONFIG_MODE_AUTO_CHANGE
	return sprintf(buf, "%lu\n", min_sample_time_set[param_index]);
#else
	return sprintf(buf, "%lu\n", min_sample_time);
#endif
}

static ssize_t store_min_sample_time(struct kobject *kobj,
		struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;
#ifdef CONFIG_MODE_AUTO_CHANGE
	unsigned long flags2;
#endif

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
#ifdef CONFIG_MODE_AUTO_CHANGE
	spin_lock_irqsave(&mode_lock, flags2);
	min_sample_time_set[param_index] = val;
	if (cur_param_index == param_index)
		min_sample_time = val;
	spin_unlock_irqrestore(&mode_lock, flags2);
#else
	min_sample_time = val;
#endif
	return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
		show_min_sample_time, store_min_sample_time);

static ssize_t show_timer_rate(struct kobject *kobj,
			       struct attribute *attr, char *buf)
{
#ifdef CONFIG_MODE_AUTO_CHANGE
	return sprintf(buf, "%lu\n", timer_rate_set[param_index]);
#else
	return sprintf(buf, "%lu\n", timer_rate);
#endif
}

static ssize_t store_timer_rate(struct kobject *kobj,
		struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;
#ifdef CONFIG_MODE_AUTO_CHANGE
	unsigned long flags2;
#endif

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
#ifdef CONFIG_MODE_AUTO_CHANGE
	spin_lock_irqsave(&mode_lock, flags2);
	timer_rate_set[param_index] = val;
	if (cur_param_index == param_index)
		timer_rate = val;
	spin_unlock_irqrestore(&mode_lock, flags2);
#else
	timer_rate = val;
#endif
	return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
		show_timer_rate, store_timer_rate);

static ssize_t show_timer_slack(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", timer_slack_val);
}

static ssize_t store_timer_slack(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	long val;	/* signed, so -1 (slack disabled) parses correctly */

	ret = kstrtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	timer_slack_val = val;
	return count;
}

define_one_global_rw(timer_slack);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boost_val = val;

	if (boost_val) {
		trace_cpufreq_interactive_boost("on");
		cpufreq_interactive_boost();
	} else {
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

define_one_global_rw(boost);

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boostpulse_endtime = ktime_to_us(ktime_get()) + boostpulse_duration_val;
	trace_cpufreq_interactive_boost("pulse");
	cpufreq_interactive_boost();
	return count;
}

static struct global_attr boostpulse =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse);
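
/*
 * Typical boostpulse usage (illustrative; the exact sysfs path depends on
 * how cpufreq_global_kobject is exposed): userspace writes any value to
 * /sys/devices/system/cpu/cpufreq/interactive/boostpulse on an input event;
 * cpufreq_interactive_boost() then raises every online CPU to at least
 * hispeed_freq, and the floor logic keeps the speed from dropping below
 * that until boostpulse_duration microseconds have elapsed.
 */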

static ssize_t show_boostpulse_duration(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boostpulse_duration_val = val;
	return count;
}

define_one_global_rw(boostpulse_duration);

static ssize_t show_io_is_busy(struct kobject *kobj,
			       struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", io_is_busy);
}

static ssize_t store_io_is_busy(struct kobject *kobj,
		struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	io_is_busy = val;
	return count;
}

static struct global_attr io_is_busy_attr = __ATTR(io_is_busy, 0644,
		show_io_is_busy, store_io_is_busy);

static ssize_t show_sync_freq(struct kobject *kobj,
			      struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", sync_freq);
}

static ssize_t store_sync_freq(struct kobject *kobj,
		struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	sync_freq = val;
	return count;
}

static struct global_attr sync_freq_attr = __ATTR(sync_freq, 0644,
		show_sync_freq, store_sync_freq);

static ssize_t show_up_threshold_any_cpu_load(struct kobject *kobj,
		struct attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", up_threshold_any_cpu_load);
}

static ssize_t store_up_threshold_any_cpu_load(struct kobject *kobj,
		struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	up_threshold_any_cpu_load = val;
	return count;
}

static struct global_attr up_threshold_any_cpu_load_attr =
	__ATTR(up_threshold_any_cpu_load, 0644,
		show_up_threshold_any_cpu_load,
		store_up_threshold_any_cpu_load);

static ssize_t show_up_threshold_any_cpu_freq(struct kobject *kobj,
		struct attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", up_threshold_any_cpu_freq);
}

static ssize_t store_up_threshold_any_cpu_freq(struct kobject *kobj,
		struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	up_threshold_any_cpu_freq = val;
	return count;
}

static struct global_attr up_threshold_any_cpu_freq_attr =
	__ATTR(up_threshold_any_cpu_freq, 0644,
		show_up_threshold_any_cpu_freq,
		store_up_threshold_any_cpu_freq);
#ifdef CONFIG_MODE_AUTO_CHANGE
#define index(obj_name, obj_attr) \
static ssize_t show_##obj_name(struct kobject *kobj, \
			struct attribute *attr, char *buf) \
{ \
	return sprintf(buf, "%u\n", obj_name); \
} \
 \
static ssize_t store_##obj_name(struct kobject *kobj, \
			struct attribute *attr, const char *buf, \
			size_t count) \
{ \
	int ret; \
	unsigned long val; \
 \
	ret = kstrtoul(buf, 0, &val); \
	if (ret < 0) \
		return ret; \
 \
	val &= MULTI_MODE | SINGLE_MODE | NO_MODE; \
	obj_name = val; \
	return count; \
} \
 \
static struct global_attr obj_attr = __ATTR(obj_name, 0644, \
		show_##obj_name, store_##obj_name);

index(mode, mode_attr);
index(enforced_mode, enforced_mode_attr);
index(param_index, param_index_attr);

#define load(obj_name, obj_attr) \
static ssize_t show_##obj_name(struct kobject *kobj, \
			struct attribute *attr, char *buf) \
{ \
	return sprintf(buf, "%u\n", obj_name); \
} \
 \
static ssize_t store_##obj_name(struct kobject *kobj, \
			struct attribute *attr, const char *buf, \
			size_t count) \
{ \
	int ret; \
	unsigned long val; \
 \
	ret = kstrtoul(buf, 0, &val); \
	if (ret < 0) \
		return ret; \
 \
	obj_name = val; \
	return count; \
} \
 \
static struct global_attr obj_attr = __ATTR(obj_name, 0644, \
		show_##obj_name, store_##obj_name);

load(multi_enter_load, multi_enter_load_attr);
load(multi_exit_load, multi_exit_load_attr);
load(single_enter_load, single_enter_load_attr);
load(single_exit_load, single_exit_load_attr);

#define time(obj_name, obj_attr) \
static ssize_t show_##obj_name(struct kobject *kobj, \
			struct attribute *attr, char *buf) \
{ \
	return sprintf(buf, "%lu\n", obj_name); \
} \
 \
static ssize_t store_##obj_name(struct kobject *kobj, \
			struct attribute *attr, const char *buf, \
			size_t count) \
{ \
	int ret; \
	unsigned long val; \
 \
	ret = kstrtoul(buf, 0, &val); \
	if (ret < 0) \
		return ret; \
 \
	obj_name = val; \
	return count; \
} \
 \
static struct global_attr obj_attr = __ATTR(obj_name, 0644, \
		show_##obj_name, store_##obj_name);

time(multi_enter_time, multi_enter_time_attr);
time(multi_exit_time, multi_exit_time_attr);
time(single_enter_time, single_enter_time_attr);
time(single_exit_time, single_exit_time_attr);
#endif
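/* All tunables exported through the "interactive" attribute group. */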
static struct attribute *interactive_attributes[] = {
	&target_loads_attr.attr,
	&above_hispeed_delay_attr.attr,
	&hispeed_freq_attr.attr,
	&go_hispeed_load_attr.attr,
	&min_sample_time_attr.attr,
	&timer_rate_attr.attr,
	&timer_slack.attr,
	&boost.attr,
	&boostpulse.attr,
	&boostpulse_duration.attr,
	&io_is_busy_attr.attr,
	&sampling_down_factor_attr.attr,
	&sync_freq_attr.attr,
	&up_threshold_any_cpu_load_attr.attr,
	&up_threshold_any_cpu_freq_attr.attr,
#ifdef CONFIG_MODE_AUTO_CHANGE
	&mode_attr.attr,
	&enforced_mode_attr.attr,
	&param_index_attr.attr,
	&multi_enter_load_attr.attr,
	&multi_exit_load_attr.attr,
	&single_enter_load_attr.attr,
	&single_exit_load_attr.attr,
	&multi_enter_time_attr.attr,
	&multi_exit_time_attr.attr,
	&single_enter_time_attr.attr,
	&single_exit_time_attr.attr,
#endif
	NULL,
};

static struct attribute_group interactive_attr_group = {
	.attrs = interactive_attributes,
	.name = "interactive",
};
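/* Route CPU idle entry/exit notifications to the governor's idle handlers. */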
static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};
#ifdef CONFIG_MODE_AUTO_CHANGE
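/*
 * Seed every parameter set with the current global tunables. hispeed_freq_set
 * is left at 0 here and filled in from policy->max at CPUFREQ_GOV_START.
 */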
static void cpufreq_param_set_init(void)
{
	unsigned int i;
	unsigned long flags;

	multi_enter_load = DEFAULT_TARGET_LOAD * num_possible_cpus();

	spin_lock_irqsave(&mode_lock, flags);
	for (i = 0; i < MAX_PARAM_SET; i++) {
		hispeed_freq_set[i] = 0;
		go_hispeed_load_set[i] = go_hispeed_load;
		target_loads_set[i] = target_loads;
		ntarget_loads_set[i] = ntarget_loads;
		min_sample_time_set[i] = min_sample_time;
		timer_rate_set[i] = timer_rate;
		above_hispeed_delay_set[i] = above_hispeed_delay;
		nabove_hispeed_delay_set[i] = nabove_hispeed_delay;
		sampling_down_factor_set[i] = sampling_down_factor;
	}
	spin_unlock_irqrestore(&mode_lock, flags);
}
#endif
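/*
 * Governor entry point: GOV_START arms the per-CPU timers and, on first
 * activation, registers the sysfs group plus the idle and transition
 * notifiers; GOV_STOP undoes that; GOV_LIMITS clamps each CPU's target_freq
 * to the new policy bounds and restarts the timers.
 */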
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		mutex_lock(&gov_lock);

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (!hispeed_freq)
			hispeed_freq = policy->max;
#ifdef CONFIG_MODE_AUTO_CHANGE
		for (j = 0; j < MAX_PARAM_SET; j++)
			if (!hispeed_freq_set[j])
				hispeed_freq_set[j] = policy->max;
#endif
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				ktime_to_us(ktime_get());
			pcpu->hispeed_validate_time =
				pcpu->floor_validate_time;
			down_write(&pcpu->enable_sem);
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			cpufreq_interactive_timer_start(j);
			pcpu->governor_enabled = 1;
			up_write(&pcpu->enable_sem);
		}

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (++active_count > 1) {
			mutex_unlock(&gov_lock);
			return 0;
		}

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactive_attr_group);
		if (rc) {
			mutex_unlock(&gov_lock);
			return rc;
		}

		idle_notifier_register(&cpufreq_interactive_idle_nb);
		cpufreq_register_notifier(&cpufreq_notifier_block,
				CPUFREQ_TRANSITION_NOTIFIER);
		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&gov_lock);

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			down_write(&pcpu->enable_sem);
			pcpu->governor_enabled = 0;
			pcpu->target_freq = 0;
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			up_write(&pcpu->enable_sem);
		}

		if (--active_count > 0) {
			mutex_unlock(&gov_lock);
			return 0;
		}

		cpufreq_unregister_notifier(&cpufreq_notifier_block,
				CPUFREQ_TRANSITION_NOTIFIER);
		idle_notifier_unregister(&cpufreq_interactive_idle_nb);
		sysfs_remove_group(cpufreq_global_kobject,
				&interactive_attr_group);
		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_LIMITS:
		__cpufreq_driver_target(policy,
				policy->cur, CPUFREQ_RELATION_L);

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);

			/* hold write semaphore to avoid race */
			down_write(&pcpu->enable_sem);
			if (pcpu->governor_enabled == 0) {
				up_write(&pcpu->enable_sem);
				continue;
			}

			/* update target_freq first */
			if (policy->max < pcpu->target_freq)
				pcpu->target_freq = policy->max;
			else if (policy->min > pcpu->target_freq)
				pcpu->target_freq = policy->min;

			/*
			 * Reschedule the timers. Delete them first, else the
			 * timer callback may return without re-arming the
			 * timer when it fails to acquire the semaphore; that
			 * race could leave the timer stopped unexpectedly.
			 */
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			cpufreq_interactive_timer_start(j);
			pcpu->minfreq_boost = 1;
			up_write(&pcpu->enable_sem);
		}
		break;
	}
	return 0;
}
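/*
 * The slack timer is a regular (non-deferrable) timer, so its expiry can wake
 * an idle CPU; the callback itself intentionally does nothing - the wakeup
 * alone is what lets the governor re-evaluate the speed.
 */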
static void cpufreq_interactive_nop_timer(unsigned long data)
{
}
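/*
 * Module init: set up per-CPU timers, locks and (optionally) the retention
 * workqueue, then spawn the RT "cfinteractive" thread that applies speed
 * changes, and finally register the governor.
 */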
static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };

	/* Initialize per-CPU timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
		init_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
		spin_lock_init(&pcpu->load_lock);
		init_rwsem(&pcpu->enable_sem);
	}

	spin_lock_init(&target_loads_lock);
	spin_lock_init(&speedchange_cpumask_lock);
	spin_lock_init(&above_hispeed_delay_lock);
#ifdef CONFIG_MODE_AUTO_CHANGE
	spin_lock_init(&mode_lock);
	cpufreq_param_set_init();
#endif
#ifdef CONFIG_RETENTION_CHANGE
	retention_toggle_wq = alloc_workqueue("retentionToggle_wq", WQ_HIGHPRI, 0);
	if (!retention_toggle_wq)
		pr_info("retention toggle workqueue init error\n");
	INIT_WORK(&retention_toggle_work, do_toggle_retention);
#endif
	mutex_init(&gov_lock);

	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}
#ifdef CONFIG_RETENTION_CHANGE
static void do_toggle_retention(struct work_struct *work)
{
	if (mode_count == 1)
		msm_pm_retention_mode_enable(0);
	else if (mode_count == 0)
		msm_pm_retention_mode_enable(1);
}
#endif /* CONFIG_RETENTION_CHANGE */
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif
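/*
 * Module exit: unregister the governor, stop the speedchange thread, and free
 * above_hispeed_delay only if it no longer points at the static default table.
 */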
static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);

	if (above_hispeed_delay != default_above_hispeed_delay)
		kfree(above_hispeed_delay);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"Latency sensitive workloads");
MODULE_LICENSE("GPL");