/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

#define CONFIG_MODE_AUTO_CHANGE
#define CONFIG_RETENTION_CHANGE

static int active_count;

struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	struct timer_list cpu_slack_timer;
	spinlock_t load_lock; /* protects the next 4 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	unsigned int target_freq;
	unsigned int floor_freq;
	u64 floor_validate_time;
	u64 hispeed_validate_time;
	struct rw_semaphore enable_sem;
	int governor_enabled;
	int prev_load;
	int minfreq_boost;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

/* Hi speed to bump to from lo speed when load bursts (default max) */
static unsigned int hispeed_freq;

/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
static unsigned long go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;

/* Sampling down factor to be applied to min_sample_time at max freq */
static unsigned int sampling_down_factor;

/* Target load. Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
static spinlock_t target_loads_lock;
static unsigned int *target_loads = default_target_loads;
static int ntarget_loads = ARRAY_SIZE(default_target_loads);

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time = DEFAULT_MIN_SAMPLE_TIME;

/*
 * The sample rate of the timer used to increase frequency
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate = DEFAULT_TIMER_RATE;

/* Busy SDF parameters */
#define MIN_BUSY_TIME (100 * USEC_PER_MSEC)

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
	DEFAULT_ABOVE_HISPEED_DELAY };
static spinlock_t above_hispeed_delay_lock;
static unsigned int *above_hispeed_delay = default_above_hispeed_delay;
static int nabove_hispeed_delay = ARRAY_SIZE(default_above_hispeed_delay);

/* Non-zero means indefinite speed boost active */
static int boost_val;

/* Duration of a boost pulse in usecs */
static int boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;

/* End time of boost pulse in ktime converted to usecs */
static u64 boostpulse_endtime;

/*
 * Max additional time to wait in idle, beyond timer_rate, at speeds above
 * minimum before wakeup to reduce speed, or -1 if unnecessary.
 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
static int timer_slack_val = DEFAULT_TIMER_SLACK;

static bool io_is_busy;

#ifdef CONFIG_MODE_AUTO_CHANGE
struct cpufreq_loadinfo {
	unsigned int load;
	unsigned int freq;
	u64 timestamp;
};
static DEFINE_PER_CPU(struct cpufreq_loadinfo, loadinfo);
static spinlock_t mode_lock;

#define MULTI_MODE 2
#define SINGLE_MODE 1
#define NO_MODE 0

static unsigned int mode = 0;
static unsigned int enforced_mode = 0;
static u64 mode_check_timestamp = 0;

#define DEFAULT_MULTI_ENTER_TIME (4 * DEFAULT_TIMER_RATE)
static unsigned long multi_enter_time = DEFAULT_MULTI_ENTER_TIME;
static unsigned long time_in_multi_enter = 0;
static unsigned int multi_enter_load = 4 * DEFAULT_TARGET_LOAD;

#define DEFAULT_MULTI_EXIT_TIME (16 * DEFAULT_TIMER_RATE)
static unsigned long multi_exit_time = DEFAULT_MULTI_EXIT_TIME;
static unsigned long time_in_multi_exit = 0;
static unsigned int multi_exit_load = 4 * DEFAULT_TARGET_LOAD;

#define DEFAULT_SINGLE_ENTER_TIME (8 * DEFAULT_TIMER_RATE)
static unsigned long single_enter_time = DEFAULT_SINGLE_ENTER_TIME;
static unsigned long time_in_single_enter = 0;
static unsigned int single_enter_load = DEFAULT_TARGET_LOAD;

#define DEFAULT_SINGLE_EXIT_TIME (4 * DEFAULT_TIMER_RATE)
static unsigned long single_exit_time = DEFAULT_SINGLE_EXIT_TIME;
static unsigned long time_in_single_exit = 0;
static unsigned int single_exit_load = DEFAULT_TARGET_LOAD;

static unsigned int param_index = 0;
static unsigned int cur_param_index = 0;

#define MAX_PARAM_SET 4 /* ((MULTI_MODE | SINGLE_MODE | NO_MODE) + 1) */
static unsigned int hispeed_freq_set[MAX_PARAM_SET];
static unsigned long go_hispeed_load_set[MAX_PARAM_SET];
static unsigned int *target_loads_set[MAX_PARAM_SET];
static int ntarget_loads_set[MAX_PARAM_SET];
static unsigned long min_sample_time_set[MAX_PARAM_SET];
static unsigned long timer_rate_set[MAX_PARAM_SET];
static unsigned int *above_hispeed_delay_set[MAX_PARAM_SET];
static int nabove_hispeed_delay_set[MAX_PARAM_SET];
static unsigned int sampling_down_factor_set[MAX_PARAM_SET];
#endif /* CONFIG_MODE_AUTO_CHANGE */

#ifdef CONFIG_RETENTION_CHANGE
static void do_toggle_retention(struct work_struct *work);
extern void msm_pm_retention_mode_enable(bool enable);
static struct workqueue_struct *retention_toggle_wq;
static struct work_struct retention_toggle_work;
static int mode_count = 0;
#endif

/*
 * If the max load among other CPUs is higher than up_threshold_any_cpu_load
 * and if the highest frequency among the other CPUs is higher than
 * up_threshold_any_cpu_freq then do not let the frequency drop below
 * sync_freq
 */
static unsigned int up_threshold_any_cpu_load;
static unsigned int sync_freq;
static unsigned int up_threshold_any_cpu_freq;

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event);

#define DYN_DEFER (1)

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};
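
/*
 * Fallback idle-time accounting: when per-cpu idle time from the scheduler
 * is unavailable, derive idle time as wall time minus the busy time
 * recorded in kcpustat (user, system, irq, softirq, steal, nice).
 */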
static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
						  cputime64_t *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = jiffies_to_usecs(cur_wall_time);

	return jiffies_to_usecs(idle_time);
}
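
/*
 * With DYN_DEFER, the per-cpu sampling timer is made deferrable while the
 * CPU already sits at its minimum frequency (so an idle CPU is not woken
 * just to re-sample) and non-deferrable otherwise, by toggling the
 * TBASE_DEFERRABLE_FLAG bit carried in the timer base pointer.
 */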
#ifdef DYN_DEFER
static inline void timer_set_nondeferrable(struct timer_list *timer)
{
	timer->base =
		((struct tvec_base *)((unsigned long)timer->base &
				      ~TBASE_DEFERRABLE_FLAG));
}

static inline void timer_set_deferrable(struct timer_list *timer)
{
	timer->base =
		((struct tvec_base *)((unsigned long)timer->base |
				      TBASE_DEFERRABLE_FLAG));
}
#endif

static inline cputime64_t get_cpu_idle_time(unsigned int cpu,
					    cputime64_t *wall)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, wall);

	if (idle_time == -1ULL)
		idle_time = get_cpu_idle_time_jiffy(cpu, wall);
	else if (!io_is_busy)
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
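
/*
 * Re-arm the sampling timer (and the optional slack timer) on the current
 * CPU and snapshot idle time so the next sample measures load from here.
 */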
static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	unsigned long expires;
	unsigned long flags;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(smp_processor_id(),
				  &pcpu->time_in_idle_timestamp);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	expires = jiffies + usecs_to_jiffies(timer_rate);
#ifdef DYN_DEFER
	if (pcpu->target_freq > pcpu->policy->min)
		timer_set_nondeferrable(&pcpu->cpu_timer);
	else
		timer_set_deferrable(&pcpu->cpu_timer);
#endif
	mod_timer_pinned(&pcpu->cpu_timer, expires);

	if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(timer_slack_val);
		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
	}

	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

/* The caller shall take enable_sem write semaphore to avoid any timer race.
 * The cpu_timer and cpu_slack_timer must be deactivated when calling this
 * function.
 */
static void cpufreq_interactive_timer_start(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);
	unsigned long flags;

	pcpu->cpu_timer.expires = expires;
	del_timer_sync(&pcpu->cpu_timer);
	add_timer_on(&pcpu->cpu_timer, cpu);
	if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(timer_slack_val);
		pcpu->cpu_slack_timer.expires = expires;
		del_timer_sync(&pcpu->cpu_slack_timer);
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

static unsigned int freq_to_above_hispeed_delay(unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&above_hispeed_delay_lock, flags);

	for (i = 0; i < nabove_hispeed_delay - 1 &&
			freq >= above_hispeed_delay[i+1]; i += 2)
		;

	ret = above_hispeed_delay[i];
	ret = (ret > (1 * USEC_PER_MSEC)) ? (ret - (1 * USEC_PER_MSEC)) : ret;
	spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);

	return ret;
}

static unsigned int freq_to_targetload(unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&target_loads_lock, flags);

	for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2)
		;

	ret = target_loads[i];
	spin_unlock_irqrestore(&target_loads_lock, flags);
	return ret;
}
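
/*
 * Both tables above are stored as a flat array of alternating values and
 * frequency thresholds ("value [freq:value [freq:value ...]]"), which is
 * why the lookups step by two.  For example (hypothetical values), writing
 * "85 1026000:90 1728000:99" to target_loads yields the array
 * {85, 1026000, 90, 1728000, 99}: target load 85 below 1026 MHz, 90 up to
 * 1728 MHz, and 99 at or above it.
 */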
/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
static unsigned int choose_freq(
	struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */
		if (cpufreq_frequency_table_target(
			    pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			    CPUFREQ_RELATION_L, &index))
			break;
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmax - 1, CPUFREQ_RELATION_H,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low.  freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmin + 1, CPUFREQ_RELATION_L,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}
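
/*
 * Sample idle/busy time for one CPU and accumulate active_time * cur_freq
 * into cputime_speedadj; the governor timer later divides this by elapsed
 * time to obtain a load figure already scaled to the current frequency.
 */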
static u64 update_load(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	u64 now;
	u64 now_idle;
	unsigned int delta_idle;
	unsigned int delta_time;
	u64 active_time;
#if defined(CONFIG_SEC_PM) || defined(CONFIG_MODE_AUTO_CHANGE)
	unsigned int cur_load = 0;
#endif
#ifdef CONFIG_MODE_AUTO_CHANGE
	struct cpufreq_loadinfo *cur_loadinfo = &per_cpu(loadinfo, cpu);
#endif
	now_idle = get_cpu_idle_time(cpu, &now);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

	if (delta_time <= delta_idle)
		active_time = 0;
	else
		active_time = delta_time - delta_idle;

	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;

#if defined(CONFIG_SEC_PM) || defined(CONFIG_MODE_AUTO_CHANGE)
	cur_load = (unsigned int)(active_time * 100) / delta_time;
#endif
#ifdef CONFIG_SEC_PM
	pcpu->policy->load_at_max = (cur_load * pcpu->policy->cur) /
				    pcpu->policy->cpuinfo.max_freq;
#endif
#ifdef CONFIG_MODE_AUTO_CHANGE
	cur_loadinfo->load = (cur_load * pcpu->policy->cur) /
			     pcpu->policy->cpuinfo.max_freq;
	cur_loadinfo->freq = pcpu->policy->cur;
	cur_loadinfo->timestamp = now;
#endif
	return now;
}
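
/*
 * Mode auto-change: check_mode() inspects the per-cpu loads (normalized to
 * cpuinfo.max_freq) of all online CPUs roughly once per timer_rate and sets
 * or clears the SINGLE_MODE / MULTI_MODE bits once the corresponding
 * enter/exit load thresholds have held for their configured durations.
 */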
#ifdef CONFIG_MODE_AUTO_CHANGE
static unsigned int check_mode(int cpu, unsigned int cur_mode, u64 now)
{
	int i;
	unsigned int ret = cur_mode, total_load = 0, max_single_load = 0;
	struct cpufreq_loadinfo *cur_loadinfo;

	if (now - mode_check_timestamp < timer_rate - 1000)
		return ret;

	if (now - mode_check_timestamp > timer_rate + 1000)
		mode_check_timestamp = now - timer_rate;

	for_each_online_cpu(i) {
		cur_loadinfo = &per_cpu(loadinfo, i);
		total_load += cur_loadinfo->load;
		if (cur_loadinfo->load > max_single_load)
			max_single_load = cur_loadinfo->load;
	}

	if (!(cur_mode & SINGLE_MODE)) {
		if (max_single_load >= single_enter_load)
			time_in_single_enter += now - mode_check_timestamp;
		else
			time_in_single_enter = 0;

		if (time_in_single_enter >= single_enter_time)
			ret |= SINGLE_MODE;
	}

	if (!(cur_mode & MULTI_MODE)) {
		if (total_load >= multi_enter_load)
			time_in_multi_enter += now - mode_check_timestamp;
		else
			time_in_multi_enter = 0;

		if (time_in_multi_enter >= multi_enter_time)
			ret |= MULTI_MODE;
	}

	if (cur_mode & SINGLE_MODE) {
		if (max_single_load < single_exit_load)
			time_in_single_exit += now - mode_check_timestamp;
		else
			time_in_single_exit = 0;

		if (time_in_single_exit >= single_exit_time)
			ret &= ~SINGLE_MODE;
	}

	if (cur_mode & MULTI_MODE) {
		if (total_load < multi_exit_load)
			time_in_multi_exit += now - mode_check_timestamp;
		else
			time_in_multi_exit = 0;

		if (time_in_multi_exit >= multi_exit_time)
			ret &= ~MULTI_MODE;
	}

	trace_cpufreq_interactive_mode(cpu, total_load,
		time_in_single_enter, time_in_multi_enter,
		time_in_single_exit, time_in_multi_exit, ret);

	if (time_in_single_enter >= single_enter_time)
		time_in_single_enter = 0;
	if (time_in_multi_enter >= multi_enter_time)
		time_in_multi_enter = 0;
	if (time_in_single_exit >= single_exit_time)
		time_in_single_exit = 0;
	if (time_in_multi_exit >= multi_exit_time)
		time_in_multi_exit = 0;

	mode_check_timestamp = now;
	return ret;
}

static void set_new_param_set(unsigned int index)
{
	unsigned long flags;

	hispeed_freq = hispeed_freq_set[index];
	go_hispeed_load = go_hispeed_load_set[index];

	spin_lock_irqsave(&target_loads_lock, flags);
	target_loads = target_loads_set[index];
	ntarget_loads = ntarget_loads_set[index];
	spin_unlock_irqrestore(&target_loads_lock, flags);

	min_sample_time = min_sample_time_set[index];
	timer_rate = timer_rate_set[index];

	spin_lock_irqsave(&above_hispeed_delay_lock, flags);
	above_hispeed_delay = above_hispeed_delay_set[index];
	nabove_hispeed_delay = nabove_hispeed_delay_set[index];
	spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);

	cur_param_index = index;
}

static void enter_mode(void)
{
#if 1
	set_new_param_set(mode);
#else
	set_new_param_set(1);
#endif
#ifdef CONFIG_RETENTION_CHANGE
	queue_work(retention_toggle_wq, &retention_toggle_work);
#endif
}

static void exit_mode(void)
{
	set_new_param_set(0);
#ifdef CONFIG_RETENTION_CHANGE
	queue_work(retention_toggle_wq, &retention_toggle_work);
#endif
}
#endif
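
/*
 * Main sampling routine, run from the per-cpu timer: derive the load since
 * the last sample, pick a new target frequency (honouring hispeed, boost,
 * sync_freq and the floor/min_sample_time rules), and hand the CPU to the
 * speedchange task if the target changed.
 */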
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;
	bool boosted;
	unsigned long mod_min_sample_time;
	int i, max_load;
	unsigned int max_freq;
	struct cpufreq_interactive_cpuinfo *picpu;
#ifdef CONFIG_MODE_AUTO_CHANGE
	unsigned int new_mode;
#endif
	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);

	if (WARN_ON_ONCE(!delta_time))
		goto rearm;

#ifdef CONFIG_MODE_AUTO_CHANGE
	spin_lock_irqsave(&mode_lock, flags);
	if (enforced_mode)
		new_mode = enforced_mode;
	else
		new_mode = check_mode(data, mode, now);
	if (new_mode != mode) {
		mode = new_mode;
		if (new_mode & MULTI_MODE || new_mode & SINGLE_MODE) {
#ifdef CONFIG_RETENTION_CHANGE
			++mode_count;
#endif
			pr_info("Governor: enter mode 0x%x\n", mode);
			enter_mode();
		} else {
#ifdef CONFIG_RETENTION_CHANGE
			mode_count = 0;
#endif
			pr_info("Governor: exit mode 0x%x\n", mode);
			exit_mode();
		}
	}
	spin_unlock_irqrestore(&mode_lock, flags);
#endif

	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->target_freq;
	pcpu->prev_load = cpu_load;
	boosted = boost_val || now < boostpulse_endtime;

#ifdef CONFIG_SEC_PM
	pcpu->policy->util = cpu_load;
#endif

	if (cpu_load >= go_hispeed_load || boosted) {
		if (pcpu->target_freq < hispeed_freq) {
			new_freq = hispeed_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq < hispeed_freq)
				new_freq = hispeed_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);

		if (sync_freq && new_freq < sync_freq) {
			max_load = 0;
			max_freq = 0;

			for_each_online_cpu(i) {
				picpu = &per_cpu(cpuinfo, i);

				if (i == data || picpu->prev_load <
						up_threshold_any_cpu_load)
					continue;

				max_load = max(max_load, picpu->prev_load);
				max_freq = max(max_freq, picpu->target_freq);
			}

			if (max_freq > up_threshold_any_cpu_freq &&
				max_load >= up_threshold_any_cpu_load)
				new_freq = sync_freq;
		}
	}

	if (pcpu->target_freq >= hispeed_freq &&
	    new_freq > pcpu->target_freq &&
	    now - pcpu->hispeed_validate_time <
	    freq_to_above_hispeed_delay(pcpu->target_freq)) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm;
	}

	pcpu->hispeed_validate_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index))
		goto rearm;

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (sampling_down_factor && pcpu->policy->cur == pcpu->policy->max) {
		mod_min_sample_time = sampling_down_factor;
		pcpu->minfreq_boost = 0;
	} else
		mod_min_sample_time = min_sample_time;

	if (pcpu->minfreq_boost) {
		mod_min_sample_time = 0;
		pcpu->minfreq_boost = 0;
	}

	if (new_freq < pcpu->floor_freq) {
		if (now - pcpu->floor_validate_time < mod_min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq.  If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off).
	 */
	if (!boosted || new_freq > hispeed_freq) {
		pcpu->floor_freq = new_freq;
		pcpu->floor_validate_time = now;
	}

	if (pcpu->target_freq == new_freq &&
			pcpu->target_freq <= pcpu->policy->cur) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		goto rearm_if_notmax;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);
	pcpu->target_freq = new_freq;
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
#ifdef CONFIG_MODE_AUTO_CHANGE
		goto rearm;
#else
		goto exit;
#endif

rearm:
	if (!timer_pending(&pcpu->cpu_timer))
		cpufreq_interactive_timer_resched(pcpu);

exit:
	up_read(&pcpu->enable_sem);
	return;
}

static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;
	u64 now;

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle.  Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending) {
			cpufreq_interactive_timer_resched(pcpu);

			now = ktime_to_us(ktime_get());
			if ((pcpu->policy->cur == pcpu->policy->max) &&
				(now - pcpu->hispeed_validate_time) >
							MIN_BUSY_TIME) {
				pcpu->floor_validate_time = now;
			}
		}
	}

	up_read(&pcpu->enable_sem);
}

static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		cpufreq_interactive_timer_resched(pcpu);
	} else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		del_timer(&pcpu->cpu_slack_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}

	up_read(&pcpu->enable_sem);
}
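
/*
 * RT kthread that performs the actual frequency transitions: for every CPU
 * flagged in speedchange_cpumask it applies the highest target_freq among
 * the CPUs sharing that policy via __cpufreq_driver_target().
 */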
static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			if (!down_read_trylock(&pcpu->enable_sem))
				continue;
			if (!pcpu->governor_enabled) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			trace_cpufreq_interactive_setspeed(cpu,
						pcpu->target_freq,
						pcpu->policy->cur);

			up_read(&pcpu->enable_sem);
		}
	}

	return 0;
}
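
/*
 * Bump every online CPU whose target is below hispeed_freq up to
 * hispeed_freq, set the floor so it is held there for min_sample_time,
 * and kick the speedchange task if anything changed.
 */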
static void cpufreq_interactive_boost(void)
{
	int i;
	int anyboost = 0;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);

		if (pcpu->target_freq < hispeed_freq) {
			pcpu->target_freq = hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->hispeed_validate_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */
		pcpu->floor_freq = hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

	if (anyboost)
		wake_up_process(speedchange_task);
}

static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_cpuinfo *pcpu;
	int cpu;
	unsigned long flags;

	if (val == CPUFREQ_POSTCHANGE) {
		pcpu = &per_cpu(cpuinfo, freq->cpu);
		if (!down_read_trylock(&pcpu->enable_sem))
			return 0;
		if (!pcpu->governor_enabled) {
			up_read(&pcpu->enable_sem);
			return 0;
		}

		for_each_cpu(cpu, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, cpu);
			if (cpu != freq->cpu) {
				if (!down_read_trylock(&pjcpu->enable_sem))
					continue;
				if (!pjcpu->governor_enabled) {
					up_read(&pjcpu->enable_sem);
					continue;
				}
			}
			spin_lock_irqsave(&pjcpu->load_lock, flags);
			update_load(cpu);
			spin_unlock_irqrestore(&pjcpu->load_lock, flags);
			if (cpu != freq->cpu)
				up_read(&pjcpu->enable_sem);
		}

		up_read(&pcpu->enable_sem);
	}
	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};
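
/*
 * Parse a sysfs buffer of unsigned ints separated by spaces or ':' into a
 * kmalloc'd array; the token count must be odd so the result always forms
 * a valid "value [freq:value ...]" table.  Returns the array (caller
 * frees) or an ERR_PTR on bad input or allocation failure.
 */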
static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
	const char *cp;
	int i;
	int ntokens = 1;
	unsigned int *tokenized_data;
	int err = -EINVAL;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err;

	tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!tokenized_data) {
		err = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
			goto err_kfree;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_kfree;

	*num_tokens = ntokens;
	return tokenized_data;

err_kfree:
	kfree(tokenized_data);
err:
	return ERR_PTR(err);
}

static ssize_t show_target_loads(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&target_loads_lock, flags);

#ifdef CONFIG_MODE_AUTO_CHANGE
	for (i = 0; i < ntarget_loads_set[param_index]; i++)
		ret += sprintf(buf + ret, "%u%s",
			       target_loads_set[param_index][i],
			       i & 0x1 ? ":" : " ");
#else
	for (i = 0; i < ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", target_loads[i],
			       i & 0x1 ? ":" : " ");
#endif
	ret = ret - 1;
	ret += sprintf(buf + ret, "\n");
	spin_unlock_irqrestore(&target_loads_lock, flags);
	return ret;
}

static ssize_t store_target_loads(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ntokens;
	unsigned int *new_target_loads = NULL;
	unsigned long flags;
#ifdef CONFIG_MODE_AUTO_CHANGE
	unsigned long flags2;
#endif
	new_target_loads = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_target_loads))
		return PTR_RET(new_target_loads);

#ifdef CONFIG_MODE_AUTO_CHANGE
	spin_lock_irqsave(&mode_lock, flags2);
#endif
	spin_lock_irqsave(&target_loads_lock, flags);
#ifdef CONFIG_MODE_AUTO_CHANGE
	if (target_loads_set[param_index] != default_target_loads)
		kfree(target_loads_set[param_index]);
	target_loads_set[param_index] = new_target_loads;
	ntarget_loads_set[param_index] = ntokens;
	if (cur_param_index == param_index) {
		target_loads = new_target_loads;
		ntarget_loads = ntokens;
	}
#else
	if (target_loads != default_target_loads)
		kfree(target_loads);
	target_loads = new_target_loads;
	ntarget_loads = ntokens;
#endif
	spin_unlock_irqrestore(&target_loads_lock, flags);
#ifdef CONFIG_MODE_AUTO_CHANGE
	spin_unlock_irqrestore(&mode_lock, flags2);
#endif
	return count;
}

static struct global_attr target_loads_attr =
	__ATTR(target_loads, S_IRUGO | S_IWUSR,
		show_target_loads, store_target_loads);

static ssize_t show_above_hispeed_delay(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&above_hispeed_delay_lock, flags);

#ifdef CONFIG_MODE_AUTO_CHANGE
	for (i = 0; i < nabove_hispeed_delay_set[param_index]; i++)
		ret += sprintf(buf + ret, "%u%s",
			       above_hispeed_delay_set[param_index][i],
			       i & 0x1 ? ":" : " ");
#else
	for (i = 0; i < nabove_hispeed_delay; i++)
		ret += sprintf(buf + ret, "%u%s", above_hispeed_delay[i],
			       i & 0x1 ? ":" : " ");
#endif
	ret = ret - 1;
	ret += sprintf(buf + ret, "\n");
	spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);
	return ret;
}

static ssize_t store_above_hispeed_delay(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ntokens;
	unsigned int *new_above_hispeed_delay = NULL;
	unsigned long flags;
#ifdef CONFIG_MODE_AUTO_CHANGE
	unsigned long flags2;
#endif
	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_above_hispeed_delay))
		return PTR_RET(new_above_hispeed_delay);

#ifdef CONFIG_MODE_AUTO_CHANGE
	spin_lock_irqsave(&mode_lock, flags2);
#endif
	spin_lock_irqsave(&above_hispeed_delay_lock, flags);
#ifdef CONFIG_MODE_AUTO_CHANGE
	if (above_hispeed_delay_set[param_index] != default_above_hispeed_delay)
		kfree(above_hispeed_delay_set[param_index]);
	above_hispeed_delay_set[param_index] = new_above_hispeed_delay;
	nabove_hispeed_delay_set[param_index] = ntokens;
	if (cur_param_index == param_index) {
		above_hispeed_delay = new_above_hispeed_delay;
		nabove_hispeed_delay = ntokens;
	}
#else
	if (above_hispeed_delay != default_above_hispeed_delay)
		kfree(above_hispeed_delay);
	above_hispeed_delay = new_above_hispeed_delay;
	nabove_hispeed_delay = ntokens;
#endif
	spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);
#ifdef CONFIG_MODE_AUTO_CHANGE
	spin_unlock_irqrestore(&mode_lock, flags2);
#endif
	return count;
}

static struct global_attr above_hispeed_delay_attr =
	__ATTR(above_hispeed_delay, S_IRUGO | S_IWUSR,
		show_above_hispeed_delay, store_above_hispeed_delay);

static ssize_t show_hispeed_freq(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
#ifdef CONFIG_MODE_AUTO_CHANGE
	return sprintf(buf, "%u\n", hispeed_freq_set[param_index]);
#else
	return sprintf(buf, "%u\n", hispeed_freq);
#endif
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
				  struct attribute *attr, const char *buf,
				  size_t count)
{
	int ret;
	long unsigned int val;
#ifdef CONFIG_MODE_AUTO_CHANGE
	unsigned long flags2;
#endif
	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
#ifdef CONFIG_MODE_AUTO_CHANGE
	spin_lock_irqsave(&mode_lock, flags2);
	hispeed_freq_set[param_index] = val;
	if (cur_param_index == param_index)
		hispeed_freq = val;
	spin_unlock_irqrestore(&mode_lock, flags2);
#else
	hispeed_freq = val;
#endif
	return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
		show_hispeed_freq, store_hispeed_freq);

static ssize_t show_sampling_down_factor(struct kobject *kobj,
					 struct attribute *attr, char *buf)
{
#ifdef CONFIG_MODE_AUTO_CHANGE
	return sprintf(buf, "%u\n", sampling_down_factor_set[param_index]);
#else
	return sprintf(buf, "%u\n", sampling_down_factor);
#endif
}

static ssize_t store_sampling_down_factor(struct kobject *kobj,
					  struct attribute *attr,
					  const char *buf, size_t count)
{
	int ret;
	long unsigned int val;
#ifdef CONFIG_MODE_AUTO_CHANGE
	unsigned long flags2;
#endif
	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
#ifdef CONFIG_MODE_AUTO_CHANGE
	spin_lock_irqsave(&mode_lock, flags2);
	sampling_down_factor_set[param_index] = val;
	if (cur_param_index == param_index)
		sampling_down_factor = val;
	spin_unlock_irqrestore(&mode_lock, flags2);
#else
	sampling_down_factor = val;
#endif
	return count;
}

static struct global_attr sampling_down_factor_attr =
	__ATTR(sampling_down_factor, 0644,
		show_sampling_down_factor, store_sampling_down_factor);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
#ifdef CONFIG_MODE_AUTO_CHANGE
	return sprintf(buf, "%lu\n", go_hispeed_load_set[param_index]);
#else
	return sprintf(buf, "%lu\n", go_hispeed_load);
#endif
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
				     struct attribute *attr, const char *buf,
				     size_t count)
{
	int ret;
	unsigned long val;
#ifdef CONFIG_MODE_AUTO_CHANGE
	unsigned long flags2;
#endif
	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
#ifdef CONFIG_MODE_AUTO_CHANGE
	spin_lock_irqsave(&mode_lock, flags2);
	go_hispeed_load_set[param_index] = val;
	if (cur_param_index == param_index)
		go_hispeed_load = val;
	spin_unlock_irqrestore(&mode_lock, flags2);
#else
	go_hispeed_load = val;
#endif
	return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
		show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
#ifdef CONFIG_MODE_AUTO_CHANGE
	return sprintf(buf, "%lu\n", min_sample_time_set[param_index]);
#else
	return sprintf(buf, "%lu\n", min_sample_time);
#endif
}

static ssize_t store_min_sample_time(struct kobject *kobj,
				     struct attribute *attr, const char *buf,
				     size_t count)
{
	int ret;
	unsigned long val;
#ifdef CONFIG_MODE_AUTO_CHANGE
	unsigned long flags2;
#endif
	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
#ifdef CONFIG_MODE_AUTO_CHANGE
	spin_lock_irqsave(&mode_lock, flags2);
	min_sample_time_set[param_index] = val;
	if (cur_param_index == param_index)
		min_sample_time = val;
	spin_unlock_irqrestore(&mode_lock, flags2);
#else
	min_sample_time = val;
#endif
	return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
		show_min_sample_time, store_min_sample_time);

static ssize_t show_timer_rate(struct kobject *kobj,
			       struct attribute *attr, char *buf)
{
#ifdef CONFIG_MODE_AUTO_CHANGE
	return sprintf(buf, "%lu\n", timer_rate_set[param_index]);
#else
	return sprintf(buf, "%lu\n", timer_rate);
#endif
}

static ssize_t store_timer_rate(struct kobject *kobj,
				struct attribute *attr, const char *buf,
				size_t count)
{
	int ret;
	unsigned long val;
#ifdef CONFIG_MODE_AUTO_CHANGE
	unsigned long flags2;
#endif
	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
#ifdef CONFIG_MODE_AUTO_CHANGE
	spin_lock_irqsave(&mode_lock, flags2);
	timer_rate_set[param_index] = val;
	if (cur_param_index == param_index)
		timer_rate = val;
	spin_unlock_irqrestore(&mode_lock, flags2);
#else
	timer_rate = val;
#endif
	return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
		show_timer_rate, store_timer_rate);

static ssize_t show_timer_slack(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", timer_slack_val);
}

static ssize_t store_timer_slack(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	long val;

	/* signed parse: timer_slack may legitimately be set to -1 */
	ret = kstrtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	timer_slack_val = val;
	return count;
}

define_one_global_rw(timer_slack);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boost_val = val;

	if (boost_val) {
		trace_cpufreq_interactive_boost("on");
		cpufreq_interactive_boost();
	} else {
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

define_one_global_rw(boost);

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boostpulse_endtime = ktime_to_us(ktime_get()) + boostpulse_duration_val;
	trace_cpufreq_interactive_boost("pulse");
	cpufreq_interactive_boost();
	return count;
}

static struct global_attr boostpulse =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse);

static ssize_t show_boostpulse_duration(
	struct kobject *kobj, struct attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(
	struct kobject *kobj, struct attribute *attr, const char *buf,
	size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boostpulse_duration_val = val;
	return count;
}

define_one_global_rw(boostpulse_duration);

static ssize_t show_io_is_busy(struct kobject *kobj,
			struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", io_is_busy);
}

static ssize_t store_io_is_busy(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	io_is_busy = val;
	return count;
}

static struct global_attr io_is_busy_attr = __ATTR(io_is_busy, 0644,
		show_io_is_busy, store_io_is_busy);

static ssize_t show_sync_freq(struct kobject *kobj,
			struct attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", sync_freq);
}

static ssize_t store_sync_freq(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	sync_freq = val;
	return count;
}

static struct global_attr sync_freq_attr = __ATTR(sync_freq, 0644,
		show_sync_freq, store_sync_freq);

static ssize_t show_up_threshold_any_cpu_load(struct kobject *kobj,
			struct attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", up_threshold_any_cpu_load);
}

static ssize_t store_up_threshold_any_cpu_load(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	up_threshold_any_cpu_load = val;
	return count;
}

static struct global_attr up_threshold_any_cpu_load_attr =
		__ATTR(up_threshold_any_cpu_load, 0644,
			show_up_threshold_any_cpu_load,
			store_up_threshold_any_cpu_load);

static ssize_t show_up_threshold_any_cpu_freq(struct kobject *kobj,
			struct attribute *attr, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%u\n", up_threshold_any_cpu_freq);
}

static ssize_t store_up_threshold_any_cpu_freq(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	up_threshold_any_cpu_freq = val;
	return count;
}

static struct global_attr up_threshold_any_cpu_freq_attr =
		__ATTR(up_threshold_any_cpu_freq, 0644,
			show_up_threshold_any_cpu_freq,
			store_up_threshold_any_cpu_freq);
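
/*
 * Mode auto-change sysfs attributes.  The index(), load() and time() macros
 * below stamp out show/store handlers for the mode-selection indexes, the
 * enter/exit load thresholds and the enter/exit durations respectively.
 */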
#ifdef CONFIG_MODE_AUTO_CHANGE
#define index(obj_name, obj_attr)					\
static ssize_t show_##obj_name(struct kobject *kobj,			\
			struct attribute *attr, char *buf)		\
{									\
	return sprintf(buf, "%u\n", obj_name);				\
}									\
									\
static ssize_t store_##obj_name(struct kobject *kobj,			\
			struct attribute *attr, const char *buf,	\
			size_t count)					\
{									\
	int ret;							\
	unsigned long val;						\
									\
	ret = strict_strtoul(buf, 0, &val);				\
	if (ret < 0)							\
		return ret;						\
									\
	val &= MULTI_MODE | SINGLE_MODE | NO_MODE;			\
	obj_name = val;							\
	return count;							\
}									\
									\
static struct global_attr obj_attr = __ATTR(obj_name, 0644,		\
		show_##obj_name, store_##obj_name);

index(mode, mode_attr);
index(enforced_mode, enforced_mode_attr);
index(param_index, param_index_attr);

#define load(obj_name, obj_attr)					\
static ssize_t show_##obj_name(struct kobject *kobj,			\
			struct attribute *attr, char *buf)		\
{									\
	return sprintf(buf, "%u\n", obj_name);				\
}									\
									\
static ssize_t store_##obj_name(struct kobject *kobj,			\
			struct attribute *attr, const char *buf,	\
			size_t count)					\
{									\
	int ret;							\
	unsigned long val;						\
									\
	ret = strict_strtoul(buf, 0, &val);				\
	if (ret < 0)							\
		return ret;						\
									\
	obj_name = val;							\
	return count;							\
}									\
									\
static struct global_attr obj_attr = __ATTR(obj_name, 0644,		\
		show_##obj_name, store_##obj_name);

load(multi_enter_load, multi_enter_load_attr);
load(multi_exit_load, multi_exit_load_attr);
load(single_enter_load, single_enter_load_attr);
load(single_exit_load, single_exit_load_attr);

#define time(obj_name, obj_attr)					\
static ssize_t show_##obj_name(struct kobject *kobj,			\
			struct attribute *attr, char *buf)		\
{									\
	return sprintf(buf, "%lu\n", obj_name);				\
}									\
									\
static ssize_t store_##obj_name(struct kobject *kobj,			\
			struct attribute *attr, const char *buf,	\
			size_t count)					\
{									\
	int ret;							\
	unsigned long val;						\
									\
	ret = strict_strtoul(buf, 0, &val);				\
	if (ret < 0)							\
		return ret;						\
									\
	obj_name = val;							\
	return count;							\
}									\
									\
static struct global_attr obj_attr = __ATTR(obj_name, 0644,		\
		show_##obj_name, store_##obj_name);

time(multi_enter_time, multi_enter_time_attr);
time(multi_exit_time, multi_exit_time_attr);
time(single_enter_time, single_enter_time_attr);
time(single_exit_time, single_exit_time_attr);
#endif
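/* Tunables exported as a single sysfs group on the global cpufreq kobject. */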
static struct attribute *interactive_attributes[] = {
	&target_loads_attr.attr,
	&above_hispeed_delay_attr.attr,
	&hispeed_freq_attr.attr,
	&go_hispeed_load_attr.attr,
	&min_sample_time_attr.attr,
	&timer_rate_attr.attr,
	&timer_slack.attr,
	&boost.attr,
	&boostpulse.attr,
	&boostpulse_duration.attr,
	&io_is_busy_attr.attr,
	&sampling_down_factor_attr.attr,
	&sync_freq_attr.attr,
	&up_threshold_any_cpu_load_attr.attr,
	&up_threshold_any_cpu_freq_attr.attr,
#ifdef CONFIG_MODE_AUTO_CHANGE
	&mode_attr.attr,
	&enforced_mode_attr.attr,
	&param_index_attr.attr,
	&multi_enter_load_attr.attr,
	&multi_exit_load_attr.attr,
	&single_enter_load_attr.attr,
	&single_exit_load_attr.attr,
	&multi_enter_time_attr.attr,
	&multi_exit_time_attr.attr,
	&single_enter_time_attr.attr,
	&single_exit_time_attr.attr,
#endif
	NULL,
};

static struct attribute_group interactive_attr_group = {
	.attrs = interactive_attributes,
	.name = "interactive",
};
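/*
 * Forward CPU idle entry/exit events to the governor's idle_start/idle_end
 * handlers defined earlier in this file.
 */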
static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};
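/*
 * CONFIG_MODE_AUTO_CHANGE: seed every per-mode parameter set from the
 * current global tunables.  hispeed_freq_set[] is left at 0 here so it can
 * be filled in from policy->max when the governor starts.
 */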
#ifdef CONFIG_MODE_AUTO_CHANGE
static void cpufreq_param_set_init(void)
{
	unsigned int i;
	unsigned long flags;

	multi_enter_load = DEFAULT_TARGET_LOAD * num_possible_cpus();

	spin_lock_irqsave(&mode_lock, flags);
	for (i = 0; i < MAX_PARAM_SET; i++) {
		hispeed_freq_set[i] = 0;
		go_hispeed_load_set[i] = go_hispeed_load;
		target_loads_set[i] = target_loads;
		ntarget_loads_set[i] = ntarget_loads;
		min_sample_time_set[i] = min_sample_time;
		timer_rate_set[i] = timer_rate;
		above_hispeed_delay_set[i] = above_hispeed_delay;
		nabove_hispeed_delay_set[i] = nabove_hispeed_delay;
		sampling_down_factor_set[i] = sampling_down_factor;
	}
	spin_unlock_irqrestore(&mode_lock, flags);
}
#endif
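/*
 * Governor entry point: handles CPUFREQ_GOV_START/STOP/LIMITS events for a
 * policy.  The first START registers the sysfs group, idle notifier and
 * transition notifier; the last STOP tears them down again (tracked via
 * active_count under gov_lock).
 */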
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(policy->cpu))
			return -EINVAL;

		mutex_lock(&gov_lock);

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (!hispeed_freq)
			hispeed_freq = policy->max;
#ifdef CONFIG_MODE_AUTO_CHANGE
		for (j = 0; j < MAX_PARAM_SET; j++)
			if (!hispeed_freq_set[j])
				hispeed_freq_set[j] = policy->max;
#endif

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				ktime_to_us(ktime_get());
			pcpu->hispeed_validate_time =
				pcpu->floor_validate_time;
			down_write(&pcpu->enable_sem);
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			cpufreq_interactive_timer_start(j);
			pcpu->governor_enabled = 1;
			up_write(&pcpu->enable_sem);
		}

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (++active_count > 1) {
			mutex_unlock(&gov_lock);
			return 0;
		}

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactive_attr_group);
		if (rc) {
			mutex_unlock(&gov_lock);
			return rc;
		}

		idle_notifier_register(&cpufreq_interactive_idle_nb);
		cpufreq_register_notifier(&cpufreq_notifier_block,
				CPUFREQ_TRANSITION_NOTIFIER);
		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&gov_lock);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			down_write(&pcpu->enable_sem);
			pcpu->governor_enabled = 0;
			pcpu->target_freq = 0;
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			up_write(&pcpu->enable_sem);
		}

		if (--active_count > 0) {
			mutex_unlock(&gov_lock);
			return 0;
		}

		cpufreq_unregister_notifier(&cpufreq_notifier_block,
				CPUFREQ_TRANSITION_NOTIFIER);
		idle_notifier_unregister(&cpufreq_interactive_idle_nb);
		sysfs_remove_group(cpufreq_global_kobject,
				&interactive_attr_group);
		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);

			/* hold the write semaphore to avoid a race */
			down_write(&pcpu->enable_sem);
			if (pcpu->governor_enabled == 0) {
				up_write(&pcpu->enable_sem);
				continue;
			}

			/* clamp target_freq to the new limits first */
			if (policy->max < pcpu->target_freq)
				pcpu->target_freq = policy->max;
			else if (policy->min > pcpu->target_freq)
				pcpu->target_freq = policy->min;

			/*
			 * Reschedule the timers.  Delete them first, else the
			 * timer callback may return without re-arming when it
			 * fails to acquire the semaphore, leaving the timer
			 * stopped unexpectedly.
			 */
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			cpufreq_interactive_timer_start(j);
			pcpu->minfreq_boost = 1;
			up_write(&pcpu->enable_sem);
		}
		break;
	}
	return 0;
}
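/*
 * The slack timer exists only to wake an idle CPU so the load can be
 * re-evaluated; its handler intentionally does nothing.
 */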
static void cpufreq_interactive_nop_timer(unsigned long data)
{
}
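/*
 * Module init: set up the per-CPU sampling and slack timers, locks, the
 * optional mode/retention helpers, and the RT "cfinteractive" speedchange
 * thread, then register the governor with the cpufreq core.
 */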
static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	/* Initialize per-CPU timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
		init_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
		spin_lock_init(&pcpu->load_lock);
		init_rwsem(&pcpu->enable_sem);
	}

	spin_lock_init(&target_loads_lock);
	spin_lock_init(&speedchange_cpumask_lock);
	spin_lock_init(&above_hispeed_delay_lock);
#ifdef CONFIG_MODE_AUTO_CHANGE
	spin_lock_init(&mode_lock);
	cpufreq_param_set_init();
#endif
#ifdef CONFIG_RETENTION_CHANGE
	retention_toggle_wq = alloc_workqueue("retentionToggle_wq",
					      WQ_HIGHPRI, 0);
	if (!retention_toggle_wq)
		pr_info("retention toggle workqueue init error\n");
	INIT_WORK(&retention_toggle_work, do_toggle_retention);
#endif
	mutex_init(&gov_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}
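/*
 * CONFIG_RETENTION_CHANGE: worker that toggles the MSM CPU retention
 * low-power mode based on mode_count (maintained elsewhere in this file),
 * presumably disabling retention while single-CPU mode is active.
 */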
#ifdef CONFIG_RETENTION_CHANGE
static void do_toggle_retention(struct work_struct *work)
{
	if (mode_count == 1)
		msm_pm_retention_mode_enable(0);
	else if (mode_count == 0)
		msm_pm_retention_mode_enable(1);
}
#endif /* CONFIG_RETENTION_CHANGE */
#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"Latency sensitive workloads");
MODULE_LICENSE("GPL");