- /*
- * drivers/cpufreq/cpufreq_interactive.c
- *
- * Copyright (C) 2010 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Author: Mike Chan (mike@android.com)
- *
- */
- #include <linux/cpu.h>
- #include <linux/cpumask.h>
- #include <linux/cpufreq.h>
- #include <linux/module.h>
- #include <linux/moduleparam.h>
- #include <linux/rwsem.h>
- #include <linux/sched.h>
- #include <linux/tick.h>
- #include <linux/time.h>
- #include <linux/timer.h>
- #include <linux/workqueue.h>
- #include <linux/kthread.h>
- #include <linux/slab.h>
- #include <linux/kernel_stat.h>
- #include <asm/cputime.h>
- #define CREATE_TRACE_POINTS
- #include <trace/events/cpufreq_interactive.h>
- #define CONFIG_MODE_AUTO_CHANGE
- #define CONFIG_RETENTION_CHANGE
- static int active_count;
- struct cpufreq_interactive_cpuinfo {
- struct timer_list cpu_timer;
- struct timer_list cpu_slack_timer;
- spinlock_t load_lock; /* protects the next 4 fields */
- u64 time_in_idle;
- u64 time_in_idle_timestamp;
- u64 cputime_speedadj;
- u64 cputime_speedadj_timestamp;
- struct cpufreq_policy *policy;
- struct cpufreq_frequency_table *freq_table;
- unsigned int target_freq;
- unsigned int floor_freq;
- u64 floor_validate_time;
- u64 hispeed_validate_time;
- struct rw_semaphore enable_sem;
- int governor_enabled;
- int prev_load;
- int minfreq_boost;
- };
- static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);
- /* realtime thread handles frequency scaling */
- static struct task_struct *speedchange_task;
- static cpumask_t speedchange_cpumask;
- static spinlock_t speedchange_cpumask_lock;
- static struct mutex gov_lock;
- /* Hi speed to bump to from lo speed when load burst (default max) */
- static unsigned int hispeed_freq;
- /* Go to hi speed when CPU load at or above this value. */
- #define DEFAULT_GO_HISPEED_LOAD 99
- static unsigned long go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
- /* Sampling down factor to be applied to min_sample_time at max freq */
- static unsigned int sampling_down_factor;
- /* Target load. Lower values result in higher CPU speeds. */
- #define DEFAULT_TARGET_LOAD 90
- static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
- static spinlock_t target_loads_lock;
- static unsigned int *target_loads = default_target_loads;
- static int ntarget_loads = ARRAY_SIZE(default_target_loads);
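- /*
- * Written via sysfs as an odd-length list alternating load and
- * frequency, e.g. (illustrative) "85 1500000:90": target 85% load
- * below 1.5 GHz and 90% at or above it.
- */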
- /*
- * The minimum amount of time to spend at a frequency before we can ramp down.
- */
- #define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
- static unsigned long min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
- /*
- * The sample rate of the timer used to increase frequency
- */
- #define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
- static unsigned long timer_rate = DEFAULT_TIMER_RATE;
- /* Busy SDF parameters */
- #define MIN_BUSY_TIME (100 * USEC_PER_MSEC)
- /*
- * Wait this long before raising speed above hispeed, by default a single
- * timer interval.
- */
- #define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
- static unsigned int default_above_hispeed_delay[] = {
- DEFAULT_ABOVE_HISPEED_DELAY };
- static spinlock_t above_hispeed_delay_lock;
- static unsigned int *above_hispeed_delay = default_above_hispeed_delay;
- static int nabove_hispeed_delay = ARRAY_SIZE(default_above_hispeed_delay);
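- /*
- * Accepts a single delay in usecs or a delay followed by freq:delay
- * pairs, e.g. (illustrative) "20000 1400000:40000": wait 20 ms by
- * default, 40 ms once at or above 1.4 GHz.
- */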
- /* Non-zero means indefinite speed boost active */
- static int boost_val;
- /* Duration of a boost pulse in usecs */
- static int boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
- /* End time of boost pulse in ktime converted to usecs */
- static u64 boostpulse_endtime;
- /*
- * Max additional time to wait in idle, beyond timer_rate, at speeds above
- * minimum before wakeup to reduce speed, or -1 if unnecessary.
- */
- #define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
- static int timer_slack_val = DEFAULT_TIMER_SLACK;
- static bool io_is_busy;
- #ifdef CONFIG_MODE_AUTO_CHANGE
- struct cpufreq_loadinfo {
- unsigned int load;
- unsigned int freq;
- u64 timestamp;
- };
- static DEFINE_PER_CPU(struct cpufreq_loadinfo, loadinfo);
- static spinlock_t mode_lock;
- #define MULTI_MODE 2
- #define SINGLE_MODE 1
- #define NO_MODE 0
- static unsigned int mode = 0;
- static unsigned int enforced_mode = 0;
- static u64 mode_check_timestamp = 0;
- #define DEFAULT_MULTI_ENTER_TIME (4 * DEFAULT_TIMER_RATE)
- static unsigned long multi_enter_time = DEFAULT_MULTI_ENTER_TIME;
- static unsigned long time_in_multi_enter = 0;
- static unsigned int multi_enter_load = 4 * DEFAULT_TARGET_LOAD;
- #define DEFAULT_MULTI_EXIT_TIME (16 * DEFAULT_TIMER_RATE)
- static unsigned long multi_exit_time = DEFAULT_MULTI_EXIT_TIME;
- static unsigned long time_in_multi_exit = 0;
- static unsigned int multi_exit_load = 4 * DEFAULT_TARGET_LOAD;
- #define DEFAULT_SINGLE_ENTER_TIME (8 * DEFAULT_TIMER_RATE)
- static unsigned long single_enter_time = DEFAULT_SINGLE_ENTER_TIME;
- static unsigned long time_in_single_enter = 0;
- static unsigned int single_enter_load = DEFAULT_TARGET_LOAD;
- #define DEFAULT_SINGLE_EXIT_TIME (4 * DEFAULT_TIMER_RATE)
- static unsigned long single_exit_time = DEFAULT_SINGLE_EXIT_TIME;
- static unsigned long time_in_single_exit = 0;
- static unsigned int single_exit_load = DEFAULT_TARGET_LOAD;
- static unsigned int param_index = 0;
- static unsigned int cur_param_index = 0;
- #define MAX_PARAM_SET 4 /* ((MULTI_MODE | SINGLE_MODE | NO_MODE) + 1) */
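- /* Per-mode copies of the tunables below; index is the mode bitmask. */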
- static unsigned int hispeed_freq_set[MAX_PARAM_SET];
- static unsigned long go_hispeed_load_set[MAX_PARAM_SET];
- static unsigned int *target_loads_set[MAX_PARAM_SET];
- static int ntarget_loads_set[MAX_PARAM_SET];
- static unsigned long min_sample_time_set[MAX_PARAM_SET];
- static unsigned long timer_rate_set[MAX_PARAM_SET];
- static unsigned int *above_hispeed_delay_set[MAX_PARAM_SET];
- static int nabove_hispeed_delay_set[MAX_PARAM_SET];
- static unsigned int sampling_down_factor_set[MAX_PARAM_SET];
- #endif /* CONFIG_MODE_AUTO_CHANGE */
- #ifdef CONFIG_RETENTION_CHANGE
- static void do_toggle_retention(struct work_struct *work);
- extern void msm_pm_retention_mode_enable(bool enable);
- static struct workqueue_struct *retention_toggle_wq;
- static struct work_struct retention_toggle_work;
- static int mode_count = 0;
- #endif
- /*
- * If the max load among other CPUs is higher than up_threshold_any_cpu_load
- * and if the highest frequency among the other CPUs is higher than
- * up_threshold_any_cpu_freq then do not let the frequency to drop below
- * sync_freq
- */
- static unsigned int up_threshold_any_cpu_load;
- static unsigned int sync_freq;
- static unsigned int up_threshold_any_cpu_freq;
- static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
- unsigned int event);
- #define DYN_DEFER (1)
-
- #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
- static
- #endif
- struct cpufreq_governor cpufreq_gov_interactive = {
- .name = "interactive",
- .governor = cpufreq_governor_interactive,
- .max_transition_latency = 10000000,
- .owner = THIS_MODULE,
- };
- static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
- cputime64_t *wall)
- {
- u64 idle_time;
- u64 cur_wall_time;
- u64 busy_time;
- cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
- busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
- busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
- idle_time = cur_wall_time - busy_time;
- if (wall)
- *wall = jiffies_to_usecs(cur_wall_time);
- return jiffies_to_usecs(idle_time);
- }
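- /*
- * With DYN_DEFER the sampling timer is made deferrable while the CPU
- * sits at its minimum speed, so an idle CPU is not woken just to
- * sample load. On these kernels the deferrable flag lives in the low
- * bit of timer->base (TBASE_DEFERRABLE_FLAG), which the helpers below
- * set and clear directly.
- */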
- #ifdef DYN_DEFER
- static inline void timer_set_nondeferrable(struct timer_list *timer)
- {
- timer->base =
- ((struct tvec_base *)((unsigned long)timer->base &
- ~TBASE_DEFERRABLE_FLAG));
- }
- static inline void timer_set_deferrable(struct timer_list *timer)
- {
- timer->base =
- ((struct tvec_base *)((unsigned long)timer->base |
- TBASE_DEFERRABLE_FLAG));
- }
- #endif
- static inline cputime64_t get_cpu_idle_time(unsigned int cpu,
- cputime64_t *wall)
- {
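- /*
- * Prefer the precise nohz idle accounting; fall back to jiffy-based
- * accounting when it is unavailable. Unless io_is_busy is set, iowait
- * time also counts as idle.
- */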
- u64 idle_time = get_cpu_idle_time_us(cpu, wall);
- if (idle_time == -1ULL)
- idle_time = get_cpu_idle_time_jiffy(cpu, wall);
- else if (!io_is_busy)
- idle_time += get_cpu_iowait_time_us(cpu, wall);
- return idle_time;
- }
- static void cpufreq_interactive_timer_resched(
- struct cpufreq_interactive_cpuinfo *pcpu)
- {
- unsigned long expires;
- unsigned long flags;
- spin_lock_irqsave(&pcpu->load_lock, flags);
- pcpu->time_in_idle =
- get_cpu_idle_time(smp_processor_id(),
- &pcpu->time_in_idle_timestamp);
- pcpu->cputime_speedadj = 0;
- pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
- expires = jiffies + usecs_to_jiffies(timer_rate);
-
- #ifdef DYN_DEFER
- if (pcpu->target_freq > pcpu->policy->min)
- timer_set_nondeferrable(&pcpu->cpu_timer);
- else
- timer_set_deferrable(&pcpu->cpu_timer);
- #endif
- mod_timer_pinned(&pcpu->cpu_timer, expires);
- if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
- expires += usecs_to_jiffies(timer_slack_val);
- mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
- }
- spin_unlock_irqrestore(&pcpu->load_lock, flags);
- }
- /* The caller shall take enable_sem write semaphore to avoid any timer race.
- * The cpu_timer and cpu_slack_timer must be deactivated when calling this
- * function.
- */
- static void cpufreq_interactive_timer_start(int cpu)
- {
- struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
- unsigned long expires = jiffies + usecs_to_jiffies(timer_rate);
- unsigned long flags;
- pcpu->cpu_timer.expires = expires;
- del_timer_sync(&pcpu->cpu_timer);
- add_timer_on(&pcpu->cpu_timer, cpu);
- if (timer_slack_val >= 0 && pcpu->target_freq > pcpu->policy->min) {
- expires += usecs_to_jiffies(timer_slack_val);
- pcpu->cpu_slack_timer.expires = expires;
- del_timer_sync(&pcpu->cpu_slack_timer);
- add_timer_on(&pcpu->cpu_slack_timer, cpu);
- }
- spin_lock_irqsave(&pcpu->load_lock, flags);
- pcpu->time_in_idle =
- get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp);
- pcpu->cputime_speedadj = 0;
- pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
- spin_unlock_irqrestore(&pcpu->load_lock, flags);
- }
- static unsigned int freq_to_above_hispeed_delay(unsigned int freq)
- {
- int i;
- unsigned int ret;
- unsigned long flags;
- spin_lock_irqsave(&above_hispeed_delay_lock, flags);
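- /*
- * Entries are stored flat as {delay, freq, delay, freq, ...}; step by
- * two to pick the delay of the last pair whose freq threshold is at
- * or below @freq.
- */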
- for (i = 0; i < nabove_hispeed_delay - 1 &&
- freq >= above_hispeed_delay[i+1]; i += 2)
- ;
- ret = above_hispeed_delay[i];
- ret = (ret > (1 * USEC_PER_MSEC)) ? (ret - (1 * USEC_PER_MSEC)) : ret;
- spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);
- return ret;
- }
- static unsigned int freq_to_targetload(unsigned int freq)
- {
- int i;
- unsigned int ret;
- unsigned long flags;
- spin_lock_irqsave(&target_loads_lock, flags);
- for (i = 0; i < ntarget_loads - 1 && freq >= target_loads[i+1]; i += 2)
- ;
- ret = target_loads[i];
- spin_unlock_irqrestore(&target_loads_lock, flags);
- return ret;
- }
- /*
- * If increasing frequencies never map to a lower target load then
- * choose_freq() will find the minimum frequency that does not exceed its
- * target load given the current load.
- */
- static unsigned int choose_freq(
- struct cpufreq_interactive_cpuinfo *pcpu, unsigned int loadadjfreq)
- {
- unsigned int freq = pcpu->policy->cur;
- unsigned int prevfreq, freqmin, freqmax;
- unsigned int tl;
- int index;
- freqmin = 0;
- freqmax = UINT_MAX;
- do {
- prevfreq = freq;
- tl = freq_to_targetload(freq);
- /*
- * Find the lowest frequency where the computed load is less
- * than or equal to the target load.
- */
- if (cpufreq_frequency_table_target(
- pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
- CPUFREQ_RELATION_L, &index))
- break;
- freq = pcpu->freq_table[index].frequency;
- if (freq > prevfreq) {
- /* The previous frequency is too low. */
- freqmin = prevfreq;
- if (freq >= freqmax) {
- /*
- * Find the highest frequency that is less
- * than freqmax.
- */
- if (cpufreq_frequency_table_target(
- pcpu->policy, pcpu->freq_table,
- freqmax - 1, CPUFREQ_RELATION_H,
- &index))
- break;
- freq = pcpu->freq_table[index].frequency;
- if (freq == freqmin) {
- /*
- * The first frequency below freqmax
- * has already been found to be too
- * low. freqmax is the lowest speed
- * we found that is fast enough.
- */
- freq = freqmax;
- break;
- }
- }
- } else if (freq < prevfreq) {
- /* The previous frequency is high enough. */
- freqmax = prevfreq;
- if (freq <= freqmin) {
- /*
- * Find the lowest frequency that is higher
- * than freqmin.
- */
- if (cpufreq_frequency_table_target(
- pcpu->policy, pcpu->freq_table,
- freqmin + 1, CPUFREQ_RELATION_L,
- &index))
- break;
- freq = pcpu->freq_table[index].frequency;
- /*
- * If freqmax is the first frequency above
- * freqmin then we have already found that
- * this speed is fast enough.
- */
- if (freq == freqmax)
- break;
- }
- }
- /* If same frequency chosen as previous then done. */
- } while (freq != prevfreq);
- return freq;
- }
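- /*
- * Accumulate active (non-idle) time weighted by the frequency it ran
- * at; the timer later divides cputime_speedadj by wall time, giving
- * busy-fraction * frequency, from which load at a given target
- * frequency is derived.
- */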
- static u64 update_load(int cpu)
- {
- struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
- u64 now;
- u64 now_idle;
- unsigned int delta_idle;
- unsigned int delta_time;
- u64 active_time;
- #if defined(CONFIG_SEC_PM) || defined(CONFIG_MODE_AUTO_CHANGE)
- unsigned int cur_load = 0;
- #endif
- #ifdef CONFIG_MODE_AUTO_CHANGE
- struct cpufreq_loadinfo *cur_loadinfo = &per_cpu(loadinfo, cpu);
- #endif
- now_idle = get_cpu_idle_time(cpu, &now);
- delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
- delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
- if (delta_time <= delta_idle)
- active_time = 0;
- else
- active_time = delta_time - delta_idle;
- pcpu->cputime_speedadj += active_time * pcpu->policy->cur;
- pcpu->time_in_idle = now_idle;
- pcpu->time_in_idle_timestamp = now;
- #if defined(CONFIG_SEC_PM) || defined(CONFIG_MODE_AUTO_CHANGE)
- cur_load = (unsigned int)(active_time * 100) / delta_time;
- #endif
- #ifdef CONFIG_SEC_PM
- pcpu->policy->load_at_max = (cur_load * pcpu->policy->cur) /
- pcpu->policy->cpuinfo.max_freq;
- #endif
- #ifdef CONFIG_MODE_AUTO_CHANGE
- cur_loadinfo->load = (cur_load * pcpu->policy->cur) /
- pcpu->policy->cpuinfo.max_freq;
- cur_loadinfo->freq = pcpu->policy->cur;
- cur_loadinfo->timestamp = now;
- #endif
- return now;
- }
- #ifdef CONFIG_MODE_AUTO_CHANGE
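- /*
- * Mode detection with hysteresis: SINGLE_MODE is entered once the
- * highest per-CPU load (normalized to max_freq) has stayed at or
- * above single_enter_load for single_enter_time; MULTI_MODE likewise
- * uses the summed load of all online CPUs. Exit works the same way
- * against the *_exit_load/*_exit_time thresholds.
- */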
- static unsigned int check_mode(int cpu, unsigned int cur_mode, u64 now)
- {
- int i;
- unsigned int ret = cur_mode, total_load = 0, max_single_load = 0;
- struct cpufreq_loadinfo *cur_loadinfo;
- if (now - mode_check_timestamp < timer_rate - 1000)
- return ret;
- if (now - mode_check_timestamp > timer_rate + 1000)
- mode_check_timestamp = now - timer_rate;
- for_each_online_cpu(i) {
- cur_loadinfo = &per_cpu(loadinfo, i);
- total_load += cur_loadinfo->load;
- if (cur_loadinfo->load > max_single_load)
- max_single_load = cur_loadinfo->load;
- }
- if (!(cur_mode & SINGLE_MODE)) {
- if (max_single_load >= single_enter_load)
- time_in_single_enter += now - mode_check_timestamp;
- else
- time_in_single_enter = 0;
- if (time_in_single_enter >= single_enter_time)
- ret |= SINGLE_MODE;
- }
- if (!(cur_mode & MULTI_MODE)) {
- if (total_load >= multi_enter_load)
- time_in_multi_enter += now - mode_check_timestamp;
- else
- time_in_multi_enter = 0;
- if (time_in_multi_enter >= multi_enter_time)
- ret |= MULTI_MODE;
- }
- if (cur_mode & SINGLE_MODE) {
- if (max_single_load < single_exit_load)
- time_in_single_exit += now - mode_check_timestamp;
- else
- time_in_single_exit = 0;
- if (time_in_single_exit >= single_exit_time)
- ret &= ~SINGLE_MODE;
- }
- if (cur_mode & MULTI_MODE) {
- if (total_load < multi_exit_load)
- time_in_multi_exit += now - mode_check_timestamp;
- else
- time_in_multi_exit = 0;
- if (time_in_multi_exit >= multi_exit_time)
- ret &= ~MULTI_MODE;
- }
- trace_cpufreq_interactive_mode(cpu, total_load,
- time_in_single_enter, time_in_multi_enter,
- time_in_single_exit, time_in_multi_exit, ret);
- if (time_in_single_enter >= single_enter_time)
- time_in_single_enter = 0;
- if (time_in_multi_enter >= multi_enter_time)
- time_in_multi_enter = 0;
- if (time_in_single_exit >= single_exit_time)
- time_in_single_exit = 0;
- if (time_in_multi_exit >= multi_exit_time)
- time_in_multi_exit = 0;
- mode_check_timestamp = now;
- return ret;
- }
- static void set_new_param_set(unsigned int index)
- {
- unsigned long flags;
- hispeed_freq = hispeed_freq_set[index];
- go_hispeed_load = go_hispeed_load_set[index];
- spin_lock_irqsave(&target_loads_lock, flags);
- target_loads = target_loads_set[index];
- ntarget_loads = ntarget_loads_set[index];
- spin_unlock_irqrestore(&target_loads_lock, flags);
- min_sample_time = min_sample_time_set[index];
- timer_rate = timer_rate_set[index];
- spin_lock_irqsave(&above_hispeed_delay_lock, flags);
- above_hispeed_delay = above_hispeed_delay_set[index];
- nabove_hispeed_delay = nabove_hispeed_delay_set[index];
- spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);
- cur_param_index = index;
- }
- static void enter_mode(void)
- {
- #if 1
- set_new_param_set(mode);
- #else
- set_new_param_set(1);
- #endif
- #ifdef CONFIG_RETENTION_CHANGE
- queue_work(retention_toggle_wq, &retention_toggle_work);
- #endif
- }
- static void exit_mode(void)
- {
- set_new_param_set(0);
- #ifdef CONFIG_RETENTION_CHANGE
- queue_work(retention_toggle_wq, &retention_toggle_work);
- #endif
- }
- #endif
- static void cpufreq_interactive_timer(unsigned long data)
- {
- u64 now;
- unsigned int delta_time;
- u64 cputime_speedadj;
- int cpu_load;
- struct cpufreq_interactive_cpuinfo *pcpu =
- &per_cpu(cpuinfo, data);
- unsigned int new_freq;
- unsigned int loadadjfreq;
- unsigned int index;
- unsigned long flags;
- bool boosted;
- unsigned long mod_min_sample_time;
- int i, max_load;
- unsigned int max_freq;
- struct cpufreq_interactive_cpuinfo *picpu;
- #ifdef CONFIG_MODE_AUTO_CHANGE
- unsigned int new_mode;
- #endif
- if (!down_read_trylock(&pcpu->enable_sem))
- return;
- if (!pcpu->governor_enabled)
- goto exit;
- spin_lock_irqsave(&pcpu->load_lock, flags);
- now = update_load(data);
- delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
- cputime_speedadj = pcpu->cputime_speedadj;
- spin_unlock_irqrestore(&pcpu->load_lock, flags);
- if (WARN_ON_ONCE(!delta_time))
- goto rearm;
- #ifdef CONFIG_MODE_AUTO_CHANGE
- spin_lock_irqsave(&mode_lock, flags);
- if (enforced_mode)
- new_mode = enforced_mode;
- else
- new_mode = check_mode(data, mode, now);
- if (new_mode != mode) {
- mode = new_mode;
- if (new_mode & MULTI_MODE || new_mode & SINGLE_MODE) {
- #ifdef CONFIG_RETENTION_CHANGE
- ++mode_count;
- #endif
- pr_info("Governor: enter mode 0x%x\n", mode);
- enter_mode();
- } else {
- #ifdef CONFIG_RETENTION_CHANGE
- mode_count = 0;
- #endif
- pr_info("Governor: exit mode 0x%x\n", mode);
- exit_mode();
- }
- }
- spin_unlock_irqrestore(&mode_lock, flags);
- #endif
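- /*
- * cputime_speedadj / delta_time = busy-fraction * average frequency;
- * scale by 100 / target_freq to express load as a percentage of the
- * currently targeted capacity.
- */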
- do_div(cputime_speedadj, delta_time);
- loadadjfreq = (unsigned int)cputime_speedadj * 100;
- cpu_load = loadadjfreq / pcpu->target_freq;
- pcpu->prev_load = cpu_load;
- boosted = boost_val || now < boostpulse_endtime;
- #ifdef CONFIG_SEC_PM
- pcpu->policy->util = cpu_load;
- #endif
- if (cpu_load >= go_hispeed_load || boosted) {
- if (pcpu->target_freq < hispeed_freq) {
- new_freq = hispeed_freq;
- } else {
- new_freq = choose_freq(pcpu, loadadjfreq);
- if (new_freq < hispeed_freq)
- new_freq = hispeed_freq;
- }
- } else {
- new_freq = choose_freq(pcpu, loadadjfreq);
- if (sync_freq && new_freq < sync_freq) {
- max_load = 0;
- max_freq = 0;
- for_each_online_cpu(i) {
- picpu = &per_cpu(cpuinfo, i);
- if (i == data || picpu->prev_load <
- up_threshold_any_cpu_load)
- continue;
- max_load = max(max_load, picpu->prev_load);
- max_freq = max(max_freq, picpu->target_freq);
- }
- if (max_freq > up_threshold_any_cpu_freq &&
- max_load >= up_threshold_any_cpu_load)
- new_freq = sync_freq;
- }
- }
- if (pcpu->target_freq >= hispeed_freq &&
- new_freq > pcpu->target_freq &&
- now - pcpu->hispeed_validate_time <
- freq_to_above_hispeed_delay(pcpu->target_freq)) {
- trace_cpufreq_interactive_notyet(
- data, cpu_load, pcpu->target_freq,
- pcpu->policy->cur, new_freq);
- goto rearm;
- }
- pcpu->hispeed_validate_time = now;
- if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
- new_freq, CPUFREQ_RELATION_L,
- &index))
- goto rearm;
- new_freq = pcpu->freq_table[index].frequency;
- /*
- * Do not scale below floor_freq unless we have been at or above the
- * floor frequency for the minimum sample time since last validated.
- */
- if (sampling_down_factor && pcpu->policy->cur == pcpu->policy->max){
- mod_min_sample_time = sampling_down_factor;
- pcpu->minfreq_boost=0;
- }
- else
- mod_min_sample_time = min_sample_time;
- if (pcpu->minfreq_boost) {
- mod_min_sample_time = 0;
- pcpu->minfreq_boost = 0;
- }
- if (new_freq < pcpu->floor_freq) {
- if (now - pcpu->floor_validate_time < mod_min_sample_time) {
- trace_cpufreq_interactive_notyet(
- data, cpu_load, pcpu->target_freq,
- pcpu->policy->cur, new_freq);
- goto rearm;
- }
- }
- /*
- * Update the timestamp for checking whether speed has been held at
- * or above the selected frequency for a minimum of min_sample_time,
- * if not boosted to hispeed_freq. If boosted to hispeed_freq then we
- * allow the speed to drop as soon as the boostpulse duration expires
- * (or the indefinite boost is turned off).
- */
- if (!boosted || new_freq > hispeed_freq) {
- pcpu->floor_freq = new_freq;
- pcpu->floor_validate_time = now;
- }
- if (pcpu->target_freq == new_freq &&
- pcpu->target_freq <= pcpu->policy->cur) {
- trace_cpufreq_interactive_already(
- data, cpu_load, pcpu->target_freq,
- pcpu->policy->cur, new_freq);
- goto rearm_if_notmax;
- }
- trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
- pcpu->policy->cur, new_freq);
- pcpu->target_freq = new_freq;
- spin_lock_irqsave(&speedchange_cpumask_lock, flags);
- cpumask_set_cpu(data, &speedchange_cpumask);
- spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
- wake_up_process(speedchange_task);
- rearm_if_notmax:
- /*
- * Already set max speed and don't see a need to change that,
- * wait until next idle to re-evaluate, don't need timer.
- */
- if (pcpu->target_freq == pcpu->policy->max)
- #ifdef CONFIG_MODE_AUTO_CHANGE
- goto rearm;
- #else
- goto exit;
- #endif
- rearm:
- if (!timer_pending(&pcpu->cpu_timer))
- cpufreq_interactive_timer_resched(pcpu);
- exit:
- up_read(&pcpu->enable_sem);
- return;
- }
- static void cpufreq_interactive_idle_start(void)
- {
- struct cpufreq_interactive_cpuinfo *pcpu =
- &per_cpu(cpuinfo, smp_processor_id());
- int pending;
- u64 now;
- if (!down_read_trylock(&pcpu->enable_sem))
- return;
- if (!pcpu->governor_enabled) {
- up_read(&pcpu->enable_sem);
- return;
- }
- pending = timer_pending(&pcpu->cpu_timer);
- if (pcpu->target_freq != pcpu->policy->min) {
- /*
- * Entering idle while not at lowest speed. On some
- * platforms this can hold the other CPU(s) at that speed
- * even though the CPU is idle. Set a timer to re-evaluate
- * speed so this idle CPU doesn't hold the other CPUs above
- * min indefinitely. This should probably be a quirk of
- * the CPUFreq driver.
- */
- if (!pending) {
- cpufreq_interactive_timer_resched(pcpu);
- now = ktime_to_us(ktime_get());
- if ((pcpu->policy->cur == pcpu->policy->max) &&
- (now - pcpu->hispeed_validate_time) >
- MIN_BUSY_TIME) {
- pcpu->floor_validate_time = now;
- }
- }
- }
- up_read(&pcpu->enable_sem);
- }
- static void cpufreq_interactive_idle_end(void)
- {
- struct cpufreq_interactive_cpuinfo *pcpu =
- &per_cpu(cpuinfo, smp_processor_id());
- if (!down_read_trylock(&pcpu->enable_sem))
- return;
- if (!pcpu->governor_enabled) {
- up_read(&pcpu->enable_sem);
- return;
- }
- /* Arm the timer for 1-2 ticks later if not already. */
- if (!timer_pending(&pcpu->cpu_timer)) {
- cpufreq_interactive_timer_resched(pcpu);
- } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
- del_timer(&pcpu->cpu_timer);
- del_timer(&pcpu->cpu_slack_timer);
- cpufreq_interactive_timer(smp_processor_id());
- }
- up_read(&pcpu->enable_sem);
- }
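- /*
- * All frequency changes are applied from this realtime kthread: it
- * drains speedchange_cpumask and, for each flagged CPU, drives the
- * policy to the highest target_freq among the CPUs sharing it.
- */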
- static int cpufreq_interactive_speedchange_task(void *data)
- {
- unsigned int cpu;
- cpumask_t tmp_mask;
- unsigned long flags;
- struct cpufreq_interactive_cpuinfo *pcpu;
- while (1) {
- set_current_state(TASK_INTERRUPTIBLE);
- spin_lock_irqsave(&speedchange_cpumask_lock, flags);
- if (cpumask_empty(&speedchange_cpumask)) {
- spin_unlock_irqrestore(&speedchange_cpumask_lock,
- flags);
- schedule();
- if (kthread_should_stop())
- break;
- spin_lock_irqsave(&speedchange_cpumask_lock, flags);
- }
- set_current_state(TASK_RUNNING);
- tmp_mask = speedchange_cpumask;
- cpumask_clear(&speedchange_cpumask);
- spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
- for_each_cpu(cpu, &tmp_mask) {
- unsigned int j;
- unsigned int max_freq = 0;
- pcpu = &per_cpu(cpuinfo, cpu);
- if (!down_read_trylock(&pcpu->enable_sem))
- continue;
- if (!pcpu->governor_enabled) {
- up_read(&pcpu->enable_sem);
- continue;
- }
- for_each_cpu(j, pcpu->policy->cpus) {
- struct cpufreq_interactive_cpuinfo *pjcpu =
- &per_cpu(cpuinfo, j);
- if (pjcpu->target_freq > max_freq)
- max_freq = pjcpu->target_freq;
- }
- if (max_freq != pcpu->policy->cur)
- __cpufreq_driver_target(pcpu->policy,
- max_freq,
- CPUFREQ_RELATION_H);
- trace_cpufreq_interactive_setspeed(cpu,
- pcpu->target_freq,
- pcpu->policy->cur);
- up_read(&pcpu->enable_sem);
- }
- }
- return 0;
- }
- static void cpufreq_interactive_boost(void)
- {
- int i;
- int anyboost = 0;
- unsigned long flags;
- struct cpufreq_interactive_cpuinfo *pcpu;
- spin_lock_irqsave(&speedchange_cpumask_lock, flags);
- for_each_online_cpu(i) {
- pcpu = &per_cpu(cpuinfo, i);
- if (pcpu->target_freq < hispeed_freq) {
- pcpu->target_freq = hispeed_freq;
- cpumask_set_cpu(i, &speedchange_cpumask);
- pcpu->hispeed_validate_time =
- ktime_to_us(ktime_get());
- anyboost = 1;
- }
- /*
- * Set floor freq and (re)start timer for when last
- * validated.
- */
- pcpu->floor_freq = hispeed_freq;
- pcpu->floor_validate_time = ktime_to_us(ktime_get());
- }
- spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
- if (anyboost)
- wake_up_process(speedchange_task);
- }
- static int cpufreq_interactive_notifier(
- struct notifier_block *nb, unsigned long val, void *data)
- {
- struct cpufreq_freqs *freq = data;
- struct cpufreq_interactive_cpuinfo *pcpu;
- int cpu;
- unsigned long flags;
- if (val == CPUFREQ_POSTCHANGE) {
- pcpu = &per_cpu(cpuinfo, freq->cpu);
- if (!down_read_trylock(&pcpu->enable_sem))
- return 0;
- if (!pcpu->governor_enabled) {
- up_read(&pcpu->enable_sem);
- return 0;
- }
- for_each_cpu(cpu, pcpu->policy->cpus) {
- struct cpufreq_interactive_cpuinfo *pjcpu =
- &per_cpu(cpuinfo, cpu);
- if (cpu != freq->cpu) {
- if (!down_read_trylock(&pjcpu->enable_sem))
- continue;
- if (!pjcpu->governor_enabled) {
- up_read(&pjcpu->enable_sem);
- continue;
- }
- }
- spin_lock_irqsave(&pjcpu->load_lock, flags);
- update_load(cpu);
- spin_unlock_irqrestore(&pjcpu->load_lock, flags);
- if (cpu != freq->cpu)
- up_read(&pjcpu->enable_sem);
- }
- up_read(&pcpu->enable_sem);
- }
- return 0;
- }
- static struct notifier_block cpufreq_notifier_block = {
- .notifier_call = cpufreq_interactive_notifier,
- };
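- /*
- * Parse a space/colon separated list of unsigned ints, e.g.
- * (illustrative) "85 1500000:90 1800000:70" yields five tokens. Even
- * token counts are rejected: the lists above are a leading value
- * followed by threshold:value pairs.
- */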
- static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
- {
- const char *cp;
- int i;
- int ntokens = 1;
- unsigned int *tokenized_data;
- int err = -EINVAL;
- cp = buf;
- while ((cp = strpbrk(cp + 1, " :")))
- ntokens++;
- if (!(ntokens & 0x1))
- goto err;
- tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
- if (!tokenized_data) {
- err = -ENOMEM;
- goto err;
- }
- cp = buf;
- i = 0;
- while (i < ntokens) {
- if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
- goto err_kfree;
- cp = strpbrk(cp, " :");
- if (!cp)
- break;
- cp++;
- }
- if (i != ntokens)
- goto err_kfree;
- *num_tokens = ntokens;
- return tokenized_data;
- err_kfree:
- kfree(tokenized_data);
- err:
- return ERR_PTR(err);
- }
- static ssize_t show_target_loads(
- struct kobject *kobj, struct attribute *attr, char *buf)
- {
- int i;
- ssize_t ret = 0;
- unsigned long flags;
- spin_lock_irqsave(&target_loads_lock, flags);
- #ifdef CONFIG_MODE_AUTO_CHANGE
- for (i = 0; i < ntarget_loads_set[param_index]; i++)
- ret += sprintf(buf + ret, "%u%s", target_loads_set[param_index][i],
- i & 0x1 ? ":" : " ");
- #else
- for (i = 0; i < ntarget_loads; i++)
- ret += sprintf(buf + ret, "%u%s", target_loads[i],
- i & 0x1 ? ":" : " ");
- #endif
- ret = ret - 1;
- ret += sprintf(buf + ret, "\n");
- spin_unlock_irqrestore(&target_loads_lock, flags);
- return ret;
- }
- static ssize_t store_target_loads(
- struct kobject *kobj, struct attribute *attr, const char *buf,
- size_t count)
- {
- int ntokens;
- unsigned int *new_target_loads = NULL;
- unsigned long flags;
- #ifdef CONFIG_MODE_AUTO_CHANGE
- unsigned long flags2;
- #endif
- new_target_loads = get_tokenized_data(buf, &ntokens);
- if (IS_ERR(new_target_loads))
- return PTR_RET(new_target_loads);
- #ifdef CONFIG_MODE_AUTO_CHANGE
- spin_lock_irqsave(&mode_lock, flags2);
- #endif
- spin_lock_irqsave(&target_loads_lock, flags);
- #ifdef CONFIG_MODE_AUTO_CHANGE
- if (target_loads_set[param_index] != default_target_loads)
- kfree(target_loads_set[param_index]);
- target_loads_set[param_index] = new_target_loads;
- ntarget_loads_set[param_index] = ntokens;
- if (cur_param_index == param_index) {
- target_loads = new_target_loads;
- ntarget_loads = ntokens;
- }
- #else
- if (target_loads != default_target_loads)
- kfree(target_loads);
- target_loads = new_target_loads;
- ntarget_loads = ntokens;
- #endif
- spin_unlock_irqrestore(&target_loads_lock, flags);
- #ifdef CONFIG_MODE_AUTO_CHANGE
- spin_unlock_irqrestore(&mode_lock, flags2);
- #endif
- return count;
- }
- static struct global_attr target_loads_attr =
- __ATTR(target_loads, S_IRUGO | S_IWUSR,
- show_target_loads, store_target_loads);
- static ssize_t show_above_hispeed_delay(
- struct kobject *kobj, struct attribute *attr, char *buf)
- {
- int i;
- ssize_t ret = 0;
- unsigned long flags;
- spin_lock_irqsave(&above_hispeed_delay_lock, flags);
- #ifdef CONFIG_MODE_AUTO_CHANGE
- for (i = 0; i < nabove_hispeed_delay_set[param_index]; i++)
- ret += sprintf(buf + ret, "%u%s", above_hispeed_delay_set[param_index][i],
- i & 0x1 ? ":" : " ");
- #else
- for (i = 0; i < nabove_hispeed_delay; i++)
- ret += sprintf(buf + ret, "%u%s", above_hispeed_delay[i],
- i & 0x1 ? ":" : " ");
- #endif
- ret = ret - 1;
- ret += sprintf(buf + ret, "\n");
- spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);
- return ret;
- }
- static ssize_t store_above_hispeed_delay(
- struct kobject *kobj, struct attribute *attr, const char *buf,
- size_t count)
- {
- int ntokens;
- unsigned int *new_above_hispeed_delay = NULL;
- unsigned long flags;
- #ifdef CONFIG_MODE_AUTO_CHANGE
- unsigned long flags2;
- #endif
- new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
- if (IS_ERR(new_above_hispeed_delay))
- return PTR_RET(new_above_hispeed_delay);
- #ifdef CONFIG_MODE_AUTO_CHANGE
- spin_lock_irqsave(&mode_lock, flags2);
- #endif
- spin_lock_irqsave(&above_hispeed_delay_lock, flags);
- #ifdef CONFIG_MODE_AUTO_CHANGE
- if (above_hispeed_delay_set[param_index] != default_above_hispeed_delay)
- kfree(above_hispeed_delay_set[param_index]);
- above_hispeed_delay_set[param_index] = new_above_hispeed_delay;
- nabove_hispeed_delay_set[param_index] = ntokens;
- if (cur_param_index == param_index) {
- above_hispeed_delay = new_above_hispeed_delay;
- nabove_hispeed_delay = ntokens;
- }
- #else
- if (above_hispeed_delay != default_above_hispeed_delay)
- kfree(above_hispeed_delay);
- above_hispeed_delay = new_above_hispeed_delay;
- nabove_hispeed_delay = ntokens;
- #endif
- spin_unlock_irqrestore(&above_hispeed_delay_lock, flags);
- #ifdef CONFIG_MODE_AUTO_CHANGE
- spin_unlock_irqrestore(&mode_lock, flags2);
- #endif
- return count;
- }
- static struct global_attr above_hispeed_delay_attr =
- __ATTR(above_hispeed_delay, S_IRUGO | S_IWUSR,
- show_above_hispeed_delay, store_above_hispeed_delay);
- static ssize_t show_hispeed_freq(struct kobject *kobj,
- struct attribute *attr, char *buf)
- {
- #ifdef CONFIG_MODE_AUTO_CHANGE
- return sprintf(buf, "%u\n", hispeed_freq_set[param_index]);
- #else
- return sprintf(buf, "%u\n", hispeed_freq);
- #endif
- }
- static ssize_t store_hispeed_freq(struct kobject *kobj,
- struct attribute *attr, const char *buf,
- size_t count)
- {
- int ret;
- long unsigned int val;
- #ifdef CONFIG_MODE_AUTO_CHANGE
- unsigned long flags2;
- #endif
- ret = strict_strtoul(buf, 0, &val);
- if (ret < 0)
- return ret;
- #ifdef CONFIG_MODE_AUTO_CHANGE
- spin_lock_irqsave(&mode_lock, flags2);
- hispeed_freq_set[param_index] = val;
- if (cur_param_index == param_index)
- hispeed_freq = val;
- spin_unlock_irqrestore(&mode_lock, flags2);
- #else
- hispeed_freq = val;
- #endif
- return count;
- }
- static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
- show_hispeed_freq, store_hispeed_freq);
- static ssize_t show_sampling_down_factor(struct kobject *kobj,
- struct attribute *attr, char *buf)
- {
- #ifdef CONFIG_MODE_AUTO_CHANGE
- return sprintf(buf, "%u\n", sampling_down_factor_set[param_index]);
- #else
- return sprintf(buf, "%u\n", sampling_down_factor);
- #endif
- }
- static ssize_t store_sampling_down_factor(struct kobject *kobj,
- struct attribute *attr, const char *buf,
- size_t count)
- {
- int ret;
- long unsigned int val;
- #ifdef CONFIG_MODE_AUTO_CHANGE
- unsigned long flags2;
- #endif
- ret = strict_strtoul(buf, 0, &val);
- if (ret < 0)
- return ret;
- #ifdef CONFIG_MODE_AUTO_CHANGE
- spin_lock_irqsave(&mode_lock, flags2);
- sampling_down_factor_set[param_index] = val;
- if (cur_param_index == param_index)
- sampling_down_factor = val;
- spin_unlock_irqrestore(&mode_lock, flags2);
- #else
- sampling_down_factor = val;
- #endif
- return count;
- }
- static struct global_attr sampling_down_factor_attr =
- __ATTR(sampling_down_factor, 0644,
- show_sampling_down_factor, store_sampling_down_factor);
- static ssize_t show_go_hispeed_load(struct kobject *kobj,
- struct attribute *attr, char *buf)
- {
- #ifdef CONFIG_MODE_AUTO_CHANGE
- return sprintf(buf, "%lu\n", go_hispeed_load_set[param_index]);
- #else
- return sprintf(buf, "%lu\n", go_hispeed_load);
- #endif
- }
- static ssize_t store_go_hispeed_load(struct kobject *kobj,
- struct attribute *attr, const char *buf, size_t count)
- {
- int ret;
- unsigned long val;
- #ifdef CONFIG_MODE_AUTO_CHANGE
- unsigned long flags2;
- #endif
- ret = strict_strtoul(buf, 0, &val);
- if (ret < 0)
- return ret;
- #ifdef CONFIG_MODE_AUTO_CHANGE
- spin_lock_irqsave(&mode_lock, flags2);
- go_hispeed_load_set[param_index] = val;
- if (cur_param_index == param_index)
- go_hispeed_load = val;
- spin_unlock_irqrestore(&mode_lock, flags2);
- #else
- go_hispeed_load = val;
- #endif
- return count;
- }
- static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
- show_go_hispeed_load, store_go_hispeed_load);
- static ssize_t show_min_sample_time(struct kobject *kobj,
- struct attribute *attr, char *buf)
- {
- #ifdef CONFIG_MODE_AUTO_CHANGE
- return sprintf(buf, "%lu\n", min_sample_time_set[param_index]);
- #else
- return sprintf(buf, "%lu\n", min_sample_time);
- #endif
- }
- static ssize_t store_min_sample_time(struct kobject *kobj,
- struct attribute *attr, const char *buf, size_t count)
- {
- int ret;
- unsigned long val;
- #ifdef CONFIG_MODE_AUTO_CHANGE
- unsigned long flags2;
- #endif
- ret = strict_strtoul(buf, 0, &val);
- if (ret < 0)
- return ret;
- #ifdef CONFIG_MODE_AUTO_CHANGE
- spin_lock_irqsave(&mode_lock, flags2);
- min_sample_time_set[param_index] = val;
- if (cur_param_index == param_index)
- min_sample_time = val;
- spin_unlock_irqrestore(&mode_lock, flags2);
- #else
- min_sample_time = val;
- #endif
- return count;
- }
- static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
- show_min_sample_time, store_min_sample_time);
- static ssize_t show_timer_rate(struct kobject *kobj,
- struct attribute *attr, char *buf)
- {
- #ifdef CONFIG_MODE_AUTO_CHANGE
- return sprintf(buf, "%lu\n", timer_rate_set[param_index]);
- #else
- return sprintf(buf, "%lu\n", timer_rate);
- #endif
- }
- static ssize_t store_timer_rate(struct kobject *kobj,
- struct attribute *attr, const char *buf, size_t count)
- {
- int ret;
- unsigned long val;
- #ifdef CONFIG_MODE_AUTO_CHANGE
- unsigned long flags2;
- #endif
- ret = strict_strtoul(buf, 0, &val);
- if (ret < 0)
- return ret;
- #ifdef CONFIG_MODE_AUTO_CHANGE
- spin_lock_irqsave(&mode_lock, flags2);
- timer_rate_set[param_index] = val;
- if (cur_param_index == param_index)
- timer_rate = val;
- spin_unlock_irqrestore(&mode_lock, flags2);
- #else
- timer_rate = val;
- #endif
- return count;
- }
- static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
- show_timer_rate, store_timer_rate);
- static ssize_t show_timer_slack(
- struct kobject *kobj, struct attribute *attr, char *buf)
- {
- return sprintf(buf, "%d\n", timer_slack_val);
- }
- static ssize_t store_timer_slack(
- struct kobject *kobj, struct attribute *attr, const char *buf,
- size_t count)
- {
- int ret;
- long val;
- ret = kstrtol(buf, 10, &val);
- if (ret < 0)
- return ret;
- timer_slack_val = val;
- return count;
- }
- define_one_global_rw(timer_slack);
- static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
- char *buf)
- {
- return sprintf(buf, "%d\n", boost_val);
- }
- static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t count)
- {
- int ret;
- unsigned long val;
- ret = kstrtoul(buf, 0, &val);
- if (ret < 0)
- return ret;
- boost_val = val;
- if (boost_val) {
- trace_cpufreq_interactive_boost("on");
- cpufreq_interactive_boost();
- } else {
- trace_cpufreq_interactive_unboost("off");
- }
- return count;
- }
- define_one_global_rw(boost);
- static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t count)
- {
- int ret;
- unsigned long val;
- ret = kstrtoul(buf, 0, &val);
- if (ret < 0)
- return ret;
- boostpulse_endtime = ktime_to_us(ktime_get()) + boostpulse_duration_val;
- trace_cpufreq_interactive_boost("pulse");
- cpufreq_interactive_boost();
- return count;
- }
- static struct global_attr boostpulse =
- __ATTR(boostpulse, 0200, NULL, store_boostpulse);
- static ssize_t show_boostpulse_duration(
- struct kobject *kobj, struct attribute *attr, char *buf)
- {
- return sprintf(buf, "%d\n", boostpulse_duration_val);
- }
- static ssize_t store_boostpulse_duration(
- struct kobject *kobj, struct attribute *attr, const char *buf,
- size_t count)
- {
- int ret;
- unsigned long val;
- ret = kstrtoul(buf, 0, &val);
- if (ret < 0)
- return ret;
- boostpulse_duration_val = val;
- return count;
- }
- define_one_global_rw(boostpulse_duration);
- static ssize_t show_io_is_busy(struct kobject *kobj,
- struct attribute *attr, char *buf)
- {
- return sprintf(buf, "%u\n", io_is_busy);
- }
- static ssize_t store_io_is_busy(struct kobject *kobj,
- struct attribute *attr, const char *buf, size_t count)
- {
- int ret;
- unsigned long val;
- ret = kstrtoul(buf, 0, &val);
- if (ret < 0)
- return ret;
- io_is_busy = val;
- return count;
- }
- static struct global_attr io_is_busy_attr = __ATTR(io_is_busy, 0644,
- show_io_is_busy, store_io_is_busy);
- static ssize_t show_sync_freq(struct kobject *kobj,
- struct attribute *attr, char *buf)
- {
- return sprintf(buf, "%u\n", sync_freq);
- }
- static ssize_t store_sync_freq(struct kobject *kobj,
- struct attribute *attr, const char *buf, size_t count)
- {
- int ret;
- unsigned long val;
- ret = kstrtoul(buf, 0, &val);
- if (ret < 0)
- return ret;
- sync_freq = val;
- return count;
- }
- static struct global_attr sync_freq_attr = __ATTR(sync_freq, 0644,
- show_sync_freq, store_sync_freq);
- static ssize_t show_up_threshold_any_cpu_load(struct kobject *kobj,
- struct attribute *attr, char *buf)
- {
- return snprintf(buf, PAGE_SIZE, "%u\n", up_threshold_any_cpu_load);
- }
- static ssize_t store_up_threshold_any_cpu_load(struct kobject *kobj,
- struct attribute *attr, const char *buf, size_t count)
- {
- int ret;
- unsigned long val;
- ret = kstrtoul(buf, 0, &val);
- if (ret < 0)
- return ret;
- up_threshold_any_cpu_load = val;
- return count;
- }
- static struct global_attr up_threshold_any_cpu_load_attr =
- __ATTR(up_threshold_any_cpu_load, 0644,
- show_up_threshold_any_cpu_load,
- store_up_threshold_any_cpu_load);
- static ssize_t show_up_threshold_any_cpu_freq(struct kobject *kobj,
- struct attribute *attr, char *buf)
- {
- return snprintf(buf, PAGE_SIZE, "%u\n", up_threshold_any_cpu_freq);
- }
- static ssize_t store_up_threshold_any_cpu_freq(struct kobject *kobj,
- struct attribute *attr, const char *buf, size_t count)
- {
- int ret;
- unsigned long val;
- ret = kstrtoul(buf, 0, &val);
- if (ret < 0)
- return ret;
- up_threshold_any_cpu_freq = val;
- return count;
- }
- static struct global_attr up_threshold_any_cpu_freq_attr =
- __ATTR(up_threshold_any_cpu_freq, 0644,
- show_up_threshold_any_cpu_freq,
- store_up_threshold_any_cpu_freq);
- #ifdef CONFIG_MODE_AUTO_CHANGE
- #define index(obj_name, obj_attr) \
- static ssize_t show_##obj_name(struct kobject *kobj, \
- struct attribute *attr, char *buf) \
- { \
- return sprintf(buf, "%u\n", obj_name); \
- } \
- \
- static ssize_t store_##obj_name(struct kobject *kobj, \
- struct attribute *attr, const char *buf, \
- size_t count) \
- { \
- int ret; \
- long unsigned int val; \
- \
- ret = strict_strtoul(buf, 0, &val); \
- if (ret < 0) \
- return ret; \
- \
- val &= MULTI_MODE | SINGLE_MODE | NO_MODE; \
- obj_name = val; \
- return count; \
- } \
- \
- static struct global_attr obj_attr = __ATTR(obj_name, 0644, \
- show_##obj_name, store_##obj_name);
- index(mode, mode_attr);
- index(enforced_mode, enforced_mode_attr);
- index(param_index, param_index_attr);
- #define load(obj_name, obj_attr) \
- static ssize_t show_##obj_name(struct kobject *kobj, \
- struct attribute *attr, char *buf) \
- { \
- return sprintf(buf, "%u\n", obj_name); \
- } \
- \
- static ssize_t store_##obj_name(struct kobject *kobj, \
- struct attribute *attr, const char *buf, \
- size_t count) \
- { \
- int ret; \
- long unsigned int val; \
- \
- ret = strict_strtoul(buf, 0, &val); \
- if (ret < 0) \
- return ret; \
- \
- obj_name = val; \
- return count; \
- } \
- \
- static struct global_attr obj_attr = __ATTR(obj_name, 0644, \
- show_##obj_name, store_##obj_name);
- load(multi_enter_load, multi_enter_load_attr);
- load(multi_exit_load, multi_exit_load_attr);
- load(single_enter_load, single_enter_load_attr);
- load(single_exit_load, single_exit_load_attr);
- #define time(obj_name, obj_attr) \
- static ssize_t show_##obj_name(struct kobject *kobj, \
- struct attribute *attr, char *buf) \
- { \
- return sprintf(buf, "%lu\n", obj_name); \
- } \
- \
- static ssize_t store_##obj_name(struct kobject *kobj, \
- struct attribute *attr, const char *buf, \
- size_t count) \
- { \
- int ret; \
- unsigned long val; \
- \
- ret = strict_strtoul(buf, 0, &val); \
- if (ret < 0) \
- return ret; \
- \
- obj_name = val; \
- return count; \
- } \
- \
- static struct global_attr obj_attr = __ATTR(obj_name, 0644, \
- show_##obj_name, store_##obj_name);
- time(multi_enter_time, multi_enter_time_attr);
- time(multi_exit_time, multi_exit_time_attr);
- time(single_enter_time, single_enter_time_attr);
- time(single_exit_time, single_exit_time_attr);
- #endif
- static struct attribute *interactive_attributes[] = {
- &target_loads_attr.attr,
- &above_hispeed_delay_attr.attr,
- &hispeed_freq_attr.attr,
- &go_hispeed_load_attr.attr,
- &min_sample_time_attr.attr,
- &timer_rate_attr.attr,
- &timer_slack.attr,
- &boost.attr,
- &boostpulse.attr,
- &boostpulse_duration.attr,
- &io_is_busy_attr.attr,
- &sampling_down_factor_attr.attr,
- &sync_freq_attr.attr,
- &up_threshold_any_cpu_load_attr.attr,
- &up_threshold_any_cpu_freq_attr.attr,
- #ifdef CONFIG_MODE_AUTO_CHANGE
- &mode_attr.attr,
- &enforced_mode_attr.attr,
- &param_index_attr.attr,
- &multi_enter_load_attr.attr,
- &multi_exit_load_attr.attr,
- &single_enter_load_attr.attr,
- &single_exit_load_attr.attr,
- &multi_enter_time_attr.attr,
- &multi_exit_time_attr.attr,
- &single_enter_time_attr.attr,
- &single_exit_time_attr.attr,
- #endif
- NULL,
- };
- static struct attribute_group interactive_attr_group = {
- .attrs = interactive_attributes,
- .name = "interactive",
- };
- static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
- unsigned long val,
- void *data)
- {
- switch (val) {
- case IDLE_START:
- cpufreq_interactive_idle_start();
- break;
- case IDLE_END:
- cpufreq_interactive_idle_end();
- break;
- }
- return 0;
- }
- static struct notifier_block cpufreq_interactive_idle_nb = {
- .notifier_call = cpufreq_interactive_idle_notifier,
- };
- #ifdef CONFIG_MODE_AUTO_CHANGE
- static void cpufreq_param_set_init(void)
- {
- unsigned int i;
- unsigned long flags;
- multi_enter_load = DEFAULT_TARGET_LOAD * num_possible_cpus();
- spin_lock_irqsave(&mode_lock, flags);
- for (i = 0; i < MAX_PARAM_SET; i++) {
- hispeed_freq_set[i] = 0;
- go_hispeed_load_set[i] = go_hispeed_load;
- target_loads_set[i] = target_loads;
- ntarget_loads_set[i] = ntarget_loads;
- min_sample_time_set[i] = min_sample_time;
- timer_rate_set[i] = timer_rate;
- above_hispeed_delay_set[i] = above_hispeed_delay;
- nabove_hispeed_delay_set[i] = nabove_hispeed_delay;
- sampling_down_factor_set[i] = sampling_down_factor;
- }
- spin_unlock_irqrestore(&mode_lock, flags);
- }
- #endif
- static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
- unsigned int event)
- {
- int rc;
- unsigned int j;
- struct cpufreq_interactive_cpuinfo *pcpu;
- struct cpufreq_frequency_table *freq_table;
- switch (event) {
- case CPUFREQ_GOV_START:
- if (!cpu_online(policy->cpu))
- return -EINVAL;
- mutex_lock(&gov_lock);
- freq_table =
- cpufreq_frequency_get_table(policy->cpu);
- if (!hispeed_freq)
- hispeed_freq = policy->max;
- #ifdef CONFIG_MODE_AUTO_CHANGE
- for (j = 0; j < MAX_PARAM_SET; j++)
- if (!hispeed_freq_set[j])
- hispeed_freq_set[j] = policy->max;
- #endif
- for_each_cpu(j, policy->cpus) {
- pcpu = &per_cpu(cpuinfo, j);
- pcpu->policy = policy;
- pcpu->target_freq = policy->cur;
- pcpu->freq_table = freq_table;
- pcpu->floor_freq = pcpu->target_freq;
- pcpu->floor_validate_time =
- ktime_to_us(ktime_get());
- pcpu->hispeed_validate_time =
- pcpu->floor_validate_time;
- down_write(&pcpu->enable_sem);
- del_timer_sync(&pcpu->cpu_timer);
- del_timer_sync(&pcpu->cpu_slack_timer);
- cpufreq_interactive_timer_start(j);
- pcpu->governor_enabled = 1;
- up_write(&pcpu->enable_sem);
- }
- /*
- * Do not register the idle hook and create sysfs
- * entries if we have already done so.
- */
- if (++active_count > 1) {
- mutex_unlock(&gov_lock);
- return 0;
- }
- rc = sysfs_create_group(cpufreq_global_kobject,
- &interactive_attr_group);
- if (rc) {
- mutex_unlock(&gov_lock);
- return rc;
- }
- idle_notifier_register(&cpufreq_interactive_idle_nb);
- cpufreq_register_notifier(
- &cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
- mutex_unlock(&gov_lock);
- break;
- case CPUFREQ_GOV_STOP:
- mutex_lock(&gov_lock);
- for_each_cpu(j, policy->cpus) {
- pcpu = &per_cpu(cpuinfo, j);
- down_write(&pcpu->enable_sem);
- pcpu->governor_enabled = 0;
- pcpu->target_freq = 0;
- del_timer_sync(&pcpu->cpu_timer);
- del_timer_sync(&pcpu->cpu_slack_timer);
- up_write(&pcpu->enable_sem);
- }
- if (--active_count > 0) {
- mutex_unlock(&gov_lock);
- return 0;
- }
- cpufreq_unregister_notifier(
- &cpufreq_notifier_block, CPUFREQ_TRANSITION_NOTIFIER);
- idle_notifier_unregister(&cpufreq_interactive_idle_nb);
- sysfs_remove_group(cpufreq_global_kobject,
- &interactive_attr_group);
- mutex_unlock(&gov_lock);
- break;
- case CPUFREQ_GOV_LIMITS:
- if (policy->max < policy->cur)
- __cpufreq_driver_target(policy,
- policy->max, CPUFREQ_RELATION_H);
- else if (policy->min > policy->cur)
- __cpufreq_driver_target(policy,
- policy->min, CPUFREQ_RELATION_L);
- for_each_cpu(j, policy->cpus) {
- pcpu = &per_cpu(cpuinfo, j);
- /* hold write semaphore to avoid race */
- down_write(&pcpu->enable_sem);
- if (pcpu->governor_enabled == 0) {
- up_write(&pcpu->enable_sem);
- continue;
- }
- /* update target_freq firstly */
- if (policy->max < pcpu->target_freq)
- pcpu->target_freq = policy->max;
- else if (policy->min > pcpu->target_freq)
- pcpu->target_freq = policy->min;
- /* Reschedule timer.
- * Delete the timers, else the timer callback may
- * return without re-arm the timer when failed
- * acquire the semaphore. This race may cause timer
- * stopped unexpectedly.
- */
- del_timer_sync(&pcpu->cpu_timer);
- del_timer_sync(&pcpu->cpu_slack_timer);
- cpufreq_interactive_timer_start(j);
- pcpu->minfreq_boost = 1;
- up_write(&pcpu->enable_sem);
- }
- break;
- }
- return 0;
- }
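- /*
- * The slack timer exists only to wake the CPU out of idle so that the
- * idle-end hook can re-evaluate speed; its handler deliberately does
- * nothing.
- */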
- static void cpufreq_interactive_nop_timer(unsigned long data)
- {
- }
- static int __init cpufreq_interactive_init(void)
- {
- unsigned int i;
- struct cpufreq_interactive_cpuinfo *pcpu;
- struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
- /* Initialize per-cpu timers */
- for_each_possible_cpu(i) {
- pcpu = &per_cpu(cpuinfo, i);
- init_timer_deferrable(&pcpu->cpu_timer);
- pcpu->cpu_timer.function = cpufreq_interactive_timer;
- pcpu->cpu_timer.data = i;
- init_timer(&pcpu->cpu_slack_timer);
- pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
- spin_lock_init(&pcpu->load_lock);
- init_rwsem(&pcpu->enable_sem);
- }
- spin_lock_init(&target_loads_lock);
- spin_lock_init(&speedchange_cpumask_lock);
- spin_lock_init(&above_hispeed_delay_lock);
- #ifdef CONFIG_MODE_AUTO_CHANGE
- spin_lock_init(&mode_lock);
- cpufreq_param_set_init();
- #endif
- #ifdef CONFIG_RETENTION_CHANGE
- retention_toggle_wq = alloc_workqueue("retentionToggle_wq", WQ_HIGHPRI, 0);
- if (!retention_toggle_wq)
- pr_err("retention toggle workqueue init error\n");
- INIT_WORK(&retention_toggle_work, do_toggle_retention);
- #endif
- mutex_init(&gov_lock);
- speedchange_task =
- kthread_create(cpufreq_interactive_speedchange_task, NULL,
- "cfinteractive");
- if (IS_ERR(speedchange_task))
- return PTR_ERR(speedchange_task);
- sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
- get_task_struct(speedchange_task);
- /* NB: wake up so the thread does not look hung to the freezer */
- wake_up_process(speedchange_task);
- return cpufreq_register_governor(&cpufreq_gov_interactive);
- }
- #ifdef CONFIG_RETENTION_CHANGE
- static void do_toggle_retention(struct work_struct *work)
- {
- if (mode_count == 1)
- msm_pm_retention_mode_enable(0);
- else if (mode_count == 0)
- msm_pm_retention_mode_enable(1);
- }
- #endif /* CONFIG_RETENTION_CHANGE */
- #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
- fs_initcall(cpufreq_interactive_init);
- #else
- module_init(cpufreq_interactive_init);
- #endif
- static void __exit cpufreq_interactive_exit(void)
- {
- cpufreq_unregister_governor(&cpufreq_gov_interactive);
- kthread_stop(speedchange_task);
- put_task_struct(speedchange_task);
- }
- module_exit(cpufreq_interactive_exit);
- MODULE_AUTHOR("Mike Chan <mike@android.com>");
- MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
- "Latency sensitive workloads");
- MODULE_LICENSE("GPL");