cpufreq_interactive.c

/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <asm/cputime.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>
static atomic_t active_count = ATOMIC_INIT(0);

struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	int timer_idlecancel;
	u64 time_in_idle;
	u64 idle_exit_time;
	u64 timer_run_time;
	int idling;
	u64 target_set_time;
	u64 target_set_time_in_idle;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	unsigned int target_freq;
	unsigned int floor_freq;
	u64 floor_validate_time;
	int governor_enabled;
};
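
/*
 * Notes on the fields above (a summary inferred from how they are used
 * below): time_in_idle/idle_exit_time mark the start of the current
 * short-term idle sample and timer_run_time the point at which the timer
 * consumed it; target_set_time{,_in_idle} anchor the long-term sample
 * taken since the last speed change; floor_freq/floor_validate_time
 * enforce the min_sample_time hold before speed may drop below the floor.
 */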

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* Realtime kthread handles raising frequency; workqueue handles lowering */
static struct task_struct *up_task;
static struct workqueue_struct *down_wq;
static struct work_struct freq_scale_down_work;
static cpumask_t up_cpumask;
static spinlock_t up_cpumask_lock;
static cpumask_t down_cpumask;
static spinlock_t down_cpumask_lock;
static struct mutex set_speed_lock;
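
/*
 * Why two paths (an observation from the code, not an authoritative
 * design note): raising speed is latency-critical, so it is handed to a
 * dedicated SCHED_FIFO kthread (up_task; see cpufreq_interactive_init()),
 * while lowering speed can tolerate scheduling delay and goes through
 * the down_wq workqueue.  up_cpumask/down_cpumask record which CPUs have
 * pending requests in each direction, each guarded by its own spinlock.
 */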

/* Hi speed to bump to from lo speed when load bursts (default max) */
static u64 hispeed_freq;

/* Go to hi speed when CPU load is at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 85
static unsigned long go_hispeed_load;

/*
 * The minimum amount of time to spend at a frequency before we can ramp down.
 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
static unsigned long min_sample_time;

/*
 * The sample rate of the timer used to increase frequency.
 */
#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
static unsigned long timer_rate;

/*
 * Wait this long before raising speed above hispeed, by default a single
 * timer interval.
 */
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned long above_hispeed_delay_val;

/*
 * Boost pulse to hispeed on touchscreen input.
 */
static int input_boost_val;

struct cpufreq_interactive_inputopen {
	struct input_handle *handle;
	struct work_struct inputopen_work;
};

static struct cpufreq_interactive_inputopen inputopen;

/*
 * Non-zero means longer-term speed boost active.
 */
static int boost_val;

static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event);

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};
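
/*
 * Per-CPU sampling timer: the core of the governor.  In outline (a
 * summary of the function below): measure load over both the short-term
 * window (since the last idle sample) and the long-term window (since
 * the last speed change) and use the greater of the two.  Load at or
 * above go_hispeed_load, or an active boost, targets at least
 * hispeed_freq; otherwise the target scales as policy->max * load / 100.
 * Drops below floor_freq are deferred until min_sample_time has elapsed.
 */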
static void cpufreq_interactive_timer(unsigned long data)
{
	unsigned int delta_idle;
	unsigned int delta_time;
	int cpu_load;
	int load_since_change;
	u64 time_in_idle;
	u64 idle_exit_time;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	u64 now_idle;
	unsigned int new_freq;
	unsigned int index;
	unsigned long flags;

	smp_rmb();

	if (!pcpu->governor_enabled)
		goto exit;

	/*
	 * Once pcpu->timer_run_time is updated to >= pcpu->idle_exit_time,
	 * this lets idle exit know the current idle time sample has
	 * been processed, and idle exit can generate a new sample and
	 * re-arm the timer.  This prevents a concurrent idle
	 * exit on that CPU from writing a new set of info at the same time
	 * the timer function runs (the timer function can't use that info
	 * until more time passes).
	 */
	time_in_idle = pcpu->time_in_idle;
	idle_exit_time = pcpu->idle_exit_time;
	now_idle = get_cpu_idle_time_us(data, &pcpu->timer_run_time);
	smp_wmb();

	/* If we raced with cancelling a timer, skip. */
	if (!idle_exit_time)
		goto exit;

	delta_idle = (unsigned int) cputime64_sub(now_idle, time_in_idle);
	delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time,
						  idle_exit_time);

	/*
	 * If timer ran less than 1ms after short-term sample started, retry.
	 */
	if (delta_time < 1000)
		goto rearm;

	if (delta_idle > delta_time)
		cpu_load = 0;
	else
		cpu_load = 100 * (delta_time - delta_idle) / delta_time;

	delta_idle = (unsigned int) cputime64_sub(now_idle,
						  pcpu->target_set_time_in_idle);
	delta_time = (unsigned int) cputime64_sub(pcpu->timer_run_time,
						  pcpu->target_set_time);

	if ((delta_time == 0) || (delta_idle > delta_time))
		load_since_change = 0;
	else
		load_since_change =
			100 * (delta_time - delta_idle) / delta_time;

	/*
	 * Choose greater of short-term load (since last idle timer
	 * started or timer function re-armed itself) or long-term load
	 * (since last frequency change).
	 */
	if (load_since_change > cpu_load)
		cpu_load = load_since_change;

	if (cpu_load >= go_hispeed_load || boost_val) {
		if (pcpu->target_freq <= pcpu->policy->min) {
			new_freq = hispeed_freq;
		} else {
			new_freq = pcpu->policy->max * cpu_load / 100;

			if (new_freq < hispeed_freq)
				new_freq = hispeed_freq;

			if (pcpu->target_freq == hispeed_freq &&
			    new_freq > hispeed_freq &&
			    cputime64_sub(pcpu->timer_run_time,
					  pcpu->target_set_time)
			    < above_hispeed_delay_val) {
				trace_cpufreq_interactive_notyet(data, cpu_load,
								 pcpu->target_freq,
								 new_freq);
				goto rearm;
			}
		}
	} else {
		new_freq = pcpu->policy->max * cpu_load / 100;
	}

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_H,
					   &index)) {
		pr_warn_once("timer %d: cpufreq_frequency_table_target error\n",
			     (int) data);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	if (new_freq < pcpu->floor_freq) {
		if (cputime64_sub(pcpu->timer_run_time,
				  pcpu->floor_validate_time)
		    < min_sample_time) {
			trace_cpufreq_interactive_notyet(data, cpu_load,
							 pcpu->target_freq,
							 new_freq);
			goto rearm;
		}
	}

	pcpu->floor_freq = new_freq;
	pcpu->floor_validate_time = pcpu->timer_run_time;

	if (pcpu->target_freq == new_freq) {
		trace_cpufreq_interactive_already(data, cpu_load,
						  pcpu->target_freq, new_freq);
		goto rearm_if_notmax;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 new_freq);
	pcpu->target_set_time_in_idle = now_idle;
	pcpu->target_set_time = pcpu->timer_run_time;

	if (new_freq < pcpu->target_freq) {
		pcpu->target_freq = new_freq;
		spin_lock_irqsave(&down_cpumask_lock, flags);
		cpumask_set_cpu(data, &down_cpumask);
		spin_unlock_irqrestore(&down_cpumask_lock, flags);
		queue_work(down_wq, &freq_scale_down_work);
	} else {
		pcpu->target_freq = new_freq;
		spin_lock_irqsave(&up_cpumask_lock, flags);
		cpumask_set_cpu(data, &up_cpumask);
		spin_unlock_irqrestore(&up_cpumask_lock, flags);
		wake_up_process(up_task);
	}

rearm_if_notmax:
	/*
	 * Already set max speed and don't see a need to change that,
	 * wait until next idle to re-evaluate, don't need timer.
	 */
	if (pcpu->target_freq == pcpu->policy->max)
		goto exit;

rearm:
	if (!timer_pending(&pcpu->cpu_timer)) {
		/*
		 * If already at min: if that CPU is idle, don't set timer.
		 * Else cancel the timer if that CPU goes idle.  We don't
		 * need to re-evaluate speed until the next idle exit.
		 */
		if (pcpu->target_freq == pcpu->policy->min) {
			smp_rmb();

			if (pcpu->idling)
				goto exit;

			pcpu->timer_idlecancel = 1;
		}

		pcpu->time_in_idle = get_cpu_idle_time_us(
			data, &pcpu->idle_exit_time);
		mod_timer(&pcpu->cpu_timer,
			  jiffies + usecs_to_jiffies(timer_rate));
	}

exit:
	return;
}
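
/*
 * Idle hooks, invoked through the idle notifier registered below.  Idle
 * entry may arm the sampling timer (so an idle CPU at high speed does
 * not pin its siblings there) or cancel a now-unneeded one; idle exit
 * re-arms the timer once the previous sample has been consumed.
 */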
static void cpufreq_interactive_idle_start(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());
	int pending;

	if (!pcpu->governor_enabled)
		return;

	pcpu->idling = 1;
	smp_wmb();
	pending = timer_pending(&pcpu->cpu_timer);

	if (pcpu->target_freq != pcpu->policy->min) {
#ifdef CONFIG_SMP
		/*
		 * Entering idle while not at lowest speed.  On some
		 * platforms this can hold the other CPU(s) at that speed
		 * even though the CPU is idle.  Set a timer to re-evaluate
		 * speed so this idle CPU doesn't hold the other CPUs above
		 * min indefinitely.  This should probably be a quirk of
		 * the CPUFreq driver.
		 */
		if (!pending) {
			pcpu->time_in_idle = get_cpu_idle_time_us(
				smp_processor_id(), &pcpu->idle_exit_time);
			pcpu->timer_idlecancel = 0;
			mod_timer(&pcpu->cpu_timer,
				  jiffies + usecs_to_jiffies(timer_rate));
		}
#endif
	} else {
		/*
		 * If at min speed and entering idle after load has
		 * already been evaluated, and a timer has been set just in
		 * case the CPU suddenly goes busy, cancel that timer.  The
		 * CPU didn't go busy; we'll recheck things upon idle exit.
		 */
		if (pending && pcpu->timer_idlecancel) {
			del_timer(&pcpu->cpu_timer);
			/*
			 * Ensure last timer run time is after current idle
			 * sample start time, so next idle exit will always
			 * start a new idle sampling period.
			 */
			pcpu->idle_exit_time = 0;
			pcpu->timer_idlecancel = 0;
		}
	}
}

static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	pcpu->idling = 0;
	smp_wmb();

	/*
	 * Arm the timer for 1-2 ticks later if not already, and if the timer
	 * function has already processed the previous load sampling
	 * interval.  (If the timer is not pending but has not processed
	 * the previous interval, it is probably racing with us on another
	 * CPU.  Let it compute load based on the previous sample and then
	 * re-arm the timer for another interval when it's done, rather
	 * than updating the interval start time to be "now", which doesn't
	 * give the timer function enough time to make a decision on this
	 * run.)
	 */
	if (timer_pending(&pcpu->cpu_timer) == 0 &&
	    pcpu->timer_run_time >= pcpu->idle_exit_time &&
	    pcpu->governor_enabled) {
		pcpu->time_in_idle =
			get_cpu_idle_time_us(smp_processor_id(),
					     &pcpu->idle_exit_time);
		pcpu->timer_idlecancel = 0;
		mod_timer(&pcpu->cpu_timer,
			  jiffies + usecs_to_jiffies(timer_rate));
	}
}
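
/*
 * Speed-raise worker, run from the kinteractiveup RT kthread.  For each
 * CPU flagged in up_cpumask it applies the highest target_freq among all
 * CPUs sharing the policy, so one busy CPU is never slowed because a
 * sibling requested less.  The down path below mirrors this logic.
 */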
static int cpufreq_interactive_up_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&up_cpumask_lock, flags);

		if (cpumask_empty(&up_cpumask)) {
			spin_unlock_irqrestore(&up_cpumask_lock, flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&up_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = up_cpumask;
		cpumask_clear(&up_cpumask);
		spin_unlock_irqrestore(&up_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			smp_rmb();

			if (!pcpu->governor_enabled)
				continue;

			mutex_lock(&set_speed_lock);

			for_each_cpu(j, pcpu->policy->cpus) {
				struct cpufreq_interactive_cpuinfo *pjcpu =
					&per_cpu(cpuinfo, j);

				if (pjcpu->target_freq > max_freq)
					max_freq = pjcpu->target_freq;
			}

			if (max_freq != pcpu->policy->cur)
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
			mutex_unlock(&set_speed_lock);

			trace_cpufreq_interactive_up(cpu, pcpu->target_freq,
						     pcpu->policy->cur);
		}
	}

	return 0;
}
static void cpufreq_interactive_freq_down(struct work_struct *work)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	spin_lock_irqsave(&down_cpumask_lock, flags);
	tmp_mask = down_cpumask;
	cpumask_clear(&down_cpumask);
	spin_unlock_irqrestore(&down_cpumask_lock, flags);

	for_each_cpu(cpu, &tmp_mask) {
		unsigned int j;
		unsigned int max_freq = 0;

		pcpu = &per_cpu(cpuinfo, cpu);
		smp_rmb();

		if (!pcpu->governor_enabled)
			continue;

		mutex_lock(&set_speed_lock);

		for_each_cpu(j, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, j);

			if (pjcpu->target_freq > max_freq)
				max_freq = pjcpu->target_freq;
		}

		if (max_freq != pcpu->policy->cur)
			__cpufreq_driver_target(pcpu->policy, max_freq,
						CPUFREQ_RELATION_H);
		mutex_unlock(&set_speed_lock);

		trace_cpufreq_interactive_down(cpu, pcpu->target_freq,
					       pcpu->policy->cur);
	}
}
static void cpufreq_interactive_boost(void)
{
	int i;
	int anyboost = 0;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	spin_lock_irqsave(&up_cpumask_lock, flags);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);

		if (pcpu->target_freq < hispeed_freq) {
			pcpu->target_freq = hispeed_freq;
			cpumask_set_cpu(i, &up_cpumask);
			pcpu->target_set_time_in_idle =
				get_cpu_idle_time_us(i, &pcpu->target_set_time);
			anyboost = 1;
		}

		/*
		 * Set floor freq and (re)start timer for when last
		 * validated.
		 */
		pcpu->floor_freq = hispeed_freq;
		pcpu->floor_validate_time = ktime_to_us(ktime_get());
	}

	spin_unlock_irqrestore(&up_cpumask_lock, flags);

	if (anyboost)
		wake_up_process(up_task);
}
/*
 * Pulsed boost on input event raises CPUs to hispeed_freq and lets
 * usual algorithm of min_sample_time decide when to allow speed
 * to drop.
 */
static void cpufreq_interactive_input_event(struct input_handle *handle,
					    unsigned int type,
					    unsigned int code, int value)
{
	if (input_boost_val && type == EV_SYN && code == SYN_REPORT) {
		trace_cpufreq_interactive_boost("input");
		cpufreq_interactive_boost();
	}
}
static void cpufreq_interactive_input_open(struct work_struct *w)
{
	struct cpufreq_interactive_inputopen *io =
		container_of(w, struct cpufreq_interactive_inputopen,
			     inputopen_work);
	int error;

	error = input_open_device(io->handle);
	if (error)
		input_unregister_handle(io->handle);
}

static int cpufreq_interactive_input_connect(struct input_handler *handler,
					     struct input_dev *dev,
					     const struct input_device_id *id)
{
	struct input_handle *handle;
	int error;

	pr_info("%s: connect to %s\n", __func__, dev->name);
	handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	handle->dev = dev;
	handle->handler = handler;
	handle->name = "cpufreq_interactive";

	error = input_register_handle(handle);
	if (error)
		goto err;

	inputopen.handle = handle;
	queue_work(down_wq, &inputopen.inputopen_work);
	return 0;

err:
	kfree(handle);
	return error;
}

static void cpufreq_interactive_input_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}
static const struct input_device_id cpufreq_interactive_ids[] = {
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
			 INPUT_DEVICE_ID_MATCH_ABSBIT,
		.evbit = { BIT_MASK(EV_ABS) },
		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
			    BIT_MASK(ABS_MT_POSITION_X) |
			    BIT_MASK(ABS_MT_POSITION_Y) },
	}, /* multi-touch touchscreen */
	{
		.flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
			 INPUT_DEVICE_ID_MATCH_ABSBIT,
		.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
		.absbit = { [BIT_WORD(ABS_X)] =
			    BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
	}, /* touchpad */
	{ },
};

static struct input_handler cpufreq_interactive_input_handler = {
	.event = cpufreq_interactive_input_event,
	.connect = cpufreq_interactive_input_connect,
	.disconnect = cpufreq_interactive_input_disconnect,
	.name = "cpufreq_interactive",
	.id_table = cpufreq_interactive_ids,
};
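
/*
 * sysfs tunables.  The attribute group is registered on the global
 * cpufreq kobject under the name "interactive", so on a typical system
 * the files appear under /sys/devices/system/cpu/cpufreq/interactive/.
 * Example usage (illustrative values, not recommendations):
 *
 *   echo 90 > /sys/devices/system/cpu/cpufreq/interactive/go_hispeed_load
 *   echo 40000 > /sys/devices/system/cpu/cpufreq/interactive/min_sample_time
 */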
static ssize_t show_hispeed_freq(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	return sprintf(buf, "%llu\n", hispeed_freq);
}

static ssize_t store_hispeed_freq(struct kobject *kobj,
				  struct attribute *attr, const char *buf,
				  size_t count)
{
	int ret;
	u64 val;

	ret = strict_strtoull(buf, 0, &val);
	if (ret < 0)
		return ret;
	hispeed_freq = val;
	return count;
}

static struct global_attr hispeed_freq_attr = __ATTR(hispeed_freq, 0644,
		show_hispeed_freq, store_hispeed_freq);

static ssize_t show_go_hispeed_load(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	go_hispeed_load = val;
	return count;
}

static struct global_attr go_hispeed_load_attr = __ATTR(go_hispeed_load, 0644,
		show_go_hispeed_load, store_go_hispeed_load);

static ssize_t show_min_sample_time(struct kobject *kobj,
				    struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", min_sample_time);
}

static ssize_t store_min_sample_time(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	min_sample_time = val;
	return count;
}

static struct global_attr min_sample_time_attr = __ATTR(min_sample_time, 0644,
		show_min_sample_time, store_min_sample_time);

static ssize_t show_above_hispeed_delay(struct kobject *kobj,
					struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", above_hispeed_delay_val);
}

static ssize_t store_above_hispeed_delay(struct kobject *kobj,
					 struct attribute *attr,
					 const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	above_hispeed_delay_val = val;
	return count;
}

define_one_global_rw(above_hispeed_delay);

static ssize_t show_timer_rate(struct kobject *kobj,
			       struct attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", timer_rate);
}

static ssize_t store_timer_rate(struct kobject *kobj,
			struct attribute *attr, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	timer_rate = val;
	return count;
}

static struct global_attr timer_rate_attr = __ATTR(timer_rate, 0644,
		show_timer_rate, store_timer_rate);

static ssize_t show_input_boost(struct kobject *kobj, struct attribute *attr,
				char *buf)
{
	return sprintf(buf, "%d\n", input_boost_val);
}

static ssize_t store_input_boost(struct kobject *kobj, struct attribute *attr,
				 const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = strict_strtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	input_boost_val = val;
	return count;
}

define_one_global_rw(input_boost);

static ssize_t show_boost(struct kobject *kobj, struct attribute *attr,
			  char *buf)
{
	return sprintf(buf, "%d\n", boost_val);
}

static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	boost_val = val;

	if (boost_val) {
		trace_cpufreq_interactive_boost("on");
		cpufreq_interactive_boost();
	} else {
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

define_one_global_rw(boost);

static ssize_t store_boostpulse(struct kobject *kobj, struct attribute *attr,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	trace_cpufreq_interactive_boost("pulse");
	cpufreq_interactive_boost();
	return count;
}

static struct global_attr boostpulse =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse);

static struct attribute *interactive_attributes[] = {
	&hispeed_freq_attr.attr,
	&go_hispeed_load_attr.attr,
	&above_hispeed_delay.attr,
	&min_sample_time_attr.attr,
	&timer_rate_attr.attr,
	&input_boost.attr,
	&boost.attr,
	&boostpulse.attr,
	NULL,
};

static struct attribute_group interactive_attr_group = {
	.attrs = interactive_attributes,
	.name = "interactive",
};
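
/*
 * Governor entry point.  The cpufreq core invokes this with GOV_START
 * when the governor is attached to a policy, GOV_STOP when detached,
 * and GOV_LIMITS when policy->min/max change.  Only the first START
 * registers the sysfs group and input handler, and only the last STOP
 * removes them; active_count tracks this across policies.
 */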
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;

	switch (event) {
	case CPUFREQ_GOV_START:
		if (!cpu_online(policy->cpu))
			return -EINVAL;

		freq_table =
			cpufreq_frequency_get_table(policy->cpu);

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->target_set_time_in_idle =
				get_cpu_idle_time_us(j,
					     &pcpu->target_set_time);
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->floor_validate_time =
				pcpu->target_set_time;
			pcpu->governor_enabled = 1;
			smp_wmb();
		}

		if (!hispeed_freq)
			hispeed_freq = policy->max;

		/*
		 * Do not register the idle hook and create sysfs
		 * entries if we have already done so.
		 */
		if (atomic_inc_return(&active_count) > 1)
			return 0;

		rc = sysfs_create_group(cpufreq_global_kobject,
				&interactive_attr_group);
		if (rc)
			return rc;

		rc = input_register_handler(&cpufreq_interactive_input_handler);
		if (rc)
			pr_warn("%s: failed to register input handler\n",
				__func__);

		break;

	case CPUFREQ_GOV_STOP:
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->governor_enabled = 0;
			smp_wmb();
			del_timer_sync(&pcpu->cpu_timer);

			/*
			 * Reset idle exit time since we may cancel the timer
			 * before it can run after the last idle exit time,
			 * to avoid tripping the check in idle exit for a timer
			 * that is trying to run.
			 */
			pcpu->idle_exit_time = 0;
		}

		flush_work(&freq_scale_down_work);
		if (atomic_dec_return(&active_count) > 0)
			return 0;

		input_unregister_handler(&cpufreq_interactive_input_handler);
		sysfs_remove_group(cpufreq_global_kobject,
				&interactive_attr_group);

		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		break;
	}
	return 0;
}

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	switch (val) {
	case IDLE_START:
		cpufreq_interactive_idle_start();
		break;
	case IDLE_END:
		cpufreq_interactive_idle_end();
		break;
	}

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};
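
/*
 * Module init: set tunable defaults, bind the per-CPU timers to
 * cpufreq_interactive_timer(), spawn the SCHED_FIFO up_task at the top
 * realtime priority, create the down workqueue, then hook the idle
 * notifier chain and register the governor.
 */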
static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
	min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
	above_hispeed_delay_val = DEFAULT_ABOVE_HISPEED_DELAY;
	timer_rate = DEFAULT_TIMER_RATE;

	/* Initialize per-CPU timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
	}

	up_task = kthread_create(cpufreq_interactive_up_task, NULL,
				 "kinteractiveup");
	if (IS_ERR(up_task))
		return PTR_ERR(up_task);

	sched_setscheduler_nocheck(up_task, SCHED_FIFO, &param);
	get_task_struct(up_task);

	/* No rescuer thread, bind to CPU queuing the work for possibly
	   warm cache (probably doesn't matter much). */
	down_wq = alloc_workqueue("knteractive_down", 0, 1);

	if (!down_wq)
		goto err_freeuptask;

	INIT_WORK(&freq_scale_down_work,
		  cpufreq_interactive_freq_down);

	spin_lock_init(&up_cpumask_lock);
	spin_lock_init(&down_cpumask_lock);
	mutex_init(&set_speed_lock);

	idle_notifier_register(&cpufreq_interactive_idle_nb);
	INIT_WORK(&inputopen.inputopen_work, cpufreq_interactive_input_open);
	return cpufreq_register_governor(&cpufreq_gov_interactive);

err_freeuptask:
	put_task_struct(up_task);
	return -ENOMEM;
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(up_task);
	put_task_struct(up_task);
	destroy_workqueue(down_wq);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency-sensitive workloads");
MODULE_LICENSE("GPL");