/*
 * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#define pr_fmt(fmt) "cpu-boost: " fmt

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
#include <linux/kthread.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/time.h>
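
/*
 * Per-CPU boost state: one migration-sync kthread plus the delayed work
 * items that remove the migration boost and the input boost once their
 * respective windows expire.
 */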
struct cpu_sync {
	struct task_struct *thread;
	wait_queue_head_t sync_wq;
	struct delayed_work boost_rem;
	struct delayed_work input_boost_rem;
	int cpu;
	spinlock_t lock;
	bool pending;
	atomic_t being_woken;
	int src_cpu;
	unsigned int boost_min;
	unsigned int input_boost_min;
};
static DEFINE_PER_CPU(struct cpu_sync, sync_info);
static struct workqueue_struct *cpu_boost_wq;

static struct work_struct input_boost_work;

static unsigned int boost_ms;
module_param(boost_ms, uint, 0644);

static unsigned int sync_threshold;
module_param(sync_threshold, uint, 0644);

static unsigned int input_boost_freq;
module_param(input_boost_freq, uint, 0644);

static unsigned int input_boost_ms = 40;
module_param(input_boost_ms, uint, 0644);

static u64 last_input_time;
#define MIN_INPUT_INTERVAL (150 * USEC_PER_MSEC)
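
/*
 * The tunables above are runtime-writable module parameters. On a typical
 * build they appear under sysfs; the path below assumes the module ends up
 * named "cpu_boost" (KBUILD_MODNAME derived from this file's name), e.g.:
 *
 *   echo 1190400 > /sys/module/cpu_boost/parameters/input_boost_freq
 *   echo 40 > /sys/module/cpu_boost/parameters/input_boost_ms
 */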
/*
 * The CPUFREQ_ADJUST notifier is used to override the current policy min to
 * make sure policy min >= boost_min. The cpufreq framework then does the job
 * of enforcing the new policy.
 *
 * The sync kthread needs to run on the CPU in question to avoid deadlocks in
 * the wake up code. Achieve this by binding the thread to the respective
 * CPU. But a CPU going offline unbinds threads from that CPU. So, set it up
 * again each time the CPU comes back up. We can use CPUFREQ_START to figure
 * out a CPU is coming online instead of registering for hotplug notifiers.
 */
static int boost_adjust_notify(struct notifier_block *nb, unsigned long val,
				void *data)
{
	struct cpufreq_policy *policy = data;
	unsigned int cpu = policy->cpu;
	struct cpu_sync *s = &per_cpu(sync_info, cpu);
	unsigned int b_min = s->boost_min;
	unsigned int ib_min = s->input_boost_min;
	unsigned int min;

	switch (val) {
	case CPUFREQ_ADJUST:
		if (!b_min && !ib_min)
			break;

		min = max(b_min, ib_min);

		pr_debug("CPU%u policy min before boost: %u kHz\n",
			 cpu, policy->min);
		pr_debug("CPU%u boost min: %u kHz\n", cpu, min);

		cpufreq_verify_within_limits(policy, min, UINT_MAX);

		pr_debug("CPU%u policy min after boost: %u kHz\n",
			 cpu, policy->min);
		break;

	case CPUFREQ_START:
		set_cpus_allowed(s->thread, *cpumask_of(cpu));
		break;
	}

	return NOTIFY_OK;
}
static struct notifier_block boost_adjust_nb = {
	.notifier_call = boost_adjust_notify,
};
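
/*
 * Boost expiry: each remover below runs from cpu_boost_wq once its boost
 * window elapses, clears the per-CPU frequency floor, and lets the
 * CPUFREQ_ADJUST notifier above recompute the effective policy min.
 */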
static void do_boost_rem(struct work_struct *work)
{
	struct cpu_sync *s = container_of(work, struct cpu_sync,
					  boost_rem.work);

	pr_debug("Removing boost for CPU%d\n", s->cpu);
	s->boost_min = 0;
	/* Force policy re-evaluation to trigger adjust notifier. */
	cpufreq_update_policy(s->cpu);
}
static void do_input_boost_rem(struct work_struct *work)
{
	struct cpu_sync *s = container_of(work, struct cpu_sync,
					  input_boost_rem.work);

	pr_debug("Removing input boost for CPU%d\n", s->cpu);
	s->input_boost_min = 0;
	/* Force policy re-evaluation to trigger adjust notifier. */
	cpufreq_update_policy(s->cpu);
}
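
/*
 * Per-CPU sync thread: on a task migration it raises the destination CPU's
 * frequency floor to the source CPU's current frequency (capped at
 * sync_threshold when that is set) for boost_ms milliseconds, so a task
 * migrating off a fast CPU does not land on a slow one.
 */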
static int boost_mig_sync_thread(void *data)
{
	int dest_cpu = (long)data;
	int src_cpu, ret;
	struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);
	struct cpufreq_policy dest_policy;
	struct cpufreq_policy src_policy;
	unsigned long flags;

	while (1) {
		wait_event_interruptible(s->sync_wq, s->pending ||
					kthread_should_stop());

		if (kthread_should_stop())
			break;

		spin_lock_irqsave(&s->lock, flags);
		s->pending = false;
		src_cpu = s->src_cpu;
		spin_unlock_irqrestore(&s->lock, flags);

		ret = cpufreq_get_policy(&src_policy, src_cpu);
		if (ret)
			continue;
		ret = cpufreq_get_policy(&dest_policy, dest_cpu);
		if (ret)
			continue;

		if (dest_policy.cur >= src_policy.cur) {
			pr_debug("No sync. CPU%d@%dKHz >= CPU%d@%dKHz\n",
				 dest_cpu, dest_policy.cur,
				 src_cpu, src_policy.cur);
			continue;
		}

		if (sync_threshold && dest_policy.cur >= sync_threshold)
			continue;

		cancel_delayed_work_sync(&s->boost_rem);
		if (sync_threshold) {
			if (src_policy.cur >= sync_threshold)
				s->boost_min = sync_threshold;
			else
				s->boost_min = src_policy.cur;
		} else {
			s->boost_min = src_policy.cur;
		}

		/* Force policy re-evaluation to trigger adjust notifier. */
		get_online_cpus();
		if (cpu_online(dest_cpu)) {
			cpufreq_update_policy(dest_cpu);
			queue_delayed_work_on(dest_cpu, cpu_boost_wq,
				&s->boost_rem, msecs_to_jiffies(boost_ms));
		} else {
			/* Destination went offline; drop the pending boost. */
			s->boost_min = 0;
			pr_debug("Resetting boost_min to 0\n");
		}
		put_online_cpus();
	}

	return 0;
}
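
/*
 * Migration notifier: called from the platform's migration notifier chain
 * in scheduler context, so it only records the source CPU and wakes the
 * destination CPU's sync thread to do the actual cpufreq work.
 */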
static int boost_migration_notify(struct notifier_block *nb,
				unsigned long dest_cpu, void *arg)
{
	unsigned long flags;
	struct cpu_sync *s = &per_cpu(sync_info, dest_cpu);

	if (!boost_ms)
		return NOTIFY_OK;

	pr_debug("Migration: CPU%d --> CPU%d\n", (int)(long)arg,
		 (int)dest_cpu);
	spin_lock_irqsave(&s->lock, flags);
	s->pending = true;
	s->src_cpu = (int)(long)arg;
	spin_unlock_irqrestore(&s->lock, flags);

	/*
	 * Avoid issuing a recursive wakeup call, as the sync thread itself
	 * could be seen as migrating and trigger this notification. Note that
	 * the sync thread of a CPU could be running for a short while with
	 * its affinity broken because of CPU hotplug.
	 */
	if (!atomic_cmpxchg(&s->being_woken, 0, 1)) {
		wake_up(&s->sync_wq);
		atomic_set(&s->being_woken, 0);
	}

	return NOTIFY_OK;
}
static struct notifier_block boost_migration_nb = {
	.notifier_call = boost_migration_notify,
};
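
/*
 * Input boost: raise every online CPU currently running below
 * input_boost_freq to that floor, and queue the floor's removal
 * input_boost_ms later.
 */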
static void do_input_boost(struct work_struct *work)
{
	unsigned int i;
	int ret;
	struct cpu_sync *i_sync_info;
	struct cpufreq_policy policy;

	get_online_cpus();
	for_each_online_cpu(i) {
		i_sync_info = &per_cpu(sync_info, i);
		ret = cpufreq_get_policy(&policy, i);
		if (ret)
			continue;
		if (policy.cur >= input_boost_freq)
			continue;

		cancel_delayed_work_sync(&i_sync_info->input_boost_rem);
		i_sync_info->input_boost_min = input_boost_freq;
		cpufreq_update_policy(i);
		queue_delayed_work_on(i_sync_info->cpu, cpu_boost_wq,
			&i_sync_info->input_boost_rem,
			msecs_to_jiffies(input_boost_ms));
	}
	put_online_cpus();
}
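
/*
 * Rate-limit input events: at most one boost per MIN_INPUT_INTERVAL
 * (150 ms), and never while a boost work item is still pending.
 */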
static void cpuboost_input_event(struct input_handle *handle,
		unsigned int type, unsigned int code, int value)
{
	u64 now;

	if (!input_boost_freq)
		return;

	now = ktime_to_us(ktime_get());
	if (now - last_input_time < MIN_INPUT_INTERVAL)
		return;

	if (work_pending(&input_boost_work))
		return;

	queue_work(cpu_boost_wq, &input_boost_work);
	last_input_time = now;
}
static int cpuboost_input_connect(struct input_handler *handler,
		struct input_dev *dev, const struct input_device_id *id)
{
	struct input_handle *handle;
	int error;

	handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	handle->dev = dev;
	handle->handler = handler;
	handle->name = "cpufreq";

	error = input_register_handle(handle);
	if (error)
		goto err2;

	error = input_open_device(handle);
	if (error)
		goto err1;

	return 0;

err1:
	input_unregister_handle(handle);
err2:
	kfree(handle);
	return error;
}
static void cpuboost_input_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}
static const struct input_device_id cpuboost_ids[] = {
	/* multi-touch touchscreen */
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT |
			INPUT_DEVICE_ID_MATCH_ABSBIT,
		.evbit = { BIT_MASK(EV_ABS) },
		.absbit = { [BIT_WORD(ABS_MT_POSITION_X)] =
			BIT_MASK(ABS_MT_POSITION_X) |
			BIT_MASK(ABS_MT_POSITION_Y) },
	},
	/* touchpad */
	{
		.flags = INPUT_DEVICE_ID_MATCH_KEYBIT |
			INPUT_DEVICE_ID_MATCH_ABSBIT,
		.keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) },
		.absbit = { [BIT_WORD(ABS_X)] =
			BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) },
	},
	/* Keypad */
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
		.evbit = { BIT_MASK(EV_KEY) },
	},
	{ },
};
static struct input_handler cpuboost_input_handler = {
	.event		= cpuboost_input_event,
	.connect	= cpuboost_input_connect,
	.disconnect	= cpuboost_input_disconnect,
	.name		= "cpu-boost",
	.id_table	= cpuboost_ids,
};
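
/*
 * Init: set up the per-CPU state and sync threads, then hook into the
 * cpufreq policy notifier chain, the platform's migration notifier chain,
 * and the input subsystem.
 */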
static int __init cpu_boost_init(void)
{
	int cpu, ret;
	struct cpu_sync *s;

	cpu_boost_wq = alloc_workqueue("cpuboost_wq", WQ_HIGHPRI, 0);
	if (!cpu_boost_wq)
		return -ENOMEM;

	INIT_WORK(&input_boost_work, do_input_boost);

	for_each_possible_cpu(cpu) {
		s = &per_cpu(sync_info, cpu);
		s->cpu = cpu;
		init_waitqueue_head(&s->sync_wq);
		atomic_set(&s->being_woken, 0);
		spin_lock_init(&s->lock);
		INIT_DELAYED_WORK(&s->boost_rem, do_boost_rem);
		INIT_DELAYED_WORK(&s->input_boost_rem, do_input_boost_rem);
		s->thread = kthread_run(boost_mig_sync_thread,
					(void *)(long)cpu,
					"boost_sync/%d", cpu);
		set_cpus_allowed(s->thread, *cpumask_of(cpu));
	}
	cpufreq_register_notifier(&boost_adjust_nb, CPUFREQ_POLICY_NOTIFIER);
	atomic_notifier_chain_register(&migration_notifier_head,
					&boost_migration_nb);
	ret = input_register_handler(&cpuboost_input_handler);

	return ret;
}
late_initcall(cpu_boost_init);