/* CPU control.
 * (C) 2001, 2002, 2003, 2004 Rusty Russell
 *
 * This code is licensed under the GPL.
 */
#include <linux/proc_fs.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/unistd.h>
#include <linux/cpu.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/stop_machine.h>
#include <linux/mutex.h>
#include <linux/gfp.h>

#define CREATE_TRACE_POINTS
#include <trace/events/cpu_hotplug.h>
#ifdef CONFIG_SMP
/* Serializes the updates to cpu_online_mask, cpu_present_mask */
static DEFINE_MUTEX(cpu_add_remove_lock);

/*
 * The following two APIs must be used when attempting
 * to serialize the updates to cpu_online_mask, cpu_present_mask.
 */
void cpu_maps_update_begin(void)
{
        mutex_lock(&cpu_add_remove_lock);
}

void cpu_maps_update_done(void)
{
        mutex_unlock(&cpu_add_remove_lock);
}
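/*
 * Example usage (sketch): a caller that needs a stable view of the cpu
 * maps brackets its update with this pair, e.g.:
 *
 *        cpu_maps_update_begin();
 *        set_cpu_present(cpu, true);
 *        cpu_maps_update_done();
 */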
static RAW_NOTIFIER_HEAD(cpu_chain);

/* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
 * Should always be manipulated under cpu_add_remove_lock
 */
static int cpu_hotplug_disabled;
#ifdef CONFIG_HOTPLUG_CPU

static struct {
        struct task_struct *active_writer;
        struct mutex lock; /* Synchronizes accesses to refcount, */
        /*
         * Also blocks the new readers during
         * an ongoing cpu hotplug operation.
         */
        int refcount;
} cpu_hotplug = {
        .active_writer = NULL,
        .lock = __MUTEX_INITIALIZER(cpu_hotplug.lock),
        .refcount = 0,
};
void get_online_cpus(void)
{
        might_sleep();
        if (cpu_hotplug.active_writer == current)
                return;
        mutex_lock(&cpu_hotplug.lock);
        cpu_hotplug.refcount++;
        mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(get_online_cpus);

void put_online_cpus(void)
{
        if (cpu_hotplug.active_writer == current)
                return;
        mutex_lock(&cpu_hotplug.lock);
        if (!--cpu_hotplug.refcount && unlikely(cpu_hotplug.active_writer))
                wake_up_process(cpu_hotplug.active_writer);
        mutex_unlock(&cpu_hotplug.lock);
}
EXPORT_SYMBOL_GPL(put_online_cpus);
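/*
 * Example usage (sketch): a reader that must not race with a CPU going
 * away holds the refcount across its walk of the online mask; the
 * callee name below is illustrative only:
 *
 *        get_online_cpus();
 *        for_each_online_cpu(cpu)
 *                do_per_cpu_work(cpu);
 *        put_online_cpus();
 */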
/*
 * This ensures that the hotplug operation can begin only when the
 * refcount goes to zero.
 *
 * Note that during a cpu-hotplug operation, the new readers, if any,
 * will be blocked by the cpu_hotplug.lock.
 *
 * Since cpu_hotplug_begin() is always called after invoking
 * cpu_maps_update_begin(), we can be sure that only one writer is active.
 *
 * Note that theoretically, there is a possibility of a livelock:
 * - Refcount goes to zero, last reader wakes up the sleeping
 *   writer.
 * - Last reader unlocks the cpu_hotplug.lock.
 * - A new reader arrives at this moment, bumps up the refcount.
 * - The writer acquires the cpu_hotplug.lock, finds the refcount
 *   non-zero and goes to sleep again.
 *
 * However, this is very difficult to achieve in practice since
 * get_online_cpus() is not an API that is called all that often.
 */
static void cpu_hotplug_begin(void)
{
        cpu_hotplug.active_writer = current;

        for (;;) {
                mutex_lock(&cpu_hotplug.lock);
                if (likely(!cpu_hotplug.refcount))
                        break;
                __set_current_state(TASK_UNINTERRUPTIBLE);
                mutex_unlock(&cpu_hotplug.lock);
                schedule();
        }
}

static void cpu_hotplug_done(void)
{
        cpu_hotplug.active_writer = NULL;
        mutex_unlock(&cpu_hotplug.lock);
}

#else /* #if CONFIG_HOTPLUG_CPU */
static void cpu_hotplug_begin(void) {}
static void cpu_hotplug_done(void) {}
#endif  /* #else #if CONFIG_HOTPLUG_CPU */
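/*
 * Writer-side pairing (sketch): a hotplug writer takes the locks in
 * this order and releases them in reverse, exactly as cpu_down() and
 * _cpu_down() do below:
 *
 *        cpu_maps_update_begin();
 *        cpu_hotplug_begin();
 *        ... modify cpu_online_mask ...
 *        cpu_hotplug_done();
 *        cpu_maps_update_done();
 */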
/* Need to know about CPUs going up/down? */
int __ref register_cpu_notifier(struct notifier_block *nb)
{
        int ret;
        cpu_maps_update_begin();
        ret = raw_notifier_chain_register(&cpu_chain, nb);
        cpu_maps_update_done();
        return ret;
}
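/*
 * Example usage (sketch): a subsystem that wants CPU up/down events
 * registers a notifier_block whose callback switches on the action.
 * The callback and the names below are illustrative, not part of this
 * file:
 *
 *        static int my_cpu_callback(struct notifier_block *nb,
 *                                   unsigned long action, void *hcpu)
 *        {
 *                unsigned int cpu = (unsigned long)hcpu;
 *
 *                switch (action & ~CPU_TASKS_FROZEN) {
 *                case CPU_ONLINE:
 *                        prepare_for_cpu(cpu);
 *                        break;
 *                case CPU_DEAD:
 *                        clean_up_after_cpu(cpu);
 *                        break;
 *                }
 *                return NOTIFY_OK;
 *        }
 *
 *        static struct notifier_block my_cpu_nb = {
 *                .notifier_call = my_cpu_callback,
 *        };
 *
 *        register_cpu_notifier(&my_cpu_nb);
 */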
static int __cpu_notify(unsigned long val, void *v, int nr_to_call,
                        int *nr_calls)
{
        int ret;

        ret = __raw_notifier_call_chain(&cpu_chain, val, v, nr_to_call,
                                        nr_calls);

        return notifier_to_errno(ret);
}

static int cpu_notify(unsigned long val, void *v)
{
        return __cpu_notify(val, v, -1, NULL);
}
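/*
 * Note (sketch): __cpu_notify() exposes the partial-call form of the
 * raw notifier chain.  nr_to_call bounds how many callbacks run (-1
 * means all) and *nr_calls reports how many actually ran, which is
 * what lets _cpu_down() and _cpu_up() unwind exactly the callbacks
 * that saw the PREPARE event when one of them fails.
 */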
#ifdef CONFIG_HOTPLUG_CPU

static void cpu_notify_nofail(unsigned long val, void *v)
{
        BUG_ON(cpu_notify(val, v));
}
EXPORT_SYMBOL(register_cpu_notifier);

void __ref unregister_cpu_notifier(struct notifier_block *nb)
{
        cpu_maps_update_begin();
        raw_notifier_chain_unregister(&cpu_chain, nb);
        cpu_maps_update_done();
}
EXPORT_SYMBOL(unregister_cpu_notifier);
static inline void check_for_tasks(int cpu)
{
        struct task_struct *p;

        write_lock_irq(&tasklist_lock);
        for_each_process(p) {
                if (task_cpu(p) == cpu && p->state == TASK_RUNNING &&
                    (!cputime_eq(p->utime, cputime_zero) ||
                     !cputime_eq(p->stime, cputime_zero)))
                        printk(KERN_WARNING "Task %s (pid = %d) is on cpu %d "
                                "(state = %ld, flags = %x)\n",
                                p->comm, task_pid_nr(p), cpu,
                                p->state, p->flags);
        }
        write_unlock_irq(&tasklist_lock);
}
struct take_cpu_down_param {
        unsigned long mod;
        void *hcpu;
};

/* Take this CPU down. */
static int __ref take_cpu_down(void *_param)
{
        struct take_cpu_down_param *param = _param;
        unsigned int cpu = (unsigned long)param->hcpu;
        int err;

        /* Ensure this CPU doesn't handle any more interrupts. */
        trace_cpu_hotplug_disable_start(cpu);
        err = __cpu_disable();
        trace_cpu_hotplug_disable_end(cpu);
        if (err < 0)
                return err;

        cpu_notify(CPU_DYING | param->mod, param->hcpu);
        return 0;
}
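/*
 * Note (sketch): take_cpu_down() is invoked through __stop_machine()
 * below, so it runs on the dying CPU itself with interrupts disabled
 * and every other CPU parked; __cpu_disable() and the CPU_DYING
 * notifiers therefore execute with no concurrent activity elsewhere
 * in the machine.
 */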
/* Requires cpu_add_remove_lock to be held */
static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
{
        int err, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;
        struct take_cpu_down_param tcd_param = {
                .mod = mod,
                .hcpu = hcpu,
        };

        if (num_online_cpus() == 1)
                return -EBUSY;

        if (!cpu_online(cpu))
                return -EINVAL;

        cpu_hotplug_begin();

        err = __cpu_notify(CPU_DOWN_PREPARE | mod, hcpu, -1, &nr_calls);
        if (err) {
                nr_calls--;
                __cpu_notify(CPU_DOWN_FAILED | mod, hcpu, nr_calls, NULL);
                printk("%s: attempt to take down CPU %u failed\n",
                                __func__, cpu);
                goto out_release;
        }

        err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
        if (err) {
                /* CPU didn't die: tell everyone.  Can't complain. */
                cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
                goto out_release;
        }
        BUG_ON(cpu_online(cpu));

        /*
         * The migration_call() CPU_DYING callback will have removed all
         * runnable tasks from the cpu, there's only the idle task left now
         * that the migration thread is done doing the stop_machine thing.
         *
         * Wait for the stop thread to go away.
         */
        while (!idle_cpu(cpu))
                cpu_relax();

        /* This actually kills the CPU. */
        trace_cpu_hotplug_die_start(cpu);
        __cpu_die(cpu);
        trace_cpu_hotplug_die_end(cpu);

        /* CPU is completely dead: tell everyone.  Too late to complain. */
        cpu_notify_nofail(CPU_DEAD | mod, hcpu);

        check_for_tasks(cpu);

out_release:
        cpu_hotplug_done();
        if (!err)
                cpu_notify_nofail(CPU_POST_DEAD | mod, hcpu);
        return err;
}
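/*
 * Summary of the offline sequence implemented above (sketch):
 *
 *        CPU_DOWN_PREPARE -> any notifier may veto; on failure only the
 *                            callbacks that already ran see CPU_DOWN_FAILED
 *        take_cpu_down()  -> stop_machine on the target CPU; CPU_DYING
 *        __cpu_die()      -> arch code waits until the CPU is really gone
 *        CPU_DEAD         -> post-mortem cleanup; must not fail
 *        CPU_POST_DEAD    -> sent after cpu_hotplug_done(), i.e. with
 *                            readers admitted again
 */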
int __ref cpu_down(unsigned int cpu)
{
        int err;

        cpu_maps_update_begin();
        trace_cpu_hotplug_down_start(cpu);

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_down(cpu, 0);

#ifdef CONFIG_MESON6_SMP_HOTPLUG
        {
                extern void disable_cpu_fw(void);
                disable_cpu_fw();
        }
#endif

out:
        trace_cpu_hotplug_down_end(cpu);
        cpu_maps_update_done();
        return err;
}
EXPORT_SYMBOL(cpu_down);
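/*
 * Example usage (sketch): userspace normally reaches cpu_down() through
 * the sysfs online attribute, e.g.
 *
 *        echo 0 > /sys/devices/system/cpu/cpu1/online
 *
 * while in-kernel callers simply call cpu_down(1).
 */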
#endif /*CONFIG_HOTPLUG_CPU*/
/* Requires cpu_add_remove_lock to be held */
static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
{
        int ret, nr_calls = 0;
        void *hcpu = (void *)(long)cpu;
        unsigned long mod = tasks_frozen ? CPU_TASKS_FROZEN : 0;

        if (cpu_online(cpu) || !cpu_present(cpu))
                return -EINVAL;

        cpu_hotplug_begin();
        ret = __cpu_notify(CPU_UP_PREPARE | mod, hcpu, -1, &nr_calls);
        if (ret) {
                nr_calls--;
                printk(KERN_WARNING "%s: attempt to bring up CPU %u failed\n",
                                __func__, cpu);
                goto out_notify;
        }

#ifdef CONFIG_MESON6_SMP_HOTPLUG
        {
                extern void restore_cpu_fw(void);
                restore_cpu_fw();
        }
#endif

        /* Arch-specific enabling code. */
        trace_cpu_hotplug_arch_up_start(cpu);
        ret = __cpu_up(cpu);
        trace_cpu_hotplug_arch_up_end(cpu);
        if (ret != 0)
                goto out_notify;
        BUG_ON(!cpu_online(cpu));

        /* Now call notifier in preparation. */
        cpu_notify(CPU_ONLINE | mod, hcpu);

out_notify:
        if (ret != 0)
                __cpu_notify(CPU_UP_CANCELED | mod, hcpu, nr_calls, NULL);
        cpu_hotplug_done();

        return ret;
}
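/*
 * Summary of the online sequence implemented above (sketch), the
 * mirror image of _cpu_down():
 *
 *        CPU_UP_PREPARE -> any notifier may veto; on failure only the
 *                          callbacks that already ran see CPU_UP_CANCELED
 *        __cpu_up()     -> arch code boots the CPU; the new CPU calls
 *                          notify_cpu_starting() before enabling interrupts
 *        CPU_ONLINE     -> the CPU is in cpu_online_mask and schedulable
 */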
int __cpuinit cpu_up(unsigned int cpu)
{
        int err = 0;

#ifdef CONFIG_MEMORY_HOTPLUG
        int nid;
        pg_data_t *pgdat;
#endif

        if (!cpu_possible(cpu)) {
                printk(KERN_ERR "can't online cpu %d because it is not "
                        "configured as may-hotadd at boot time\n", cpu);
#if defined(CONFIG_IA64)
                printk(KERN_ERR "please check additional_cpus= boot "
                                "parameter\n");
#endif
                return -EINVAL;
        }

#ifdef CONFIG_MEMORY_HOTPLUG
        nid = cpu_to_node(cpu);
        if (!node_online(nid)) {
                err = mem_online_node(nid);
                if (err)
                        return err;
        }

        pgdat = NODE_DATA(nid);
        if (!pgdat) {
                printk(KERN_ERR
                        "Can't online cpu %d due to NULL pgdat\n", cpu);
                return -ENOMEM;
        }

        if (pgdat->node_zonelists->_zonerefs->zone == NULL) {
                mutex_lock(&zonelists_mutex);
                build_all_zonelists(NULL);
                mutex_unlock(&zonelists_mutex);
        }
#endif

        cpu_maps_update_begin();
        trace_cpu_hotplug_up_start(cpu);

        if (cpu_hotplug_disabled) {
                err = -EBUSY;
                goto out;
        }

        err = _cpu_up(cpu, 0);

out:
        trace_cpu_hotplug_up_end(cpu);
        cpu_maps_update_done();
        return err;
}
#ifdef CONFIG_PM_SLEEP_SMP
static cpumask_var_t frozen_cpus;

void __weak arch_disable_nonboot_cpus_begin(void)
{
}

void __weak arch_disable_nonboot_cpus_end(void)
{
}
int disable_nonboot_cpus(void)
{
        int cpu, first_cpu, error = 0;

        cpu_maps_update_begin();
        first_cpu = cpumask_first(cpu_online_mask);
        /*
         * We take down all of the non-boot CPUs in one shot to avoid races
         * with the userspace trying to use the CPU hotplug at the same time
         */
        cpumask_clear(frozen_cpus);
        arch_disable_nonboot_cpus_begin();

        printk("Disabling non-boot CPUs ...\n");
        for_each_online_cpu(cpu) {
                if (cpu == first_cpu)
                        continue;
                error = _cpu_down(cpu, 1);
                if (!error)
                        cpumask_set_cpu(cpu, frozen_cpus);
                else {
                        printk(KERN_ERR "Error taking CPU%d down: %d\n",
                                cpu, error);
                        break;
                }
        }

        arch_disable_nonboot_cpus_end();
        if (!error) {
                BUG_ON(num_online_cpus() > 1);
                /* Make sure the CPUs won't be enabled by someone else */
                cpu_hotplug_disabled = 1;
        } else {
                printk(KERN_ERR "Non-boot CPUs are not disabled\n");
        }
        cpu_maps_update_done();
        return error;
}
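/*
 * Note (sketch): _cpu_down() is called here with tasks_frozen = 1, so
 * notifiers see CPU_DOWN_PREPARE | CPU_TASKS_FROZEN rather than the
 * plain event; callbacks can use that flag to skip work that only
 * matters while userspace is running.
 */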
void __weak arch_enable_nonboot_cpus_begin(void)
{
}

void __weak arch_enable_nonboot_cpus_end(void)
{
}

void __ref enable_nonboot_cpus(void)
{
        int cpu, error;

        /* Allow everyone to use the CPU hotplug again */
        cpu_maps_update_begin();
        cpu_hotplug_disabled = 0;
        if (cpumask_empty(frozen_cpus))
                goto out;

        printk(KERN_INFO "Enabling non-boot CPUs ...\n");

        arch_enable_nonboot_cpus_begin();

        for_each_cpu(cpu, frozen_cpus) {
                error = _cpu_up(cpu, 1);
                if (!error) {
                        printk(KERN_INFO "CPU%d is up\n", cpu);
                        continue;
                }
                printk(KERN_WARNING "Error taking CPU%d up: %d\n", cpu, error);
        }

        arch_enable_nonboot_cpus_end();

        cpumask_clear(frozen_cpus);
out:
        cpu_maps_update_done();
}
static int alloc_frozen_cpus(void)
{
        if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
                return -ENOMEM;
        return 0;
}
core_initcall(alloc_frozen_cpus);
#endif /* CONFIG_PM_SLEEP_SMP */
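/*
 * Usage note (sketch): the suspend/hibernate core pairs these helpers
 * around the low-power transition, roughly:
 *
 *        error = disable_nonboot_cpus();
 *        if (!error) {
 *                ... enter the sleep state ...
 *                enable_nonboot_cpus();
 *        }
 */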
/**
 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
 * @cpu: cpu that just started
 *
 * This function calls the cpu_chain notifiers with CPU_STARTING.
 * It must be called by the arch code on the new cpu, before the new cpu
 * enables interrupts and before the "boot" cpu returns from __cpu_up().
 */
void __cpuinit notify_cpu_starting(unsigned int cpu)
{
        unsigned long val = CPU_STARTING;

#ifdef CONFIG_PM_SLEEP_SMP
        if (frozen_cpus != NULL && cpumask_test_cpu(cpu, frozen_cpus))
                val = CPU_STARTING_FROZEN;
#endif /* CONFIG_PM_SLEEP_SMP */
        cpu_notify(val, (void *)(long)cpu);
}
#endif /* CONFIG_SMP */

/*
 * cpu_bit_bitmap[] is a special, "compressed" data structure that
 * represents, for each nr < NR_CPUS, the single-bit binary value 1<<nr.
 *
 * It is used by cpumask_of() to get a constant address to a CPU
 * mask value that has a single bit set only.
 */

/* cpu_bit_bitmap[0] is empty - so we can back into it */
#define MASK_DECLARE_1(x)       [x+1][0] = (1UL << (x))
#define MASK_DECLARE_2(x)       MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
#define MASK_DECLARE_4(x)       MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
#define MASK_DECLARE_8(x)       MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)

const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {

        MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
        MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
#if BITS_PER_LONG > 32
        MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
        MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
#endif
};
EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
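/*
 * Worked example (sketch): cpumask_of(cpu) resolves to get_cpu_mask(),
 * which picks row [1 + cpu % BITS_PER_LONG] (whose first long is
 * 1UL << (cpu % BITS_PER_LONG)) and steps the pointer back
 * cpu / BITS_PER_LONG longs.  The words before the set word are then
 * read out of the all-zero tail of the preceding row (row [0] exists
 * only to be "backed into"), so every single-bit mask is available at
 * a constant address without allocating NR_CPUS full-size masks.
 */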
const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
EXPORT_SYMBOL(cpu_all_bits);

#ifdef CONFIG_INIT_ALL_POSSIBLE
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly
        = CPU_BITS_ALL;
#else
static DECLARE_BITMAP(cpu_possible_bits, CONFIG_NR_CPUS) __read_mostly;
#endif
const struct cpumask *const cpu_possible_mask = to_cpumask(cpu_possible_bits);
EXPORT_SYMBOL(cpu_possible_mask);

static DECLARE_BITMAP(cpu_online_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_online_mask = to_cpumask(cpu_online_bits);
EXPORT_SYMBOL(cpu_online_mask);

static DECLARE_BITMAP(cpu_present_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_present_mask = to_cpumask(cpu_present_bits);
EXPORT_SYMBOL(cpu_present_mask);

static DECLARE_BITMAP(cpu_active_bits, CONFIG_NR_CPUS) __read_mostly;
const struct cpumask *const cpu_active_mask = to_cpumask(cpu_active_bits);
EXPORT_SYMBOL(cpu_active_mask);
void set_cpu_possible(unsigned int cpu, bool possible)
{
        if (possible)
                cpumask_set_cpu(cpu, to_cpumask(cpu_possible_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_possible_bits));
}

void set_cpu_present(unsigned int cpu, bool present)
{
        if (present)
                cpumask_set_cpu(cpu, to_cpumask(cpu_present_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_present_bits));
}

void set_cpu_online(unsigned int cpu, bool online)
{
        if (online)
                cpumask_set_cpu(cpu, to_cpumask(cpu_online_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_online_bits));
}

void set_cpu_active(unsigned int cpu, bool active)
{
        if (active)
                cpumask_set_cpu(cpu, to_cpumask(cpu_active_bits));
        else
                cpumask_clear_cpu(cpu, to_cpumask(cpu_active_bits));
}

void init_cpu_present(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_present_bits), src);
}

void init_cpu_possible(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_possible_bits), src);
}

void init_cpu_online(const struct cpumask *src)
{
        cpumask_copy(to_cpumask(cpu_online_bits), src);
}
static ATOMIC_NOTIFIER_HEAD(idle_notifier);

void idle_notifier_register(struct notifier_block *n)
{
        atomic_notifier_chain_register(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_register);

void idle_notifier_unregister(struct notifier_block *n)
{
        atomic_notifier_chain_unregister(&idle_notifier, n);
}
EXPORT_SYMBOL_GPL(idle_notifier_unregister);

void idle_notifier_call_chain(unsigned long val)
{
        atomic_notifier_call_chain(&idle_notifier, val, NULL);
}
EXPORT_SYMBOL_GPL(idle_notifier_call_chain);
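/*
 * Example usage (sketch): the arch idle loop is expected to bracket
 * its low-power state with this chain, roughly:
 *
 *        idle_notifier_call_chain(IDLE_START);
 *        ... enter low-power idle ...
 *        idle_notifier_call_chain(IDLE_END);
 *
 * IDLE_START/IDLE_END are the values conventionally used by those
 * arch callers; they are not defined in this file.
 */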