/*
 * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
 * Copyright (C) 2005-2006, Thomas Gleixner, Russell King
 *
 * This file contains the interrupt descriptor management code.
 *
 * Detailed information is available in
 * Documentation/core-api/genericirq.rst
 */
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/radix-tree.h>
#include <linux/bitmap.h>
#include <linux/irqdomain.h>
#include <linux/sysfs.h>

#include "internals.h"
/*
 * lockdep: we want to handle all irq_desc locks as a single lock-class:
 */
static struct lock_class_key irq_desc_lock_class;
#if defined(CONFIG_SMP)
static int __init irq_affinity_setup(char *str)
{
	alloc_bootmem_cpumask_var(&irq_default_affinity);
	cpulist_parse(str, irq_default_affinity);
	/*
	 * Set at least the boot cpu. We don't want to end up with
	 * bug reports caused by random command line masks.
	 */
	cpumask_set_cpu(smp_processor_id(), irq_default_affinity);
	return 1;
}
__setup("irqaffinity=", irq_affinity_setup);
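/*
 * Illustrative use of the boot parameter handled above: booting with
 * "irqaffinity=0-3" restricts the default affinity of all interrupts
 * to CPUs 0-3. Because the boot CPU is set unconditionally, even a
 * bogus mask cannot leave interrupts without a valid target.
 */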
static void __init init_irq_default_affinity(void)
{
	if (!cpumask_available(irq_default_affinity))
		zalloc_cpumask_var(&irq_default_affinity, GFP_NOWAIT);
	if (cpumask_empty(irq_default_affinity))
		cpumask_set_cpu(0, irq_default_affinity);
}
#else
static void __init init_irq_default_affinity(void)
{
}
#endif
#ifdef CONFIG_SMP
static int alloc_masks(struct irq_desc *desc, int node)
{
	if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
				     GFP_KERNEL, node))
		return -ENOMEM;

#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity,
				     GFP_KERNEL, node)) {
		free_cpumask_var(desc->irq_common_data.affinity);
		return -ENOMEM;
	}
#endif

#ifdef CONFIG_GENERIC_PENDING_IRQ
	if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) {
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
		free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
		free_cpumask_var(desc->irq_common_data.affinity);
		return -ENOMEM;
	}
#endif
	return 0;
}

static void desc_smp_init(struct irq_desc *desc, int node,
			  const struct cpumask *affinity)
{
	if (!affinity)
		affinity = irq_default_affinity;
	cpumask_copy(desc->irq_common_data.affinity, affinity);

#ifdef CONFIG_GENERIC_PENDING_IRQ
	cpumask_clear(desc->pending_mask);
#endif
#ifdef CONFIG_NUMA
	desc->irq_common_data.node = node;
#endif
}

#else
static inline int
alloc_masks(struct irq_desc *desc, int node) { return 0; }
static inline void
desc_smp_init(struct irq_desc *desc, int node, const struct cpumask *affinity) { }
#endif
static void desc_set_defaults(unsigned int irq, struct irq_desc *desc, int node,
			      const struct cpumask *affinity, struct module *owner)
{
	int cpu;

	desc->irq_common_data.handler_data = NULL;
	desc->irq_common_data.msi_desc = NULL;

	desc->irq_data.common = &desc->irq_common_data;
	desc->irq_data.irq = irq;
	desc->irq_data.chip = &no_irq_chip;
	desc->irq_data.chip_data = NULL;
	irq_settings_clr_and_set(desc, ~0, _IRQ_DEFAULT_INIT_FLAGS);
	irqd_set(&desc->irq_data, IRQD_IRQ_DISABLED);
	irqd_set(&desc->irq_data, IRQD_IRQ_MASKED);
	desc->handle_irq = handle_bad_irq;
	desc->depth = 1;
	desc->irq_count = 0;
	desc->irqs_unhandled = 0;
	desc->tot_count = 0;
	desc->name = NULL;
	desc->owner = owner;
	for_each_possible_cpu(cpu)
		*per_cpu_ptr(desc->kstat_irqs, cpu) = 0;
	desc_smp_init(desc, node, affinity);
}
int nr_irqs = NR_IRQS;
EXPORT_SYMBOL_GPL(nr_irqs);

static DEFINE_MUTEX(sparse_irq_lock);
static DECLARE_BITMAP(allocated_irqs, IRQ_BITMAP_BITS);
#ifdef CONFIG_SPARSE_IRQ

static void irq_kobj_release(struct kobject *kobj);

#ifdef CONFIG_SYSFS
static struct kobject *irq_kobj_base;

#define IRQ_ATTR_RO(_name) \
static struct kobj_attribute _name##_attr = __ATTR_RO(_name)
static ssize_t per_cpu_count_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	int cpu, irq = desc->irq_data.irq;
	ssize_t ret = 0;
	char *p = "";

	for_each_possible_cpu(cpu) {
		unsigned int c = kstat_irqs_cpu(irq, cpu);

		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%u", p, c);
		p = ",";
	}

	ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
	return ret;
}
IRQ_ATTR_RO(per_cpu_count);

static ssize_t chip_name_show(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	if (desc->irq_data.chip && desc->irq_data.chip->name) {
		ret = scnprintf(buf, PAGE_SIZE, "%s\n",
				desc->irq_data.chip->name);
	}
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(chip_name);

static ssize_t hwirq_show(struct kobject *kobj,
			  struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	if (desc->irq_data.domain)
		ret = sprintf(buf, "%d\n", (int)desc->irq_data.hwirq);
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(hwirq);

static ssize_t type_show(struct kobject *kobj,
			 struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	ret = sprintf(buf, "%s\n",
		      irqd_is_level_type(&desc->irq_data) ? "level" : "edge");
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(type);

static ssize_t name_show(struct kobject *kobj,
			 struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	ssize_t ret = 0;

	raw_spin_lock_irq(&desc->lock);
	if (desc->name)
		ret = scnprintf(buf, PAGE_SIZE, "%s\n", desc->name);
	raw_spin_unlock_irq(&desc->lock);

	return ret;
}
IRQ_ATTR_RO(name);

static ssize_t actions_show(struct kobject *kobj,
			    struct kobj_attribute *attr, char *buf)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);
	struct irqaction *action;
	ssize_t ret = 0;
	char *p = "";

	raw_spin_lock_irq(&desc->lock);
	for (action = desc->action; action != NULL; action = action->next) {
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "%s%s",
				 p, action->name);
		p = ",";
	}
	raw_spin_unlock_irq(&desc->lock);

	if (ret)
		ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");

	return ret;
}
IRQ_ATTR_RO(actions);

static struct attribute *irq_attrs[] = {
	&per_cpu_count_attr.attr,
	&chip_name_attr.attr,
	&hwirq_attr.attr,
	&type_attr.attr,
	&name_attr.attr,
	&actions_attr.attr,
	NULL
};

static struct kobj_type irq_kobj_type = {
	.release	= irq_kobj_release,
	.sysfs_ops	= &kobj_sysfs_ops,
	.default_attrs	= irq_attrs,
};
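/*
 * The attributes above appear under /sys/kernel/irq/<irq>/ once
 * irq_sysfs_init() has registered the "irq" kobject below kernel_kobj.
 * Illustrative session (irq number and values are made up):
 *
 *	$ cat /sys/kernel/irq/18/actions
 *	i8042
 *	$ cat /sys/kernel/irq/18/per_cpu_count
 *	5,0,0,0
 */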
static void irq_sysfs_add(int irq, struct irq_desc *desc)
{
	if (irq_kobj_base) {
		/*
		 * Continue even in case of failure as this is nothing
		 * crucial.
		 */
		if (kobject_add(&desc->kobj, irq_kobj_base, "%d", irq))
			pr_warn("Failed to add kobject for irq %d\n", irq);
	}
}

static void irq_sysfs_del(struct irq_desc *desc)
{
	/*
	 * If irq_sysfs_init() has not yet been invoked (early boot), then
	 * irq_kobj_base is NULL and the descriptor was never added.
	 * kobject_del() complains about an object with no parent, so make
	 * it conditional.
	 */
	if (irq_kobj_base)
		kobject_del(&desc->kobj);
}
static int __init irq_sysfs_init(void)
{
	struct irq_desc *desc;
	int irq;

	/* Prevent concurrent irq alloc/free */
	irq_lock_sparse();

	irq_kobj_base = kobject_create_and_add("irq", kernel_kobj);
	if (!irq_kobj_base) {
		irq_unlock_sparse();
		return -ENOMEM;
	}

	/* Add the already allocated interrupts */
	for_each_irq_desc(irq, desc)
		irq_sysfs_add(irq, desc);
	irq_unlock_sparse();

	return 0;
}
postcore_initcall(irq_sysfs_init);

#else /* !CONFIG_SYSFS */

static struct kobj_type irq_kobj_type = {
	.release	= irq_kobj_release,
};

static void irq_sysfs_add(int irq, struct irq_desc *desc) {}
static void irq_sysfs_del(struct irq_desc *desc) {}

#endif /* CONFIG_SYSFS */
static RADIX_TREE(irq_desc_tree, GFP_KERNEL);

static void irq_insert_desc(unsigned int irq, struct irq_desc *desc)
{
	radix_tree_insert(&irq_desc_tree, irq, desc);
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return radix_tree_lookup(&irq_desc_tree, irq);
}
EXPORT_SYMBOL(irq_to_desc);

static void delete_irq_desc(unsigned int irq)
{
	radix_tree_delete(&irq_desc_tree, irq);
}

#ifdef CONFIG_SMP
static void free_masks(struct irq_desc *desc)
{
#ifdef CONFIG_GENERIC_PENDING_IRQ
	free_cpumask_var(desc->pending_mask);
#endif
	free_cpumask_var(desc->irq_common_data.affinity);
#ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK
	free_cpumask_var(desc->irq_common_data.effective_affinity);
#endif
}
#else
static inline void free_masks(struct irq_desc *desc) { }
#endif
/*
 * Serialize readers of the sparse irq descriptor tree against
 * concurrent descriptor allocation and free.
 */
void irq_lock_sparse(void)
{
	mutex_lock(&sparse_irq_lock);
}

void irq_unlock_sparse(void)
{
	mutex_unlock(&sparse_irq_lock);
}
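/*
 * Usage pattern (see kstat_irqs_usr() at the end of this file): a reader
 * in preemptible context brackets descriptor lookups so that a
 * concurrent irq_free_descs() cannot free the descriptor under it:
 *
 *	irq_lock_sparse();
 *	desc = irq_to_desc(irq);
 *	...
 *	irq_unlock_sparse();
 */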
static struct irq_desc *alloc_desc(int irq, int node, unsigned int flags,
				   const struct cpumask *affinity,
				   struct module *owner)
{
	struct irq_desc *desc;

	desc = kzalloc_node(sizeof(*desc), GFP_KERNEL, node);
	if (!desc)
		return NULL;

	/* allocate based on nr_cpu_ids */
	desc->kstat_irqs = alloc_percpu(unsigned int);
	if (!desc->kstat_irqs)
		goto err_desc;

	if (alloc_masks(desc, node))
		goto err_kstat;

	raw_spin_lock_init(&desc->lock);
	lockdep_set_class(&desc->lock, &irq_desc_lock_class);
	mutex_init(&desc->request_mutex);
	init_rcu_head(&desc->rcu);

	desc_set_defaults(irq, desc, node, affinity, owner);
	irqd_set(&desc->irq_data, flags);
	kobject_init(&desc->kobj, &irq_kobj_type);

	return desc;

err_kstat:
	free_percpu(desc->kstat_irqs);
err_desc:
	kfree(desc);
	return NULL;
}
static void irq_kobj_release(struct kobject *kobj)
{
	struct irq_desc *desc = container_of(kobj, struct irq_desc, kobj);

	free_masks(desc);
	free_percpu(desc->kstat_irqs);
	kfree(desc);
}

static void delayed_free_desc(struct rcu_head *rhp)
{
	struct irq_desc *desc = container_of(rhp, struct irq_desc, rcu);

	kobject_put(&desc->kobj);
}

static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	irq_remove_debugfs_entry(desc);
	unregister_irq_proc(irq, desc);

	/*
	 * sparse_irq_lock protects also show_interrupts() and
	 * kstat_irqs_usr(). Once we deleted the descriptor from the
	 * sparse tree we can free it. Access in proc will fail to
	 * look up the descriptor.
	 *
	 * The sysfs entry must be serialized against a concurrent
	 * irq_sysfs_init() as well.
	 */
	irq_sysfs_del(desc);
	delete_irq_desc(irq);

	/*
	 * We free the descriptor, masks and stat fields via RCU. That
	 * allows demultiplex interrupts to do rcu based management of
	 * the child interrupts.
	 */
	call_rcu(&desc->rcu, delayed_free_desc);
}
static int alloc_descs(unsigned int start, unsigned int cnt, int node,
		       const struct cpumask *affinity, struct module *owner)
{
	const struct cpumask *mask = NULL;
	struct irq_desc *desc;
	unsigned int flags;
	int i;

	/* Validate affinity mask(s) */
	if (affinity) {
		for (i = 0, mask = affinity; i < cnt; i++, mask++) {
			if (cpumask_empty(mask))
				return -EINVAL;
		}
	}

	flags = affinity ? IRQD_AFFINITY_MANAGED : 0;
	mask = NULL;

	for (i = 0; i < cnt; i++) {
		if (affinity) {
			node = cpu_to_node(cpumask_first(affinity));
			mask = affinity;
			affinity++;
		}
		desc = alloc_desc(start + i, node, flags, mask, owner);
		if (!desc)
			goto err;
		irq_insert_desc(start + i, desc);
		irq_sysfs_add(start + i, desc);
	}
	bitmap_set(allocated_irqs, start, cnt);
	return start;

err:
	for (i--; i >= 0; i--)
		free_desc(start + i);
	return -ENOMEM;
}
static int irq_expand_nr_irqs(unsigned int nr)
{
	if (nr > IRQ_BITMAP_BITS)
		return -ENOMEM;
	nr_irqs = nr;
	return 0;
}
int __init early_irq_init(void)
{
	int i, initcnt, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	/* Let arch update nr_irqs and return the nr of preallocated irqs */
	initcnt = arch_probe_nr_irqs();
	printk(KERN_INFO "NR_IRQS: %d, nr_irqs: %d, preallocated irqs: %d\n",
	       NR_IRQS, nr_irqs, initcnt);

	if (WARN_ON(nr_irqs > IRQ_BITMAP_BITS))
		nr_irqs = IRQ_BITMAP_BITS;
	if (WARN_ON(initcnt > IRQ_BITMAP_BITS))
		initcnt = IRQ_BITMAP_BITS;

	if (initcnt > nr_irqs)
		nr_irqs = initcnt;

	for (i = 0; i < initcnt; i++) {
		desc = alloc_desc(i, node, 0, NULL, NULL);
		set_bit(i, allocated_irqs);
		irq_insert_desc(i, desc);
	}
	return arch_early_irq_init();
}
#else /* !CONFIG_SPARSE_IRQ */

struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
	[0 ... NR_IRQS-1] = {
		.handle_irq	= handle_bad_irq,
		.depth		= 1,
		.lock		= __RAW_SPIN_LOCK_UNLOCKED(irq_desc->lock),
	}
};

int __init early_irq_init(void)
{
	int count, i, node = first_online_node;
	struct irq_desc *desc;

	init_irq_default_affinity();

	printk(KERN_INFO "NR_IRQS: %d\n", NR_IRQS);

	desc = irq_desc;
	count = ARRAY_SIZE(irq_desc);

	for (i = 0; i < count; i++) {
		desc[i].kstat_irqs = alloc_percpu(unsigned int);
		alloc_masks(&desc[i], node);
		raw_spin_lock_init(&desc[i].lock);
		lockdep_set_class(&desc[i].lock, &irq_desc_lock_class);
		mutex_init(&desc[i].request_mutex);
		desc_set_defaults(i, &desc[i], node, NULL, NULL);
	}
	return arch_early_irq_init();
}

struct irq_desc *irq_to_desc(unsigned int irq)
{
	return (irq < NR_IRQS) ? irq_desc + irq : NULL;
}
EXPORT_SYMBOL(irq_to_desc);
static void free_desc(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned long flags;

	raw_spin_lock_irqsave(&desc->lock, flags);
	desc_set_defaults(irq, desc, irq_desc_get_node(desc), NULL, NULL);
	raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static inline int alloc_descs(unsigned int start, unsigned int cnt, int node,
			      const struct cpumask *affinity,
			      struct module *owner)
{
	u32 i;

	for (i = 0; i < cnt; i++) {
		struct irq_desc *desc = irq_to_desc(start + i);

		desc->owner = owner;
	}
	bitmap_set(allocated_irqs, start, cnt);
	return start;
}

static int irq_expand_nr_irqs(unsigned int nr)
{
	return -ENOMEM;
}

void irq_mark_irq(unsigned int irq)
{
	mutex_lock(&sparse_irq_lock);
	bitmap_set(allocated_irqs, irq, 1);
	mutex_unlock(&sparse_irq_lock);
}

#ifdef CONFIG_GENERIC_IRQ_LEGACY
void irq_init_desc(unsigned int irq)
{
	free_desc(irq);
}
#endif

#endif /* !CONFIG_SPARSE_IRQ */
/**
 * generic_handle_irq - Invoke the handler for a particular irq
 * @irq:	The irq number to handle
 */
int generic_handle_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;
	generic_handle_irq_desc(desc);
	return 0;
}
EXPORT_SYMBOL_GPL(generic_handle_irq);
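/*
 * Illustrative caller (a sketch, not taken from this file): a chained
 * demultiplex handler for a hypothetical interrupt controller resolves
 * each pending child hwirq to a Linux irq and hands it off here.
 * read_pending(), NR_CHILD_IRQS and "domain" are placeholders:
 *
 *	static void demux_handler(struct irq_desc *desc)
 *	{
 *		unsigned long pending = read_pending();
 *		int bit;
 *
 *		for_each_set_bit(bit, &pending, NR_CHILD_IRQS)
 *			generic_handle_irq(irq_find_mapping(domain, bit));
 *	}
 */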
#ifdef CONFIG_HANDLE_DOMAIN_IRQ
#ifdef CONFIG_MTK_SCHED_TRACERS
#include <trace/events/mtk_events.h>
#endif

/**
 * __handle_domain_irq - Invoke the handler for a HW irq belonging to a domain
 * @domain:	The domain where to perform the lookup
 * @hwirq:	The HW irq number to convert to a logical one
 * @lookup:	Whether to perform the domain lookup or not
 * @regs:	Register file coming from the low-level handling code
 *
 * Returns:	0 on success, or -EINVAL if conversion has failed
 */
int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
			bool lookup, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	unsigned int irq = hwirq;
	int ret = 0;
#ifdef CONFIG_MTK_SCHED_TRACERS
	struct irq_desc *desc;
#endif

	irq_enter();

#ifdef CONFIG_IRQ_DOMAIN
	if (lookup)
		irq = irq_find_mapping(domain, hwirq);
#endif

#ifdef CONFIG_MTK_SCHED_TRACERS
	desc = irq_to_desc(irq);
	trace_irq_entry(irq, (desc && desc->action && desc->action->name) ?
			desc->action->name : "-");
#endif
	/*
	 * Some hardware gives randomly wrong interrupts. Rather
	 * than crashing, do something sensible.
	 */
	if (unlikely(!irq || irq >= nr_irqs)) {
		ack_bad_irq(irq);
		ret = -EINVAL;
	} else {
		unsigned long long ts;
		int count;

		/* MTK vendor instrumentation: time the handler invocation */
		check_start_time_preempt(irq_note, count, ts, irq);
		generic_handle_irq(irq);
		check_process_time_preempt(irq_note, count, "irq %d %s", ts,
					   irq, irq_to_name(irq));
	}

#ifdef CONFIG_MTK_SCHED_TRACERS
	trace_irq_exit(irq);
#endif
	irq_exit();
	set_irq_regs(old_regs);
	return ret;
}
#endif
/* Dynamic interrupt handling */

/**
 * irq_free_descs - free irq descriptors
 * @from:	Start of descriptor range
 * @cnt:	Number of consecutive irqs to free
 */
void irq_free_descs(unsigned int from, unsigned int cnt)
{
	int i;

	if (from >= nr_irqs || (from + cnt) > nr_irqs)
		return;

	mutex_lock(&sparse_irq_lock);
	for (i = 0; i < cnt; i++)
		free_desc(from + i);

	bitmap_clear(allocated_irqs, from, cnt);
	mutex_unlock(&sparse_irq_lock);
}
EXPORT_SYMBOL_GPL(irq_free_descs);
/**
 * __irq_alloc_descs - allocate and initialize a range of irq descriptors
 * @irq:	Allocate for specific irq number if irq >= 0
 * @from:	Start the search from this irq number
 * @cnt:	Number of consecutive irqs to allocate.
 * @node:	Preferred node on which the irq descriptor should be allocated
 * @owner:	Owning module (can be NULL)
 * @affinity:	Optional pointer to an affinity mask array of size @cnt which
 *		hints where the irq descriptors should be allocated and which
 *		default affinities to use
 *
 * Returns the first irq number or error code
 */
int __ref
__irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node,
		  struct module *owner, const struct cpumask *affinity)
{
	int start, ret;

	if (!cnt)
		return -EINVAL;

	if (irq >= 0) {
		if (from > irq)
			return -EINVAL;
		from = irq;
	} else {
		/*
		 * For interrupts which are freely allocated the
		 * architecture can force a lower bound to the @from
		 * argument. x86 uses this to exclude the GSI space.
		 */
		from = arch_dynirq_lower_bound(from);
	}

	mutex_lock(&sparse_irq_lock);

	start = bitmap_find_next_zero_area(allocated_irqs, IRQ_BITMAP_BITS,
					   from, cnt, 0);
	ret = -EEXIST;
	if (irq >= 0 && start != irq)
		goto unlock;

	if (start + cnt > nr_irqs) {
		ret = irq_expand_nr_irqs(start + cnt);
		if (ret)
			goto unlock;
	}
	ret = alloc_descs(start, cnt, node, affinity, owner);
unlock:
	mutex_unlock(&sparse_irq_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(__irq_alloc_descs);
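/*
 * Illustrative use via the irq_alloc_descs()/irq_free_descs() pair (a
 * sketch; error handling trimmed). A driver needing four dynamically
 * numbered interrupts could do:
 *
 *	int base = irq_alloc_descs(-1, 0, 4, numa_node_id());
 *
 *	if (base < 0)
 *		return base;
 *	...use base .. base + 3...
 *	irq_free_descs(base, 4);
 */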
#ifdef CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
/**
 * irq_alloc_hwirqs - Allocate an irq descriptor and initialize the hardware
 * @cnt:	number of interrupts to allocate
 * @node:	node on which to allocate
 *
 * Returns an interrupt number > 0 or 0, if the allocation fails.
 */
unsigned int irq_alloc_hwirqs(int cnt, int node)
{
	int i, irq = __irq_alloc_descs(-1, 0, cnt, node, NULL, NULL);

	if (irq < 0)
		return 0;

	for (i = irq; cnt > 0; i++, cnt--) {
		if (arch_setup_hwirq(i, node))
			goto err;
		irq_clear_status_flags(i, _IRQ_NOREQUEST);
	}
	return irq;

err:
	for (i--; i >= irq; i--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(irq, cnt);
	return 0;
}
EXPORT_SYMBOL_GPL(irq_alloc_hwirqs);
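/*
 * Sketch of the legacy pairing (only on architectures selecting
 * CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ); irq_free_hwirqs() is defined
 * just below:
 *
 *	unsigned int irq = irq_alloc_hwirqs(2, numa_node_id());
 *
 *	if (!irq)
 *		return -ENOSPC;
 *	...
 *	irq_free_hwirqs(irq, 2);
 */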
/**
 * irq_free_hwirqs - Free irq descriptor and cleanup the hardware
 * @from:	Free from irq number
 * @cnt:	number of interrupts to free
 */
void irq_free_hwirqs(unsigned int from, int cnt)
{
	int i, j;

	for (i = from, j = cnt; j > 0; i++, j--) {
		irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
		arch_teardown_hwirq(i);
	}
	irq_free_descs(from, cnt);
}
EXPORT_SYMBOL_GPL(irq_free_hwirqs);
#endif
/**
 * irq_get_next_irq - get next allocated irq number
 * @offset:	where to start the search
 *
 * Returns next irq number after offset or nr_irqs if none is found.
 */
unsigned int irq_get_next_irq(unsigned int offset)
{
	return find_next_bit(allocated_irqs, nr_irqs, offset);
}
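/*
 * e.g. iterating over all allocated irq numbers (sketch):
 *
 *	for (irq = irq_get_next_irq(0); irq < nr_irqs;
 *	     irq = irq_get_next_irq(irq + 1))
 *		...
 */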
struct irq_desc *
__irq_get_desc_lock(unsigned int irq, unsigned long *flags, bool bus,
		    unsigned int check)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		if (check & _IRQ_DESC_CHECK) {
			if ((check & _IRQ_DESC_PERCPU) &&
			    !irq_settings_is_per_cpu_devid(desc))
				return NULL;

			if (!(check & _IRQ_DESC_PERCPU) &&
			    irq_settings_is_per_cpu_devid(desc))
				return NULL;
		}

		if (bus)
			chip_bus_lock(desc);
		raw_spin_lock_irqsave(&desc->lock, *flags);
	}
	return desc;
}

void __irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags, bool bus)
{
	raw_spin_unlock_irqrestore(&desc->lock, flags);
	if (bus)
		chip_bus_sync_unlock(desc);
}
int irq_set_percpu_devid_partition(unsigned int irq,
				   const struct cpumask *affinity)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc)
		return -EINVAL;

	if (desc->percpu_enabled)
		return -EINVAL;

	desc->percpu_enabled = kzalloc(sizeof(*desc->percpu_enabled), GFP_KERNEL);

	if (!desc->percpu_enabled)
		return -ENOMEM;

	if (affinity)
		desc->percpu_affinity = affinity;
	else
		desc->percpu_affinity = cpu_possible_mask;

	irq_set_percpu_devid_flags(irq);
	return 0;
}

int irq_set_percpu_devid(unsigned int irq)
{
	return irq_set_percpu_devid_partition(irq, NULL);
}
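/*
 * Typical driver sequence for a per-CPU interrupt such as a local timer
 * (a sketch; "handler" and "pcpu_dev" are placeholders):
 *
 *	irq_set_percpu_devid(virq);
 *	err = request_percpu_irq(virq, handler, "local-timer", pcpu_dev);
 */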
int irq_get_percpu_devid_partition(unsigned int irq, struct cpumask *affinity)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (!desc || !desc->percpu_enabled)
		return -EINVAL;

	if (affinity)
		cpumask_copy(affinity, desc->percpu_affinity);

	return 0;
}

void kstat_incr_irq_this_cpu(unsigned int irq)
{
	kstat_incr_irqs_this_cpu(irq_to_desc(irq));
}
/**
 * kstat_irqs_cpu - Get the statistics for an interrupt on a cpu
 * @irq:	The interrupt number
 * @cpu:	The cpu number
 *
 * Returns the sum of interrupt counts on @cpu since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
{
	struct irq_desc *desc = irq_to_desc(irq);

	return desc && desc->kstat_irqs ?
			*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}

/**
 * kstat_irqs - Get the statistics for an interrupt
 * @irq:	The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned int sum = 0;
	int cpu;

	if (!desc || !desc->kstat_irqs)
		return 0;
	if (!irq_settings_is_per_cpu_devid(desc) &&
	    !irq_settings_is_per_cpu(desc))
		return desc->tot_count;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}

/**
 * kstat_irqs_usr - Get the statistics for an interrupt
 * @irq:	The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. Contrary to kstat_irqs() this can be called from any
 * preemptible context. It's protected against concurrent removal of
 * an interrupt descriptor when sparse irqs are enabled.
 */
unsigned int kstat_irqs_usr(unsigned int irq)
{
	unsigned int sum;

	irq_lock_sparse();
	sum = kstat_irqs(irq);
	irq_unlock_sparse();
	return sum;
}
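/*
 * Note: procfs readers are the typical consumers of this interface;
 * e.g. the /proc/stat code sums interrupt counts via kstat_irqs_usr()
 * so that a read from user space is safe against a concurrent
 * irq_free_descs().
 */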