#ifndef __LINUX_PREEMPT_H
#define __LINUX_PREEMPT_H

/*
 * include/linux/preempt.h - macros for accessing and manipulating
 * preempt_count (used for kernel preemption, interrupt count, etc.)
 */

#include <linux/linkage.h>
#include <linux/list.h>

/*
 * We put the hardirq and softirq counter into the preemption
 * counter. The bitmask has the following meaning:
 *
 * - bits 0-7 are the preemption count (max preemption depth: 256)
 * - bits 8-15 are the softirq count (max # of softirqs: 256)
 *
 * The hardirq count could in theory be the same as the number of
 * interrupts in the system, but we run all interrupt handlers with
 * interrupts disabled, so we cannot have nesting interrupts. Though
 * there are a few palaeontologic drivers which re-enable interrupts in
 * the handler, so we need more than one bit here.
 *
 *         PREEMPT_MASK:		0x000000ff
 *         SOFTIRQ_MASK:		0x0000ff00
 *         HARDIRQ_MASK:		0x000f0000
 *             NMI_MASK:		0x00100000
 * PREEMPT_NEED_RESCHED:		0x80000000
 */
#define PREEMPT_BITS		8
#define SOFTIRQ_BITS		8
#define HARDIRQ_BITS		4
#define NMI_BITS		1

#define PREEMPT_SHIFT		0
#define SOFTIRQ_SHIFT		(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT		(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define NMI_SHIFT		(HARDIRQ_SHIFT + HARDIRQ_BITS)

#define __IRQ_MASK(x)		((1UL << (x))-1)

#define PREEMPT_MASK		(__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define SOFTIRQ_MASK		(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK		(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define NMI_MASK		(__IRQ_MASK(NMI_BITS) << NMI_SHIFT)

#define PREEMPT_OFFSET		(1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET		(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET		(1UL << HARDIRQ_SHIFT)
#define NMI_OFFSET		(1UL << NMI_SHIFT)

#define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)
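/*
 * Worked example (illustrative; the value below is hypothetical): with the
 * layout above, a preempt_count() of 0x00010102 decodes as
 *
 *	(0x00010102 & HARDIRQ_MASK) >> HARDIRQ_SHIFT == 1   one hardirq level
 *	(0x00010102 & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT == 1   SOFTIRQ_OFFSET,
 *	                                                    i.e. serving a softirq
 *	(0x00010102 & PREEMPT_MASK) >> PREEMPT_SHIFT == 2   preempt depth of 2
 *
 * i.e. a hard interrupt arrived while a softirq was being served with the
 * preemption count raised twice.
 */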
/* We use the MSB mostly because it's available */
#define PREEMPT_NEED_RESCHED	0x80000000

/* preempt_count() and related functions, which depend on PREEMPT_NEED_RESCHED */
#include <asm/preempt.h>

#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
				 | NMI_MASK))

/*
 * Are we doing bottom half or hardware interrupt processing?
 *
 * in_irq()		- We're in (hard) IRQ context
 * in_softirq()		- We have BH disabled, or are processing softirqs
 * in_interrupt()	- We're in NMI, IRQ or SoftIRQ context, or have BH disabled
 * in_serving_softirq()	- We're in softirq context
 * in_nmi()		- We're in NMI context
 * in_task()		- We're in task context
 *
 * Note: because of the BH-disabled ambiguity, in_softirq() and in_interrupt()
 * really should not be used in new code.
 */
#define in_irq()		(hardirq_count())
#define in_softirq()		(softirq_count())
#define in_interrupt()		(irq_count())
#define in_serving_softirq()	(softirq_count() & SOFTIRQ_OFFSET)
#define in_nmi()		(preempt_count() & NMI_MASK)
#define in_task()		(!(preempt_count() & \
				   (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
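/*
 * Usage sketch (illustrative; my_buf_alloc() is hypothetical, not part of
 * this header): code that may run in either task or atomic context can pick
 * an allocation mode accordingly.
 *
 *	static void *my_buf_alloc(size_t size)
 *	{
 *		if (in_task())
 *			return kmalloc(size, GFP_KERNEL);
 *		return kmalloc(size, GFP_ATOMIC);
 *	}
 *
 * GFP_KERNEL may sleep and is only safe in task context; GFP_ATOMIC covers
 * the interrupt/softirq cases.
 */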
/*
 * The preempt_count offset after preempt_disable();
 */
#if defined(CONFIG_PREEMPT_COUNT)
# define PREEMPT_DISABLE_OFFSET	PREEMPT_OFFSET
#else
# define PREEMPT_DISABLE_OFFSET	0
#endif

/*
 * The preempt_count offset after spin_lock()
 */
#define PREEMPT_LOCK_OFFSET	PREEMPT_DISABLE_OFFSET

/*
 * The preempt_count offset needed for things like:
 *
 *  spin_lock_bh()
 *
 * which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
 * softirqs, such that unlock sequences like:
 *
 *  spin_unlock();
 *  local_bh_enable();
 *
 * work as expected.
 */
#define SOFTIRQ_LOCK_OFFSET	(SOFTIRQ_DISABLE_OFFSET + PREEMPT_LOCK_OFFSET)
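/*
 * Illustrative sketch (my_lock is hypothetical): the split unlock sequence
 * from the comment above balances SOFTIRQ_LOCK_OFFSET in two steps.
 *
 *	spin_lock_bh(&my_lock);		adds SOFTIRQ_LOCK_OFFSET
 *	...critical section...
 *	spin_unlock(&my_lock);		drops PREEMPT_LOCK_OFFSET
 *	local_bh_enable();		drops SOFTIRQ_DISABLE_OFFSET
 */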
/*
 * Are we running in atomic context? WARNING: this macro cannot
 * always detect atomic context; in particular, it cannot know about
 * held spinlocks in non-preemptible kernels. Thus it should not be
 * used in the general case to determine whether sleeping is possible.
 * Do not use in_atomic() in driver code.
 */
#define in_atomic()	(preempt_count() != 0)
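/*
 * Example of the caveat above (illustrative; my_lock is hypothetical): on a
 * kernel built without CONFIG_PREEMPT_COUNT, spin_lock() does not touch
 * preempt_count, so
 *
 *	spin_lock(&my_lock);
 *	BUG_ON(in_atomic());	does not trigger, yet sleeping here
 *				would still be a bug
 */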
/*
 * Check whether we were atomic before we did preempt_disable():
 * (used by the scheduler)
 */
#define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET)

#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
#define preempt_count_dec_and_test() \
	({ preempt_count_sub(1); should_resched(0); })
#else
#define preempt_count_add(val)	__preempt_count_add(val)
#define preempt_count_sub(val)	__preempt_count_sub(val)
#define preempt_count_dec_and_test() __preempt_count_dec_and_test()
#endif

#define __preempt_count_inc() __preempt_count_add(1)
#define __preempt_count_dec() __preempt_count_sub(1)

#define preempt_count_inc() preempt_count_add(1)
#define preempt_count_dec() preempt_count_sub(1)

#ifdef CONFIG_PREEMPT_COUNT

#define preempt_disable() \
do { \
	preempt_count_inc(); \
	barrier(); \
} while (0)

#define sched_preempt_enable_no_resched() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_no_resched() sched_preempt_enable_no_resched()

#define preemptible()	(preempt_count() == 0 && !irqs_disabled())
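/*
 * Typical usage (illustrative sketch): smp_processor_id() is only stable
 * while preemption is disabled, since the task cannot migrate to another
 * CPU inside the region.
 *
 *	preempt_disable();
 *	cpu = smp_processor_id();
 *	...use per-CPU state of 'cpu'...
 *	preempt_enable();	may call __preempt_schedule() if a
 *				reschedule became due meanwhile
 */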
#ifdef CONFIG_PREEMPT
#define preempt_enable() \
do { \
	barrier(); \
	if (unlikely(preempt_count_dec_and_test())) \
		__preempt_schedule(); \
} while (0)

#define preempt_enable_notrace() \
do { \
	barrier(); \
	if (unlikely(__preempt_count_dec_and_test())) \
		__preempt_schedule_notrace(); \
} while (0)

#define preempt_check_resched() \
do { \
	if (should_resched(0)) \
		__preempt_schedule(); \
} while (0)

#else /* !CONFIG_PREEMPT */
#define preempt_enable() \
do { \
	barrier(); \
	preempt_count_dec(); \
} while (0)

#define preempt_enable_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#define preempt_check_resched()		do { } while (0)
#endif /* CONFIG_PREEMPT */

#define preempt_disable_notrace() \
do { \
	__preempt_count_inc(); \
	barrier(); \
} while (0)

#define preempt_enable_no_resched_notrace() \
do { \
	barrier(); \
	__preempt_count_dec(); \
} while (0)

#else /* !CONFIG_PREEMPT_COUNT */

/*
 * Even if we don't have any preemption, we need preempt disable/enable
 * to be barriers, so that operations like get_user/put_user, which can
 * fault and schedule, do not migrate into our preempt-protected region.
 */
#define preempt_disable()			barrier()
#define sched_preempt_enable_no_resched()	barrier()
#define preempt_enable_no_resched()		barrier()
#define preempt_enable()			barrier()
#define preempt_check_resched()			do { } while (0)
#define preempt_disable_notrace()		barrier()
#define preempt_enable_no_resched_notrace()	barrier()
#define preempt_enable_notrace()		barrier()
#define preemptible()				0

#endif /* CONFIG_PREEMPT_COUNT */
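/*
 * Illustrative sketch of why even the empty variants are compiler barriers
 * (shared_flag is hypothetical): without barrier(), the compiler could move
 * the store out of the protected region.
 *
 *	preempt_disable();
 *	shared_flag = 1;	must not be reordered past the
 *				disable/enable boundaries
 *	preempt_enable();
 */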
#ifdef MODULE
/*
 * Modules have no business playing preemption tricks.
 */
#undef sched_preempt_enable_no_resched
#undef preempt_enable_no_resched
#undef preempt_enable_no_resched_notrace
#undef preempt_check_resched
#endif

#define preempt_set_need_resched() \
do { \
	set_preempt_need_resched(); \
} while (0)

#define preempt_fold_need_resched() \
do { \
	if (tif_need_resched()) \
		set_preempt_need_resched(); \
} while (0)
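/*
 * set_preempt_need_resched() records the need-resched state in the
 * PREEMPT_NEED_RESCHED view of preempt_count; preempt_fold_need_resched()
 * folds an already-set TIF_NEED_RESCHED thread flag into it, so that the
 * dec-and-test in preempt_enable() can observe it and reschedule.
 */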
#ifdef CONFIG_PREEMPT_NOTIFIERS

struct preempt_notifier;

/**
 * preempt_ops - notifiers called when a task is preempted and rescheduled
 * @sched_in: we're about to be rescheduled:
 *    notifier: struct preempt_notifier for the task being scheduled
 *    cpu:  cpu we're scheduled on
 * @sched_out: we've just been preempted
 *    notifier: struct preempt_notifier for the task being preempted
 *    next: the task that's kicking us out
 *
 * Please note that sched_in and sched_out are called under different
 * contexts: sched_out is called with the rq lock held and irqs disabled,
 * while sched_in is called without the rq lock and with irqs enabled. This
 * difference is intentional and depended upon by its users.
 */
struct preempt_ops {
	void (*sched_in)(struct preempt_notifier *notifier, int cpu);
	void (*sched_out)(struct preempt_notifier *notifier,
			  struct task_struct *next);
};

/**
 * preempt_notifier - key for installing preemption notifiers
 * @link: internal use
 * @ops: defines the notifier functions to be called
 *
 * Usually used in conjunction with container_of().
 */
struct preempt_notifier {
	struct hlist_node link;
	struct preempt_ops *ops;
};

void preempt_notifier_inc(void);
void preempt_notifier_dec(void);
void preempt_notifier_register(struct preempt_notifier *notifier);
void preempt_notifier_unregister(struct preempt_notifier *notifier);

static inline void preempt_notifier_init(struct preempt_notifier *notifier,
				     struct preempt_ops *ops)
{
	INIT_HLIST_NODE(&notifier->link);
	notifier->ops = ops;
}
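/*
 * Usage sketch (illustrative; struct my_vcpu and the callbacks are
 * hypothetical). The notifier is typically embedded in a larger object
 * and recovered with container_of():
 *
 *	struct my_vcpu {
 *		struct preempt_notifier pn;
 *		...
 *	};
 *
 *	static void my_sched_in(struct preempt_notifier *n, int cpu)
 *	{
 *		struct my_vcpu *v = container_of(n, struct my_vcpu, pn);
 *		...restore per-CPU state for v...
 *	}
 *
 *	static struct preempt_ops my_ops = { .sched_in = my_sched_in };
 *
 *	preempt_notifier_init(&v->pn, &my_ops);
 *	preempt_notifier_register(&v->pn);
 */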
#endif

#endif /* __LINUX_PREEMPT_H */