/*
 * kernel/mutex.c
 *
 * Mutexes: blocking mutual exclusion locks
 *
 * Started by Ingo Molnar:
 *
 *  Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
 *
 * Many thanks to Arjan van de Ven, Thomas Gleixner, Steven Rostedt and
 * David Howells for suggestions and improvements.
 *
 *  - Adaptive spinning for mutexes by Peter Zijlstra. (Ported to mainline
 *    from the -rt tree, where it was originally implemented for rtmutexes
 *    by Steven Rostedt, based on work by Gregory Haskins, Peter Morreale
 *    and Sven Dietrich.
 *
 * Also see Documentation/mutex-design.txt.
 */
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/debug_locks.h>

/*
 * In the DEBUG case we are using the "NULL fastpath" for mutexes,
 * which forces all calls into the slowpath:
 */
#ifdef CONFIG_DEBUG_MUTEXES
# include "mutex-debug.h"
# include <asm-generic/mutex-null.h>
#else
# include "mutex.h"
# include <asm/mutex.h>
#endif
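
/*
 * For reference, the "NULL fastpath" in <asm-generic/mutex-null.h>
 * simply forwards every fastpath call to the given slowpath function.
 * A sketch from memory of the asm-generic header (not verbatim):
 *
 *	#define __mutex_fastpath_lock(count, fail_fn)		fail_fn(count)
 *	#define __mutex_fastpath_lock_retval(count, fail_fn)	fail_fn(count)
 *	#define __mutex_fastpath_unlock(count, fail_fn)		fail_fn(count)
 *	#define __mutex_fastpath_trylock(count, fail_fn)	fail_fn(count)
 *	#define __mutex_slowpath_needs_to_unlock()		1
 *
 * so with CONFIG_DEBUG_MUTEXES every operation takes the slowpath and
 * therefore runs the debug checks under wait_lock.
 */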
void
__mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
{
	atomic_set(&lock->count, 1);
	spin_lock_init(&lock->wait_lock);
	INIT_LIST_HEAD(&lock->wait_list);
	mutex_clear_owner(lock);

	debug_mutex_init(lock, name, key);
}
EXPORT_SYMBOL(__mutex_init);
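
/*
 * Usage sketch (illustrative only; 'my_object' and friends are made-up
 * names, not part of this file). A mutex is either defined statically
 * or initialized before first use - never memset() to zero:
 *
 *	static DEFINE_MUTEX(my_static_lock);
 *
 *	struct my_object {
 *		struct mutex lock;
 *	};
 *
 *	void my_object_setup(struct my_object *obj)
 *	{
 *		mutex_init(&obj->lock);
 *	}
 */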
#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * We split the mutex lock/unlock logic into separate fastpath and
 * slowpath functions, to reduce the register pressure on the fastpath.
 * We also put the fastpath first in the kernel image, to make sure the
 * branch is predicted by the CPU as default-untaken.
 */
static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count);

/**
 * mutex_lock - acquire the mutex
 * @lock: the mutex to be acquired
 *
 * Lock the mutex exclusively for this task. If the mutex is not
 * available right now, it will sleep until it can get it.
 *
 * The mutex must later on be released by the same task that
 * acquired it. Recursive locking is not allowed. The task
 * may not exit without first unlocking the mutex. Also, the
 * memory where the mutex resides must not be freed while the
 * mutex is still locked. The mutex must first be initialized
 * (or statically defined) before it can be locked. memset()-ing
 * the mutex to 0 is not allowed.
 *
 * ( The CONFIG_DEBUG_MUTEXES .config option turns on debugging
 *   checks that will enforce the restrictions and will also do
 *   deadlock debugging. )
 *
 * This function is similar to (but not equivalent to) down().
 */
void __sched mutex_lock(struct mutex *lock)
{
	might_sleep();
	/*
	 * The locking fastpath is the 1->0 transition from
	 * 'unlocked' into 'locked' state.
	 */
	__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
	mutex_set_owner(lock);
}
EXPORT_SYMBOL(mutex_lock);
#endif
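
/*
 * Usage sketch (illustrative; 'obj' is a made-up example): the task
 * that locks is the one that must unlock, and the critical section
 * in between may sleep:
 *
 *	mutex_lock(&obj->lock);
 *	... critical section, may block ...
 *	mutex_unlock(&obj->lock);
 */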
static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);

/**
 * mutex_unlock - release the mutex
 * @lock: the mutex to be released
 *
 * Unlock a mutex that has been locked by this task previously.
 *
 * This function must not be used in interrupt context. Unlocking
 * a mutex that is not locked is not allowed.
 *
 * This function is similar to (but not equivalent to) up().
 */
void __sched mutex_unlock(struct mutex *lock)
{
	/*
	 * The unlocking fastpath is the 0->1 transition from 'locked'
	 * into 'unlocked' state:
	 */
#ifndef CONFIG_DEBUG_MUTEXES
	/*
	 * When debugging is enabled we must not clear the owner before time:
	 * the slow path is always taken there, and it clears the owner field
	 * after verifying that the owner was indeed current.
	 */
	mutex_clear_owner(lock);
#endif
	__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
}
EXPORT_SYMBOL(mutex_unlock);

/*
 * Lock a mutex (possibly interruptible), slowpath:
 */
static inline int __sched
__mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
		    struct lockdep_map *nest_lock, unsigned long ip)
{
	struct task_struct *task = current;
	struct mutex_waiter waiter;
	unsigned long flags;

	preempt_disable();
	mutex_acquire_nest(&lock->dep_map, subclass, 0, nest_lock, ip);

#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
	/*
	 * Optimistic spinning.
	 *
	 * We try to spin for acquisition when we find that there are no
	 * pending waiters and the lock owner is currently running on a
	 * (different) CPU.
	 *
	 * The rationale is that if the lock owner is running, it is likely to
	 * release the lock soon.
	 *
	 * Since this needs the lock owner, and this mutex implementation
	 * doesn't track the owner atomically in the lock field, we need to
	 * track it non-atomically.
	 *
	 * We can't do this for DEBUG_MUTEXES because that relies on wait_lock
	 * to serialize everything.
	 */
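	/*
	 * mutex_spin_on_owner() is implemented in the scheduler code; as
	 * a rough sketch (details vary by kernel version), it spins while
	 * the owner is still running on its CPU:
	 *
	 *	while (owner is on_cpu && lock->owner == owner) {
	 *		if (need_resched())
	 *			break;
	 *		arch_mutex_cpu_relax();
	 *	}
	 *	return lock->owner == NULL;
	 *
	 * i.e. it reports success only if the lock was released, and
	 * failure if the owner blocked or we ourselves must reschedule.
	 */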
	for (;;) {
		struct task_struct *owner;

		/*
		 * If there's an owner, wait for it to either
		 * release the lock or go to sleep.
		 */
		owner = ACCESS_ONCE(lock->owner);
		if (owner && !mutex_spin_on_owner(lock, owner))
			break;

		if (atomic_cmpxchg(&lock->count, 1, 0) == 1) {
			lock_acquired(&lock->dep_map, ip);
			mutex_set_owner(lock);
			preempt_enable();
			return 0;
		}

		/*
		 * When there's no owner, we might have been preempted between
		 * the owner acquiring the lock and setting the owner field. If
		 * we're an RT task, that will live-lock because we won't let
		 * the owner complete.
		 */
		if (!owner && (need_resched() || rt_task(task)))
			break;

		/*
		 * The cpu_relax() call is a compiler barrier which forces
		 * everything in this loop to be re-loaded. We don't need
		 * memory barriers as we'll eventually observe the right
		 * values at the cost of a few extra spins.
		 */
		arch_mutex_cpu_relax();
	}
#endif
	spin_lock_mutex(&lock->wait_lock, flags);

	debug_mutex_lock_common(lock, &waiter);
	debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));

	/* add waiting tasks to the end of the waitqueue (FIFO): */
	list_add_tail(&waiter.list, &lock->wait_list);
	waiter.task = task;

	if (atomic_xchg(&lock->count, -1) == 1)
		goto done;

	lock_contended(&lock->dep_map, ip);

	for (;;) {
		/*
		 * Let's try to take the lock again - this is needed even if
		 * we get here for the first time (shortly after failing to
		 * acquire the lock), to make sure that we get a wakeup once
		 * it's unlocked. Later on, if we sleep, this is the
		 * operation that gives us the lock. We xchg it to -1, so
		 * that when we release the lock, we properly wake up the
		 * other waiters:
		 */
		if (atomic_xchg(&lock->count, -1) == 1)
			break;

		/*
		 * got a signal? (This code gets eliminated in the
		 * TASK_UNINTERRUPTIBLE case.)
		 */
		if (unlikely(signal_pending_state(state, task))) {
			mutex_remove_waiter(lock, &waiter,
					    task_thread_info(task));
			mutex_release(&lock->dep_map, 1, ip);
			spin_unlock_mutex(&lock->wait_lock, flags);

			debug_mutex_free_waiter(&waiter);
			preempt_enable();
			return -EINTR;
		}
		__set_task_state(task, state);

		/* didn't get the lock, go to sleep: */
		spin_unlock_mutex(&lock->wait_lock, flags);
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
		spin_lock_mutex(&lock->wait_lock, flags);
	}

done:
	lock_acquired(&lock->dep_map, ip);
	/* got the lock - rejoice! */
	mutex_remove_waiter(lock, &waiter, current_thread_info());
	mutex_set_owner(lock);

	/* set it to 0 if there are no waiters left: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	debug_mutex_free_waiter(&waiter);
	preempt_enable();
	return 0;
}
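
/*
 * To summarize the lock->count protocol implemented above:
 *
 *	 1 - unlocked
 *	 0 - locked, no waiters pending
 *	-1 - locked, (possibly) waiters pending
 *
 * The fastpaths only perform the 1->0 (lock) and 0->1 (unlock)
 * transitions; anything involving -1, i.e. waiters, is forced into
 * the slowpaths in this file.
 */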
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void __sched
mutex_lock_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_nested);
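
/*
 * Usage sketch (illustrative; 'a' and 'b' are made-up objects of the
 * same lock class): when two such mutexes must be held at once, e.g.
 * taken in address order, annotate the inner one with a subclass so
 * lockdep does not report a false self-deadlock:
 *
 *	mutex_lock(&a->lock);
 *	mutex_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
 *	...
 *	mutex_unlock(&b->lock);
 *	mutex_unlock(&a->lock);
 */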
void __sched
_mutex_lock_nest_lock(struct mutex *lock, struct lockdep_map *nest)
{
	might_sleep();
	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, nest, _RET_IP_);
}
EXPORT_SYMBOL_GPL(_mutex_lock_nest_lock);

int __sched
mutex_lock_killable_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_KILLABLE, subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_killable_nested);

int __sched
mutex_lock_interruptible_nested(struct mutex *lock, unsigned int subclass)
{
	might_sleep();
	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE,
				   subclass, NULL, _RET_IP_);
}
EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
#endif

/*
 * Release the lock, slowpath:
 */
static inline void
__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;

	spin_lock_mutex(&lock->wait_lock, flags);
	mutex_release(&lock->dep_map, nested, _RET_IP_);
	debug_mutex_unlock(lock);

	/*
	 * Some architectures leave the lock unlocked in the fastpath failure
	 * case, others need to leave it locked. In the latter case we have to
	 * unlock it here.
	 */
	if (__mutex_slowpath_needs_to_unlock())
		atomic_set(&lock->count, 1);

	if (!list_empty(&lock->wait_list)) {
		/* get the first entry from the wait-list: */
		struct mutex_waiter *waiter =
				list_entry(lock->wait_list.next,
					   struct mutex_waiter, list);

		debug_mutex_wake_waiter(lock, waiter);

		wake_up_process(waiter->task);
	}

	spin_unlock_mutex(&lock->wait_lock, flags);
}
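
/*
 * For example (an assumption based on the asm-generic headers, not
 * something this file relies on): the atomic-decrement based fastpath
 * (mutex-dec.h) enters this slowpath with the count still <= 0, so it
 * defines __mutex_slowpath_needs_to_unlock() as 1, while the xchg
 * based fastpath (mutex-xchg.h) has already stored 1 before calling
 * in and defines it as 0.
 */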
/*
 * Release the lock, slowpath:
 */
static __used noinline void
__mutex_unlock_slowpath(atomic_t *lock_count)
{
	__mutex_unlock_common_slowpath(lock_count, 1);
}

#ifndef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Here come the less common (and hence less performance-critical) APIs:
 * mutex_lock_interruptible() and mutex_trylock().
 */
static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count);

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count);

/**
 * mutex_lock_interruptible - acquire the mutex, interruptible
 * @lock: the mutex to be acquired
 *
 * Lock the mutex like mutex_lock(), and return 0 if the mutex has
 * been acquired, sleeping until it becomes available if necessary.
 * If a signal arrives while waiting for the lock then this function
 * returns -EINTR.
 *
 * This function is similar to (but not equivalent to) down_interruptible().
 */
int __sched mutex_lock_interruptible(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_interruptible_slowpath);
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_lock_interruptible);
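
/*
 * Usage sketch (illustrative): the return value must be checked,
 * since the mutex is not held when -EINTR comes back:
 *
 *	if (mutex_lock_interruptible(&obj->lock))
 *		return -ERESTARTSYS;
 *	... critical section ...
 *	mutex_unlock(&obj->lock);
 */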
int __sched mutex_lock_killable(struct mutex *lock)
{
	int ret;

	might_sleep();
	ret = __mutex_fastpath_lock_retval
			(&lock->count, __mutex_lock_killable_slowpath);
	if (!ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_lock_killable);

static __used noinline void __sched
__mutex_lock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	__mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_killable_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_KILLABLE, 0, NULL, _RET_IP_);
}

static noinline int __sched
__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);

	return __mutex_lock_common(lock, TASK_INTERRUPTIBLE, 0, NULL, _RET_IP_);
}
#endif

/*
 * Spinlock based trylock: we take the spinlock and check whether we
 * can get the lock.
 */
static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
{
	struct mutex *lock = container_of(lock_count, struct mutex, count);
	unsigned long flags;
	int prev;

	spin_lock_mutex(&lock->wait_lock, flags);

	prev = atomic_xchg(&lock->count, -1);
	if (likely(prev == 1)) {
		mutex_set_owner(lock);
		mutex_acquire(&lock->dep_map, 0, 1, _RET_IP_);
	}

	/* Set it back to 0 if there are no waiters: */
	if (likely(list_empty(&lock->wait_list)))
		atomic_set(&lock->count, 0);

	spin_unlock_mutex(&lock->wait_lock, flags);

	return prev == 1;
}

/**
 * mutex_trylock - try to acquire the mutex, without waiting
 * @lock: the mutex to be acquired
 *
 * Try to acquire the mutex atomically. Returns 1 if the mutex
 * has been acquired successfully, and 0 on contention.
 *
 * NOTE: this function follows the spin_trylock() convention, so
 * it is negated from the down_trylock() return values! Be careful
 * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
 * mutex must be released by the same task that acquired it.
 */
int __sched mutex_trylock(struct mutex *lock)
{
	int ret;

	ret = __mutex_fastpath_trylock(&lock->count, __mutex_trylock_slowpath);
	if (ret)
		mutex_set_owner(lock);

	return ret;
}
EXPORT_SYMBOL(mutex_trylock);
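
/*
 * Usage sketch (illustrative) - note the spin_trylock() style return
 * convention, where nonzero means the lock was taken:
 *
 *	if (mutex_trylock(&obj->lock)) {
 *		... got the lock, do the work ...
 *		mutex_unlock(&obj->lock);
 *	} else {
 *		... contended: back off or fall back to mutex_lock() ...
 *	}
 */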
/**
 * atomic_dec_and_mutex_lock - return holding mutex if we dec to 0
 * @cnt: the atomic which we are to dec
 * @lock: the mutex to return holding if we dec to 0
 *
 * Return true and hold the lock if the decrement reached 0,
 * return false otherwise.
 */
int atomic_dec_and_mutex_lock(atomic_t *cnt, struct mutex *lock)
{
	/* dec if we can't possibly hit 0 */
	if (atomic_add_unless(cnt, -1, 1))
		return 0;
	/* we might hit 0, so take the lock */
	mutex_lock(lock);
	if (!atomic_dec_and_test(cnt)) {
		/* when we actually did the dec, we didn't hit 0 */
		mutex_unlock(lock);
		return 0;
	}
	/* we hit 0, and we hold the lock */
	return 1;
}
EXPORT_SYMBOL(atomic_dec_and_mutex_lock);
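
/*
 * Usage sketch (illustrative; 'obj', 'registry_lock' and the list are
 * made-up names): the classic use is dropping a reference where the
 * final put must run under a mutex, e.g. to unhash the object before
 * freeing it:
 *
 *	if (atomic_dec_and_mutex_lock(&obj->refcnt, &registry_lock)) {
 *		list_del(&obj->node);
 *		mutex_unlock(&registry_lock);
 *		kfree(obj);
 *	}
 */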