/* rwsem.h: R/W semaphores, public interface
 *
 * Written by David Howells (dhowells@redhat.com).
 * Derived from asm-i386/semaphore.h
 */

#ifndef _LINUX_RWSEM_H
#define _LINUX_RWSEM_H

#include <linux/linkage.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

struct rw_semaphore;

#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
#include <linux/rwsem-spinlock.h> /* use a generic implementation */
#else
/* All arch specific implementations share the same struct */
struct rw_semaphore {
        long count;
        raw_spinlock_t wait_lock;
        struct list_head wait_list;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map dep_map;
#endif
};

extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem);

/* Include the arch specific part */
#include <asm/rwsem.h>

/* In all implementations count != 0 means locked */
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
        return sem->count != 0;
}
#endif
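
/*
 * Example (illustrative sketch, not part of the original header):
 * rwsem_is_locked() is mainly useful for sanity checks, e.g. asserting
 * that the caller already holds the semaphore.  The helper name below is
 * made up for illustration.
 *
 *	static void assert_frob_table_locked(struct rw_semaphore *sem)
 *	{
 *		WARN_ON(!rwsem_is_locked(sem));
 *	}
 */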

/* Common initializer macros and functions */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif

#define __RWSEM_INITIALIZER(name) \
        { RWSEM_UNLOCKED_VALUE, \
          __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
          LIST_HEAD_INIT((name).wait_list) \
          __RWSEM_DEP_MAP_INIT(name) }

#define DECLARE_RWSEM(name) \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name)
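
/*
 * Example (sketch, not part of the original header): DECLARE_RWSEM()
 * defines and statically initialises a semaphore in one step.  The name
 * frob_sem is made up.
 *
 *	static DECLARE_RWSEM(frob_sem);
 *
 * which expands to roughly:
 *
 *	static struct rw_semaphore frob_sem = __RWSEM_INITIALIZER(frob_sem);
 */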

extern void __init_rwsem(struct rw_semaphore *sem, const char *name,
                         struct lock_class_key *key);

#define init_rwsem(sem) \
do { \
        static struct lock_class_key __key; \
        \
        __init_rwsem((sem), #sem, &__key); \
} while (0)
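
/*
 * Example (sketch): init_rwsem() is for semaphores that cannot be
 * statically initialised, e.g. ones embedded in dynamically allocated
 * objects.  struct frob_dev and its allocation are hypothetical.
 *
 *	struct frob_dev {
 *		struct rw_semaphore sem;
 *		int count;
 *	};
 *
 *	struct frob_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
 *
 *	if (dev)
 *		init_rwsem(&dev->sem);
 */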

/*
 * lock for reading
 */
extern void down_read(struct rw_semaphore *sem);

/*
 * trylock for reading -- returns 1 if successful, 0 if contention
 */
extern int down_read_trylock(struct rw_semaphore *sem);
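
/*
 * Example (sketch): a typical reader side.  Several tasks may hold the
 * read lock at once, but writers are excluded while any reader holds it.
 * frob_sem and frob_count are made-up names.
 *
 *	down_read(&frob_sem);
 *	val = frob_count;
 *	up_read(&frob_sem);
 */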

/*
 * lock for writing
 */
extern void down_write(struct rw_semaphore *sem);

/*
 * trylock for writing -- returns 1 if successful, 0 if contention
 */
extern int down_write_trylock(struct rw_semaphore *sem);
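
/*
 * Example (sketch): a writer that must not wait can use the trylock form
 * and back off when the semaphore is contended.  Names are made up.
 *
 *	if (!down_write_trylock(&frob_sem))
 *		return -EBUSY;
 *	frob_count++;
 *	up_write(&frob_sem);
 *	return 0;
 */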

/*
 * release a read lock
 */
extern void up_read(struct rw_semaphore *sem);

/*
 * release a write lock
 */
extern void up_write(struct rw_semaphore *sem);
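
/*
 * Example (sketch): the common blocking writer pattern.  down_write() may
 * sleep, so this must not be used in atomic context.  Names are made up.
 *
 *	down_write(&frob_sem);
 *	frob_count++;
 *	up_write(&frob_sem);
 */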

/*
 * downgrade write lock to read lock
 */
extern void downgrade_write(struct rw_semaphore *sem);
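
/*
 * Example (sketch): update under the write lock, then downgrade so other
 * readers can proceed while this task continues to read the result.  The
 * read lock taken over by downgrade_write() is released with up_read().
 * Names are made up.
 *
 *	down_write(&frob_sem);
 *	frob_count++;
 *	downgrade_write(&frob_sem);
 *	val = frob_count;
 *	up_read(&frob_sem);
 */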

#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * nested locking. NOTE: rwsems are not allowed to recurse
 * (which occurs if the same task tries to acquire the same
 * lock instance multiple times), but multiple locks of the
 * same lock class might be taken, if the order of the locks
 * is always the same. This ordering rule can be expressed
 * to lockdep via the _nested() APIs, by enumerating the
 * subclasses that are used. (If the nesting relationship is
 * static then another method for expressing nested locking is
 * the explicit definition of lock class keys and the use of
 * lockdep_set_class() at lock initialization time.
 * See Documentation/lockdep-design.txt for more details.)
 * A usage sketch follows this #ifdef block.
 */
extern void down_read_nested(struct rw_semaphore *sem, int subclass);
extern void down_write_nested(struct rw_semaphore *sem, int subclass);
#else
# define down_read_nested(sem, subclass) down_read(sem)
# define down_write_nested(sem, subclass) down_write(sem)
#endif
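
/*
 * Example (sketch): two semaphores of the same lock class (say, one per
 * hypothetical "frob" object) that are always taken parent before child.
 * Telling lockdep about the nesting via down_read_nested() avoids a false
 * positive report; SINGLE_DEPTH_NESTING is the usual subclass value from
 * linux/lockdep.h.
 *
 *	down_read(&parent->sem);
 *	down_read_nested(&child->sem, SINGLE_DEPTH_NESTING);
 *	...
 *	up_read(&child->sem);
 *	up_read(&parent->sem);
 */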

#endif /* _LINUX_RWSEM_H */