/*
 * atomic32.c: 32-bit atomic_t implementation
 *
 * Copyright (C) 2004 Keith M Wesolowski
 * Copyright (C) 2007 Kyle McMartin
 *
 * Based on asm-parisc/atomic.h Copyright (C) 2000 Philipp Rumpf
 */
#include <linux/atomic.h>
#include <linux/spinlock.h>
#include <linux/module.h>
  12. #ifdef CONFIG_SMP
  13. #define ATOMIC_HASH_SIZE 4
  14. #define ATOMIC_HASH(a) (&__atomic_hash[(((unsigned long)a)>>8) & (ATOMIC_HASH_SIZE-1)])
  15. spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
  16. [0 ... (ATOMIC_HASH_SIZE-1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash)
  17. };
  18. #else /* SMP */
  19. static DEFINE_SPINLOCK(dummy);
  20. #define ATOMIC_HASH_SIZE 1
  21. #define ATOMIC_HASH(a) (&dummy)
  22. #endif /* SMP */
  23. int __atomic_add_return(int i, atomic_t *v)
  24. {
  25. int ret;
  26. unsigned long flags;
  27. spin_lock_irqsave(ATOMIC_HASH(v), flags);
  28. ret = (v->counter += i);
  29. spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
  30. return ret;
  31. }
  32. EXPORT_SYMBOL(__atomic_add_return);
  33. int atomic_cmpxchg(atomic_t *v, int old, int new)
  34. {
  35. int ret;
  36. unsigned long flags;
  37. spin_lock_irqsave(ATOMIC_HASH(v), flags);
  38. ret = v->counter;
  39. if (likely(ret == old))
  40. v->counter = new;
  41. spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
  42. return ret;
  43. }
  44. EXPORT_SYMBOL(atomic_cmpxchg);
  45. int __atomic_add_unless(atomic_t *v, int a, int u)
  46. {
  47. int ret;
  48. unsigned long flags;
  49. spin_lock_irqsave(ATOMIC_HASH(v), flags);
  50. ret = v->counter;
  51. if (ret != u)
  52. v->counter += a;
  53. spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
  54. return ret;
  55. }
  56. EXPORT_SYMBOL(__atomic_add_unless);
  57. /* Atomic operations are already serializing */
  58. void atomic_set(atomic_t *v, int i)
  59. {
  60. unsigned long flags;
  61. spin_lock_irqsave(ATOMIC_HASH(v), flags);
  62. v->counter = i;
  63. spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
  64. }
  65. EXPORT_SYMBOL(atomic_set);
  66. unsigned long ___set_bit(unsigned long *addr, unsigned long mask)
  67. {
  68. unsigned long old, flags;
  69. spin_lock_irqsave(ATOMIC_HASH(addr), flags);
  70. old = *addr;
  71. *addr = old | mask;
  72. spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
  73. return old & mask;
  74. }
  75. EXPORT_SYMBOL(___set_bit);
  76. unsigned long ___clear_bit(unsigned long *addr, unsigned long mask)
  77. {
  78. unsigned long old, flags;
  79. spin_lock_irqsave(ATOMIC_HASH(addr), flags);
  80. old = *addr;
  81. *addr = old & ~mask;
  82. spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
  83. return old & mask;
  84. }
  85. EXPORT_SYMBOL(___clear_bit);
  86. unsigned long ___change_bit(unsigned long *addr, unsigned long mask)
  87. {
  88. unsigned long old, flags;
  89. spin_lock_irqsave(ATOMIC_HASH(addr), flags);
  90. old = *addr;
  91. *addr = old ^ mask;
  92. spin_unlock_irqrestore(ATOMIC_HASH(addr), flags);
  93. return old & mask;
  94. }
  95. EXPORT_SYMBOL(___change_bit);
  96. unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
  97. {
  98. unsigned long flags;
  99. u32 prev;
  100. spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
  101. if ((prev = *ptr) == old)
  102. *ptr = new;
  103. spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
  104. return (unsigned long)prev;
  105. }
  106. EXPORT_SYMBOL(__cmpxchg_u32);