bitops_64.h

/*
 * Copyright 2011 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */
#ifndef _ASM_TILE_BITOPS_64_H
#define _ASM_TILE_BITOPS_64_H

#include <linux/compiler.h>
#include <linux/atomic.h>

/* See <asm/bitops.h> for API comments. */
static inline void set_bit(unsigned nr, volatile unsigned long *addr)
{
	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
	__insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask);
}
static inline void clear_bit(unsigned nr, volatile unsigned long *addr)
{
	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
	__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask);
}
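
/*
 * A minimal usage sketch; the "flags" bitmap and the bit index are
 * assumptions for illustration, not part of this header:
 *
 *	static DECLARE_BITMAP(flags, 64);
 *
 *	set_bit(3, flags);	atomically does flags[0] |= (1UL << 3)
 *	clear_bit(3, flags);	atomically does flags[0] &= ~(1UL << 3)
 */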

#define smp_mb__before_clear_bit()	smp_mb()
#define smp_mb__after_clear_bit()	smp_mb()

static inline void change_bit(unsigned nr, volatile unsigned long *addr)
{
	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
	unsigned long guess, oldval;

	addr += nr / BITS_PER_LONG;
	oldval = *addr;
	do {
		guess = oldval;
		oldval = atomic64_cmpxchg((atomic64_t *)addr,
					  guess, guess ^ mask);
	} while (guess != oldval);
}
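
/*
 * The loop above is the usual compare-exchange retry idiom: unlike the
 * fetchor/fetchand instructions used for set_bit()/clear_bit(), there
 * is no fetch-xor variant, so we predict the old value, attempt a
 * cmpxchg, and retry with the value actually observed until the
 * prediction holds.  A minimal sketch of the same pattern for a
 * generic atomic xor (a hypothetical helper, not part of this header):
 *
 *	static inline void atomic64_xor_sketch(atomic64_t *v, long bits)
 *	{
 *		long guess, old = atomic64_read(v);
 *		do {
 *			guess = old;
 *			old = atomic64_cmpxchg(v, guess, guess ^ bits);
 *		} while (guess != old);
 *	}
 */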

/*
 * The test_and_xxx_bit() routines require a memory fence before we
 * start the operation, and after the operation completes.  We use
 * smp_mb() before, and rely on the "!= 0" comparison, plus a compiler
 * barrier(), to block until the atomic op is complete.
 */
static inline int test_and_set_bit(unsigned nr, volatile unsigned long *addr)
{
	int val;
	unsigned long mask = (1UL << (nr % BITS_PER_LONG));

	smp_mb();  /* barrier for proper semantics */
	val = (__insn_fetchor((void *)(addr + nr / BITS_PER_LONG), mask)
	       & mask) != 0;
	barrier();
	return val;
}

static inline int test_and_clear_bit(unsigned nr, volatile unsigned long *addr)
{
	int val;
	unsigned long mask = (1UL << (nr % BITS_PER_LONG));

	smp_mb();  /* barrier for proper semantics */
	val = (__insn_fetchand((void *)(addr + nr / BITS_PER_LONG), ~mask)
	       & mask) != 0;
	barrier();
	return val;
}

static inline int test_and_change_bit(unsigned nr,
				      volatile unsigned long *addr)
{
	unsigned long mask = (1UL << (nr % BITS_PER_LONG));
	unsigned long guess, oldval;

	addr += nr / BITS_PER_LONG;
	oldval = *addr;
	do {
		guess = oldval;
		oldval = atomic64_cmpxchg((atomic64_t *)addr,
					  guess, guess ^ mask);
	} while (guess != oldval);
	return (oldval & mask) != 0;
}
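
/*
 * A minimal usage sketch of test_and_set_bit() as a simple try-lock;
 * the LOCK_BIT name and the "busy" word are assumptions for
 * illustration, not part of this header:
 *
 *	#define LOCK_BIT 0
 *	static unsigned long busy;
 *
 *	if (!test_and_set_bit(LOCK_BIT, &busy)) {
 *		... critical section ...
 *		smp_mb__before_clear_bit();
 *		clear_bit(LOCK_BIT, &busy);
 *	}
 */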

#include <asm-generic/bitops/ext2-atomic-setbit.h>

#endif /* _ASM_TILE_BITOPS_64_H */