/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * These routines make two important assumptions:
 *
 * 1. atomic_t is really an int and can be freely cast back and forth
 *    (validated in __init_atomic_per_cpu).
 *
 * 2. userspace uses sys_cmpxchg() for all atomic operations, thus using
 *    the same locking convention that all the kernel atomic routines use.
 */
  22. #ifndef _ASM_TILE_FUTEX_H
  23. #define _ASM_TILE_FUTEX_H
  24. #ifndef __ASSEMBLY__
  25. #include <linux/futex.h>
  26. #include <linux/uaccess.h>
  27. #include <linux/errno.h>
  28. extern struct __get_user futex_set(u32 __user *v, int i);
  29. extern struct __get_user futex_add(u32 __user *v, int n);
  30. extern struct __get_user futex_or(u32 __user *v, int n);
  31. extern struct __get_user futex_andn(u32 __user *v, int n);
  32. extern struct __get_user futex_cmpxchg(u32 __user *v, int o, int n);
  33. #ifndef __tilegx__
  34. extern struct __get_user futex_xor(u32 __user *v, int n);
  35. #else
  36. static inline struct __get_user futex_xor(u32 __user *uaddr, int n)
  37. {
  38. struct __get_user asm_ret = __get_user_4(uaddr);
  39. if (!asm_ret.err) {
  40. int oldval, newval;
  41. do {
  42. oldval = asm_ret.val;
  43. newval = oldval ^ n;
  44. asm_ret = futex_cmpxchg(uaddr, oldval, newval);
  45. } while (asm_ret.err == 0 && oldval != asm_ret.val);
  46. }
  47. return asm_ret;
  48. }
  49. #endif
  50. static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
  51. {
  52. int op = (encoded_op >> 28) & 7;
  53. int cmp = (encoded_op >> 24) & 15;
  54. int oparg = (encoded_op << 8) >> 20;
  55. int cmparg = (encoded_op << 20) >> 20;
  56. int ret;
  57. struct __get_user asm_ret;
  58. if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
  59. oparg = 1 << oparg;
  60. if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
  61. return -EFAULT;
  62. pagefault_disable();
  63. switch (op) {
  64. case FUTEX_OP_SET:
  65. asm_ret = futex_set(uaddr, oparg);
  66. break;
  67. case FUTEX_OP_ADD:
  68. asm_ret = futex_add(uaddr, oparg);
  69. break;
  70. case FUTEX_OP_OR:
  71. asm_ret = futex_or(uaddr, oparg);
  72. break;
  73. case FUTEX_OP_ANDN:
  74. asm_ret = futex_andn(uaddr, oparg);
  75. break;
  76. case FUTEX_OP_XOR:
  77. asm_ret = futex_xor(uaddr, oparg);
  78. break;
  79. default:
  80. asm_ret.err = -ENOSYS;
  81. }
  82. pagefault_enable();
  83. ret = asm_ret.err;
  84. if (!ret) {
  85. switch (cmp) {
  86. case FUTEX_OP_CMP_EQ:
  87. ret = (asm_ret.val == cmparg);
  88. break;
  89. case FUTEX_OP_CMP_NE:
  90. ret = (asm_ret.val != cmparg);
  91. break;
  92. case FUTEX_OP_CMP_LT:
  93. ret = (asm_ret.val < cmparg);
  94. break;
  95. case FUTEX_OP_CMP_GE:
  96. ret = (asm_ret.val >= cmparg);
  97. break;
  98. case FUTEX_OP_CMP_LE:
  99. ret = (asm_ret.val <= cmparg);
  100. break;
  101. case FUTEX_OP_CMP_GT:
  102. ret = (asm_ret.val > cmparg);
  103. break;
  104. default:
  105. ret = -ENOSYS;
  106. }
  107. }
  108. return ret;
  109. }
  110. static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
  111. u32 oldval, u32 newval)
  112. {
  113. struct __get_user asm_ret;
  114. if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
  115. return -EFAULT;
  116. asm_ret = futex_cmpxchg(uaddr, oldval, newval);
  117. *uval = asm_ret.val;
  118. return asm_ret.err;
  119. }
  120. #ifndef __tilegx__
  121. /* Return failure from the atomic wrappers. */
  122. struct __get_user __atomic_bad_address(int __user *addr);
  123. #endif
  124. #endif /* !__ASSEMBLY__ */
  125. #endif /* _ASM_TILE_FUTEX_H */