atomic.h
/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#ifndef _ASM_ARC_ATOMIC_H
#define _ASM_ARC_ATOMIC_H

#ifndef __ASSEMBLY__

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>
#include <asm/smp.h>

#define atomic_read(v)	((v)->counter)
#ifdef CONFIG_ARC_HAS_LLSC

#define atomic_set(v, i)	(((v)->counter) = (i))

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned int temp;						\
									\
	__asm__ __volatile__(						\
	"1:	llock   %0, [%1]	\n"				\
	"	" #asm_op " %0, %0, %2	\n"				\
	"	scond   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(temp)	/* Early clobber, to prevent reg reuse */	\
	: "r"(&v->counter), "ir"(i)					\
	: "cc");							\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned int temp;						\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	__asm__ __volatile__(						\
	"1:	llock   %0, [%1]	\n"				\
	"	" #asm_op " %0, %0, %2	\n"				\
	"	scond   %0, [%1]	\n"				\
	"	bnz     1b		\n"				\
	: "=&r"(temp)							\
	: "r"(&v->counter), "ir"(i)					\
	: "cc");							\
									\
	smp_mb();							\
									\
	return temp;							\
}
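
/*
 * Illustrative note (not in the original header): with LL/SC enabled,
 * ATOMIC_OP(add, +=, add) above expands to roughly the following, the
 * llock/scond pair retrying until the store-conditional succeeds:
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		unsigned int temp;
 *
 *		__asm__ __volatile__(
 *		"1:	llock   %0, [%1]	\n"
 *		"	add %0, %0, %2	\n"
 *		"	scond   %0, [%1]	\n"
 *		"	bnz     1b		\n"
 *		: "=&r"(temp)
 *		: "r"(&v->counter), "ir"(i)
 *		: "cc");
 *	}
 */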

#else	/* !CONFIG_ARC_HAS_LLSC */

#ifndef CONFIG_SMP

 /* violating atomic_xxx API locking protocol in UP for optimization sake */
#define atomic_set(v, i)	(((v)->counter) = (i))

#else

static inline void atomic_set(atomic_t *v, int i)
{
	/*
	 * Independent of hardware support, all of the atomic_xxx() APIs need
	 * to follow the same locking rules to make sure that a "hardware"
	 * atomic insn (e.g. LD) doesn't clobber an "emulated" atomic insn
	 * sequence
	 *
	 * Thus atomic_set() despite being 1 insn (and seemingly atomic)
	 * requires the locking.
	 */
	unsigned long flags;

	atomic_ops_lock(flags);
	v->counter = i;
	atomic_ops_unlock(flags);
}

#endif

/*
 * Non hardware assisted Atomic-R-M-W
 * Locking would change to irq-disabling only (UP) and spinlocks (SMP)
 */

#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long flags;						\
									\
	atomic_ops_lock(flags);						\
	v->counter c_op i;						\
	atomic_ops_unlock(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	unsigned long temp;						\
									\
	/*								\
	 * spin lock/unlock provides the needed smp_mb() before/after	\
	 */								\
	atomic_ops_lock(flags);						\
	temp = v->counter;						\
	temp c_op i;							\
	v->counter = temp;						\
	atomic_ops_unlock(flags);					\
									\
	return temp;							\
}
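
/*
 * Illustrative note (not in the original header): without LL/SC,
 * ATOMIC_OP_RETURN(add, +=, add) above expands to roughly the following,
 * relying on atomic_ops_lock()/atomic_ops_unlock() (irq-disabling on UP,
 * a spinlock on SMP) for atomicity and ordering:
 *
 *	static inline int atomic_add_return(int i, atomic_t *v)
 *	{
 *		unsigned long flags;
 *		unsigned long temp;
 *
 *		atomic_ops_lock(flags);
 *		temp = v->counter;
 *		temp += i;
 *		v->counter = temp;
 *		atomic_ops_unlock(flags);
 *
 *		return temp;
 *	}
 */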

#endif /* !CONFIG_ARC_HAS_LLSC */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)
ATOMIC_OP(and, &=, and)

#define atomic_clear_mask(mask, v) atomic_and(~(mask), (v))

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
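
/*
 * Illustrative note (not in the original header): the instantiations above
 * generate the core arithmetic API, i.e. atomic_add(), atomic_add_return(),
 * atomic_sub(), atomic_sub_return() and atomic_and(), using whichever
 * ATOMIC_OP/ATOMIC_OP_RETURN variant (LL/SC or lock-based) was selected.
 * A typical caller might look like:
 *
 *	static atomic_t pending = ATOMIC_INIT(0);
 *
 *	atomic_add(3, &pending);
 *	if (atomic_sub_return(3, &pending) == 0)
 *		... all outstanding work drained ...
 */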

/**
 * __atomic_add_unless - add unless the number is a given value
 * @v: pointer of type atomic_t
 * @a: the amount to add to v...
 * @u: ...unless v is equal to u.
 *
 * Atomically adds @a to @v, so long as it was not @u.
 * Returns the old value of @v
 */
#define __atomic_add_unless(v, a, u)					\
({									\
	int c, old;							\
									\
	/*								\
	 * Explicit full memory barrier needed before/after as		\
	 * LLOCK/SCOND themselves don't provide any such semantics	\
	 */								\
	smp_mb();							\
									\
	c = atomic_read(v);						\
	while (c != (u) && (old = atomic_cmpxchg((v), c, c + (a))) != c)\
		c = old;						\
									\
	smp_mb();							\
									\
	c;								\
})
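
/*
 * Illustrative note (not in the original header): __atomic_add_unless()
 * retries a cmpxchg loop until either the add succeeds or the current
 * value equals @u, and returns the old value. The generic
 * atomic_add_unless() wrapper in <linux/atomic.h> builds on it, e.g.:
 *
 *	// hypothetical "take a reference unless already dead" helper
 *	static inline bool obj_tryget(atomic_t *refcnt)
 *	{
 *		return atomic_add_unless(refcnt, 1, 0);
 *	}
 */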

#define atomic_inc_not_zero(v)		atomic_add_unless((v), 1, 0)

#define atomic_inc(v)			atomic_add(1, v)
#define atomic_dec(v)			atomic_sub(1, v)

#define atomic_inc_and_test(v)		(atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v)		(atomic_sub_return(1, v) == 0)
#define atomic_inc_return(v)		atomic_add_return(1, (v))
#define atomic_dec_return(v)		atomic_sub_return(1, (v))
#define atomic_sub_and_test(i, v)	(atomic_sub_return(i, v) == 0)

#define atomic_add_negative(i, v)	(atomic_add_return(i, v) < 0)

#define ATOMIC_INIT(i)			{ (i) }
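
/*
 * Illustrative note (not in the original header): the wrappers above give
 * the usual kernel idioms, e.g. reference counting with
 * atomic_inc()/atomic_dec_and_test():
 *
 *	struct obj {
 *		atomic_t refcnt;	// initialised with ATOMIC_INIT(1)
 *	};
 *
 *	void obj_put(struct obj *o)
 *	{
 *		if (atomic_dec_and_test(&o->refcnt))
 *			kfree(o);
 *	}
 */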

#include <asm-generic/atomic64.h>

#endif	/* !__ASSEMBLY__ */

#endif	/* _ASM_ARC_ATOMIC_H */