atomic.h

#ifndef __ARCH_M68K_ATOMIC__
#define __ARCH_M68K_ATOMIC__

#include <linux/types.h>
#include <linux/irqflags.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

/*
 * Atomic operations that C can't guarantee us.  Useful for
 * resource counting etc..
 */

/*
 * We do not have SMP m68k systems, so we don't have to deal with that.
 */

#define ATOMIC_INIT(i)		{ (i) }

#define atomic_read(v)		ACCESS_ONCE((v)->counter)
#define atomic_set(v, i)	(((v)->counter) = i)

/*
 * The ColdFire parts cannot do some immediate to memory operations,
 * so for them we do not specify the "i" asm constraint.
 */
#ifdef CONFIG_COLDFIRE
#define ASM_DI	"d"
#else
#define ASM_DI	"di"
#endif
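/*
 * For reference: the "di" constraint lets gcc pass the operand either as
 * an immediate or in a data register, so plain m68k parts may end up with
 * an immediate-to-memory form such as "addl #42,(%a0)".  The ColdFire-only
 * "d" constraint forces the value into a data register first (for example
 * "addl %d1,(%a0)"), because ColdFire cannot encode some of those
 * immediate-to-memory instructions.
 */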
#define ATOMIC_OP(op, c_op, asm_op)					\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	__asm__ __volatile__(#asm_op "l %1,%0" : "+m" (*v) : ASM_DI (i));\
}									\

#ifdef CONFIG_RMW_INSNS

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	int t, tmp;							\
									\
	__asm__ __volatile__(						\
			"1:	movel %2,%1\n"				\
			"	" #asm_op "l %3,%1\n"			\
			"	casl %2,%1,%0\n"			\
			"	jne 1b"					\
			: "+m" (*v), "=&d" (t), "=&d" (tmp)		\
			: "g" (i), "2" (atomic_read(v)));		\
	return t;							\
}
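/*
 * How the loop above works: the expected old value of the counter is read
 * into %2 (seeded from atomic_read(v)), copied into %1, and the operation
 * is applied to %1.  "casl %2,%1,%0" then stores %1 to the counter only if
 * the counter still equals %2; otherwise casl loads the current counter
 * value into %2 and "jne 1b" retries with that fresh value.
 */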
#else

#define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long flags;						\
	int t;								\
									\
	local_irq_save(flags);						\
	t = (v->counter c_op i);					\
	local_irq_restore(flags);					\
									\
	return t;							\
}

#endif /* CONFIG_RMW_INSNS */

#define ATOMIC_OPS(op, c_op, asm_op)					\
	ATOMIC_OP(op, c_op, asm_op)					\
	ATOMIC_OP_RETURN(op, c_op, asm_op)

ATOMIC_OPS(add, +=, add)
ATOMIC_OPS(sub, -=, sub)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
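/*
 * As a rough sketch, ATOMIC_OPS(add, +=, add) therefore generates
 *
 *	static inline void atomic_add(int i, atomic_t *v)
 *	{
 *		__asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i));
 *	}
 *
 * plus the matching atomic_add_return(), and ATOMIC_OPS(sub, -=, sub) does
 * the same for the subtraction variants.  On a UP m68k kernel a single
 * read-modify-write instruction on memory cannot be interrupted part-way,
 * which is why the void operations need no further protection.
 */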
static inline void atomic_inc(atomic_t *v)
{
	__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}

static inline void atomic_dec(atomic_t *v)
{
	__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}
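/*
 * The *_and_test helpers below use the m68k "scc" instructions to turn the
 * condition codes left by the addql/subql into a byte result: "seq" sets
 * the byte to 0xff when the new counter value is zero, "smi" when it is
 * negative, and "slt" when it is less than zero taking overflow into
 * account.  That byte is then returned as a C truth value.
 */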
static inline int atomic_dec_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

static inline int atomic_dec_and_test_lt(atomic_t *v)
{
	char c;
	__asm__ __volatile__(
		"subql #1,%1; slt %0"
		: "=d" (c), "=m" (*v)
		: "m" (*v));
	return c != 0;
}

static inline int atomic_inc_and_test(atomic_t *v)
{
	char c;
	__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
	return c != 0;
}

#ifdef CONFIG_RMW_INSNS

#define atomic_cmpxchg(v, o, n)	((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))

#else /* !CONFIG_RMW_INSNS */

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	if (prev == old)
		atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}

static inline int atomic_xchg(atomic_t *v, int new)
{
	unsigned long flags;
	int prev;

	local_irq_save(flags);
	prev = atomic_read(v);
	atomic_set(v, new);
	local_irq_restore(flags);
	return prev;
}

#endif /* !CONFIG_RMW_INSNS */
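/*
 * A typical caller pattern for atomic_cmpxchg() is a retry loop of the same
 * shape as __atomic_add_unless() further down; a minimal sketch (not code
 * from this header, the doubling update is only an example):
 *
 *	int old = atomic_read(v);
 *	for (;;) {
 *		int prev = atomic_cmpxchg(v, old, old * 2);
 *		if (prev == old)
 *			break;		(our update won)
 *		old = prev;		(lost the race, retry)
 *	}
 */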
#define atomic_dec_return(v)	atomic_sub_return(1, (v))
#define atomic_inc_return(v)	atomic_add_return(1, (v))

static inline int atomic_sub_and_test(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("subl %2,%1; seq %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}

static inline int atomic_add_negative(int i, atomic_t *v)
{
	char c;
	__asm__ __volatile__("addl %2,%1; smi %0"
			     : "=d" (c), "+m" (*v)
			     : ASM_DI (i));
	return c != 0;
}

static inline void atomic_clear_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("andl %1,%0" : "+m" (*v) : ASM_DI (~(mask)));
}

static inline void atomic_set_mask(unsigned long mask, unsigned long *v)
{
	__asm__ __volatile__("orl %1,%0" : "+m" (*v) : ASM_DI (mask));
}
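/*
 * Note that atomic_clear_mask() and atomic_set_mask() act on a plain
 * unsigned long word rather than an atomic_t; for example (illustrative
 * only):
 *
 *	unsigned long pending;
 *	atomic_set_mask(1UL << 3, &pending);	(sets bit 3)
 *	atomic_clear_mask(1UL << 3, &pending);	(clears it again)
 */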
static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
{
	int c, old;

	c = atomic_read(v);
	for (;;) {
		if (unlikely(c == (u)))
			break;
		old = atomic_cmpxchg((v), c, c + (a));
		if (likely(old == c))
			break;
		c = old;
	}
	return c;
}
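/*
 * __atomic_add_unless() returns the counter value seen before the attempted
 * addition; the generic wrappers in <linux/atomic.h> build atomic_add_unless()
 * and atomic_inc_not_zero() on top of it, so atomic_inc_not_zero(v) is
 * effectively atomic_add_unless(v, 1, 0).
 */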
#endif /* __ARCH_M68K_ATOMIC__ */