/*
 * cmpxchg.h — x86 size-generic atomic exchange / compare-and-exchange /
 * exchange-and-add helpers, dispatched on operand size at compile time.
 */
#ifndef ASM_X86_CMPXCHG_H
#define ASM_X86_CMPXCHG_H

#include <linux/compiler.h>
#include <asm/alternative.h> /* Provides LOCK_PREFIX */

#define __HAVE_ARCH_CMPXCHG 1

/*
 * Non-existent functions to indicate usage errors at link time
 * (or compile-time if the compiler implements __compiletime_error()).
 * They are never defined anywhere: reaching one of these calls in the
 * size-dispatch switches below means the operand size was unsupported.
 */
extern void __xchg_wrong_size(void)
	__compiletime_error("Bad argument size for xchg");
extern void __cmpxchg_wrong_size(void)
	__compiletime_error("Bad argument size for cmpxchg");
extern void __xadd_wrong_size(void)
	__compiletime_error("Bad argument size for xadd");
extern void __add_wrong_size(void)
	__compiletime_error("Bad argument size for add");
/*
 * Constants for operation sizes. On 32-bit, the 64-bit size is set to
 * -1 because sizeof will never return -1, thereby making those switch
 * case statements guaranteed dead code which the compiler will
 * eliminate, and allowing the "missing symbol in the default case" to
 * indicate a usage error.
 */
#define __X86_CASE_B	1
#define __X86_CASE_W	2
#define __X86_CASE_L	4
#ifdef CONFIG_64BIT
#define __X86_CASE_Q	8
#else
#define __X86_CASE_Q	-1		/* sizeof will never return -1 */
#endif
/*
 * An exchange-type operation, which takes a value and a pointer, and
 * returns the old value.
 *
 * "op" is pasted into the mnemonic (xchg, xadd, ...) with a width
 * suffix chosen by sizeof(*(ptr)); an unsupported size falls through
 * to the non-existent __<op>_wrong_size(), producing a build error.
 * The byte case needs "q" (a byte-addressable register); wider cases
 * can use any general register ("r").
 */
#define __xchg_op(ptr, arg, op, lock)					\
	({								\
		__typeof__ (*(ptr)) __ret = (arg);			\
		switch (sizeof(*(ptr))) {				\
		case __X86_CASE_B:					\
			asm volatile (lock #op "b %b0, %1\n"		\
				      : "+q" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_W:					\
			asm volatile (lock #op "w %w0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_L:					\
			asm volatile (lock #op "l %0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		case __X86_CASE_Q:					\
			asm volatile (lock #op "q %q0, %1\n"		\
				      : "+r" (__ret), "+m" (*(ptr))	\
				      : : "memory", "cc");		\
			break;						\
		default:						\
			__ ## op ## _wrong_size();			\
		}							\
		__ret;							\
	})
/*
 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
 * Since this is generally used to protect other memory information, we
 * use "asm volatile" and "memory" clobbers to prevent gcc from moving
 * information around.
 */
#define xchg(ptr, v)	__xchg_op((ptr), (v), xchg, "")
/*
 * Atomic compare and exchange. Compare OLD with MEM, if identical,
 * store NEW in MEM. Return the initial value in MEM. Success is
 * indicated by comparing RETURN with OLD.
 *
 * CMPXCHG hard-wires the accumulator register: __old goes in through
 * the "0"/"=a" pairing and the previous memory value comes back out
 * of the same register into __ret. The byte case needs a
 * byte-addressable register ("q") for the new value.
 */
#define __raw_cmpxchg(ptr, old, new, size, lock)			\
({									\
	__typeof__(*(ptr)) __ret;					\
	__typeof__(*(ptr)) __old = (old);				\
	__typeof__(*(ptr)) __new = (new);				\
	switch (size) {							\
	case __X86_CASE_B:						\
	{								\
		volatile u8 *__ptr = (volatile u8 *)(ptr);		\
		asm volatile(lock "cmpxchgb %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "q" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_W:						\
	{								\
		volatile u16 *__ptr = (volatile u16 *)(ptr);		\
		asm volatile(lock "cmpxchgw %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_L:						\
	{								\
		volatile u32 *__ptr = (volatile u32 *)(ptr);		\
		asm volatile(lock "cmpxchgl %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	case __X86_CASE_Q:						\
	{								\
		volatile u64 *__ptr = (volatile u64 *)(ptr);		\
		asm volatile(lock "cmpxchgq %2,%1"			\
			     : "=a" (__ret), "+m" (*__ptr)		\
			     : "r" (__new), "0" (__old)			\
			     : "memory");				\
		break;							\
	}								\
	default:							\
		__cmpxchg_wrong_size();					\
	}								\
	__ret;								\
})
/* Locked on SMP (LOCK_PREFIX is patched out on uniprocessor kernels). */
#define __cmpxchg(ptr, old, new, size)					\
	__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

/* Always carries an explicit LOCK, even on UP builds. */
#define __sync_cmpxchg(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

/* No LOCK at all: atomic only with respect to the local CPU. */
#define __cmpxchg_local(ptr, old, new, size)				\
	__raw_cmpxchg((ptr), (old), (new), (size), "")
/* Width-specific variants (e.g. 64-bit helpers) live in the per-arch header. */
#ifdef CONFIG_X86_32
# include <asm/cmpxchg_32.h>
#else
# include <asm/cmpxchg_64.h>
#endif

/* Size-generic front ends: operand width is derived from the pointee type. */
#define cmpxchg(ptr, old, new)						\
	__cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define sync_cmpxchg(ptr, old, new)					\
	__sync_cmpxchg(ptr, old, new, sizeof(*(ptr)))

#define cmpxchg_local(ptr, old, new)					\
	__cmpxchg_local(ptr, old, new, sizeof(*(ptr)))
/*
 * xadd() adds "inc" to "*ptr" and atomically returns the previous
 * value of "*ptr".
 *
 * xadd() is locked when multiple CPUs are online
 * xadd_sync() is always locked
 * xadd_local() is never locked
 */
#define __xadd(ptr, inc, lock)	__xchg_op((ptr), (inc), xadd, lock)
#define xadd(ptr, inc)		__xadd((ptr), (inc), LOCK_PREFIX)
#define xadd_sync(ptr, inc)	__xadd((ptr), (inc), "lock; ")
#define xadd_local(ptr, inc)	__xadd((ptr), (inc), "")
/*
 * __add() adds "inc" to "*ptr" with the given lock prefix.
 *
 * Unlike xadd(), the statement expression evaluates to "inc" itself:
 * __ret is just a copy of the increment and is never touched by the
 * asm, which only reads the "qi"/"ri" input operand. "i" allows an
 * immediate-form ADD when inc is a compile-time constant.
 */
#define __add(ptr, inc, lock)						\
	({								\
		__typeof__ (*(ptr)) __ret = (inc);			\
		switch (sizeof(*(ptr))) {				\
		case __X86_CASE_B:					\
			asm volatile (lock "addb %b1, %0\n"		\
				      : "+m" (*(ptr)) : "qi" (inc)	\
				      : "memory", "cc");		\
			break;						\
		case __X86_CASE_W:					\
			asm volatile (lock "addw %w1, %0\n"		\
				      : "+m" (*(ptr)) : "ri" (inc)	\
				      : "memory", "cc");		\
			break;						\
		case __X86_CASE_L:					\
			asm volatile (lock "addl %1, %0\n"		\
				      : "+m" (*(ptr)) : "ri" (inc)	\
				      : "memory", "cc");		\
			break;						\
		case __X86_CASE_Q:					\
			asm volatile (lock "addq %1, %0\n"		\
				      : "+m" (*(ptr)) : "ri" (inc)	\
				      : "memory", "cc");		\
			break;						\
		default:						\
			__add_wrong_size();				\
		}							\
		__ret;							\
	})
/*
 * add_*() adds "inc" to "*ptr"
 *
 * __add() takes a lock prefix
 * add_smp() is locked when multiple CPUs are online
 * add_sync() is always locked
 */
#define add_smp(ptr, inc)	__add((ptr), (inc), LOCK_PREFIX)
#define add_sync(ptr, inc)	__add((ptr), (inc), "lock; ")
/*
 * Double-word compare-and-exchange (cmpxchg8b / cmpxchg16b).
 *
 * Atomically compares the pair {*p1, *p2} against {o1, o2} and, if
 * equal, stores {n1, n2}; evaluates to true on success (via sete).
 * The BUILD_BUG_ON/VM_BUG_ON checks enforce the instruction's
 * requirements: both pointees exactly long-sized, p1 aligned to
 * 2*sizeof(long), and p2 immediately following p1 in memory.
 *
 * Register use is fixed by the instruction: old pair in a:d, new pair
 * in b:c. "%c4" pastes the raw byte count (8 or 16) into the
 * mnemonic, selecting cmpxchg8b or cmpxchg16b at build time.
 */
#define __cmpxchg_double(pfx, p1, p2, o1, o2, n1, n2)			\
({									\
	bool __ret;							\
	__typeof__(*(p1)) __old1 = (o1), __new1 = (n1);			\
	__typeof__(*(p2)) __old2 = (o2), __new2 = (n2);			\
	BUILD_BUG_ON(sizeof(*(p1)) != sizeof(long));			\
	BUILD_BUG_ON(sizeof(*(p2)) != sizeof(long));			\
	VM_BUG_ON((unsigned long)(p1) % (2 * sizeof(long)));		\
	VM_BUG_ON((unsigned long)((p1) + 1) != (unsigned long)(p2));	\
	asm volatile(pfx "cmpxchg%c4b %2; sete %0"			\
		     : "=a" (__ret), "+d" (__old2),			\
		       "+m" (*(p1)), "+m" (*(p2))			\
		     : "i" (2 * sizeof(long)), "a" (__old1),		\
		       "b" (__new1), "c" (__new2));			\
	__ret;								\
})

#define cmpxchg_double(p1, p2, o1, o2, n1, n2)				\
	__cmpxchg_double(LOCK_PREFIX, p1, p2, o1, o2, n1, n2)

/* Empty prefix: no LOCK, atomic only w.r.t. the local CPU. */
#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2)			\
	__cmpxchg_double(, p1, p2, o1, o2, n1, n2)

#endif	/* ASM_X86_CMPXCHG_H */