atomic-llsc.h

#ifndef __ASM_SH_ATOMIC_LLSC_H
#define __ASM_SH_ATOMIC_LLSC_H

/*
 * SH-4A note:
 *
 * We basically get atomic_xxx_return() for free compared with
 * atomic_xxx(). movli.l/movco.l require r0 due to the instruction
 * encoding, so the retval is automatically set without having to
 * do any special work.
 */
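
/*
 * movli.l/movco.l are SH-4A's load-linked/store-conditional pair:
 * movli.l loads the word and opens a reservation, and movco.l stores
 * only if the reservation is still intact, setting the T bit on
 * success.  The "bf 1b" in the loops below retries the whole sequence
 * whenever the conditional store fails.
 */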

/*
 * To get proper branch prediction for the main line, we must branch
 * forward to code at the end of this object's .text section, then
 * branch back to restart the operation.
 */

#define ATOMIC_OP(op) \
static inline void atomic_##op(int i, atomic_t *v) \
{ \
        unsigned long tmp; \
        \
        __asm__ __volatile__ ( \
"1: movli.l @%2, %0 ! atomic_" #op "\n" \
" " #op " %1, %0 \n" \
" movco.l %0, @%2 \n" \
" bf 1b \n" \
        : "=&z" (tmp) \
        : "r" (i), "r" (&v->counter) \
        : "t"); \
}
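
/*
 * For reference, ATOMIC_OP(add) expands to roughly the following
 * (illustrative expansion only, not literal kernel source):
 *
 *      static inline void atomic_add(int i, atomic_t *v)
 *      {
 *              unsigned long tmp;
 *
 *              __asm__ __volatile__ (
 *      "1: movli.l @%2, %0 ! atomic_add\n"
 *      " add %1, %0 \n"
 *      " movco.l %0, @%2 \n"
 *      " bf 1b \n"
 *              : "=&z" (tmp)
 *              : "r" (i), "r" (&v->counter)
 *              : "t");
 *      }
 *
 * The "=&z" constraint pins tmp to r0, which is the only register
 * movli.l and movco.l accept as their data operand.
 */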

#define ATOMIC_OP_RETURN(op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
{ \
        unsigned long temp; \
        \
        __asm__ __volatile__ ( \
"1: movli.l @%2, %0 ! atomic_" #op "_return \n" \
" " #op " %1, %0 \n" \
" movco.l %0, @%2 \n" \
" bf 1b \n" \
" synco \n" \
        : "=&z" (temp) \
        : "r" (i), "r" (&v->counter) \
        : "t"); \
        \
        return temp; \
}
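
/*
 * The trailing synco is SH-4A's synchronization (memory barrier)
 * instruction.  It gives the value-returning operations the full
 * barrier semantics expected of atomic_*_return(), which the plain
 * void ATOMIC_OP() variants above do not need to provide.
 */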

#define ATOMIC_OPS(op) ATOMIC_OP(op) ATOMIC_OP_RETURN(op)

ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#undef ATOMIC_OPS
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
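
/*
 * ATOMIC_OPS(add) and ATOMIC_OPS(sub) above generate atomic_add(),
 * atomic_add_return(), atomic_sub() and atomic_sub_return().  A purely
 * illustrative caller (not part of this header) might do:
 *
 *      atomic_t v = ATOMIC_INIT(0);
 *
 *      atomic_add(5, &v);                      // v is now 5
 *      int newval = atomic_add_return(3, &v);  // newval == 8
 *      atomic_sub(2, &v);                      // v is now 6
 */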

static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
{
        unsigned long tmp;

        __asm__ __volatile__ (
"1: movli.l @%2, %0 ! atomic_clear_mask \n"
" and %1, %0 \n"
" movco.l %0, @%2 \n"
" bf 1b \n"
        : "=&z" (tmp)
        : "r" (~mask), "r" (&v->counter)
        : "t");
}

static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
{
        unsigned long tmp;

        __asm__ __volatile__ (
"1: movli.l @%2, %0 ! atomic_set_mask \n"
" or %1, %0 \n"
" movco.l %0, @%2 \n"
" bf 1b \n"
        : "=&z" (tmp)
        : "r" (mask), "r" (&v->counter)
        : "t");
}
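
/*
 * Likewise for the mask helpers (illustrative only):
 *
 *      atomic_set_mask(0x01, &v);      // atomically v.counter |= 0x01
 *      atomic_clear_mask(0x01, &v);    // atomically v.counter &= ~0x01
 */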

#endif /* __ASM_SH_ATOMIC_LLSC_H */