/* arch/x86/include/asm/kvm_para.h */

#ifndef _ASM_X86_KVM_PARA_H
#define _ASM_X86_KVM_PARA_H

#include <asm/processor.h>
#include <asm/alternative.h>
#include <uapi/asm/kvm_para.h>
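/*
 * kvmclock, KVM's paravirtual clocksource: kvmclock_init() sets it up during
 * early boot and kvm_register_clock() hands the per-CPU pvclock area to the
 * hypervisor.  Both are implemented in arch/x86/kernel/kvmclock.c.
 */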
extern void kvmclock_init(void);
extern int kvm_register_clock(char *txt);

#ifdef CONFIG_KVM_GUEST
bool kvm_check_and_clear_guest_paused(void);
#else
static inline bool kvm_check_and_clear_guest_paused(void)
{
	return false;
}
#endif /* CONFIG_KVM_GUEST */
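
/*
 * Illustrative use (a sketch, not taken from this header): watchdog-style
 * code can consult kvm_check_and_clear_guest_paused() to avoid reporting a
 * soft lockup that is really the host having paused this guest, e.g. for
 * live migration.  stall_detected() and report_soft_lockup() below are
 * hypothetical helpers:
 *
 *	if (stall_detected()) {
 *		// the host paused us; this is not a real stall
 *		if (kvm_check_and_clear_guest_paused())
 *			return;
 *		report_soft_lockup();
 *	}
 */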

#ifdef CONFIG_DEBUG_RODATA
#define KVM_HYPERCALL \
	ALTERNATIVE(".byte 0x0f,0x01,0xc1", ".byte 0x0f,0x01,0xd9", X86_FEATURE_VMMCALL)
#else
/* On AMD processors, vmcall will generate a trap that we will
 * then rewrite to the appropriate instruction.
 */
#define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1"
#endif
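
/*
 * The two byte sequences above are the hardware hypercall instructions:
 * 0f 01 c1 is vmcall (Intel VT-x) and 0f 01 d9 is vmmcall (AMD SVM).  With
 * CONFIG_DEBUG_RODATA the kernel text is read-only and cannot be rewritten
 * at fault time, so the ALTERNATIVE() form patches in vmmcall at boot when
 * the CPU advertises X86_FEATURE_VMMCALL.
 */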

/* For KVM hypercalls, a three-byte sequence of either the vmcall or the
 * vmmcall instruction.  The hypervisor may replace it with something else,
 * but only these instructions are guaranteed to be supported.
 *
 * Up to four arguments may be passed in rbx, rcx, rdx, and rsi respectively.
 * The hypercall number should be placed in rax and the return value will be
 * placed in rax.  No other registers will be clobbered unless explicitly
 * noted by the particular hypercall.
 */
static inline long kvm_hypercall0(unsigned int nr)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
				  unsigned long p2)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3,
				  unsigned long p4)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4)
		     : "memory");
	return ret;
}
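
/*
 * Example call (a sketch of a caller elsewhere in the kernel, not part of
 * this header): the paravirtual spinlock code kicks a halted vCPU roughly
 * like this, with the hypercall number in rax and the two arguments in rbx
 * and rcx as described above:
 *
 *	kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
 *
 * KVM_HC_KICK_CPU comes from the UAPI hypercall numbering; the host's
 * return code comes back in rax as the function's return value.
 */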

#ifdef CONFIG_KVM_GUEST
bool kvm_para_available(void);
unsigned int kvm_arch_para_features(void);
void __init kvm_guest_init(void);
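
/*
 * Asynchronous page faults: when the host has to fault in guest memory, it
 * can notify the guest, which puts only the faulting task to sleep via
 * kvm_async_pf_task_wait() and wakes it in kvm_async_pf_task_wake() once
 * the host reports the page is ready; kvm_read_and_reset_pf_reason()
 * fetches and clears the reason code shared with the host.  These, like the
 * other declarations here, are implemented in arch/x86/kernel/kvm.c.
 */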
void kvm_async_pf_task_wait(u32 token);
void kvm_async_pf_task_wake(u32 token);
u32 kvm_read_and_reset_pf_reason(void);
extern void kvm_disable_steal_time(void);

#ifdef CONFIG_PARAVIRT_SPINLOCKS
void __init kvm_spinlock_init(void);
#else /* !CONFIG_PARAVIRT_SPINLOCKS */
static inline void kvm_spinlock_init(void)
{
}
#endif /* CONFIG_PARAVIRT_SPINLOCKS */

#else /* CONFIG_KVM_GUEST */
#define kvm_guest_init() do {} while (0)
#define kvm_async_pf_task_wait(T) do {} while (0)
#define kvm_async_pf_task_wake(T) do {} while (0)

static inline bool kvm_para_available(void)
{
	return false;
}

static inline unsigned int kvm_arch_para_features(void)
{
	return 0;
}

static inline u32 kvm_read_and_reset_pf_reason(void)
{
	return 0;
}

static inline void kvm_disable_steal_time(void)
{
	return;
}
#endif /* CONFIG_KVM_GUEST */

#endif /* _ASM_X86_KVM_PARA_H */