xsave.h
#ifndef __ASM_X86_XSAVE_H
#define __ASM_X86_XSAVE_H

#include <linux/types.h>
#include <asm/processor.h>

#define XSTATE_CPUID		0x0000000d
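
/*
 * Extended-state component bits. These are the bit positions used in
 * XCR0 and in the xstate_bv field of the xsave header; CPUID leaf
 * XSTATE_CPUID enumerates the size and offset of each component.
 */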
#define XSTATE_FP		0x1
#define XSTATE_SSE		0x2
#define XSTATE_YMM		0x4
#define XSTATE_BNDREGS		0x8
#define XSTATE_BNDCSR		0x10
#define XSTATE_OPMASK		0x20
#define XSTATE_ZMM_Hi256	0x40
#define XSTATE_Hi16_ZMM		0x80

#define XSTATE_FPSSE		(XSTATE_FP | XSTATE_SSE)
/* Bit 63 of XCR0 is reserved for future expansion */
#define XSTATE_EXTEND_MASK	(~(XSTATE_FPSSE | (1ULL << 63)))

#define FXSAVE_SIZE		512

#define XSAVE_HDR_SIZE		64
#define XSAVE_HDR_OFFSET	FXSAVE_SIZE

#define XSAVE_YMM_SIZE		256
#define XSAVE_YMM_OFFSET	(XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)

/* Supported features which support lazy state saving */
#define XSTATE_LAZY	(XSTATE_FP | XSTATE_SSE | XSTATE_YMM		\
			| XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)

/* Supported features which require eager state saving */
#define XSTATE_EAGER	(XSTATE_BNDREGS | XSTATE_BNDCSR)

/* All currently supported features */
#define XCNTXT_MASK	(XSTATE_LAZY | XSTATE_EAGER)
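
/*
 * On 64-bit kernels the hand-encoded opcodes below are prefixed with
 * REX.W (0x48) so that the 64-bit forms of the instructions are used.
 */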
#ifdef CONFIG_X86_64
#define REX_PREFIX	"0x48, "
#else
#define REX_PREFIX
#endif

extern unsigned int xstate_size;
extern u64 pcntxt_mask;
extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
extern struct xsave_struct *init_xstate_buf;

extern void xsave_init(void);
extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);
extern int init_fpu(struct task_struct *child);

/* These macros all use (%edi)/(%rdi) as the single memory argument. */
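/*
 * The instructions are emitted as raw opcode bytes, so no assembler
 * support for the newer xsave variants is needed.
 */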
#define XSAVE		".byte " REX_PREFIX "0x0f,0xae,0x27"
#define XSAVEOPT	".byte " REX_PREFIX "0x0f,0xae,0x37"
#define XSAVES		".byte " REX_PREFIX "0x0f,0xc7,0x2f"
#define XRSTOR		".byte " REX_PREFIX "0x0f,0xae,0x2f"
#define XRSTORS		".byte " REX_PREFIX "0x0f,0xc7,0x1f"
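
/*
 * xstate_fault supplies the exception-table fixup shared by the asm
 * statements below: the caller labels the potentially faulting
 * instruction "1:" and the resume point "2:", and provides a local
 * 'err' variable; on a fault, control is redirected to "3:", which
 * stores -1 in err and jumps back to "2:".
 */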

#define xstate_fault	".section .fixup,\"ax\"\n"	\
			"3: movl $-1,%[err]\n"		\
			"   jmp 2b\n"			\
			".previous\n"			\
			_ASM_EXTABLE(1b, 3b)		\
			: [err] "=r" (err)
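
/*
 * The xsave instruction family takes the 64-bit requested-feature
 * bitmap in edx:eax, hence the lmask/hmask split in the helpers below.
 */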

/*
 * This function is called only during boot, when the x86 capability
 * bits are not yet set up and alternatives cannot be used.
 */
static inline int xsave_state_booting(struct xsave_struct *fx, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err = 0;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (boot_cpu_has(X86_FEATURE_XSAVES))
		asm volatile("1:"XSAVES"\n\t"
			"2:\n\t"
			xstate_fault
			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
			: "memory");
	else
		asm volatile("1:"XSAVE"\n\t"
			"2:\n\t"
			xstate_fault
			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
			: "memory");

	return err;
}

/*
 * This function is called only during boot, when the x86 capability
 * bits are not yet set up and alternatives cannot be used.
 */
static inline int xrstor_state_booting(struct xsave_struct *fx, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err = 0;

	WARN_ON(system_state != SYSTEM_BOOTING);

	if (boot_cpu_has(X86_FEATURE_XSAVES))
		asm volatile("1:"XRSTORS"\n\t"
			"2:\n\t"
			xstate_fault
			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
			: "memory");
	else
		asm volatile("1:"XRSTOR"\n\t"
			"2:\n\t"
			xstate_fault
			: "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
			: "memory");

	return err;
}

/*
 * Save processor xstate to xsave area.
 */
static inline int xsave_state(struct xsave_struct *fx, u64 mask)
{
	u32 lmask = mask;
	u32 hmask = mask >> 32;
	int err = 0;

	/*
	 * If xsaves is enabled, it replaces xsaveopt, because xsaves
	 * supports the compacted format and supervisor states in addition
	 * to the modified optimization that xsaveopt provides.
	 *
	 * Otherwise, if xsaveopt is enabled, it replaces xsave, because
	 * xsaveopt supports the modified optimization, which xsave does not.
	 *
	 * If neither xsaves nor xsaveopt is enabled, use xsave.
	 */
	alternative_input_2(
		"1:"XSAVE,
		XSAVEOPT,
		X86_FEATURE_XSAVEOPT,
		XSAVES,
		X86_FEATURE_XSAVES,
		[fx] "D" (fx), "a" (lmask), "d" (hmask) :
		"memory");
	asm volatile("2:\n\t"
		xstate_fault
		: "0" (0)
		: "memory");

	return err;
}

/*
 * Restore processor xstate from xsave area.
 */
static inline int xrstor_state(struct xsave_struct *fx, u64 mask)
{
	int err = 0;
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	/*
	 * Use xrstors to restore the context if it is enabled. xrstors
	 * supports the compacted xsave area format, which xrstor does not.
	 */
	alternative_input(
		"1: " XRSTOR,
		XRSTORS,
		X86_FEATURE_XSAVES,
		"D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
		: "memory");

	asm volatile("2:\n"
		xstate_fault
		: "0" (0)
		: "memory");

	return err;
}

/*
 * Save xstate context for old process during context switch.
 */
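/* A mask of -1 requests every state component; the CPU saves only those that are currently enabled. */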
static inline void fpu_xsave(struct fpu *fpu)
{
	xsave_state(&fpu->state->xsave, -1);
}

/*
 * Restore xstate context for new process during context switch.
 */
static inline int fpu_xrstor_checking(struct xsave_struct *fx)
{
	return xrstor_state(fx, -1);
}

/*
 * Save xstate to user space xsave area.
 *
 * We don't use the modified optimization because xrstor/xrstors might
 * track a different application.
 *
 * We don't use the compacted xsave area format, for backward
 * compatibility with old applications which don't understand it.
 */
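/*
 * ASM_STAC/ASM_CLAC open and close a user-space access window (SMAP)
 * around the access to the user-supplied buffer.
 */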
static inline int xsave_user(struct xsave_struct __user *buf)
{
	int err;

	/*
	 * Clear the xsave header first, so that reserved fields are
	 * initialized to zero.
	 */
	err = __clear_user(&buf->xsave_hdr, sizeof(buf->xsave_hdr));
	if (unlikely(err))
		return -EFAULT;

	__asm__ __volatile__(ASM_STAC "\n"
			"1:"XSAVE"\n"
			"2: " ASM_CLAC "\n"
			xstate_fault
			: "D" (buf), "a" (-1), "d" (-1), "0" (0)
			: "memory");

	return err;
}

/*
 * Restore xstate from user space xsave area.
 */
static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
{
	int err = 0;
	struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
	u32 lmask = mask;
	u32 hmask = mask >> 32;

	__asm__ __volatile__(ASM_STAC "\n"
			"1:"XRSTOR"\n"
			"2: " ASM_CLAC "\n"
			xstate_fault
			: "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
			: "memory");	/* memory required? */

	return err;
}
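
/*
 * Helpers for locating an individual state component inside an xsave
 * area and for setting up the compacted-format component offsets.
 */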
void *get_xsave_addr(struct xsave_struct *xsave, int xstate);
void setup_xstate_comp(void);

#endif