/*
 * arch/x86/include/asm/ptrace.h — x86 saved-register layout (struct pt_regs)
 * and ptrace/tracing helper declarations.
 */
  1. #ifndef _ASM_X86_PTRACE_H
  2. #define _ASM_X86_PTRACE_H
  3. #include <asm/segment.h>
  4. #include <asm/page_types.h>
  5. #include <uapi/asm/ptrace.h>
  6. #ifndef __ASSEMBLY__
  7. #ifdef __i386__
/*
 * Saved register frame on 32-bit x86.  Field order matches the order the
 * entry code pushes registers, so it must not be changed without updating
 * the assembly entry paths.
 */
struct pt_regs {
	unsigned long bx;
	unsigned long cx;
	unsigned long dx;
	unsigned long si;
	unsigned long di;
	unsigned long bp;
	unsigned long ax;
	unsigned long ds;
	unsigned long es;
	unsigned long fs;
	unsigned long gs;
	/* orig_ax: presumably the pre-clobber syscall number — NOTE(review): confirm against entry code */
	unsigned long orig_ax;
	unsigned long ip;
	unsigned long cs;
	unsigned long flags;
	/*
	 * sp/ss: not saved for traps that arrive from the kernel (no
	 * privilege change) — see kernel_stack_pointer()/regs_get_register().
	 */
	unsigned long sp;
	unsigned long ss;
};
  27. #else /* __i386__ */
/*
 * Saved register frame on x86-64.  Field order matches the entry-code save
 * order and must stay in sync with the assembly entry paths.
 */
struct pt_regs {
	unsigned long r15;
	unsigned long r14;
	unsigned long r13;
	unsigned long r12;
	unsigned long bp;
	unsigned long bx;
/* arguments: non interrupts/non tracing syscalls only save up to here*/
	unsigned long r11;
	unsigned long r10;
	unsigned long r9;
	unsigned long r8;
	unsigned long ax;
	unsigned long cx;
	unsigned long dx;
	unsigned long si;
	unsigned long di;
	/* orig_ax: presumably the pre-clobber syscall number — NOTE(review): confirm against entry code */
	unsigned long orig_ax;
/* end of arguments */
/* cpu exception frame or undefined */
	unsigned long ip;
	unsigned long cs;
	unsigned long flags;
	unsigned long sp;
	unsigned long ss;
/* top of stack page */
};
  55. #endif /* !__i386__ */
  56. #ifdef CONFIG_PARAVIRT
  57. #include <asm/paravirt_types.h>
  58. #endif
struct cpuinfo_x86;
struct task_struct;

/* Program counter to attribute a profiling tick to (defined out of line). */
extern unsigned long profile_pc(struct pt_regs *regs);
/* Tells generic code this arch provides its own profile_pc(). */
#define profile_pc profile_pc

/* Convert the saved ip to a linear address — presumably accounting for the
 * segment base of @child; verify against the definition. */
extern unsigned long
convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs);

/* Send SIGTRAP to @tsk with the given error code and si_code. */
extern void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
			 int error_code, int si_code);

/*
 * Syscall entry/exit tracing hooks.  Entry work is split into phase1 and
 * phase2; phase1's result is threaded into phase2.
 */
extern unsigned long syscall_trace_enter_phase1(struct pt_regs *, u32 arch);
extern long syscall_trace_enter_phase2(struct pt_regs *, u32 arch,
				       unsigned long phase1_result);
extern long syscall_trace_enter(struct pt_regs *);
extern void syscall_trace_leave(struct pt_regs *);
  72. static inline unsigned long regs_return_value(struct pt_regs *regs)
  73. {
  74. return regs->ax;
  75. }
  76. /*
  77. * user_mode_vm(regs) determines whether a register set came from user mode.
  78. * This is true if V8086 mode was enabled OR if the register set was from
  79. * protected mode with RPL-3 CS value. This tricky test checks that with
  80. * one comparison. Many places in the kernel can bypass this full check
  81. * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
  82. */
  83. static inline int user_mode(struct pt_regs *regs)
  84. {
  85. #ifdef CONFIG_X86_32
  86. return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
  87. #else
  88. return !!(regs->cs & 3);
  89. #endif
  90. }
/*
 * user_mode_vm() - like user_mode(), but also true in V8086 mode.
 */
static inline int user_mode_vm(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	/*
	 * OR the VM flag bits on top of the CS RPL bits, then do one >=
	 * comparison: the result reaches USER_RPL if either the RPL is
	 * user or any X86_VM_MASK bit is set (see comment above
	 * user_mode()).
	 */
	return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
		USER_RPL;
#else
	/* No V8086 mode on 64-bit; the plain CPL check is sufficient. */
	return user_mode(regs);
#endif
}
/*
 * v8086_mode() - nonzero if the registers were saved in virtual-8086 mode.
 * Note: returns the raw X86_VM_MASK bits of flags, not a normalized 0/1.
 */
static inline int v8086_mode(struct pt_regs *regs)
{
#ifdef CONFIG_X86_32
	return (regs->flags & X86_VM_MASK);
#else
	return 0;	/* No V86 mode support in long mode */
#endif
}
  108. #ifdef CONFIG_X86_64
/*
 * user_64bit_mode() - true if the saved CS selects a 64-bit (long mode)
 * user code segment, i.e. the task was running 64-bit user code.
 */
static inline bool user_64bit_mode(struct pt_regs *regs)
{
#ifndef CONFIG_PARAVIRT
	/*
	 * On non-paravirt systems, this is the only long mode CPL 3
	 * selector. We do not allow long mode selectors in the LDT.
	 */
	return regs->cs == __USER_CS;
#else
	/* Headers are too twisted for this to go in paravirt.h. */
	return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
#endif
}
/* User stack pointer of the current task, read from the old_rsp per-cpu slot. */
#define current_user_stack_pointer()	this_cpu_read(old_rsp)
/* ia32 vs. x32 difference */
/* ia32 tasks take sp from pt_regs; x32 (and 64-bit) use old_rsp as above. */
#define compat_user_stack_pointer()	\
	(test_thread_flag(TIF_IA32)	\
	 ? current_pt_regs()->sp	\
	 : this_cpu_read(old_rsp))
  128. #endif
#ifdef CONFIG_X86_32
/*
 * On 32-bit, traps from the kernel do not save sp/ss (see the comment in
 * regs_get_register()), so recovering the stack pointer needs out-of-line
 * logic.
 */
extern unsigned long kernel_stack_pointer(struct pt_regs *regs);
#else
/* On 64-bit, sp is always present in the saved frame. */
static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
{
	return regs->sp;
}
#endif
/* Accessors consumed by <asm-generic/ptrace.h> (included just below). */
#define GET_IP(regs) ((regs)->ip)
#define GET_FP(regs) ((regs)->bp)
#define GET_USP(regs) ((regs)->sp)
#include <asm-generic/ptrace.h>
/* Query offset/name of register from its name/offset */
extern int regs_query_register_offset(const char *name);
extern const char *regs_query_register_name(unsigned int offset);
/* Largest valid pt_regs byte offset: ss is the last member of the struct. */
#define MAX_REG_OFFSET (offsetof(struct pt_regs, ss))
/**
 * regs_get_register() - get register value from its offset
 * @regs: pt_regs from which register value is gotten.
 * @offset: offset number of the register.
 *
 * regs_get_register returns the value of a register. The @offset is the
 * offset of the register in struct pt_regs address which specified by @regs.
 * If @offset is bigger than MAX_REG_OFFSET, this returns 0.
 */
static inline unsigned long regs_get_register(struct pt_regs *regs,
					      unsigned int offset)
{
	if (unlikely(offset > MAX_REG_OFFSET))
		return 0;
#ifdef CONFIG_X86_32
	/*
	 * Traps from the kernel do not save sp and ss.
	 * Use the helper function to retrieve sp.
	 */
	if (offset == offsetof(struct pt_regs, sp) &&
	    regs->cs == __KERNEL_CS)
		return kernel_stack_pointer(regs);
#endif
	/* Plain byte-offset load from the saved register frame. */
	return *(unsigned long *)((unsigned long)regs + offset);
}
  170. /**
  171. * regs_within_kernel_stack() - check the address in the stack
  172. * @regs: pt_regs which contains kernel stack pointer.
  173. * @addr: address which is checked.
  174. *
  175. * regs_within_kernel_stack() checks @addr is within the kernel stack page(s).
  176. * If @addr is within the kernel stack, it returns true. If not, returns false.
  177. */
  178. static inline int regs_within_kernel_stack(struct pt_regs *regs,
  179. unsigned long addr)
  180. {
  181. return ((addr & ~(THREAD_SIZE - 1)) ==
  182. (kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
  183. }
  184. /**
  185. * regs_get_kernel_stack_nth() - get Nth entry of the stack
  186. * @regs: pt_regs which contains kernel stack pointer.
  187. * @n: stack entry number.
  188. *
  189. * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
  190. * is specified by @regs. If the @n th entry is NOT in the kernel stack,
  191. * this returns 0.
  192. */
  193. static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
  194. unsigned int n)
  195. {
  196. unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);
  197. addr += n;
  198. if (regs_within_kernel_stack(regs, (unsigned long)addr))
  199. return *addr;
  200. else
  201. return 0;
  202. }
/* Single-stepping is always available on x86. */
#define arch_has_single_step() (1)
#ifdef CONFIG_X86_DEBUGCTLMSR
#define arch_has_block_step() (1)
#else
/* Without CONFIG_X86_DEBUGCTLMSR, block stepping requires family >= 6. */
#define arch_has_block_step() (boot_cpu_data.x86 >= 6)
#endif
/* This arch supplies its own user_single_step_siginfo(). */
#define ARCH_HAS_USER_SINGLE_STEP_INFO
/*
 * When hitting ptrace_stop(), we cannot return using SYSRET because
 * that does not restore the full CPU state, only a minimal set. The
 * ptracer can change arbitrary register values, which is usually okay
 * because the usual ptrace stops run off the signal delivery path which
 * forces IRET; however, ptrace_event() stops happen in arbitrary places
 * in the kernel and don't force IRET path.
 *
 * So force IRET path after a ptrace stop.
 */
#define arch_ptrace_stop_needed(code, info)				\
({									\
	/* Setting TIF_NOTIFY_RESUME forces the (slow) IRET exit path. */ \
	set_thread_flag(TIF_NOTIFY_RESUME);				\
	/* No arch-specific stop action is actually needed. */		\
	false;								\
})
struct user_desc;

/* Read a thread-area (TLS) descriptor of task @p into @info. */
extern int do_get_thread_area(struct task_struct *p, int idx,
			      struct user_desc __user *info);
/* Write a thread-area (TLS) descriptor of task @p from @info;
 * @can_allocate presumably permits picking a free slot — confirm at callers. */
extern int do_set_thread_area(struct task_struct *p, int idx,
			      struct user_desc __user *info, int can_allocate);
  230. #endif /* !__ASSEMBLY__ */
  231. #endif /* _ASM_X86_PTRACE_H */