/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2011, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2011, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _UAPI_LINUX_PERF_EVENT_H
#define _UAPI_LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
	PERF_TYPE_HARDWARE   = 0,
	PERF_TYPE_SOFTWARE   = 1,
	PERF_TYPE_TRACEPOINT = 2,
	PERF_TYPE_HW_CACHE   = 3,
	PERF_TYPE_RAW        = 4,
	PERF_TYPE_BREAKPOINT = 5,

	PERF_TYPE_MAX,       /* non-ABI */
};
/*
 * Generalized performance event event_id types, used by the
 * attr.config parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
	/*
	 * Common hardware events, generalized by the kernel:
	 */
	PERF_COUNT_HW_CPU_CYCLES              = 0,
	PERF_COUNT_HW_INSTRUCTIONS            = 1,
	PERF_COUNT_HW_CACHE_REFERENCES        = 2,
	PERF_COUNT_HW_CACHE_MISSES            = 3,
	PERF_COUNT_HW_BRANCH_INSTRUCTIONS     = 4,
	PERF_COUNT_HW_BRANCH_MISSES           = 5,
	PERF_COUNT_HW_BUS_CYCLES              = 6,
	PERF_COUNT_HW_STALLED_CYCLES_FRONTEND = 7,
	PERF_COUNT_HW_STALLED_CYCLES_BACKEND  = 8,
	PERF_COUNT_HW_REF_CPU_CYCLES          = 9,

	PERF_COUNT_HW_MAX,                    /* non-ABI */
};
/*
 * Generalized hardware cache events:
 *
 *	{ L1-D, L1-I, LLC, ITLB, DTLB, BPU, NODE } x
 *	{ read, write, prefetch } x
 *	{ accesses, misses }
 */
enum perf_hw_cache_id {
	PERF_COUNT_HW_CACHE_L1D  = 0,
	PERF_COUNT_HW_CACHE_L1I  = 1,
	PERF_COUNT_HW_CACHE_LL   = 2,
	PERF_COUNT_HW_CACHE_DTLB = 3,
	PERF_COUNT_HW_CACHE_ITLB = 4,
	PERF_COUNT_HW_CACHE_BPU  = 5,
	PERF_COUNT_HW_CACHE_NODE = 6,

	PERF_COUNT_HW_CACHE_MAX, /* non-ABI */
};
enum perf_hw_cache_op_id {
	PERF_COUNT_HW_CACHE_OP_READ     = 0,
	PERF_COUNT_HW_CACHE_OP_WRITE    = 1,
	PERF_COUNT_HW_CACHE_OP_PREFETCH = 2,

	PERF_COUNT_HW_CACHE_OP_MAX,     /* non-ABI */
};

enum perf_hw_cache_op_result_id {
	PERF_COUNT_HW_CACHE_RESULT_ACCESS = 0,
	PERF_COUNT_HW_CACHE_RESULT_MISS   = 1,

	PERF_COUNT_HW_CACHE_RESULT_MAX,   /* non-ABI */
};
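
/*
 * For PERF_TYPE_HW_CACHE events, attr.config is composed from the three
 * enums above (this encoding is also documented in perf_event_open(2)):
 *
 *	config = (perf_hw_cache_id) |
 *		 (perf_hw_cache_op_id << 8) |
 *		 (perf_hw_cache_op_result_id << 16);
 *
 * E.g. L1-D read misses would be:
 *
 *	config = PERF_COUNT_HW_CACHE_L1D |
 *		 (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *		 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */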
/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events measure various
 * physical and software events of the kernel (and allow the profiling
 * of them as well):
 */
enum perf_sw_ids {
	PERF_COUNT_SW_CPU_CLOCK        = 0,
	PERF_COUNT_SW_TASK_CLOCK       = 1,
	PERF_COUNT_SW_PAGE_FAULTS      = 2,
	PERF_COUNT_SW_CONTEXT_SWITCHES = 3,
	PERF_COUNT_SW_CPU_MIGRATIONS   = 4,
	PERF_COUNT_SW_PAGE_FAULTS_MIN  = 5,
	PERF_COUNT_SW_PAGE_FAULTS_MAJ  = 6,
	PERF_COUNT_SW_ALIGNMENT_FAULTS = 7,
	PERF_COUNT_SW_EMULATION_FAULTS = 8,
	PERF_COUNT_SW_DUMMY            = 9,

	PERF_COUNT_SW_MAX,             /* non-ABI */
};
/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
	PERF_SAMPLE_IP           = 1U << 0,
	PERF_SAMPLE_TID          = 1U << 1,
	PERF_SAMPLE_TIME         = 1U << 2,
	PERF_SAMPLE_ADDR         = 1U << 3,
	PERF_SAMPLE_READ         = 1U << 4,
	PERF_SAMPLE_CALLCHAIN    = 1U << 5,
	PERF_SAMPLE_ID           = 1U << 6,
	PERF_SAMPLE_CPU          = 1U << 7,
	PERF_SAMPLE_PERIOD       = 1U << 8,
	PERF_SAMPLE_STREAM_ID    = 1U << 9,
	PERF_SAMPLE_RAW          = 1U << 10,
	PERF_SAMPLE_BRANCH_STACK = 1U << 11,
	PERF_SAMPLE_REGS_USER    = 1U << 12,
	PERF_SAMPLE_STACK_USER   = 1U << 13,
	PERF_SAMPLE_WEIGHT       = 1U << 14,
	PERF_SAMPLE_DATA_SRC     = 1U << 15,
	PERF_SAMPLE_IDENTIFIER   = 1U << 16,
	PERF_SAMPLE_TRANSACTION  = 1U << 17,

	PERF_SAMPLE_MAX = 1U << 18, /* non-ABI */
};
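
/*
 * A typical sampling configuration ORs several of the bits above into
 * attr.sample_type; each set bit adds the corresponding field to every
 * PERF_RECORD_SAMPLE (see the record layout further below). A minimal
 * sketch:
 *
 *	struct perf_event_attr attr = {0};
 *
 *	attr.type          = PERF_TYPE_HARDWARE;
 *	attr.config        = PERF_COUNT_HW_CPU_CYCLES;
 *	attr.size          = sizeof(attr);
 *	attr.sample_type   = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 *			     PERF_SAMPLE_TIME | PERF_SAMPLE_PERIOD;
 *	attr.sample_period = 100000;
 *	# or: attr.freq = 1; attr.sample_freq = 4000;
 */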
/*
 * Values to program into branch_sample_type when PERF_SAMPLE_BRANCH_STACK
 * is set.
 *
 * If the user does not pass priv level information via branch_sample_type,
 * the kernel uses the event's priv level. Branch and event priv levels do
 * not have to match. Branch priv level is checked for permissions.
 *
 * The branch types can be combined, however BRANCH_ANY covers all types
 * of branches and therefore it supersedes all the other types.
 */
enum perf_branch_sample_type {
	PERF_SAMPLE_BRANCH_USER       = 1U << 0,  /* user branches */
	PERF_SAMPLE_BRANCH_KERNEL     = 1U << 1,  /* kernel branches */
	PERF_SAMPLE_BRANCH_HV         = 1U << 2,  /* hypervisor branches */

	PERF_SAMPLE_BRANCH_ANY        = 1U << 3,  /* any branch types */
	PERF_SAMPLE_BRANCH_ANY_CALL   = 1U << 4,  /* any call branch */
	PERF_SAMPLE_BRANCH_ANY_RETURN = 1U << 5,  /* any return branch */
	PERF_SAMPLE_BRANCH_IND_CALL   = 1U << 6,  /* indirect calls */
	PERF_SAMPLE_BRANCH_ABORT_TX   = 1U << 7,  /* transaction aborts */
	PERF_SAMPLE_BRANCH_IN_TX      = 1U << 8,  /* in transaction */
	PERF_SAMPLE_BRANCH_NO_TX      = 1U << 9,  /* not in transaction */
	PERF_SAMPLE_BRANCH_COND       = 1U << 10, /* conditional branches */

	PERF_SAMPLE_BRANCH_MAX        = 1U << 11, /* non-ABI */
};
#define PERF_SAMPLE_BRANCH_PLM_ALL \
	(PERF_SAMPLE_BRANCH_USER|\
	 PERF_SAMPLE_BRANCH_KERNEL|\
	 PERF_SAMPLE_BRANCH_HV)
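
/*
 * For example, to sample the last branches leading up to user-space call
 * sites (a sketch; which filters are honoured depends on the PMU):
 *
 *	attr.sample_type        |= PERF_SAMPLE_BRANCH_STACK;
 *	attr.branch_sample_type  = PERF_SAMPLE_BRANCH_ANY_CALL |
 *				   PERF_SAMPLE_BRANCH_USER;
 */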
/*
 * Values to determine ABI of the registers dump.
 */
enum perf_sample_regs_abi {
	PERF_SAMPLE_REGS_ABI_NONE = 0,
	PERF_SAMPLE_REGS_ABI_32   = 1,
	PERF_SAMPLE_REGS_ABI_64   = 2,
};
/*
 * Values for the memory transaction event qualifier, mostly for
 * abort events. Multiple bits can be set.
 */
enum {
	PERF_TXN_ELISION        = (1 << 0), /* From elision */
	PERF_TXN_TRANSACTION    = (1 << 1), /* From transaction */
	PERF_TXN_SYNC           = (1 << 2), /* Instruction is related */
	PERF_TXN_ASYNC          = (1 << 3), /* Instruction not related */
	PERF_TXN_RETRY          = (1 << 4), /* Retry possible */
	PERF_TXN_CONFLICT       = (1 << 5), /* Conflict abort */
	PERF_TXN_CAPACITY_WRITE = (1 << 6), /* Capacity write abort */
	PERF_TXN_CAPACITY_READ  = (1 << 7), /* Capacity read abort */

	PERF_TXN_MAX            = (1 << 8), /* non-ABI */

	/* bits 32..63 are reserved for the abort code */

	PERF_TXN_ABORT_MASK  = (0xffffffffULL << 32),
	PERF_TXN_ABORT_SHIFT = 32,
};
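
/*
 * The architecture-specific abort code occupies the upper 32 bits of the
 * PERF_SAMPLE_TRANSACTION value and can be extracted like:
 *
 *	abort_code = (transaction & PERF_TXN_ABORT_MASK) >>
 *		     PERF_TXN_ABORT_SHIFT;
 */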
/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *	{ u64	value;
 *	  { u64	time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64	time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64	id;           } && PERF_FORMAT_ID
 *	} && !PERF_FORMAT_GROUP
 *
 *	{ u64	nr;
 *	  { u64	time_enabled; } && PERF_FORMAT_TOTAL_TIME_ENABLED
 *	  { u64	time_running; } && PERF_FORMAT_TOTAL_TIME_RUNNING
 *	  { u64	value;
 *	    { u64	id;   } && PERF_FORMAT_ID
 *	  }	cntr[nr];
 *	} && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
	PERF_FORMAT_TOTAL_TIME_ENABLED = 1U << 0,
	PERF_FORMAT_TOTAL_TIME_RUNNING = 1U << 1,
	PERF_FORMAT_ID                 = 1U << 2,
	PERF_FORMAT_GROUP              = 1U << 3,

	PERF_FORMAT_MAX = 1U << 4, /* non-ABI */
};
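
/*
 * A sketch of walking the PERF_FORMAT_GROUP layout above after a read()
 * on the group leader; this assumes read_format was exactly
 * PERF_FORMAT_GROUP | PERF_FORMAT_ID and buf[] holds the full result:
 *
 *	struct { __u64 value, id; } *cntr;
 *	__u64 nr = ((__u64 *)buf)[0];
 *	__u64 i;
 *
 *	cntr = (void *)((__u64 *)buf + 1);
 *	for (i = 0; i < nr; i++)
 *		printf("id %llu: %llu\n", cntr[i].id, cntr[i].value);
 */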
#define PERF_ATTR_SIZE_VER0	64	/* sizeof first published struct */
#define PERF_ATTR_SIZE_VER1	72	/* add: config2 */
#define PERF_ATTR_SIZE_VER2	80	/* add: branch_sample_type */
#define PERF_ATTR_SIZE_VER3	96	/* add: sample_regs_user */
					/* add: sample_stack_user */

/*
 * Hardware event_id to monitor via a performance monitoring event:
 */
struct perf_event_attr {

	/*
	 * Major type: hardware/software/tracepoint/etc.
	 */
	__u32	type;

	/*
	 * Size of the attr structure, for fwd/bwd compat.
	 */
	__u32	size;

	/*
	 * Type specific configuration information.
	 */
	__u64	config;

	union {
		__u64	sample_period;
		__u64	sample_freq;
	};

	__u64	sample_type;
	__u64	read_format;

	__u64	disabled       : 1, /* off by default */
		inherit        : 1, /* children inherit it */
		pinned         : 1, /* must always be on PMU */
		exclusive      : 1, /* only group on PMU */
		exclude_user   : 1, /* don't count user */
		exclude_kernel : 1, /* ditto kernel */
		exclude_hv     : 1, /* ditto hypervisor */
		exclude_idle   : 1, /* don't count when idle */
		mmap           : 1, /* include mmap data */
		comm           : 1, /* include comm data */
		freq           : 1, /* use freq, not period */
		inherit_stat   : 1, /* per task counts */
		enable_on_exec : 1, /* next exec enables */
		task           : 1, /* trace fork/exit */
		watermark      : 1, /* wakeup_watermark */
		/*
		 * precise_ip:
		 *
		 *  0 - SAMPLE_IP can have arbitrary skid
		 *  1 - SAMPLE_IP must have constant skid
		 *  2 - SAMPLE_IP requested to have 0 skid
		 *  3 - SAMPLE_IP must have 0 skid
		 *
		 * See also PERF_RECORD_MISC_EXACT_IP
		 */
		precise_ip     : 2, /* skid constraint */
		mmap_data      : 1, /* non-exec mmap data */
		sample_id_all  : 1, /* sample_type all events */

		exclude_host   : 1, /* don't count in host */
		exclude_guest  : 1, /* don't count in guest */

		exclude_callchain_kernel : 1, /* exclude kernel callchains */
		exclude_callchain_user   : 1, /* exclude user callchains */
		mmap2          : 1, /* include mmap with inode data */
		comm_exec      : 1, /* flag comm events that are due to an exec */
		__reserved_1   : 39;

	union {
		__u32	wakeup_events;	  /* wakeup every n events */
		__u32	wakeup_watermark; /* bytes before wakeup */
	};

	__u32	bp_type;
	union {
		__u64	bp_addr;
		__u64	config1; /* extension of config */
	};
	union {
		__u64	bp_len;
		__u64	config2; /* extension of config1 */
	};
	__u64	branch_sample_type; /* enum perf_branch_sample_type */

	/*
	 * Defines set of user regs to dump on samples.
	 * See asm/perf_regs.h for details.
	 */
	__u64	sample_regs_user;

	/*
	 * Defines size of the user stack to dump on samples.
	 */
	__u32	sample_stack_user;

	/* Align to u64. */
	__u32	__reserved_2;
};
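
/*
 * Glibc provides no wrapper for sys_perf_event_open(), so it is invoked
 * via syscall(2). A minimal self-monitoring sketch (error handling
 * omitted) counting this task's user-space instructions:
 *
 *	#include <linux/perf_event.h>
 *	#include <sys/syscall.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	struct perf_event_attr attr;
 *	long long count;
 *	int fd;
 *
 *	memset(&attr, 0, sizeof(attr));
 *	attr.type           = PERF_TYPE_HARDWARE;
 *	attr.size           = sizeof(attr);
 *	attr.config         = PERF_COUNT_HW_INSTRUCTIONS;
 *	attr.exclude_kernel = 1;
 *
 *	# pid == 0, cpu == -1: measure the calling task on any CPU
 *	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *
 *	# ... workload ...
 *
 *	# read_format == 0: the result is a single u64
 *	read(fd, &count, sizeof(count));
 *	close(fd);
 */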
#define perf_flags(attr)	(*(&(attr)->read_format + 1))

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE		_IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE		_IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH		_IO ('$', 2)
#define PERF_EVENT_IOC_RESET		_IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD		_IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT	_IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER	_IOW('$', 6, char *)
#define PERF_EVENT_IOC_ID		_IOR('$', 7, __u64 *)

enum perf_event_ioc_flags {
	PERF_IOC_FLAG_GROUP		= 1U << 0,
};
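
/*
 * A common pattern is to create the event disabled (attr.disabled = 1),
 * then reset and enable it around the region of interest; with
 * PERF_IOC_FLAG_GROUP the operation applies to the whole event group
 * (a sketch):
 *
 *	ioctl(fd, PERF_EVENT_IOC_RESET,   PERF_IOC_FLAG_GROUP);
 *	ioctl(fd, PERF_EVENT_IOC_ENABLE,  PERF_IOC_FLAG_GROUP);
 *	# ... region of interest ...
 *	ioctl(fd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP);
 */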
/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
	__u32	version;	/* version number of this structure */
	__u32	compat_version;	/* lowest version this is compat with */

	/*
	 * Bits needed to read the hw events in user-space.
	 *
	 *   u32 seq, time_mult, time_shift, index, width;
	 *   u64 count, enabled, running;
	 *   u64 cyc, time_offset;
	 *   s64 pmc = 0;
	 *
	 *   do {
	 *     seq = pc->lock;
	 *     barrier()
	 *
	 *     enabled = pc->time_enabled;
	 *     running = pc->time_running;
	 *
	 *     if (pc->cap_user_time && enabled != running) {
	 *       cyc = rdtsc();
	 *       time_offset = pc->time_offset;
	 *       time_mult   = pc->time_mult;
	 *       time_shift  = pc->time_shift;
	 *     }
	 *
	 *     index = pc->index;
	 *     count = pc->offset;
	 *     if (pc->cap_user_rdpmc && index) {
	 *       width = pc->pmc_width;
	 *       pmc = rdpmc(index - 1);
	 *     }
	 *
	 *     barrier();
	 *   } while (pc->lock != seq);
	 *
	 * NOTE: for obvious reasons this only works on self-monitoring
	 *       processes.
	 */
	__u32	lock;		/* seqlock for synchronization */
	__u32	index;		/* hardware event identifier */
	__s64	offset;		/* add to hardware event value */
	__u64	time_enabled;	/* time event active */
	__u64	time_running;	/* time event on cpu */
	union {
		__u64	capabilities;
		struct {
			__u64	cap_bit0		: 1, /* Always 0, deprecated, see commit 860f085b74e9 */
				cap_bit0_is_deprecated	: 1, /* Always 1, signals that bit 0 is zero */

				cap_user_rdpmc		: 1, /* The RDPMC instruction can be used to read counts */
				cap_user_time		: 1, /* The time_* fields are used */
				cap_user_time_zero	: 1, /* The time_zero field is used */
				cap_____res		: 59;
		};
	};
	/*
	 * If cap_user_rdpmc this field provides the bit-width of the value
	 * read using the rdpmc() or equivalent instruction. This can be used
	 * to sign extend the result like:
	 *
	 *   pmc <<= 64 - width;
	 *   pmc >>= 64 - width; // signed shift right
	 *   count += pmc;
	 */
	__u16	pmc_width;
	/*
	 * If cap_user_time the below fields can be used to compute the time
	 * delta since time_enabled (in ns) using rdtsc or similar.
	 *
	 *   u64 quot, rem;
	 *   u64 delta;
	 *
	 *   quot  = (cyc >> time_shift);
	 *   rem   = cyc & ((1 << time_shift) - 1);
	 *   delta = time_offset + quot * time_mult +
	 *           ((rem * time_mult) >> time_shift);
	 *
	 * Where time_offset, time_mult, time_shift and cyc are read in the
	 * seqcount loop described above. This delta can then be added to
	 * enabled and possibly running (if index), improving the scaling:
	 *
	 *   enabled += delta;
	 *   if (index)
	 *     running += delta;
	 *
	 *   quot  = count / running;
	 *   rem   = count % running;
	 *   count = quot * enabled + (rem * enabled) / running;
	 */
	__u16	time_shift;
	__u32	time_mult;
	__u64	time_offset;
	/*
	 * If cap_user_time_zero, the hardware clock (e.g. TSC) can be
	 * calculated from sample timestamps.
	 *
	 *   time = timestamp - time_zero;
	 *   quot = time / time_mult;
	 *   rem  = time % time_mult;
	 *   cyc  = (quot << time_shift) + (rem << time_shift) / time_mult;
	 *
	 * And vice versa:
	 *
	 *   quot = cyc >> time_shift;
	 *   rem  = cyc & ((1 << time_shift) - 1);
	 *   timestamp = time_zero + quot * time_mult +
	 *               ((rem * time_mult) >> time_shift);
	 */
	__u64	time_zero;
	__u32	size;		/* Header size up to __reserved[] fields. */

	/*
	 * Hole for extension of the self monitor capabilities
	 */

	__u8	__reserved[118*8+4];	/* align to 1k. */

	/*
	 * Control data for the mmap() data buffer.
	 *
	 * User-space reading the @data_head value should issue an smp_rmb(),
	 * after reading this value.
	 *
	 * When the mapping is PROT_WRITE the @data_tail value should be
	 * written by userspace to reflect the last read data, after issuing
	 * an smp_mb() to separate the data read from the ->data_tail store.
	 * In this case the kernel will not overwrite unread data.
	 *
	 * See perf_output_put_handle() for the data ordering.
	 */
	__u64	data_head;	/* head in the data section */
	__u64	data_tail;	/* user-space written tail */
};
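
/*
 * A sketch of draining the mmap()ed data buffer from user-space, following
 * the ordering rules above; it assumes a 2^n-page data area of data_size
 * bytes mapped right after this control page, and rmb()/mb() barrier
 * primitives provided by the caller:
 *
 *	struct perf_event_mmap_page *pc = mapping;
 *	char *data = (char *)mapping + page_size;
 *	__u64 head, tail = pc->data_tail;
 *
 *	head = pc->data_head;
 *	rmb();	# order reading @data_head before the data itself
 *
 *	while (tail < head) {
 *		struct perf_event_header *ev =
 *			(void *)(data + (tail & (data_size - 1)));
 *		# ... consume the record; it may wrap at data_size ...
 *		tail += ev->size;
 *	}
 *
 *	mb();	# finish the reads before releasing the space
 *	pc->data_tail = tail;
 */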
#define PERF_RECORD_MISC_CPUMODE_MASK		(7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN	(0 << 0)
#define PERF_RECORD_MISC_KERNEL			(1 << 0)
#define PERF_RECORD_MISC_USER			(2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR		(3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL		(4 << 0)
#define PERF_RECORD_MISC_GUEST_USER		(5 << 0)

/*
 * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
 * different events so can reuse the same bit position.
 */
#define PERF_RECORD_MISC_MMAP_DATA		(1 << 13)
#define PERF_RECORD_MISC_COMM_EXEC		(1 << 13)

/*
 * Indicates that the content of PERF_SAMPLE_IP points to
 * the actual instruction that triggered the event. See also
 * perf_event_attr::precise_ip.
 */
#define PERF_RECORD_MISC_EXACT_IP		(1 << 14)

/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED		(1 << 15)

struct perf_event_header {
	__u32	type;
	__u16	misc;
	__u16	size;
};
enum perf_event_type {

	/*
	 * If perf_event_attr.sample_id_all is set then all event types will
	 * carry the sample_type selected fields related to where/when
	 * (identity) an event took place (TID, TIME, ID, STREAM_ID, CPU,
	 * IDENTIFIER), as described for PERF_RECORD_SAMPLE below. They are
	 * stashed just after the perf_event_header and the fields already
	 * present for the record type, i.e. at the end of the payload. That
	 * way a newer perf.data file will be supported by older perf tools,
	 * with these new optional fields being ignored.
	 *
	 * struct sample_id {
	 *	{ u32	pid, tid;  } && PERF_SAMPLE_TID
	 *	{ u64	time;      } && PERF_SAMPLE_TIME
	 *	{ u64	id;        } && PERF_SAMPLE_ID
	 *	{ u64	stream_id; } && PERF_SAMPLE_STREAM_ID
	 *	{ u32	cpu, res;  } && PERF_SAMPLE_CPU
	 *	{ u64	id;        } && PERF_SAMPLE_IDENTIFIER
	 * } && perf_event_attr::sample_id_all
	 *
	 * Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID. The
	 * advantage of PERF_SAMPLE_IDENTIFIER is that its position is fixed
	 * relative to header.size.
	 */
	/*
	 * The MMAP events record the PROT_EXEC mappings so that we can
	 * correlate userspace IPs to code. They have the following structure:
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	char				filename[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_MMAP			= 1,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				id;
	 *	u64				lost;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_LOST			= 2,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	char				comm[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_COMM			= 3,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_EXIT			= 4,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u64				time;
	 *	u64				id;
	 *	u64				stream_id;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_THROTTLE			= 5,
	PERF_RECORD_UNTHROTTLE			= 6,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, ppid;
	 *	u32				tid, ptid;
	 *	u64				time;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_FORK			= 7,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *	u32				pid, tid;
	 *
	 *	struct read_format		values;
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_READ			= 8,

	/*
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	#
	 *	# Note that PERF_SAMPLE_IDENTIFIER duplicates PERF_SAMPLE_ID.
	 *	# The advantage of PERF_SAMPLE_IDENTIFIER is that its position
	 *	# is fixed relative to header.
	 *	#
	 *
	 *	{ u64	id;        } && PERF_SAMPLE_IDENTIFIER
	 *	{ u64	ip;        } && PERF_SAMPLE_IP
	 *	{ u32	pid, tid;  } && PERF_SAMPLE_TID
	 *	{ u64	time;      } && PERF_SAMPLE_TIME
	 *	{ u64	addr;      } && PERF_SAMPLE_ADDR
	 *	{ u64	id;        } && PERF_SAMPLE_ID
	 *	{ u64	stream_id; } && PERF_SAMPLE_STREAM_ID
	 *	{ u32	cpu, res;  } && PERF_SAMPLE_CPU
	 *	{ u64	period;    } && PERF_SAMPLE_PERIOD
	 *
	 *	{ struct read_format	values;	} && PERF_SAMPLE_READ
	 *
	 *	{ u64	nr,
	 *	  u64	ips[nr];   } && PERF_SAMPLE_CALLCHAIN
	 *
	 *	#
	 *	# The RAW record below is opaque data wrt the ABI
	 *	#
	 *	# That is, the ABI doesn't make any promises wrt to
	 *	# the stability of its content, it may vary depending
	 *	# on event, hardware, kernel version and phase of
	 *	# the moon.
	 *	#
	 *	# In other words, PERF_SAMPLE_RAW contents are not an ABI.
	 *	#
	 *
	 *	{ u32	size;
	 *	  char	data[size]; } && PERF_SAMPLE_RAW
	 *
	 *	{ u64	nr;
	 *	  { u64 from, to, flags } lbr[nr]; } && PERF_SAMPLE_BRANCH_STACK
	 *
	 *	{ u64	abi; # enum perf_sample_regs_abi
	 *	  u64	regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
	 *
	 *	{ u64	size;
	 *	  char	data[size];
	 *	  u64	dyn_size;  } && PERF_SAMPLE_STACK_USER
	 *
	 *	{ u64	weight;      } && PERF_SAMPLE_WEIGHT
	 *	{ u64	data_src;    } && PERF_SAMPLE_DATA_SRC
	 *	{ u64	transaction; } && PERF_SAMPLE_TRANSACTION
	 * };
	 */
	PERF_RECORD_SAMPLE			= 9,
	/*
	 * The MMAP2 records are an augmented version of MMAP; they add
	 * maj, min, ino numbers to be used to uniquely identify each mapping.
	 *
	 * struct {
	 *	struct perf_event_header	header;
	 *
	 *	u32				pid, tid;
	 *	u64				addr;
	 *	u64				len;
	 *	u64				pgoff;
	 *	u32				maj;
	 *	u32				min;
	 *	u64				ino;
	 *	u64				ino_generation;
	 *	u32				prot, flags;
	 *	char				filename[];
	 *	struct sample_id		sample_id;
	 * };
	 */
	PERF_RECORD_MMAP2			= 10,

	PERF_RECORD_MAX,			/* non-ABI */
};
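
/*
 * Records are consumed from the mmap data area by walking header.size and
 * dispatching on header.type. A sketch for a sample record whose
 * sample_type was exactly PERF_SAMPLE_IP | PERF_SAMPLE_TID (any other
 * combination changes the layout, per the tables above):
 *
 *	struct perf_event_header *ev = ...;
 *
 *	if (ev->type == PERF_RECORD_SAMPLE) {
 *		struct { struct perf_event_header header;
 *			 __u64 ip;
 *			 __u32 pid, tid; } *sample = (void *)ev;
 *
 *		printf("ip %#llx pid %u\n", sample->ip, sample->pid);
 *	}
 */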
#define PERF_MAX_STACK_DEPTH		127

enum perf_callchain_context {
	PERF_CONTEXT_HV			= (__u64)-32,
	PERF_CONTEXT_KERNEL		= (__u64)-128,
	PERF_CONTEXT_USER		= (__u64)-512,

	PERF_CONTEXT_GUEST		= (__u64)-2048,
	PERF_CONTEXT_GUEST_KERNEL	= (__u64)-2176,
	PERF_CONTEXT_GUEST_USER		= (__u64)-2560,

	PERF_CONTEXT_MAX		= (__u64)-4095,
};
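
/*
 * In a PERF_SAMPLE_CALLCHAIN dump the context values above are interleaved
 * with real instruction pointers to mark where the kernel/user/guest parts
 * of the chain begin; since they are huge negated constants they can be
 * told apart from addresses (a sketch):
 *
 *	for (i = 0; i < nr; i++) {
 *		if (ips[i] >= (__u64)PERF_CONTEXT_MAX)
 *			# context marker, e.g. PERF_CONTEXT_KERNEL
 *		else
 *			# instruction pointer
 *	}
 */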
#define PERF_FLAG_FD_NO_GROUP		(1UL << 0)
#define PERF_FLAG_FD_OUTPUT		(1UL << 1)
#define PERF_FLAG_PID_CGROUP		(1UL << 2) /* pid=cgroup id, per-cpu mode only */
#define PERF_FLAG_FD_CLOEXEC		(1UL << 3) /* O_CLOEXEC */

union perf_mem_data_src {
	__u64 val;
	struct {
		__u64	mem_op:5,	/* type of opcode */
			mem_lvl:14,	/* memory hierarchy level */
			mem_snoop:5,	/* snoop mode */
			mem_lock:2,	/* lock instr */
			mem_dtlb:7,	/* tlb access */
			mem_rsvd:31;
	};
};
/* type of opcode (load/store/prefetch, code) */
#define PERF_MEM_OP_NA		0x01 /* not available */
#define PERF_MEM_OP_LOAD	0x02 /* load instruction */
#define PERF_MEM_OP_STORE	0x04 /* store instruction */
#define PERF_MEM_OP_PFETCH	0x08 /* prefetch */
#define PERF_MEM_OP_EXEC	0x10 /* code (execution) */
#define PERF_MEM_OP_SHIFT	0

/* memory hierarchy (memory level, hit or miss) */
#define PERF_MEM_LVL_NA		0x01   /* not available */
#define PERF_MEM_LVL_HIT	0x02   /* hit level */
#define PERF_MEM_LVL_MISS	0x04   /* miss level */
#define PERF_MEM_LVL_L1		0x08   /* L1 */
#define PERF_MEM_LVL_LFB	0x10   /* Line Fill Buffer */
#define PERF_MEM_LVL_L2		0x20   /* L2 */
#define PERF_MEM_LVL_L3		0x40   /* L3 */
#define PERF_MEM_LVL_LOC_RAM	0x80   /* Local DRAM */
#define PERF_MEM_LVL_REM_RAM1	0x100  /* Remote DRAM (1 hop) */
#define PERF_MEM_LVL_REM_RAM2	0x200  /* Remote DRAM (2 hops) */
#define PERF_MEM_LVL_REM_CCE1	0x400  /* Remote Cache (1 hop) */
#define PERF_MEM_LVL_REM_CCE2	0x800  /* Remote Cache (2 hops) */
#define PERF_MEM_LVL_IO		0x1000 /* I/O memory */
#define PERF_MEM_LVL_UNC	0x2000 /* Uncached memory */
#define PERF_MEM_LVL_SHIFT	5
/* snoop mode */
#define PERF_MEM_SNOOP_NA	0x01 /* not available */
#define PERF_MEM_SNOOP_NONE	0x02 /* no snoop */
#define PERF_MEM_SNOOP_HIT	0x04 /* snoop hit */
#define PERF_MEM_SNOOP_MISS	0x08 /* snoop miss */
#define PERF_MEM_SNOOP_HITM	0x10 /* snoop hit modified */
#define PERF_MEM_SNOOP_SHIFT	19

/* locked instruction */
#define PERF_MEM_LOCK_NA	0x01 /* not available */
#define PERF_MEM_LOCK_LOCKED	0x02 /* locked transaction */
#define PERF_MEM_LOCK_SHIFT	24

/* TLB access */
#define PERF_MEM_TLB_NA		0x01 /* not available */
#define PERF_MEM_TLB_HIT	0x02 /* hit level */
#define PERF_MEM_TLB_MISS	0x04 /* miss level */
#define PERF_MEM_TLB_L1		0x08 /* L1 */
#define PERF_MEM_TLB_L2		0x10 /* L2 */
#define PERF_MEM_TLB_WK		0x20 /* Hardware Walker */
#define PERF_MEM_TLB_OS		0x40 /* OS fault handler */
#define PERF_MEM_TLB_SHIFT	26

#define PERF_MEM_S(a, s) \
	(((__u64)PERF_MEM_##a##_##s) << PERF_MEM_##a##_SHIFT)
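
/*
 * PERF_MEM_S() shifts one of the field values above into its position
 * within perf_mem_data_src::val. E.g. a load that hit in L1 with no
 * snooping could be described as:
 *
 *	data_src.val = PERF_MEM_S(OP, LOAD) |
 *		       PERF_MEM_S(LVL, HIT) | PERF_MEM_S(LVL, L1) |
 *		       PERF_MEM_S(SNOOP, NONE) |
 *		       PERF_MEM_S(LOCK, NA) | PERF_MEM_S(TLB, NA);
 */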
/*
 * single taken branch record layout:
 *
 *      from: source instruction (may not always be a branch insn)
 *        to: branch target
 *   mispred: branch target was mispredicted
 * predicted: branch target was predicted
 *
 * Support for mispred and predicted is optional; if not supported,
 * mispred = predicted = 0.
 *
 *     in_tx: running in a hardware transaction
 *     abort: aborting a hardware transaction
 */
struct perf_branch_entry {
	__u64	from;
	__u64	to;
	__u64	mispred:1,  /* target mispredicted */
		predicted:1,/* target predicted */
		in_tx:1,    /* in transaction */
		abort:1,    /* transaction abort */
		reserved:60;
};
#endif /* _UAPI_LINUX_PERF_EVENT_H */