#undef TRACE_SYSTEM
#define TRACE_SYSTEM sched

#if !defined(_TRACE_SCHED_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_SCHED_H

#include <linux/sched.h>
#include <linux/tracepoint.h>
#include <linux/binfmts.h>

#ifdef CONFIG_MTK_SCHED_TRACERS
/* M: states for tracking I/O and mutex events.
 * Note: take care not to conflict with the task states in linux/sched.h.
 *
 * A known issue that mainline Linux has not fixed:
 * 'K' is assigned to TASK_WAKEKILL in linux/sched.h, but printing 'K' in
 * sched_switch confuses the Android systrace parser. Therefore, for
 * sched_switch events, these extra states are printed at the end of each
 * line.
 */
#define _MT_TASK_BLOCKED_RTMUX (TASK_STATE_MAX << 1)
#define _MT_TASK_BLOCKED_MUTEX (TASK_STATE_MAX << 2)
#define _MT_TASK_BLOCKED_IO (TASK_STATE_MAX << 3)
#define _MT_EXTRA_STATE_MASK (_MT_TASK_BLOCKED_RTMUX | _MT_TASK_BLOCKED_MUTEX | \
    _MT_TASK_BLOCKED_IO | TASK_WAKEKILL)
#endif

#define _MT_TASK_STATE_MASK ((TASK_STATE_MAX - 1) & ~(TASK_WAKEKILL | TASK_PARKED))
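
/*
 * Illustrative note (not from the original source): with the definitions
 * above, the MTK "extra" states live in bits above TASK_STATE_MAX, so they
 * never collide with the generic task state bits. For example, a task
 * sleeping uninterruptibly while waiting for I/O would carry
 * TASK_UNINTERRUPTIBLE in the normal state bits plus _MT_TASK_BLOCKED_IO in
 * the extra bits, and the sched_switch output below would show prev_state=D
 * together with an extra_prev_state=d suffix.
 */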

/*
 * Tracepoint for calling kthread_stop, performed to end a kthread:
 */
TRACE_EVENT(sched_kthread_stop,
    TP_PROTO(struct task_struct *t),
    TP_ARGS(t),
    TP_STRUCT__entry(
        __array( char, comm, TASK_COMM_LEN )
        __field( pid_t, pid )
    ),
    TP_fast_assign(
        memcpy(__entry->comm, t->comm, TASK_COMM_LEN);
        __entry->pid = t->pid;
    ),
    TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);

/*
 * Tracepoint for the return value of the kthread stopping:
 */
TRACE_EVENT(sched_kthread_stop_ret,
    TP_PROTO(int ret),
    TP_ARGS(ret),
    TP_STRUCT__entry(
        __field( int, ret )
    ),
    TP_fast_assign(
        __entry->ret = ret;
    ),
    TP_printk("ret=%d", __entry->ret)
);

#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(struct task_struct *p);
#endif

/*
 * Tracepoint for waking up a task:
 */
DECLARE_EVENT_CLASS(sched_wakeup_template,
    TP_PROTO(struct task_struct *p, int success),
    TP_ARGS(__perf_task(p), success),
    TP_STRUCT__entry(
        __array( char, comm, TASK_COMM_LEN )
        __field( pid_t, pid )
        __field( int, prio )
        __field( int, success )
        __field( int, target_cpu )
#ifdef CONFIG_MTK_SCHED_TRACERS
        __field(long, state)
#endif
    ),
    TP_fast_assign(
        memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
        __entry->pid = p->pid;
        __entry->prio = p->prio;
        __entry->success = success;
        __entry->target_cpu = task_cpu(p);
#ifdef CONFIG_MTK_SCHED_TRACERS
        __entry->state = __trace_sched_switch_state(p);
#endif
    ),
    TP_printk(
#ifdef CONFIG_MTK_SCHED_TRACERS
        "comm=%s pid=%d prio=%d success=%d target_cpu=%03d state=%s",
#else
        "comm=%s pid=%d prio=%d success=%d target_cpu=%03d",
#endif
        __entry->comm, __entry->pid, __entry->prio,
        __entry->success, __entry->target_cpu
#ifdef CONFIG_MTK_SCHED_TRACERS
        ,
        __entry->state & ~TASK_STATE_MAX ?
        __print_flags(__entry->state & ~TASK_STATE_MAX, "|",
            {TASK_INTERRUPTIBLE, "S"},
            {TASK_UNINTERRUPTIBLE, "D"},
            {__TASK_STOPPED, "T"},
            {__TASK_TRACED, "t"},
            {EXIT_ZOMBIE, "Z"},
            {EXIT_DEAD, "X"},
            {TASK_DEAD, "x"},
            {TASK_WAKEKILL, "K"},
            {TASK_WAKING, "W"},
            {_MT_TASK_BLOCKED_RTMUX, "r"},
            {_MT_TASK_BLOCKED_MUTEX, "m"},
            {_MT_TASK_BLOCKED_IO, "d"}) : "R"
#endif
    )
);
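
/*
 * Example of a resulting trace line (hypothetical values, added here for
 * illustration only). With CONFIG_MTK_SCHED_TRACERS enabled a wakeup may
 * be logged as:
 *   comm=kworker/0:1 pid=42 prio=120 success=1 target_cpu=001 state=D|d
 * "R" is printed instead of the flag list when no non-running state bits
 * are set.
 */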

DEFINE_EVENT(sched_wakeup_template, sched_wakeup,
    TP_PROTO(struct task_struct *p, int success),
    TP_ARGS(p, success));

/*
 * Tracepoint for waking up a new task:
 */
DEFINE_EVENT(sched_wakeup_template, sched_wakeup_new,
    TP_PROTO(struct task_struct *p, int success),
    TP_ARGS(p, success));

#ifdef CREATE_TRACE_POINTS
static inline long __trace_sched_switch_state(struct task_struct *p)
{
    long state = p->state;

#ifdef CONFIG_PREEMPT
    /*
     * For all intents and purposes a preempted task is a running task.
     */
    if (preempt_count() & PREEMPT_ACTIVE)
        state = TASK_RUNNING | TASK_STATE_MAX;
#endif
#ifdef CONFIG_MTK_SCHED_TRACERS
#ifdef CONFIG_RT_MUTEXES
    if (p->pi_blocked_on)
        state |= _MT_TASK_BLOCKED_RTMUX;
#endif
#ifdef CONFIG_DEBUG_MUTEXES
    if (p->blocked_on)
        state |= _MT_TASK_BLOCKED_MUTEX;
#endif
    if ((p->state & TASK_UNINTERRUPTIBLE) && p->in_iowait)
        state |= _MT_TASK_BLOCKED_IO;
#endif
    return state;
}
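
/*
 * Illustrative example (not part of the original file): for a task that set
 * TASK_UNINTERRUPTIBLE and then blocked in io_schedule(), the helper above
 * would return TASK_UNINTERRUPTIBLE | _MT_TASK_BLOCKED_IO (assuming no
 * rt-mutex or mutex block is recorded), since io_schedule() sets
 * p->in_iowait for the duration of the sleep.
 */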

# if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_MTK_SCHED_TRACERS)
/*
 * The legacy cgroup hierarchy is at most 3 levels deep, and each load is
 * printed in at most 10 characters (9 digits plus a leading slash '/').
 * MTK_FAIR_DBG_SZ = 100 is therefore safely above the worst case of 60
 * characters, (3 * 10) * 2, where the factor of 2 covers both the @prev
 * and @next tasks.
 */
# define MTK_FAIR_DBG_SZ 100
/*
 * snprintf() writes at most @size bytes including the trailing null byte
 * ('\0'), so bump 10 up to 11.
 */
# define MTK_FAIR_DBG_LEN (10 + 1)
# define MTK_FAIR_DBG_DEP 3

static int fair_cgroup_load(char *buf, int cnt, struct task_struct *p)
{
    int loc = cnt;
    int t, depth = 0;
    unsigned long w[MTK_FAIR_DBG_DEP];
    struct sched_entity *se = p->se.parent;

    for (; se && (depth < MTK_FAIR_DBG_DEP); se = se->parent)
        w[depth++] = se->load.weight;

    switch (p->policy) {
    case SCHED_NORMAL:
        loc += snprintf(&buf[loc], 7, "NORMAL"); break;
    case SCHED_IDLE:
        loc += snprintf(&buf[loc], 5, "IDLE"); break;
    case SCHED_BATCH:
        loc += snprintf(&buf[loc], 6, "BATCH"); break;
    }

    for (depth--; depth >= 0; depth--) {
        t = snprintf(&buf[loc], MTK_FAIR_DBG_LEN, "/%lu", w[depth]);
        if ((t < MTK_FAIR_DBG_LEN) && (t > 0))
            loc += t;
        else
            loc += snprintf(&buf[loc], 7, "/ERROR");
    }

    t = snprintf(&buf[loc], MTK_FAIR_DBG_LEN, "/%lu", p->se.load.weight);
    if ((t < MTK_FAIR_DBG_LEN) && (t > 0))
        loc += t;
    else
        loc += snprintf(&buf[loc], 7, "/ERROR");

    return loc;
}

static int is_fair_preempt(char *buf, struct task_struct *prev,
               struct task_struct *next)
{
    int cnt;

    /* nothing needs to be clarified for the RT class or when yielding from IDLE */
    if ((task_pid_nr(prev) == 0) || (rt_task(next) || rt_task(prev)))
        return 0;

    /* we only care about preemption */
    if (prev->state &&
        !(task_thread_info(prev)->preempt_count & PREEMPT_ACTIVE)) {
        return 0;
    }

    memset(buf, 0, MTK_FAIR_DBG_SZ);
    cnt = fair_cgroup_load(buf, 0, prev);
    cnt += snprintf(&buf[cnt], 6, " ==> ");
    fair_cgroup_load(buf, cnt, next);
    return 1;
}
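
/*
 * Illustrative example (hypothetical weights): when a CFS preemption is
 * detected, is_fair_preempt() leaves a string such as
 *   "NORMAL/1024/2048 ==> NORMAL/1024/1024"
 * in the debug buffer, i.e. the policy name of @prev followed by the load
 * weight of each collected cgroup level (outermost first) and finally of the
 * task's own sched entity, then the same for @next after the " ==> "
 * separator.
 */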

# endif
#endif

/*
 * Tracepoint for task switches, performed by the scheduler:
 */
TRACE_EVENT(sched_switch,
    TP_PROTO(struct task_struct *prev,
         struct task_struct *next),
    TP_ARGS(prev, next),
    TP_STRUCT__entry(
        __array( char, prev_comm, TASK_COMM_LEN )
        __field( pid_t, prev_pid )
        __field( int, prev_prio )
        __field( long, prev_state )
        __array( char, next_comm, TASK_COMM_LEN )
        __field( pid_t, next_pid )
        __field( int, next_prio )
#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_MTK_SCHED_TRACERS)
        __field(int, fair_preempt)
        __array(char, fair_dbg_buf, MTK_FAIR_DBG_SZ)
#endif
    ),
    TP_fast_assign(
        memcpy(__entry->next_comm, next->comm, TASK_COMM_LEN);
        __entry->prev_pid = prev->pid;
        __entry->prev_prio = prev->prio;
        __entry->prev_state = __trace_sched_switch_state(prev);
        memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
        __entry->next_pid = next->pid;
        __entry->next_prio = next->prio;
#if defined(CONFIG_FAIR_GROUP_SCHED) && defined(CONFIG_MTK_SCHED_TRACERS)
        __entry->fair_preempt = is_fair_preempt(__entry->fair_dbg_buf,
                            prev, next);
#endif
    ),
    TP_printk(
#ifdef CONFIG_MTK_SCHED_TRACERS
        "prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d%s%s %s",
#else
        "prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
#endif
        __entry->prev_comm, __entry->prev_pid, __entry->prev_prio,
        __entry->prev_state & (_MT_TASK_STATE_MASK) ?
        __print_flags(__entry->prev_state & (_MT_TASK_STATE_MASK), "|",
            { 1, "S" }, { 2, "D" }, { 4, "T" }, { 8, "t" },
            { 16, "Z" }, { 32, "X" }, { 64, "x" },
            { 128, "K" }, { 256, "W" }) : "R",
        __entry->prev_state & TASK_STATE_MAX ? "+" : "",
        __entry->next_comm, __entry->next_pid, __entry->next_prio
#ifdef CONFIG_MTK_SCHED_TRACERS
        ,
        (__entry->prev_state & _MT_EXTRA_STATE_MASK) ?
        " extra_prev_state=" : "",
        __print_flags(__entry->prev_state & _MT_EXTRA_STATE_MASK, "|",
            { TASK_WAKEKILL, "K" },
            { TASK_PARKED, "P" },
            { _MT_TASK_BLOCKED_RTMUX, "r" },
            { _MT_TASK_BLOCKED_MUTEX, "m" },
            { _MT_TASK_BLOCKED_IO, "d" })
# ifdef CONFIG_FAIR_GROUP_SCHED
        , (__entry->fair_preempt ? __entry->fair_dbg_buf : "")
# else
        , ""
# endif
#endif
    )
);
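
/*
 * Example trace line (hypothetical values, added here for illustration):
 *   prev_comm=app prev_pid=321 prev_prio=120 prev_state=D ==>
 *   next_comm=swapper/0 next_pid=0 next_prio=120 extra_prev_state=d
 * A "+" suffix after prev_state marks a preempted (still runnable) task,
 * and the extra_prev_state part only appears when one of the MTK extra
 * state bits is set.
 */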

/*
 * Tracepoint for a task being migrated:
 */
TRACE_EVENT(sched_migrate_task,
    TP_PROTO(struct task_struct *p, int dest_cpu),
    TP_ARGS(p, dest_cpu),
    TP_STRUCT__entry(
        __array( char, comm, TASK_COMM_LEN )
        __field( pid_t, pid )
        __field( int, prio )
        __field( int, orig_cpu )
        __field( int, dest_cpu )
#ifdef CONFIG_MTK_SCHED_TRACERS
        __field(long, state)
#endif
    ),
    TP_fast_assign(
        memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
        __entry->pid = p->pid;
        __entry->prio = p->prio;
        __entry->orig_cpu = task_cpu(p);
        __entry->dest_cpu = dest_cpu;
#ifdef CONFIG_MTK_SCHED_TRACERS
        __entry->state = __trace_sched_switch_state(p);
#endif
    ),
#ifdef CONFIG_MTK_SCHED_TRACERS
    TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d state=%s",
#else
    TP_printk("comm=%s pid=%d prio=%d orig_cpu=%d dest_cpu=%d",
#endif
        __entry->comm, __entry->pid, __entry->prio,
        __entry->orig_cpu, __entry->dest_cpu
#ifdef CONFIG_MTK_SCHED_TRACERS
        ,
        __entry->state & ~TASK_STATE_MAX ?
        __print_flags(__entry->state & ~TASK_STATE_MAX, "|",
            { TASK_INTERRUPTIBLE, "S" },
            { TASK_UNINTERRUPTIBLE, "D" },
            { __TASK_STOPPED, "T" },
            { __TASK_TRACED, "t" },
            { EXIT_ZOMBIE, "Z" },
            { EXIT_DEAD, "X" },
            { TASK_DEAD, "x" },
            { TASK_WAKEKILL, "K" },
            { TASK_WAKING, "W" },
            { _MT_TASK_BLOCKED_RTMUX, "r" },
            { _MT_TASK_BLOCKED_MUTEX, "m" },
            { _MT_TASK_BLOCKED_IO, "d" }) : "R"
#endif
    )
);

DECLARE_EVENT_CLASS(sched_process_template,
    TP_PROTO(struct task_struct *p),
    TP_ARGS(p),
    TP_STRUCT__entry(
        __array( char, comm, TASK_COMM_LEN )
        __field( pid_t, pid )
        __field( int, prio )
    ),
    TP_fast_assign(
        memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
        __entry->pid = p->pid;
        __entry->prio = p->prio;
    ),
    TP_printk("comm=%s pid=%d prio=%d",
        __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for freeing a task:
 */
DEFINE_EVENT(sched_process_template, sched_process_free,
    TP_PROTO(struct task_struct *p),
    TP_ARGS(p));

/*
 * Tracepoint for a task exiting:
 */
DEFINE_EVENT(sched_process_template, sched_process_exit,
    TP_PROTO(struct task_struct *p),
    TP_ARGS(p));

/*
 * Tracepoint for waiting on task to unschedule:
 */
DEFINE_EVENT(sched_process_template, sched_wait_task,
    TP_PROTO(struct task_struct *p),
    TP_ARGS(p));

/*
 * Tracepoint for a waiting task:
 */
TRACE_EVENT(sched_process_wait,
    TP_PROTO(struct pid *pid),
    TP_ARGS(pid),
    TP_STRUCT__entry(
        __array( char, comm, TASK_COMM_LEN )
        __field( pid_t, pid )
        __field( int, prio )
    ),
    TP_fast_assign(
        memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        __entry->pid = pid_nr(pid);
        __entry->prio = current->prio;
    ),
    TP_printk("comm=%s pid=%d prio=%d",
        __entry->comm, __entry->pid, __entry->prio)
);

/*
 * Tracepoint for do_fork:
 */
TRACE_EVENT(sched_process_fork,
    TP_PROTO(struct task_struct *parent, struct task_struct *child),
    TP_ARGS(parent, child),
    TP_STRUCT__entry(
        __array( char, parent_comm, TASK_COMM_LEN )
        __field( pid_t, parent_pid )
        __array( char, child_comm, TASK_COMM_LEN )
        __field( pid_t, child_pid )
    ),
    TP_fast_assign(
        memcpy(__entry->parent_comm, parent->comm, TASK_COMM_LEN);
        __entry->parent_pid = parent->pid;
        memcpy(__entry->child_comm, child->comm, TASK_COMM_LEN);
        __entry->child_pid = child->pid;
    ),
    TP_printk("comm=%s pid=%d child_comm=%s child_pid=%d",
        __entry->parent_comm, __entry->parent_pid,
        __entry->child_comm, __entry->child_pid)
);

/*
 * Tracepoint for exec:
 */
TRACE_EVENT(sched_process_exec,
    TP_PROTO(struct task_struct *p, pid_t old_pid,
         struct linux_binprm *bprm),
    TP_ARGS(p, old_pid, bprm),
    TP_STRUCT__entry(
        __string( filename, bprm->filename )
        __field( pid_t, pid )
        __field( pid_t, old_pid )
    ),
    TP_fast_assign(
        __assign_str(filename, bprm->filename);
        __entry->pid = p->pid;
        __entry->old_pid = old_pid;
    ),
    TP_printk("filename=%s pid=%d old_pid=%d", __get_str(filename),
        __entry->pid, __entry->old_pid)
);

/*
 * XXX the below sched_stat tracepoints only apply to SCHED_OTHER/BATCH/IDLE;
 * adding sched_stat support to SCHED_FIFO/RR would be welcome.
 */
DECLARE_EVENT_CLASS(sched_stat_template,
    TP_PROTO(struct task_struct *tsk, u64 delay),
    TP_ARGS(__perf_task(tsk), __perf_count(delay)),
    TP_STRUCT__entry(
        __array( char, comm, TASK_COMM_LEN )
        __field( pid_t, pid )
        __field( u64, delay )
    ),
    TP_fast_assign(
        memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
        __entry->pid = tsk->pid;
        __entry->delay = delay;
    ),
    TP_printk("comm=%s pid=%d delay=%Lu [ns]",
        __entry->comm, __entry->pid,
        (unsigned long long)__entry->delay)
);

/*
 * Tracepoint for accounting wait time (time the task is runnable
 * but not actually running due to scheduler contention).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_wait,
    TP_PROTO(struct task_struct *tsk, u64 delay),
    TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting sleep time (time the task is not runnable,
 * including iowait, see below).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_sleep,
    TP_PROTO(struct task_struct *tsk, u64 delay),
    TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting iowait time (time the task is not runnable
 * due to waiting on IO to complete).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_iowait,
    TP_PROTO(struct task_struct *tsk, u64 delay),
    TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting blocked time (time the task is in uninterruptible
 * sleep).
 */
DEFINE_EVENT(sched_stat_template, sched_stat_blocked,
    TP_PROTO(struct task_struct *tsk, u64 delay),
    TP_ARGS(tsk, delay));

/*
 * Tracepoint for accounting runtime (time the task is executing
 * on a CPU).
 */
DECLARE_EVENT_CLASS(sched_stat_runtime,
    TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
    TP_ARGS(tsk, __perf_count(runtime), vruntime),
    TP_STRUCT__entry(
        __array( char, comm, TASK_COMM_LEN )
        __field( pid_t, pid )
        __field( u64, runtime )
        __field( u64, vruntime )
    ),
    TP_fast_assign(
        memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
        __entry->pid = tsk->pid;
        __entry->runtime = runtime;
        __entry->vruntime = vruntime;
    ),
    TP_printk("comm=%s pid=%d runtime=%Lu [ns] vruntime=%Lu [ns]",
        __entry->comm, __entry->pid,
        (unsigned long long)__entry->runtime,
        (unsigned long long)__entry->vruntime)
);

DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
    TP_PROTO(struct task_struct *tsk, u64 runtime, u64 vruntime),
    TP_ARGS(tsk, runtime, vruntime));

/*
 * Tracepoint for showing priority inheritance modifying a task's
 * priority.
 */
TRACE_EVENT(sched_pi_setprio,
    TP_PROTO(struct task_struct *tsk, int newprio),
    TP_ARGS(tsk, newprio),
    TP_STRUCT__entry(
        __array( char, comm, TASK_COMM_LEN )
        __field( pid_t, pid )
        __field( int, oldprio )
        __field( int, newprio )
    ),
    TP_fast_assign(
        memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
        __entry->pid = tsk->pid;
        __entry->oldprio = tsk->prio;
        __entry->newprio = newprio;
    ),
    TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
        __entry->comm, __entry->pid,
        __entry->oldprio, __entry->newprio)
);

#ifdef CONFIG_DETECT_HUNG_TASK
TRACE_EVENT(sched_process_hang,
    TP_PROTO(struct task_struct *tsk),
    TP_ARGS(tsk),
    TP_STRUCT__entry(
        __array( char, comm, TASK_COMM_LEN )
        __field( pid_t, pid )
    ),
    TP_fast_assign(
        memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
        __entry->pid = tsk->pid;
    ),
    TP_printk("comm=%s pid=%d", __entry->comm, __entry->pid)
);
#endif /* CONFIG_DETECT_HUNG_TASK */

DECLARE_EVENT_CLASS(sched_move_task_template,
    TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
    TP_ARGS(tsk, src_cpu, dst_cpu),
    TP_STRUCT__entry(
        __field( pid_t, pid )
        __field( pid_t, tgid )
        __field( pid_t, ngid )
        __field( int, src_cpu )
        __field( int, src_nid )
        __field( int, dst_cpu )
        __field( int, dst_nid )
    ),
    TP_fast_assign(
        __entry->pid = task_pid_nr(tsk);
        __entry->tgid = task_tgid_nr(tsk);
        __entry->ngid = task_numa_group_id(tsk);
        __entry->src_cpu = src_cpu;
        __entry->src_nid = cpu_to_node(src_cpu);
        __entry->dst_cpu = dst_cpu;
        __entry->dst_nid = cpu_to_node(dst_cpu);
    ),
    TP_printk("pid=%d tgid=%d ngid=%d src_cpu=%d src_nid=%d dst_cpu=%d dst_nid=%d",
        __entry->pid, __entry->tgid, __entry->ngid,
        __entry->src_cpu, __entry->src_nid,
        __entry->dst_cpu, __entry->dst_nid)
);

/*
 * Tracks migration of tasks from one runqueue to another. Can be used to
 * detect if automatic NUMA balancing is bouncing between nodes.
 */
DEFINE_EVENT(sched_move_task_template, sched_move_numa,
    TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
    TP_ARGS(tsk, src_cpu, dst_cpu)
);

DEFINE_EVENT(sched_move_task_template, sched_stick_numa,
    TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
    TP_ARGS(tsk, src_cpu, dst_cpu)
);

TRACE_EVENT(sched_swap_numa,
    TP_PROTO(struct task_struct *src_tsk, int src_cpu,
         struct task_struct *dst_tsk, int dst_cpu),
    TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu),
    TP_STRUCT__entry(
        __field( pid_t, src_pid )
        __field( pid_t, src_tgid )
        __field( pid_t, src_ngid )
        __field( int, src_cpu )
        __field( int, src_nid )
        __field( pid_t, dst_pid )
        __field( pid_t, dst_tgid )
        __field( pid_t, dst_ngid )
        __field( int, dst_cpu )
        __field( int, dst_nid )
    ),
    TP_fast_assign(
        __entry->src_pid = task_pid_nr(src_tsk);
        __entry->src_tgid = task_tgid_nr(src_tsk);
        __entry->src_ngid = task_numa_group_id(src_tsk);
        __entry->src_cpu = src_cpu;
        __entry->src_nid = cpu_to_node(src_cpu);
        __entry->dst_pid = task_pid_nr(dst_tsk);
        __entry->dst_tgid = task_tgid_nr(dst_tsk);
        __entry->dst_ngid = task_numa_group_id(dst_tsk);
        __entry->dst_cpu = dst_cpu;
        __entry->dst_nid = cpu_to_node(dst_cpu);
    ),
    TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
        __entry->src_pid, __entry->src_tgid, __entry->src_ngid,
        __entry->src_cpu, __entry->src_nid,
        __entry->dst_pid, __entry->dst_tgid, __entry->dst_ngid,
        __entry->dst_cpu, __entry->dst_nid)
);

/*
 * Tracepoint for waking a polling cpu without an IPI.
 */
TRACE_EVENT(sched_wake_idle_without_ipi,
    TP_PROTO(int cpu),
    TP_ARGS(cpu),
    TP_STRUCT__entry(
        __field( int, cpu )
    ),
    TP_fast_assign(
        __entry->cpu = cpu;
    ),
    TP_printk("cpu=%d", __entry->cpu)
);

#ifdef CONFIG_MTK_SCHED_TRACERS
/*
 * Tracepoint for showing the result of task runqueue selection
 */
TRACE_EVENT(sched_select_task_rq,
    TP_PROTO(struct task_struct *tsk, int policy, int prev_cpu, int target_cpu),
    TP_ARGS(tsk, policy, prev_cpu, target_cpu),
    TP_STRUCT__entry(
        __field(pid_t, pid)
        __field(int, policy)
        __field(int, prev_cpu)
        __field(int, target_cpu)
    ),
    TP_fast_assign(
        __entry->pid = tsk->pid;
        __entry->policy = policy;
        __entry->prev_cpu = prev_cpu;
        __entry->target_cpu = target_cpu;
    ),
    TP_printk("pid=%4d policy=0x%08x pre-cpu=%d target=%d",
        __entry->pid,
        __entry->policy,
        __entry->prev_cpu,
        __entry->target_cpu)
);
#endif

#ifdef CONFIG_MT_SCHED_TRACE
#define sched_trace(event) \
TRACE_EVENT(event, \
    TP_PROTO(char *strings), \
    TP_ARGS(strings), \
    TP_STRUCT__entry( \
        __array( char, strings, 128) \
    ), \
    TP_fast_assign( \
        memcpy(__entry->strings, strings, 128); \
    ), \
    TP_printk("%s", __entry->strings))
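
/*
 * Usage sketch (an assumption for illustration, not shown in this header):
 * each sched_trace(name) expansion creates a trace_<name>() tracepoint that
 * takes a preformatted string, e.g.
 *     char buf[128];
 *     snprintf(buf, sizeof(buf), "rt throttle cpu=%d", cpu);
 *     trace_sched_log(buf);
 * Note that TP_fast_assign() above copies a full 128 bytes, so callers
 * should pass a buffer that is at least 128 bytes long.
 */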

sched_trace(sched_log);

// MTK RT enhancement
sched_trace(sched_rt);
sched_trace(sched_rt_info);
sched_trace(sched_lb);
sched_trace(sched_lb_info);

#ifdef CONFIG_MTK_SCHED_CMP
sched_trace(sched_cmp);
sched_trace(sched_cmp_info);
#endif

// MTK scheduling interoperation enhancement
#ifdef CONFIG_MT_SCHED_INTEROP
sched_trace(sched_interop);
#endif

#ifdef CONFIG_MT_DEBUG_PREEMPT
sched_trace(sched_preempt);
#endif
#endif /* CONFIG_MT_SCHED_TRACE */

/* sched: add trace_sched */
TRACE_EVENT(sched_task_entity_avg,
    TP_PROTO(unsigned int tag, struct task_struct *tsk, struct sched_avg *avg),
    TP_ARGS(tag, tsk, avg),
    TP_STRUCT__entry(
        __field(u32, tag)
        __array(char, comm, TASK_COMM_LEN)
        __field(pid_t, tgid)
        __field(pid_t, pid)
        __field(unsigned long, contrib)
        __field(unsigned long, ratio)
        __field(u32, usage_sum)
        __field(unsigned long, rq_time)
        __field(unsigned long, live_time)
    ),
    TP_fast_assign(
        __entry->tag = tag;
        memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
        __entry->tgid = task_pid_nr(tsk->group_leader);
        __entry->pid = task_pid_nr(tsk);
        __entry->contrib = avg->load_avg_contrib;
        __entry->ratio = 0;
        __entry->usage_sum = -1;
        __entry->rq_time = avg->runnable_avg_sum;
        __entry->live_time = avg->avg_period;
    ),
    TP_printk("[%d]comm=%s tgid=%d pid=%d contrib=%lu ratio=%lu exe_time=%d rq_time=%lu live_time=%lu",
        __entry->tag, __entry->comm, __entry->tgid, __entry->pid,
        __entry->contrib, __entry->ratio, __entry->usage_sum,
        __entry->rq_time, __entry->live_time)
);

/*
 * Tracepoint for HMP (CONFIG_SCHED_HMP) task migrations.
 */
TRACE_EVENT(sched_hmp_migrate,
    TP_PROTO(struct task_struct *tsk, int dest, int force),
    TP_ARGS(tsk, dest, force),
    TP_STRUCT__entry(
        __array(char, comm, TASK_COMM_LEN)
        __field(pid_t, pid)
        __field(int, dest)
        __field(int, force)
    ),
    TP_fast_assign(
        memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
        __entry->pid = tsk->pid;
        __entry->dest = dest;
        __entry->force = force;
    ),
    TP_printk("comm=%s pid=%d dest=%d force=%d",
        __entry->comm, __entry->pid,
        __entry->dest, __entry->force)
);

/*
 * sched: tracepoint for showing tracked load contribution.
 */
TRACE_EVENT(sched_task_load_contrib,
    TP_PROTO(struct task_struct *tsk, unsigned long load_contrib),
    TP_ARGS(tsk, load_contrib),
    TP_STRUCT__entry(
        __array(char, comm, TASK_COMM_LEN)
        __field(pid_t, pid)
        __field(unsigned long, load_contrib)
    ),
    TP_fast_assign(
        memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
        __entry->pid = tsk->pid;
        __entry->load_contrib = load_contrib;
    ),
    TP_printk("comm=%s pid=%d load_contrib=%lu",
        __entry->comm, __entry->pid,
        __entry->load_contrib)
);

/*
 * sched: tracepoint for showing tracked task runnable ratio [0..1023].
 */
TRACE_EVENT(sched_task_runnable_ratio,
    TP_PROTO(struct task_struct *tsk, unsigned long ratio),
    TP_ARGS(tsk, ratio),
    TP_STRUCT__entry(
        __array(char, comm, TASK_COMM_LEN)
        __field(pid_t, pid)
        __field(unsigned long, ratio)
    ),
    TP_fast_assign(
        memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
        __entry->pid = tsk->pid;
        __entry->ratio = ratio;
    ),
    TP_printk("comm=%s pid=%d ratio=%lu",
        __entry->comm, __entry->pid,
        __entry->ratio)
);

#ifdef CONFIG_HMP_TRACER
/*
 * Tracepoint for showing tracked migration information
 */
TRACE_EVENT(sched_dynamic_threshold,
    TP_PROTO(struct task_struct *tsk, unsigned int threshold,
         unsigned int status, int curr_cpu, int target_cpu, int task_load,
         struct clb_stats *B, struct clb_stats *L),
    TP_ARGS(tsk, threshold, status, curr_cpu, target_cpu, task_load, B, L),
    TP_STRUCT__entry(
        __array(char, comm, TASK_COMM_LEN)
        __field(pid_t, pid)
        __field(int, prio)
        __field(unsigned int, threshold)
        __field(unsigned int, status)
        __field(int, curr_cpu)
        __field(int, target_cpu)
        __field(int, curr_load)
        __field(int, target_load)
        __field(int, task_load)
        __field(int, B_load_avg)
        __field(int, L_load_avg)
    ),
    TP_fast_assign(
        memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
        __entry->pid = tsk->pid;
        __entry->prio = tsk->prio;
        __entry->threshold = threshold;
        __entry->status = status;
        __entry->curr_cpu = curr_cpu;
        __entry->target_cpu = target_cpu;
        __entry->curr_load = cpu_rq(curr_cpu)->cfs.avg.load_avg_contrib;
        __entry->target_load = cpu_rq(target_cpu)->cfs.avg.load_avg_contrib;
        __entry->task_load = task_load;
        __entry->B_load_avg = B->load_avg;
        __entry->L_load_avg = L->load_avg;
    ),
    TP_printk(
        "pid=%4d prio=%d status=0x%4x dyn=%4u task-load=%4d curr-cpu=%d(%4d) target=%d(%4d) L-load-avg=%4d B-load-avg=%4d comm=%s",
        __entry->pid,
        __entry->prio,
        __entry->status,
        __entry->threshold,
        __entry->task_load,
        __entry->curr_cpu,
        __entry->curr_load,
        __entry->target_cpu,
        __entry->target_load,
        __entry->L_load_avg,
        __entry->B_load_avg,
        __entry->comm)
);

/*
 * Tracepoint for showing the result of hmp task runqueue selection
 */
TRACE_EVENT(sched_hmp_select_task_rq,
    TP_PROTO(struct task_struct *tsk, int step, int sd_flag, int prev_cpu,
         int target_cpu, int task_load, struct clb_stats *B,
         struct clb_stats *L),
    TP_ARGS(tsk, step, sd_flag, prev_cpu, target_cpu, task_load, B, L),
    TP_STRUCT__entry(
        __array(char, comm, TASK_COMM_LEN)
        __field(pid_t, pid)
        __field(int, prio)
        __field(int, step)
        __field(int, sd_flag)
        __field(int, prev_cpu)
        __field(int, target_cpu)
        __field(int, prev_load)
        __field(int, target_load)
        __field(int, task_load)
        __field(int, B_load_avg)
        __field(int, L_load_avg)
    ),
    TP_fast_assign(
        memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
        __entry->pid = tsk->pid;
        __entry->prio = tsk->prio;
        __entry->step = step;
        __entry->sd_flag = sd_flag;
        __entry->prev_cpu = prev_cpu;
        __entry->target_cpu = target_cpu;
        __entry->prev_load = cpu_rq(prev_cpu)->cfs.avg.load_avg_contrib;
        __entry->target_load = cpu_rq(target_cpu)->cfs.avg.load_avg_contrib;
        __entry->task_load = task_load;
        __entry->B_load_avg = B->load_avg;
        __entry->L_load_avg = L->load_avg;
    ),
    TP_printk(
        "pid=%4d prio=%d task-load=%4d sd-flag=%2d step=%d pre-cpu=%d(%4d) target=%d(%4d) L-load-avg=%4d B-load-avg=%4d comm=%s",
        __entry->pid,
        __entry->prio,
        __entry->task_load,
        __entry->sd_flag,
        __entry->step,
        __entry->prev_cpu,
        __entry->prev_load,
        __entry->target_cpu,
        __entry->target_load,
        __entry->L_load_avg,
        __entry->B_load_avg,
        __entry->comm)
);

/*
 * Tracepoint for dumping hmp cluster load ratio
 */
TRACE_EVENT(sched_hmp_load,
    TP_PROTO(int B_load_avg, int L_load_avg),
    TP_ARGS(B_load_avg, L_load_avg),
    TP_STRUCT__entry(
        __field(int, B_load_avg)
        __field(int, L_load_avg)
    ),
    TP_fast_assign(
        __entry->B_load_avg = B_load_avg;
        __entry->L_load_avg = L_load_avg;
    ),
    TP_printk("B-load-avg=%4d L-load-avg=%4d",
        __entry->B_load_avg,
        __entry->L_load_avg)
);

/*
 * Tracepoint for dumping hmp statistics
 */
TRACE_EVENT(sched_hmp_stats,
    TP_PROTO(struct hmp_statisic *hmp_stats),
    TP_ARGS(hmp_stats),
    TP_STRUCT__entry(
        __field(unsigned int, nr_force_up)
        __field(unsigned int, nr_force_down)
    ),
    TP_fast_assign(
        __entry->nr_force_up = hmp_stats->nr_force_up;
        __entry->nr_force_down = hmp_stats->nr_force_down;
    ),
    TP_printk("nr-force-up=%d nr-force-down=%2d",
        __entry->nr_force_up,
        __entry->nr_force_down)
);

/*
 * Tracepoint for cfs task enqueue event
 */
TRACE_EVENT(sched_cfs_enqueue_task,
    TP_PROTO(struct task_struct *tsk, int tsk_load, int cpu_id),
    TP_ARGS(tsk, tsk_load, cpu_id),
    TP_STRUCT__entry(
        __array(char, comm, TASK_COMM_LEN)
        __field(pid_t, tsk_pid)
        __field(int, tsk_load)
        __field(int, cpu_id)
    ),
    TP_fast_assign(
        memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
        __entry->tsk_pid = tsk->pid;
        __entry->tsk_load = tsk_load;
        __entry->cpu_id = cpu_id;
    ),
    TP_printk("cpu-id=%d task-pid=%4d task-load=%4d comm=%s",
        __entry->cpu_id,
        __entry->tsk_pid,
        __entry->tsk_load,
        __entry->comm)
);

/*
 * Tracepoint for cfs task dequeue event
 */
TRACE_EVENT(sched_cfs_dequeue_task,
    TP_PROTO(struct task_struct *tsk, int tsk_load, int cpu_id),
    TP_ARGS(tsk, tsk_load, cpu_id),
    TP_STRUCT__entry(
        __array(char, comm, TASK_COMM_LEN)
        __field(pid_t, tsk_pid)
        __field(int, tsk_load)
        __field(int, cpu_id)
    ),
    TP_fast_assign(
        memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
        __entry->tsk_pid = tsk->pid;
        __entry->tsk_load = tsk_load;
        __entry->cpu_id = cpu_id;
    ),
    TP_printk("cpu-id=%d task-pid=%4d task-load=%4d comm=%s",
        __entry->cpu_id,
        __entry->tsk_pid,
        __entry->tsk_load,
        __entry->comm)
);

/*
 * Tracepoint for cfs runqueue load ratio update
 */
TRACE_EVENT(sched_cfs_load_update,
    TP_PROTO(struct task_struct *tsk, int tsk_load, int tsk_delta, int cpu_id),
    TP_ARGS(tsk, tsk_load, tsk_delta, cpu_id),
    TP_STRUCT__entry(
        __array(char, comm, TASK_COMM_LEN)
        __field(pid_t, tsk_pid)
        __field(int, tsk_load)
        __field(int, tsk_delta)
        __field(int, cpu_id)
    ),
    TP_fast_assign(
        memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
        __entry->tsk_pid = tsk->pid;
        __entry->tsk_load = tsk_load;
        __entry->tsk_delta = tsk_delta;
        __entry->cpu_id = cpu_id;
    ),
    TP_printk("cpu-id=%d task-pid=%4d task-load=%4d(%d) comm=%s",
        __entry->cpu_id,
        __entry->tsk_pid,
        __entry->tsk_load,
        __entry->tsk_delta,
        __entry->comm)
);

/*
 * Tracepoint for showing tracked cfs runqueue runnable load.
 */
TRACE_EVENT(sched_cfs_runnable_load,
    TP_PROTO(int cpu_id, int cpu_load, int cpu_ntask),
    TP_ARGS(cpu_id, cpu_load, cpu_ntask),
    TP_STRUCT__entry(
        __field(int, cpu_id)
        __field(int, cpu_load)
        __field(int, cpu_ntask)
    ),
    TP_fast_assign(
        __entry->cpu_id = cpu_id;
        __entry->cpu_load = cpu_load;
        __entry->cpu_ntask = cpu_ntask;
    ),
    TP_printk("cpu-id=%d cfs-load=%4d, cfs-ntask=%2d",
        __entry->cpu_id,
        __entry->cpu_load,
        __entry->cpu_ntask)
);

/*
 * Tracepoint for profiling runqueue length
 */
TRACE_EVENT(sched_runqueue_length,
    TP_PROTO(int cpu, int length),
    TP_ARGS(cpu, length),
    TP_STRUCT__entry(
        __field(int, cpu)
        __field(int, length)
    ),
    TP_fast_assign(
        __entry->cpu = cpu;
        __entry->length = length;
    ),
    TP_printk("cpu=%d rq-length=%2d",
        __entry->cpu,
        __entry->length)
);

TRACE_EVENT(sched_cfs_length,
    TP_PROTO(int cpu, int length),
    TP_ARGS(cpu, length),
    TP_STRUCT__entry(
        __field(int, cpu)
        __field(int, length)
    ),
    TP_fast_assign(
        __entry->cpu = cpu;
        __entry->length = length;
    ),
    TP_printk("cpu=%d cfs-length=%2d",
        __entry->cpu,
        __entry->length)
);
#endif /* CONFIG_HMP_TRACER */

/*
 * Tracepoint for showing tracked rq runnable ratio [0..1023].
 */
TRACE_EVENT(sched_rq_runnable_ratio,
    TP_PROTO(int cpu, unsigned long ratio),
    TP_ARGS(cpu, ratio),
    TP_STRUCT__entry(
        __field(int, cpu)
        __field(unsigned long, ratio)
    ),
    TP_fast_assign(
        __entry->cpu = cpu;
        __entry->ratio = ratio;
    ),
    TP_printk("cpu=%d ratio=%lu",
        __entry->cpu,
        __entry->ratio)
);

/*
 * Tracepoint for showing tracked rq runnable load.
 */
TRACE_EVENT(sched_rq_runnable_load,
    TP_PROTO(int cpu, u64 load),
    TP_ARGS(cpu, load),
    TP_STRUCT__entry(
        __field(int, cpu)
        __field(u64, load)
    ),
    TP_fast_assign(
        __entry->cpu = cpu;
        __entry->load = load;
    ),
    TP_printk("cpu=%d load=%llu",
        __entry->cpu,
        __entry->load)
);

/*
 * Tracepoint for showing tracked task cpu usage ratio [0..1023].
 */
TRACE_EVENT(sched_task_usage_ratio,
    TP_PROTO(struct task_struct *tsk, unsigned long ratio),
    TP_ARGS(tsk, ratio),
    TP_STRUCT__entry(
        __array(char, comm, TASK_COMM_LEN)
        __field(pid_t, pid)
        __field(unsigned long, ratio)
    ),
    TP_fast_assign(
        memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
        __entry->pid = tsk->pid;
        __entry->ratio = ratio;
    ),
    TP_printk("comm=%s pid=%d ratio=%lu",
        __entry->comm, __entry->pid,
        __entry->ratio)
);

TRACE_EVENT(sched_heavy_task,
    TP_PROTO(const char *s),
    TP_ARGS(s),
    TP_STRUCT__entry(
        __string(s, s)
    ),
    TP_fast_assign(
        __assign_str(s, s);
    ),
    TP_printk("%s", __get_str(s))
);

#endif /* _TRACE_SCHED_H */

/* This part must be outside protection */
#include <trace/define_trace.h>