debug.c
  1. /*
  2. * kernel/sched/debug.c
  3. *
  4. * Print the CFS rbtree
  5. *
  6. * Copyright(C) 2007, Red Hat, Inc., Ingo Molnar
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License version 2 as
  10. * published by the Free Software Foundation.
  11. */
  12. #define DEBUG
  13. #include <linux/proc_fs.h>
  14. #include <linux/sched.h>
  15. #include <linux/seq_file.h>
  16. #include <linux/kallsyms.h>
  17. #include <linux/utsname.h>
  18. #include <linux/mempolicy.h>
  19. #include <linux/sched.h>
  20. #ifdef CONFIG_KGDB_KDB
  21. #include <linux/kdb.h>
  22. #endif
  23. #ifdef CONFIG_MTPROF
  24. #include "mt_sched_mon.h"
  25. #endif
  26. #include "sched.h"
  27. /* sched: kdb for sched/debug */
  28. /* #define TEST_SCHED_DEBUG_ENHANCEMENT */
  29. /* #define MTK_SCHED_CMP_PRINT */
  30. #define TRYLOCK_NUM 10
  31. #include <linux/delay.h>
  32. static DEFINE_SPINLOCK(sched_debug_lock);
  33. /* sched: add rt_exec_task info */
  34. DECLARE_PER_CPU(u64, rt_throttling_start);
  35. DECLARE_PER_CPU(u64, exec_delta_time);
  36. DECLARE_PER_CPU(u64, clock_task);
  37. DECLARE_PER_CPU(u64, exec_start);
  38. DECLARE_PER_CPU(struct task_struct, exec_task);
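/*
 * These per-CPU variables record the most recent RT throttling event
 * (throttle start time, exec delta, task clock, exec start and the task
 * that was running). They are only declared here; the definitions are
 * expected to live in the RT scheduler code (likely kernel/sched/rt.c in
 * the MTK patches).
 */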
  39. /*
  40. * This allows printing both to /proc/sched_debug and
  41. * to the console
  42. */
  43. #ifndef CONFIG_KGDB_KDB
  44. #define SEQ_printf(m, x...) \
  45. do { \
  46. if (m) \
  47. seq_printf(m, x); \
  48. else \
  49. pr_debug(x); \
  50. } while (0)
  51. #else
  52. #define SEQ_printf(m, x...) \
  53. do { \
  54. if (m) \
  55. seq_printf(m, x); \
  56. else if (__get_cpu_var(kdb_in_use) == 1) \
  57. kdb_printf(x); \
  58. else \
  59. pr_debug(x); \
  60. } while (0)
  61. #endif
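/*
 * SEQ_printf() picks its sink at run time: with a seq_file it feeds
 * /proc/sched_debug, with m == NULL it falls back to pr_debug(), and on
 * KDB-enabled kernels it routes to kdb_printf() while the debugger owns
 * the console (kdb_in_use).
 */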
  62. /*
  63. * Ease the printing of nsec fields:
  64. */
  65. static long long nsec_high(unsigned long long nsec)
  66. {
  67. if ((long long)nsec < 0) {
  68. nsec = -nsec;
  69. do_div(nsec, 1000000);
  70. return -nsec;
  71. }
  72. do_div(nsec, 1000000);
  73. return nsec;
  74. }
  75. static unsigned long nsec_low(unsigned long long nsec)
  76. {
  77. if ((long long)nsec < 0)
  78. nsec = -nsec;
  79. return do_div(nsec, 1000000);
  80. }
  81. #define SPLIT_NS(x) nsec_high(x), nsec_low(x)
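/*
 * SPLIT_NS() turns a nanosecond value into a (milliseconds, remainder)
 * pair for a "%Ld.%06ld" format. For example, 1234567890 ns prints as
 * "1234.567890"; negative values keep their sign in the high part.
 */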
  82. #ifdef CONFIG_FAIR_GROUP_SCHED
  83. static void print_cfs_group_stats(struct seq_file *m, int cpu, struct task_group *tg)
  84. {
  85. struct sched_entity *se = tg->se[cpu];
  86. #define P(F) \
  87. SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)F)
  88. #define PN(F) \
  89. SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
  90. if (!se) {
  91. struct sched_avg *avg = &cpu_rq(cpu)->avg;
  92. P(avg->runnable_avg_sum);
  93. P(avg->avg_period);
  94. #ifdef MTK_SCHED_CMP_PRINT
  95. P(avg->last_runnable_update);
  96. #endif
  97. return;
  98. }
  99. PN(se->exec_start);
  100. PN(se->vruntime);
  101. PN(se->sum_exec_runtime);
  102. #ifdef CONFIG_SCHEDSTATS
  103. PN(se->statistics.wait_start);
  104. PN(se->statistics.sleep_start);
  105. PN(se->statistics.block_start);
  106. PN(se->statistics.sleep_max);
  107. PN(se->statistics.block_max);
  108. PN(se->statistics.exec_max);
  109. PN(se->statistics.slice_max);
  110. PN(se->statistics.wait_max);
  111. PN(se->statistics.wait_sum);
  112. P(se->statistics.wait_count);
  113. #endif
  114. P(se->load.weight);
  115. #ifdef CONFIG_SMP
  116. P(se->avg.runnable_avg_sum);
  117. P(se->avg.running_avg_sum);
  118. P(se->avg.avg_period);
  119. P(se->avg.load_avg_contrib);
  120. P(se->avg.utilization_avg_contrib);
  121. P(se->avg.decay_count);
  122. #endif
  123. #undef PN
  124. #undef P
  125. }
  126. #endif
  127. #ifdef CONFIG_CGROUP_SCHED
  128. static char group_path[PATH_MAX];
  129. static char *task_group_path(struct task_group *tg)
  130. {
  131. if (autogroup_path(tg, group_path, PATH_MAX))
  132. return group_path;
  133. return cgroup_path(tg->css.cgroup, group_path, PATH_MAX);
  134. }
  135. #endif
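/*
 * group_path is a single shared buffer, so task_group_path() is not
 * reentrant; callers are assumed to be serialized (the print paths run
 * under sched_debug_lock and/or with IRQs disabled).
 */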
  136. static void
  137. print_task_at_AEE(struct seq_file *m, struct rq *rq, struct task_struct *p)
  138. {
  139. #ifdef CONFIG_SCHEDSTATS
  140. if (rq->curr == p) {
  141. #ifdef CONFIG_CGROUP_SCHED
  142. SEQ_printf(m, "R %15s %5d %9Ld.%06ld %9Ld %5d %9Ld.%06ld %9Ld.%06ld %9Ld.%06ld %s\n",
  143. p->comm,
  144. task_pid_nr(p),
  145. SPLIT_NS(p->se.vruntime),
  146. (long long)(p->nvcsw + p->nivcsw),
  147. p->prio,
  148. SPLIT_NS(p->se.vruntime),
  149. SPLIT_NS(p->se.sum_exec_runtime),
  150. SPLIT_NS(p->se.statistics.sum_sleep_runtime),
  151. task_group_path(task_group(p)));
  152. #else
  153. SEQ_printf(m, "R %15s %5d %9Ld.%06ld %9Ld %5d %9Ld.%06ld %9Ld.%06ld %9Ld.%06ld\n",
  154. p->comm,
  155. task_pid_nr(p),
  156. SPLIT_NS(p->se.vruntime),
  157. (long long)(p->nvcsw + p->nivcsw),
  158. p->prio,
  159. SPLIT_NS(p->se.vruntime),
  160. SPLIT_NS(p->se.sum_exec_runtime),
  161. SPLIT_NS(p->se.statistics.sum_sleep_runtime));
  162. #endif
  163. } else {
  164. #ifdef CONFIG_CGROUP_SCHED
  165. SEQ_printf(m, " %15s %5d %9Ld.%06ld %9Ld %5d %9Ld.%06ld %9Ld.%06ld %9Ld.%06ld %s\n",
  166. p->comm,
  167. task_pid_nr(p),
  168. SPLIT_NS(p->se.vruntime),
  169. (long long)(p->nvcsw + p->nivcsw),
  170. p->prio,
  171. SPLIT_NS(p->se.vruntime),
  172. SPLIT_NS(p->se.sum_exec_runtime),
  173. SPLIT_NS(p->se.statistics.sum_sleep_runtime),
  174. task_group_path(task_group(p)));
  175. #else
  176. SEQ_printf(m, " %15s %5d %9Ld.%06ld %9Ld %5d %9Ld.%06ld %9Ld.%06ld %9Ld.%06ld\n",
  177. p->comm,
  178. task_pid_nr(p),
  179. SPLIT_NS(p->se.vruntime),
  180. (long long)(p->nvcsw + p->nivcsw),
  181. p->prio,
  182. SPLIT_NS(p->se.vruntime),
  183. SPLIT_NS(p->se.sum_exec_runtime),
  184. SPLIT_NS(p->se.statistics.sum_sleep_runtime));
  185. #endif
  186. }
  187. #else
  188. SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
  189. 0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
  190. #endif
  191. }
  192. static void
  193. print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
  194. {
  195. if (rq->curr == p)
  196. SEQ_printf(m, "R");
  197. else
  198. SEQ_printf(m, " ");
  199. SEQ_printf(m, "%15s %5d %9Ld.%06ld %9Ld %5d ",
  200. p->comm, task_pid_nr(p),
  201. SPLIT_NS(p->se.vruntime),
  202. (long long)(p->nvcsw + p->nivcsw),
  203. p->prio);
  204. #ifdef CONFIG_SCHEDSTATS
  205. SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
  206. SPLIT_NS(p->se.vruntime),
  207. SPLIT_NS(p->se.sum_exec_runtime),
  208. SPLIT_NS(p->se.statistics.sum_sleep_runtime));
  209. #else
  210. SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
  211. 0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
  212. #endif
  213. #ifdef CONFIG_NUMA_BALANCING
  214. SEQ_printf(m, " %d", task_node(p));
  215. #endif
  216. #ifdef CONFIG_CGROUP_SCHED
  217. SEQ_printf(m, " %s", task_group_path(task_group(p)));
  218. #endif
  219. SEQ_printf(m, "\n");
  220. }
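/*
 * One line per task, matching the header printed by print_rq():
 * task, PID, tree-key (vruntime), switches (nvcsw + nivcsw), prio,
 * then the schedstats columns exec-runtime, sum-exec and sum-sleep
 * (zero-filled when CONFIG_SCHEDSTATS is off), plus optional NUMA node
 * and cgroup path.
 */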
  221. static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
  222. {
  223. struct task_struct *g, *p;
  224. SEQ_printf(m,
  225. "\nrunnable tasks:\n"
  226. " task PID tree-key switches prio"
  227. " exec-runtime sum-exec sum-sleep\n"
  228. "------------------------------------------------------"
  229. "----------------------------------------------------\n");
  230. rcu_read_lock();
  231. for_each_process_thread(g, p) {
  232. if (task_cpu(p) != rq_cpu)
  233. continue;
  234. print_task(m, rq, p);
  235. }
  236. rcu_read_unlock();
  237. }
  238. void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
  239. {
  240. s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
  241. spread, rq0_min_vruntime, spread0;
  242. struct rq *rq = cpu_rq(cpu);
  243. struct sched_entity *last;
  244. unsigned long flags;
  245. #ifdef CONFIG_FAIR_GROUP_SCHED
  246. SEQ_printf(m, "\ncfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
  247. #else
  248. SEQ_printf(m, "\ncfs_rq[%d]:\n", cpu);
  249. #endif
  250. SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "exec_clock",
  251. SPLIT_NS(cfs_rq->exec_clock));
  252. raw_spin_lock_irqsave(&rq->lock, flags);
  253. if (cfs_rq->rb_leftmost)
  254. MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
  255. last = __pick_last_entity(cfs_rq);
  256. if (last)
  257. max_vruntime = last->vruntime;
  258. min_vruntime = cfs_rq->min_vruntime;
  259. rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
  260. raw_spin_unlock_irqrestore(&rq->lock, flags);
  261. SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "MIN_vruntime",
  262. SPLIT_NS(MIN_vruntime));
  263. SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "min_vruntime",
  264. SPLIT_NS(min_vruntime));
  265. SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "max_vruntime",
  266. SPLIT_NS(max_vruntime));
  267. spread = max_vruntime - MIN_vruntime;
  268. SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread",
  269. SPLIT_NS(spread));
  270. spread0 = min_vruntime - rq0_min_vruntime;
  271. SEQ_printf(m, " .%-30s: %Ld.%06ld\n", "spread0",
  272. SPLIT_NS(spread0));
  273. SEQ_printf(m, " .%-30s: %d\n", "nr_spread_over",
  274. cfs_rq->nr_spread_over);
  275. SEQ_printf(m, " .%-30s: %d\n", "nr_running", cfs_rq->nr_running);
  276. SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
  277. #ifdef CONFIG_SMP
  278. SEQ_printf(m, " .%-30s: %ld\n", "runnable_load_avg",
  279. cfs_rq->runnable_load_avg);
  280. SEQ_printf(m, " .%-30s: %ld\n", "blocked_load_avg",
  281. cfs_rq->blocked_load_avg);
  282. SEQ_printf(m, " .%-30s: %ld\n", "utilization_load_avg",
  283. cfs_rq->utilization_load_avg);
  284. #ifdef CONFIG_FAIR_GROUP_SCHED
  285. SEQ_printf(m, " .%-30s: %ld\n", "tg_load_contrib",
  286. cfs_rq->tg_load_contrib);
  287. SEQ_printf(m, " .%-30s: %d\n", "tg_runnable_contrib",
  288. cfs_rq->tg_runnable_contrib);
  289. SEQ_printf(m, " .%-30s: %ld\n", "tg_load_avg",
  290. atomic_long_read(&cfs_rq->tg->load_avg));
  291. SEQ_printf(m, " .%-30s: %d\n", "tg->runnable_avg",
  292. atomic_read(&cfs_rq->tg->runnable_avg));
  293. #endif
  294. #endif
  295. #ifdef CONFIG_CFS_BANDWIDTH
  296. SEQ_printf(m, " .%-30s: %d\n", "tg->cfs_bandwidth.timer_active",
  297. cfs_rq->tg->cfs_bandwidth.timer_active);
  298. SEQ_printf(m, " .%-30s: %d\n", "throttled",
  299. cfs_rq->throttled);
  300. SEQ_printf(m, " .%-30s: %d\n", "throttle_count",
  301. cfs_rq->throttle_count);
  302. #endif
  303. #ifdef CONFIG_FAIR_GROUP_SCHED
  304. print_cfs_group_stats(m, cpu, cfs_rq->tg);
  305. #endif
  306. }
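/*
 * MIN_vruntime/max_vruntime are sampled under rq->lock from the leftmost
 * and rightmost entities of the CFS rbtree; they stay at -1 when the
 * queue is empty. spread0 compares this runqueue's min_vruntime against
 * CPU 0's as a rough cross-CPU drift indicator.
 */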
  307. void print_rt_rq(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
  308. {
  309. #ifdef CONFIG_RT_GROUP_SCHED
  310. SEQ_printf(m, "\nrt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
  311. #else
  312. SEQ_printf(m, "\nrt_rq[%d]:\n", cpu);
  313. #endif
  314. #define P(x) \
  315. SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rt_rq->x))
  316. #define PN(x) \
  317. SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rt_rq->x))
  318. P(rt_nr_running);
  319. P(rt_throttled);
  320. PN(rt_time);
  321. PN(rt_runtime);
  322. #undef PN
  323. #undef P
  324. }
  325. extern __read_mostly int sched_clock_running;
  326. static void print_cpu(struct seq_file *m, int cpu)
  327. {
  328. struct rq *rq = cpu_rq(cpu);
  329. unsigned long flags;
  330. #ifdef CONFIG_X86
  331. {
  332. unsigned int freq = cpu_khz ? : 1;
  333. SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
  334. cpu, freq / 1000, (freq % 1000));
  335. }
  336. #else
  337. /* sched: add cpu info */
  338. SEQ_printf(m, "cpu#%d: %s\n", cpu, cpu_is_offline(cpu)?"Offline":"Online");
  339. #endif
  340. #define P(x) \
  341. do { \
  342. if (sizeof(rq->x) == 4) \
  343. SEQ_printf(m, " .%-30s: %ld\n", #x, (long)(rq->x)); \
  344. else \
  345. SEQ_printf(m, " .%-30s: %Ld\n", #x, (long long)(rq->x));\
  346. } while (0)
  347. #define PN(x) \
  348. SEQ_printf(m, " .%-30s: %Ld.%06ld\n", #x, SPLIT_NS(rq->x))
  349. P(nr_running);
  350. SEQ_printf(m, " .%-30s: %lu\n", "load",
  351. rq->load.weight);
  352. P(nr_switches);
  353. P(nr_load_updates);
  354. P(nr_uninterruptible);
  355. PN(next_balance);
  356. SEQ_printf(m, " .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
  357. PN(clock);
  358. P(cpu_load[0]);
  359. P(cpu_load[1]);
  360. P(cpu_load[2]);
  361. P(cpu_load[3]);
  362. P(cpu_load[4]);
  363. #undef P
  364. #undef PN
  365. #ifdef CONFIG_SCHEDSTATS
  366. #define P(n) SEQ_printf(m, " .%-30s: %d\n", #n, rq->n);
  367. #define P64(n) SEQ_printf(m, " .%-30s: %Ld\n", #n, rq->n);
  368. P(yld_count);
  369. P(sched_count);
  370. P(sched_goidle);
  371. #ifdef CONFIG_SMP
  372. P64(avg_idle);
  373. P64(max_idle_balance_cost);
  374. #endif
  375. P(ttwu_count);
  376. P(ttwu_local);
  377. #undef P
  378. #undef P64
  379. #endif
  380. spin_lock_irqsave(&sched_debug_lock, flags);
  381. print_cfs_stats(m, cpu);
  382. print_rt_stats(m, cpu);
  383. print_rq(m, rq, cpu);
  384. spin_unlock_irqrestore(&sched_debug_lock, flags);
  385. SEQ_printf(m, "\n");
  386. }
  387. static const char *sched_tunable_scaling_names[] = {
  388. "none",
  389. "logaritmic",
  390. "linear"
  391. };
  392. static void sched_debug_header(struct seq_file *m)
  393. {
  394. u64 ktime, sched_clk, cpu_clk;
  395. unsigned long flags;
  396. #ifdef TEST_SCHED_DEBUG_ENHANCEMENT
  397. static int i;
  398. i++;
  399. if (i == 10) {
  400. struct rq *rq = cpu_rq(0);
  401. /* lock_timekeeper(); */
  402. raw_spin_lock_irq(&rq->lock);
  403. spin_lock_irqsave(&sched_debug_lock, flags);
  404. write_lock_irqsave(&tasklist_lock, flags);
  405. BUG_ON(1);
  406. }
  407. #endif
  408. local_irq_save(flags);
  409. ktime = ktime_to_ns(ktime_get());
  410. sched_clk = sched_clock();
  411. cpu_clk = local_clock();
  412. local_irq_restore(flags);
  413. SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
  414. init_utsname()->release,
  415. (int)strcspn(init_utsname()->version, " "),
  416. init_utsname()->version);
  417. #define P(x) \
  418. SEQ_printf(m, "%-40s: %Ld\n", #x, (long long)(x))
  419. #define PN(x) \
  420. SEQ_printf(m, "%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
  421. PN(ktime);
  422. PN(sched_clk);
  423. PN(cpu_clk);
  424. P(jiffies);
  425. #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
  426. P(sched_clock_stable());
  427. #endif
  428. #undef PN
  429. #undef P
  430. SEQ_printf(m, "\n");
  431. SEQ_printf(m, "sysctl_sched\n");
  432. #define P(x) \
  433. SEQ_printf(m, " .%-40s: %Ld\n", #x, (long long)(x))
  434. #define PN(x) \
  435. SEQ_printf(m, " .%-40s: %Ld.%06ld\n", #x, SPLIT_NS(x))
  436. PN(sysctl_sched_latency);
  437. PN(sysctl_sched_min_granularity);
  438. PN(sysctl_sched_wakeup_granularity);
  439. P(sysctl_sched_child_runs_first);
  440. P(sysctl_sched_features);
  441. #undef PN
  442. #undef P
  443. SEQ_printf(m, " .%-40s: %d (%s)\n",
  444. "sysctl_sched_tunable_scaling",
  445. sysctl_sched_tunable_scaling,
  446. sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
  447. SEQ_printf(m, "\n");
  448. }
  449. static int sched_debug_show(struct seq_file *m, void *v)
  450. {
  451. int cpu = (unsigned long)(v - 2);
  452. unsigned long flags;
  453. if (cpu != -1) {
  454. /* sched: add lock */
  455. read_lock_irqsave(&tasklist_lock, flags);
  456. print_cpu(m, cpu);
  457. read_unlock_irqrestore(&tasklist_lock, flags);
  458. SEQ_printf(m, "\n");
  459. } else
  460. sched_debug_header(m);
  461. return 0;
  462. }
  463. void sysrq_sched_debug_show(void)
  464. {
  465. int cpu;
  466. unsigned long flags;
  467. sched_debug_header(NULL);
  468. /* sched: add lock */
  469. read_lock_irqsave(&tasklist_lock, flags);
  470. /* for_each_online_cpu(cpu) */
  471. for_each_possible_cpu(cpu)
  472. print_cpu(NULL, cpu);
  473. read_unlock_irqrestore(&tasklist_lock, flags);
  474. }
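/*
 * Called with m == NULL, so everything above goes to the console/kdb via
 * SEQ_printf(). Iterating possible rather than online CPUs keeps the dump
 * useful when CPUs have been hotplugged off.
 */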
  475. /*
  476. * This iterator needs some explanation.
  477. * It returns 1 for the header position.
  478. * This means 2 is cpu 0.
  479. * In a hotplugged system some cpus, including cpu 0, may be missing so we have
  480. * to use cpumask_* to iterate over the cpus.
  481. */
  482. static void *sched_debug_start(struct seq_file *file, loff_t *offset)
  483. {
  484. unsigned long n = *offset;
  485. if (n == 0)
  486. return (void *) 1;
  487. n--;
  488. if (n > 0)
  489. n = cpumask_next(n - 1, cpu_online_mask);
  490. else
  491. n = cpumask_first(cpu_online_mask);
  492. *offset = n + 1;
  493. if (n < nr_cpu_ids)
  494. return (void *)(unsigned long)(n + 2);
  495. return NULL;
  496. }
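/*
 * Worked example of the offset mapping: *offset == 0 returns (void *)1,
 * which sched_debug_show() treats as cpu == -1 and prints the header;
 * *offset == 1 returns the first online CPU as (cpu + 2), *offset == 2
 * the next one, and so on, so "v - 2" in sched_debug_show() recovers the
 * CPU number.
 */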
  497. static void *sched_debug_next(struct seq_file *file, void *data, loff_t *offset)
  498. {
  499. (*offset)++;
  500. return sched_debug_start(file, offset);
  501. }
  502. static void sched_debug_stop(struct seq_file *file, void *data)
  503. {
  504. }
  505. static const struct seq_operations sched_debug_sops = {
  506. .start = sched_debug_start,
  507. .next = sched_debug_next,
  508. .stop = sched_debug_stop,
  509. .show = sched_debug_show,
  510. };
  511. static int sched_debug_release(struct inode *inode, struct file *file)
  512. {
  513. seq_release(inode, file);
  514. return 0;
  515. }
  516. static int sched_debug_open(struct inode *inode, struct file *filp)
  517. {
  518. int ret = 0;
  519. ret = seq_open(filp, &sched_debug_sops);
  520. return ret;
  521. }
  522. static const struct file_operations sched_debug_fops = {
  523. .open = sched_debug_open,
  524. .read = seq_read,
  525. .llseek = seq_lseek,
  526. .release = sched_debug_release,
  527. };
  528. static int __init init_sched_debug_procfs(void)
  529. {
  530. struct proc_dir_entry *pe;
  531. pe = proc_create("sched_debug", 0444, NULL, &sched_debug_fops);
  532. if (!pe)
  533. return -ENOMEM;
  534. return 0;
  535. }
  536. __initcall(init_sched_debug_procfs);
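/*
 * The debug state is exposed read-only as /proc/sched_debug; a plain
 * "cat /proc/sched_debug" walks the header followed by one block per
 * online CPU through the seq_file iterator above.
 */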
  537. #define __P(F) \
  538. SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
  539. #define P(F) \
  540. SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
  541. #define __PN(F) \
  542. SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
  543. #define PN(F) \
  544. SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
  545. static void sched_show_numa(struct task_struct *p, struct seq_file *m)
  546. {
  547. #ifdef CONFIG_NUMA_BALANCING
  548. struct mempolicy *pol;
  549. int node, i;
  550. if (p->mm)
  551. P(mm->numa_scan_seq);
  552. task_lock(p);
  553. pol = p->mempolicy;
  554. if (pol && !(pol->flags & MPOL_F_MORON))
  555. pol = NULL;
  556. mpol_get(pol);
  557. task_unlock(p);
  558. SEQ_printf(m, "numa_migrations, %ld\n", xchg(&p->numa_pages_migrated, 0));
  559. for_each_online_node(node) {
  560. for (i = 0; i < 2; i++) {
  561. unsigned long nr_faults = -1;
  562. int cpu_current, home_node;
  563. if (p->numa_faults_memory)
  564. nr_faults = p->numa_faults_memory[2*node + i];
  565. cpu_current = !i ? (task_node(p) == node) :
  566. (pol && node_isset(node, pol->v.nodes));
  567. home_node = (p->numa_preferred_nid == node);
  568. SEQ_printf(m, "numa_faults_memory, %d, %d, %d, %d, %ld\n",
  569. i, node, cpu_current, home_node, nr_faults);
  570. }
  571. }
  572. mpol_put(pol);
  573. #endif
  574. }
  575. void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
  576. {
  577. unsigned long nr_switches;
  578. SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, task_pid_nr(p),
  579. get_nr_threads(p));
  580. SEQ_printf(m,
  581. "---------------------------------------------------------"
  582. "----------\n");
  583. #define __P(F) \
  584. SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)F)
  585. #define P(F) \
  586. SEQ_printf(m, "%-45s:%21Ld\n", #F, (long long)p->F)
  587. #define __PN(F) \
  588. SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)F))
  589. #define PN(F) \
  590. SEQ_printf(m, "%-45s:%14Ld.%06ld\n", #F, SPLIT_NS((long long)p->F))
  591. PN(se.exec_start);
  592. PN(se.vruntime);
  593. PN(se.sum_exec_runtime);
  594. nr_switches = p->nvcsw + p->nivcsw;
  595. #ifdef CONFIG_SCHEDSTATS
  596. PN(se.statistics.wait_start);
  597. PN(se.statistics.sleep_start);
  598. PN(se.statistics.block_start);
  599. PN(se.statistics.sleep_max);
  600. PN(se.statistics.block_max);
  601. PN(se.statistics.exec_max);
  602. PN(se.statistics.slice_max);
  603. PN(se.statistics.wait_max);
  604. PN(se.statistics.wait_sum);
  605. P(se.statistics.wait_count);
  606. PN(se.statistics.iowait_sum);
  607. P(se.statistics.iowait_count);
  608. P(se.nr_migrations);
  609. P(se.statistics.nr_migrations_cold);
  610. P(se.statistics.nr_failed_migrations_affine);
  611. P(se.statistics.nr_failed_migrations_running);
  612. P(se.statistics.nr_failed_migrations_hot);
  613. P(se.statistics.nr_forced_migrations);
  614. P(se.statistics.nr_wakeups);
  615. P(se.statistics.nr_wakeups_sync);
  616. P(se.statistics.nr_wakeups_migrate);
  617. P(se.statistics.nr_wakeups_local);
  618. P(se.statistics.nr_wakeups_remote);
  619. P(se.statistics.nr_wakeups_affine);
  620. P(se.statistics.nr_wakeups_affine_attempts);
  621. P(se.statistics.nr_wakeups_passive);
  622. P(se.statistics.nr_wakeups_idle);
  623. {
  624. u64 avg_atom, avg_per_cpu;
  625. avg_atom = p->se.sum_exec_runtime;
  626. if (nr_switches)
  627. avg_atom = div64_ul(avg_atom, nr_switches);
  628. else
  629. avg_atom = -1LL;
  630. avg_per_cpu = p->se.sum_exec_runtime;
  631. if (p->se.nr_migrations) {
  632. avg_per_cpu = div64_u64(avg_per_cpu,
  633. p->se.nr_migrations);
  634. } else {
  635. avg_per_cpu = -1LL;
  636. }
  637. __PN(avg_atom);
  638. __PN(avg_per_cpu);
  639. }
  640. #endif
  641. __P(nr_switches);
  642. SEQ_printf(m, "%-45s:%21Ld\n",
  643. "nr_voluntary_switches", (long long)p->nvcsw);
  644. SEQ_printf(m, "%-45s:%21Ld\n",
  645. "nr_involuntary_switches", (long long)p->nivcsw);
  646. P(se.load.weight);
  647. #ifdef CONFIG_SMP
  648. P(se.avg.runnable_avg_sum);
  649. P(se.avg.running_avg_sum);
  650. P(se.avg.avg_period);
  651. P(se.avg.load_avg_contrib);
  652. P(se.avg.utilization_avg_contrib);
  653. P(se.avg.decay_count);
  654. #endif
  655. P(policy);
  656. P(prio);
  657. #undef PN
  658. #undef __PN
  659. #undef P
  660. #undef __P
  661. {
  662. unsigned int this_cpu = raw_smp_processor_id();
  663. u64 t0, t1;
  664. t0 = cpu_clock(this_cpu);
  665. t1 = cpu_clock(this_cpu);
  666. SEQ_printf(m, "%-45s:%21Ld\n",
  667. "clock-delta", (long long)(t1-t0));
  668. }
  669. sched_show_numa(p, m);
  670. }
  671. void proc_sched_set_task(struct task_struct *p)
  672. {
  673. #ifdef CONFIG_SCHEDSTATS
  674. memset(&p->se.statistics, 0, sizeof(p->se.statistics));
  675. #endif
  676. }
  677. /* sched: add ke log */
  678. #define read_trylock_irqsave(lock, flags) \
  679. ({ \
  680. typecheck(unsigned long, flags); \
  681. local_irq_save(flags); \
  682. read_trylock(lock) ? \
  683. 1 : ({ local_irq_restore(flags); 0; }); \
  684. })
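/*
 * Statement-expression variant of read_lock_irqsave() that never blocks:
 * it disables local IRQs, attempts the read lock once, and on failure
 * restores the saved IRQ state and evaluates to 0 (1 on success).
 */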
  685. int read_trylock_n_irqsave(rwlock_t *lock, unsigned long *flags, struct seq_file *m, char *msg)
  686. {
  687. int locked, trylock_cnt = 0;
  688. do {
  689. locked = read_trylock_irqsave(lock, *flags);
  690. trylock_cnt++;
  691. mdelay(10);
  692. } while ((!locked) && (trylock_cnt < TRYLOCK_NUM));
  693. if (!locked) {
  694. #ifdef CONFIG_DEBUG_SPINLOCK
  695. struct task_struct *owner = NULL;
  696. #endif
  697. SEQ_printf(m, "Warning: fail to get lock in %s\n", msg);
  698. #ifdef CONFIG_DEBUG_SPINLOCK
  699. if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
  700. owner = lock->owner;
  701. #ifdef CONFIG_SMP
  702. SEQ_printf(m, " lock: %p, .magic: %08x, .owner: %s/%d",
  703. lock, lock->magic,
  704. owner ? owner->comm : "<<none>>",
  705. owner ? task_pid_nr(owner) : -1);
  706. SEQ_printf(m, ".owner_cpu: %d, value: %d\n",
  707. lock->owner_cpu, lock->raw_lock.lock);
  708. #else
  709. SEQ_printf(m, " lock: %p, .magic: %08x, .owner: %s/%d",
  710. lock, lock->magic,
  711. owner ? owner->comm : "<<none>>",
  712. owner ? task_pid_nr(owner) : -1);
  713. SEQ_printf(m, ".owner_cpu: %d\n", lock->owner_cpu);
  714. #endif
  715. #endif
  716. }
  717. return locked;
  718. }
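/*
 * The *_trylock_n_irqsave() helpers retry up to TRYLOCK_NUM times with a
 * 10 ms delay after each attempt (roughly 100 ms worst case) and dump the
 * lock owner when CONFIG_DEBUG_SPINLOCK is set. Callers must check the
 * return value and only release the lock when it reports success, as the
 * AEE print paths below do. The raw_spinlock and spinlock variants that
 * follow use the same pattern.
 */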
  719. int raw_spin_trylock_n_irqsave(raw_spinlock_t *lock, unsigned long *flags, struct seq_file *m, char *msg)
  720. {
  721. int locked, trylock_cnt = 0;
  722. do {
  723. locked = raw_spin_trylock_irqsave(lock, *flags);
  724. trylock_cnt++;
  725. mdelay(10);
  726. } while ((!locked) && (trylock_cnt < TRYLOCK_NUM));
  727. if (!locked) {
  728. #ifdef CONFIG_DEBUG_SPINLOCK
  729. struct task_struct *owner = NULL;
  730. #endif
  731. SEQ_printf(m, "Warning: fail to get lock in %s\n", msg);
  732. #ifdef CONFIG_DEBUG_SPINLOCK
  733. if (lock->owner && lock->owner != SPINLOCK_OWNER_INIT)
  734. owner = lock->owner;
  735. #ifdef CONFIG_ARM64
  736. #ifdef CONFIG_SMP
  737. SEQ_printf(m, " lock: %lx, .magic: %08x, .owner: %s/%d",
  738. (long)lock, lock->magic,
  739. owner ? owner->comm : "<<none>>",
  740. owner ? task_pid_nr(owner) : -1);
  741. SEQ_printf(m, ".owner_cpu: %d, owner: %hu, next: %hu\n",
  742. lock->owner_cpu,
  743. lock->raw_lock.owner, lock->raw_lock.next);
  744. #else
  745. SEQ_printf(m, " lock: %lx, .magic: %08x, .owner: %s/%d",
  746. (long)lock, lock->magic,
  747. owner ? owner->comm : "<<none>>",
  748. owner ? task_pid_nr(owner) : -1);
  749. SEQ_printf(m, ".owner_cpu: %d, value: %d\n",
  750. lock->owner_cpu, lock->raw_lock.slock);
  751. #endif
  752. #else
  753. SEQ_printf(m, " lock: %x, .magic: %08x, .owner: %s/%d",
  754. (int)lock, lock->magic,
  755. owner ? owner->comm : "<<none>>",
  756. owner ? task_pid_nr(owner) : -1);
  757. SEQ_printf(m, ".owner_cpu: %d, value: %d\n",
  758. lock->owner_cpu, lock->raw_lock.slock);
  759. #endif
  760. #endif
  761. }
  762. return locked;
  763. }
  764. int spin_trylock_n_irqsave(spinlock_t *lock, unsigned long *flags, struct seq_file *m, char *msg)
  765. {
  766. int locked, trylock_cnt = 0;
  767. do {
  768. locked = spin_trylock_irqsave(lock, *flags);
  769. trylock_cnt++;
  770. mdelay(10);
  771. } while ((!locked) && (trylock_cnt < TRYLOCK_NUM));
  772. if (!locked) {
  773. #ifdef CONFIG_DEBUG_SPINLOCK
  774. raw_spinlock_t rlock = lock->rlock;
  775. struct task_struct *owner = NULL;
  776. #endif
  777. SEQ_printf(m, "Warning: fail to get lock in %s\n", msg);
  778. #ifdef CONFIG_DEBUG_SPINLOCK
  779. if (rlock.owner && rlock.owner != SPINLOCK_OWNER_INIT)
  780. owner = rlock.owner;
  781. #ifdef CONFIG_ARM64
  782. #ifdef CONFIG_SMP
  783. SEQ_printf(m, " lock: %lx, .magic: %08x, .owner: %s/%d",
  784. (long)&rlock, rlock.magic,
  785. owner ? owner->comm : "<<none>>",
  786. owner ? task_pid_nr(owner) : -1);
  787. SEQ_printf(m, ".owner_cpu: %d, owner: %hu, next: %hu\n",
  788. rlock.owner_cpu,
  789. rlock.raw_lock.owner, rlock.raw_lock.next);
  790. #else
  791. SEQ_printf(m, " lock: %lx, .magic: %08x, .owner: %s/%d",
  792. (long)&rlock, rlock.magic,
  793. owner ? owner->comm : "<<none>>",
  794. owner ? task_pid_nr(owner) : -1);
  795. SEQ_printf(m, ".owner_cpu: %d, value: %d\n",
  796. rlock.owner_cpu, rlock.raw_lock.slock);
  797. #endif
  798. #else
  799. SEQ_printf(m, " lock: %x, .magic: %08x, .owner: %s/%d",
  800. (int)&rlock, rlock.magic,
  801. owner ? owner->comm : "<<none>>",
  802. owner ? task_pid_nr(owner) : -1);
  803. SEQ_printf(m, ".owner_cpu: %d, value: %d\n",
  804. rlock.owner_cpu, rlock.raw_lock.slock);
  805. #endif
  806. #endif
  807. }
  808. return locked;
  809. }
  810. static void print_rq_at_AEE(struct seq_file *m, struct rq *rq, int rq_cpu)
  811. {
  812. struct task_struct *g, *p;
  813. SEQ_printf(m, "runnable tasks:\n");
  814. SEQ_printf(m,
  815. " task PID tree-key switches prio exec-runtime sum-exec sum-sleep\n");
  816. SEQ_printf(m, "------------------------------------------------------------------------------\n");
  817. rcu_read_lock();
  818. for_each_process_thread(g, p) {
  819. /*
  820. * sched: only output the runnable tasks rather than ALL tasks in the
  821. * runqueues, so the old "if (task_cpu(p) != rq_cpu)" check alone is no longer enough.
  822. */
  823. if (!p->on_rq || task_cpu(p) != rq_cpu)
  824. continue;
  825. print_task_at_AEE(m, rq, p);
  826. }
  827. rcu_read_unlock();
  828. }
  829. #ifdef CONFIG_FAIR_GROUP_SCHED
  830. static void print_cfs_group_stats_at_AEE(struct seq_file *m, int cpu, struct task_group *tg)
  831. {
  832. struct sched_entity *se = tg->se[cpu];
  833. #define P(F) \
  834. SEQ_printf(m, " .%-30s: %lld\n", #F, (long long)F)
  835. #define PN(F) \
  836. SEQ_printf(m, " .%-30s: %lld.%06ld\n", #F, SPLIT_NS((long long)F))
  837. if (!se) {
  838. struct sched_avg *avg = &cpu_rq(cpu)->avg;
  839. P(avg->runnable_avg_sum);
  840. P(avg->avg_period);
  841. #ifdef MTK_SCHED_CMP_PRINT
  842. P(avg->last_runnable_update);
  843. #endif
  844. return;
  845. }
  846. PN(se->exec_start);
  847. PN(se->vruntime);
  848. PN(se->sum_exec_runtime);
  849. #ifdef CONFIG_SCHEDSTATS
  850. PN(se->statistics.wait_start);
  851. PN(se->statistics.sleep_start);
  852. PN(se->statistics.block_start);
  853. PN(se->statistics.sleep_max);
  854. PN(se->statistics.block_max);
  855. PN(se->statistics.exec_max);
  856. PN(se->statistics.slice_max);
  857. PN(se->statistics.wait_max);
  858. PN(se->statistics.wait_sum);
  859. P(se->statistics.wait_count);
  860. #endif
  861. P(se->load.weight);
  862. #ifdef CONFIG_SMP
  863. P(se->avg.runnable_avg_sum);
  864. P(se->avg.avg_period);
  865. P(se->avg.load_avg_contrib);
  866. P(se->avg.decay_count);
  867. # ifdef MTK_SCHED_CMP_PRINT
  868. P(se->last_runnable_update);
  869. # endif
  870. #endif
  871. #undef PN
  872. #undef P
  873. }
  874. #endif
  875. void print_cfs_rq_at_AEE(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
  876. {
  877. s64 MIN_vruntime = -1, min_vruntime, max_vruntime = -1,
  878. spread, rq0_min_vruntime, spread0;
  879. struct rq *rq = cpu_rq(cpu);
  880. struct sched_entity *last;
  881. unsigned long flags;
  882. int locked;
  883. #ifdef CONFIG_FAIR_GROUP_SCHED
  884. SEQ_printf(m, "cfs_rq[%d]:%s\n", cpu, task_group_path(cfs_rq->tg));
  885. #else
  886. SEQ_printf(m, "cfs_rq[%d]:\n", cpu);
  887. #endif
  888. SEQ_printf(m, " .%-22s: %lld.%06ld\n", "exec_clock",
  889. SPLIT_NS(cfs_rq->exec_clock));
  890. /*raw_spin_lock_irqsave(&rq->lock, flags);*/
  891. locked = raw_spin_trylock_n_irqsave(&rq->lock, &flags, m, "print_cfs_rq_at_AEE");
  892. if (cfs_rq->rb_leftmost)
  893. MIN_vruntime = (__pick_first_entity(cfs_rq))->vruntime;
  894. last = __pick_last_entity(cfs_rq);
  895. if (last)
  896. max_vruntime = last->vruntime;
  897. min_vruntime = cfs_rq->min_vruntime;
  898. rq0_min_vruntime = cpu_rq(0)->cfs.min_vruntime;
  899. if (locked)
  900. raw_spin_unlock_irqrestore(&rq->lock, flags);
  901. SEQ_printf(m, " .%-22s: %lld.%06ld\n", "MIN_vruntime",
  902. SPLIT_NS(MIN_vruntime));
  903. SEQ_printf(m, " .%-22s: %lld.%06ld\n", "min_vruntime",
  904. SPLIT_NS(min_vruntime));
  905. SEQ_printf(m, " .%-22s: %lld.%06ld\n", "max_vruntime",
  906. SPLIT_NS(max_vruntime));
  907. spread = max_vruntime - MIN_vruntime;
  908. /*
  909. SEQ_printf(m, " .%-22s: %lld.%06ld\n", "spread",
  910. SPLIT_NS(spread));
  911. */
  912. spread0 = min_vruntime - rq0_min_vruntime;
  913. /*
  914. SEQ_printf(m, " .%-22s: %lld.%06ld\n", "spread0",
  915. SPLIT_NS(spread0));
  916. SEQ_printf(m, " .%-22s: %d\n", "nr_spread_over",
  917. cfs_rq->nr_spread_over);
  918. */
  919. SEQ_printf(m, " .%-22s: %d\n", "nr_running", cfs_rq->nr_running);
  920. SEQ_printf(m, " .%-22s: %ld\n", "load", cfs_rq->load.weight);
  921. #ifdef CONFIG_SMP
  922. SEQ_printf(m, " .%-22s: %ld\n", "runnable_load_avg",
  923. cfs_rq->runnable_load_avg);
  924. SEQ_printf(m, " .%-22s: %ld\n", "blocked_load_avg",
  925. cfs_rq->blocked_load_avg);
  926. #ifdef CONFIG_FAIR_GROUP_SCHED
  927. SEQ_printf(m, " .%-22s: %ld\n", "tg_load_contrib",
  928. cfs_rq->tg_load_contrib);
  929. SEQ_printf(m, " .%-22s: %d\n", "tg_runnable_contrib",
  930. cfs_rq->tg_runnable_contrib);
  931. SEQ_printf(m, " .%-22s: %ld\n", "tg_load_avg",
  932. atomic_long_read(&cfs_rq->tg->load_avg));
  933. SEQ_printf(m, " .%-22s: %d\n", "tg->runnable_avg",
  934. atomic_read(&cfs_rq->tg->runnable_avg));
  935. #endif
  936. #endif
  937. #ifdef CONFIG_CFS_BANDWIDTH
  938. SEQ_printf(m, " .%-30s: %d\n", "tg->cfs_bandwidth.timer_active",
  939. cfs_rq->tg->cfs_bandwidth.timer_active);
  940. SEQ_printf(m, " .%-30s: %d\n", "throttled",
  941. cfs_rq->throttled);
  942. SEQ_printf(m, " .%-30s: %d\n", "throttle_count",
  943. cfs_rq->throttle_count);
  944. #endif
  945. #ifdef CONFIG_FAIR_GROUP_SCHED
  946. print_cfs_group_stats_at_AEE(m, cpu, cfs_rq->tg);
  947. #endif
  948. }
  949. #define for_each_leaf_cfs_rq(rq, cfs_rq) \
  950. list_for_each_entry_rcu(cfs_rq, &rq->leaf_cfs_rq_list, leaf_cfs_rq_list)
  951. void print_cfs_stats_at_AEE(struct seq_file *m, int cpu)
  952. {
  953. struct cfs_rq *cfs_rq;
  954. rcu_read_lock();
  955. cfs_rq = &cpu_rq(cpu)->cfs;
  956. /* sched: only output the root ("/") cgroup scheduling info */
  957. print_cfs_rq_at_AEE(m, cpu, cfs_rq);
  958. rcu_read_unlock();
  959. }
  960. void print_rt_rq_at_AEE(struct seq_file *m, int cpu, struct rt_rq *rt_rq)
  961. {
  962. #ifdef CONFIG_RT_GROUP_SCHED
  963. int cpu_rq_throttle = rq_cpu(rt_rq->rq);
  964. SEQ_printf(m, "rt_rq[%d]:%s\n", cpu, task_group_path(rt_rq->tg));
  965. #else
  966. SEQ_printf(m, "rt_rq[%d]:\n", cpu);
  967. #endif
  968. #define P(x) \
  969. SEQ_printf(m, " .%-22s: %lld\n", #x, (long long)(rt_rq->x))
  970. #define PN(x) \
  971. SEQ_printf(m, " .%-22s: %lld.%06ld\n", #x, SPLIT_NS(rt_rq->x))
  972. P(rt_nr_running);
  973. P(rt_throttled);
  974. SEQ_printf(m, " exec_task[%d:%s], prio=%d\n",
  975. per_cpu(exec_task, cpu).pid,
  976. per_cpu(exec_task, cpu).comm,
  977. per_cpu(exec_task, cpu).prio);
  978. #ifdef CONFIG_RT_GROUP_SCHED
  979. SEQ_printf(m, " .rt_throttling_start : [%llu]\n", per_cpu(rt_throttling_start, cpu_rq_throttle));
  980. #endif
  981. PN(rt_time);
  982. PN(rt_runtime);
  983. #undef PN
  984. #undef P
  985. }
  986. #ifdef CONFIG_RT_GROUP_SCHED
  987. typedef struct task_group *rt_rq_iter_t;
  988. static inline struct task_group *next_task_group(struct task_group *tg)
  989. {
  990. do {
  991. tg = list_entry_rcu(tg->list.next,
  992. typeof(struct task_group), list);
  993. } while (&tg->list != &task_groups && task_group_is_autogroup(tg));
  994. if (&tg->list == &task_groups)
  995. tg = NULL;
  996. return tg;
  997. }
  998. #define for_each_rt_rq(rt_rq, iter, rq) \
  999. for (iter = container_of(&task_groups, typeof(*iter), list); \
  1000. (iter = next_task_group(iter)) && \
  1001. (rt_rq = iter->rt_rq[cpu_of(rq)]);)
  1002. #else /* !CONFIG_RT_GROUP_SCHED */
  1003. typedef struct rt_rq *rt_rq_iter_t;
  1004. #define for_each_rt_rq(rt_rq, iter, rq) \
  1005. for ((void) iter, rt_rq = &rq->rt; rt_rq; rt_rq = NULL)
  1006. #endif
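/*
 * With CONFIG_RT_GROUP_SCHED, for_each_rt_rq() walks the global
 * task_groups list (skipping autogroups) and yields each group's rt_rq
 * for this CPU; without group scheduling it simply visits the root
 * rq->rt once.
 */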
  1007. void print_rt_stats_at_AEE(struct seq_file *m, int cpu)
  1008. {
  1009. struct rt_rq *rt_rq;
  1010. rt_rq = &cpu_rq(cpu)->rt;
  1011. rcu_read_lock();
  1012. /* sched: only output the root ("/") cgroup scheduling info */
  1013. print_rt_rq_at_AEE(m, cpu, rt_rq);
  1014. rcu_read_unlock();
  1015. }
  1016. static void print_cpu_at_AEE(struct seq_file *m, int cpu)
  1017. {
  1018. struct rq *rq = cpu_rq(cpu);
  1019. unsigned long flags;
  1020. int locked;
  1021. #ifdef CONFIG_X86
  1022. {
  1023. unsigned int freq = cpu_khz ? : 1;
  1024. SEQ_printf(m, "cpu#%d, %u.%03u MHz\n",
  1025. cpu, freq / 1000, (freq % 1000));
  1026. }
  1027. #else
  1028. SEQ_printf(m, "cpu#%d: %s\n", cpu, cpu_is_offline(cpu)?"Offline":"Online");
  1029. #endif
  1030. #define P(x) \
  1031. do { \
  1032. if (sizeof(rq->x) == 4) \
  1033. SEQ_printf(m, " .%-22s: %ld\n", #x, (long)(rq->x)); \
  1034. else \
  1035. SEQ_printf(m, " .%-22s: %lld\n", #x, (long long)(rq->x));\
  1036. } while (0)
  1037. #define PN(x) \
  1038. SEQ_printf(m, " .%-22s: %lld.%06ld\n", #x, SPLIT_NS(rq->x))
  1039. P(nr_running);
  1040. SEQ_printf(m, " .%-22s: %lu\n", "load",
  1041. rq->load.weight);
  1042. /*P(nr_switches);*/
  1043. P(nr_load_updates);
  1044. P(nr_uninterruptible);
  1045. PN(next_balance);
  1046. SEQ_printf(m, " .%-22s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr)));
  1047. PN(clock);
  1048. SEQ_printf(m, " .%-22s: %ld %ld %ld %ld %ld\n", "cpu_load",
  1049. (long)(rq->cpu_load[0]),
  1050. (long)(rq->cpu_load[1]),
  1051. (long)(rq->cpu_load[2]),
  1052. (long)(rq->cpu_load[3]),
  1053. (long)(rq->cpu_load[4]));
  1054. /*
  1055. P(cpu_load[0]);
  1056. P(cpu_load[1]);
  1057. P(cpu_load[2]);
  1058. P(cpu_load[3]);
  1059. P(cpu_load[4]);
  1060. */
  1061. #undef P
  1062. #undef PN
  1063. #ifdef CONFIG_SCHEDSTATS
  1064. #define P(n) SEQ_printf(m, " .%-22s: %d\n", #n, rq->n)
  1065. #define P64(n) SEQ_printf(m, " .%-22s: %lld\n", #n, rq->n)
  1066. /*
  1067. P(yld_count);
  1068. P(sched_count);
  1069. P(sched_goidle);
  1070. */
  1071. #ifdef CONFIG_SMP
  1072. P64(avg_idle);
  1073. P64(max_idle_balance_cost);
  1074. #endif
  1075. /*P(ttwu_count);
  1076. P(ttwu_local);*/
  1077. #undef P
  1078. #undef P64
  1079. #endif
  1080. /* spin_lock_irqsave(&sched_debug_lock, flags); */
  1081. locked = spin_trylock_n_irqsave(&sched_debug_lock, &flags, m, "print_cpu_at_AEE");
  1082. print_cfs_stats_at_AEE(m, cpu);
  1083. print_rt_stats_at_AEE(m, cpu);
  1084. rcu_read_lock();
  1085. print_rq_at_AEE(m, rq, cpu);
  1086. SEQ_printf(m, "============================================\n");
  1087. rcu_read_unlock();
  1088. /*spin_unlock_irqrestore(&sched_debug_lock, flags);*/
  1089. if (locked)
  1090. spin_unlock_irqrestore(&sched_debug_lock, flags);
  1091. }
  1092. static void sched_debug_header_at_AEE(struct seq_file *m)
  1093. {
  1094. u64 sched_clk, cpu_clk;
  1095. unsigned long flags;
  1096. local_irq_save(flags);
  1097. /*ktime = ktime_to_ns(ktime_get());*/
  1098. sched_clk = sched_clock();
  1099. cpu_clk = local_clock();
  1100. local_irq_restore(flags);
  1101. SEQ_printf(m, "Sched Debug Version: v0.11, %s %.*s\n",
  1102. init_utsname()->release,
  1103. (int)strcspn(init_utsname()->version, " "),
  1104. init_utsname()->version);
  1105. #define P(x) \
  1106. SEQ_printf(m, "%-22s: %lld\n", #x, (long long)(x))
  1107. #define PN(x) \
  1108. SEQ_printf(m, "%-22s: %lld.%06ld\n", #x, SPLIT_NS(x))
  1109. /*PN(ktime);*/
  1110. PN(sched_clk);
  1111. PN(cpu_clk);
  1112. P(jiffies);
  1113. #ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
  1114. P(sched_clock_stable());
  1115. #endif
  1116. #undef PN
  1117. #undef P
  1118. /*SEQ_printf(m, "\n");*/
  1119. SEQ_printf(m, "sysctl_sched\n");
  1120. #define P(x) \
  1121. SEQ_printf(m, " .%-35s: %lld\n", #x, (long long)(x))
  1122. #define PN(x) \
  1123. SEQ_printf(m, " .%-35s: %lld.%06ld\n", #x, SPLIT_NS(x))
  1124. PN(sysctl_sched_latency);
  1125. PN(sysctl_sched_min_granularity);
  1126. PN(sysctl_sched_wakeup_granularity);
  1127. P(sysctl_sched_child_runs_first);
  1128. P(sysctl_sched_features);
  1129. #undef PN
  1130. #undef P
  1131. SEQ_printf(m, " .%-35s: %d (%s)\n",
  1132. "sysctl_sched_tunable_scaling",
  1133. sysctl_sched_tunable_scaling,
  1134. sched_tunable_scaling_names[sysctl_sched_tunable_scaling]);
  1135. SEQ_printf(m, "\n");
  1136. }
  1137. void sysrq_sched_debug_show_at_AEE(void)
  1138. {
  1139. int cpu;
  1140. unsigned long flags;
  1141. int locked;
  1142. sched_debug_header_at_AEE(NULL);
  1143. /* read_lock_irqsave(&tasklist_lock, flags); */
  1144. locked = read_trylock_n_irqsave(&tasklist_lock, &flags, NULL, "sched_debug_show_at_AEE");
  1145. /* for_each_online_cpu(cpu) */
  1146. for_each_possible_cpu(cpu) {
  1147. print_cpu_at_AEE(NULL, cpu);
  1148. }
  1149. if (locked)
  1150. read_unlock_irqrestore(&tasklist_lock, flags);
  1151. #ifdef CONFIG_MTPROF
  1152. /* sched:rt throttle monitor */
  1153. mt_rt_mon_print_task_from_buffer();
  1154. #endif
  1155. }
  1156. /* sched: add ke log */