/*
 * prof_ctl.c - MTK CPU-time profiling control (/proc interface backend)
 */
  1. #include <linux/slab.h>
  2. #include <linux/proc_fs.h>
  3. #include <linux/sched.h>
  4. #include <linux/seq_file.h>
  5. #include <linux/kallsyms.h>
  6. #include <linux/utsname.h>
  7. #include <linux/jiffies.h>
  8. #include <linux/kernel_stat.h>
  9. #include <linux/uaccess.h>
  10. #include <linux/tick.h>
  11. #include "internal.h"
  12. #include "mt_cputime.h"
  13. #ifdef CONFIG_MT_ENG_BUILD
  14. /* max debug thread count,
  15. * if reach the level, stop store new thread informaiton. */
  16. #define MAX_THREAD_COUNT (50000)
  17. /* max debug time,
  18. * if reach the level, stop and clear the debug information */
  19. #define MAX_TIME (5*60*60)
  20. #else
  21. #define MAX_THREAD_COUNT (10000)
  22. #define MAX_TIME (1*60*60)
  23. #endif
  24. struct mt_proc_struct *mt_proc_curr = NULL;
  25. struct mt_proc_struct *mt_proc_head = NULL;
  26. static int proc_count;
  27. static int mtsched_enabled;
  28. unsigned long long prof_start_ts, prof_end_ts, prof_dur_ts;
  29. static DEFINE_MUTEX(mt_cputime_lock);
  30. static DEFINE_MUTEX(mt_memprof_lock);
  31. struct mt_cpu_info *mt_cpu_info_head = NULL;
  32. int mt_cpu_num = 1;
  33. bool mtsched_is_enabled(void)
  34. {
  35. return mtsched_enabled != 0;
  36. }
/* Number of per-task profiling records allocated since the last reset. */
int mtproc_counts(void)
{
	return proc_count;
}
  41. static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
  42. {
  43. u64 idle_time;
  44. u64 cur_wall_time;
  45. u64 busy_time;
  46. cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
  47. busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
  48. busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
  49. busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
  50. busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
  51. busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
  52. busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];
  53. idle_time = cur_wall_time - busy_time;
  54. if (wall)
  55. *wall = jiffies_to_usecs(cur_wall_time);
  56. return jiffies_to_usecs(idle_time);
  57. }
  58. unsigned long long mtprof_get_cpu_idle(int cpu)
  59. {
  60. u64 unused = 0, idle_time = 0;
  61. idle_time = get_cpu_idle_time_us(cpu, NULL);
  62. if (idle_time == -1ULL)
  63. return get_cpu_idle_time_jiffy(cpu, &unused);
  64. idle_time += get_cpu_iowait_time_us(cpu, &unused);
  65. return idle_time;
  66. }
  67. unsigned long long mtprof_get_cpu_iowait(int cpu)
  68. {
  69. unsigned long long *unused = 0;
  70. return get_cpu_iowait_time_us(cpu, unused);
  71. }
/* Thin wrapper: adjusted user/system cputime of @p via task_cputime_adjusted(). */
void mt_task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
{
	task_cputime_adjusted(p, ut, st);
}
  76. /********************
  77. MT cputime prof
  78. *********************/
  79. #ifdef CONFIG_MTPROF_CPUTIME
  80. void setup_mtproc_info(struct task_struct *p, unsigned long long ts)
  81. {
  82. struct mt_proc_struct *mtproc;
  83. if (0 == mtsched_enabled)
  84. return;
  85. if (proc_count >= MAX_THREAD_COUNT)
  86. return;
  87. mtproc = kmalloc(sizeof(struct mt_proc_struct), GFP_ATOMIC);
  88. if (!mtproc)
  89. return;
  90. memset(mtproc, 0, sizeof(struct mt_proc_struct));
  91. proc_count++;
  92. mtproc->pid = p->pid;
  93. mtproc->tgid = p->tgid;
  94. mtproc->index = proc_count;
  95. mtproc->cputime = p->se.sum_exec_runtime;
  96. mtproc->cputime_init = p->se.sum_exec_runtime;
  97. mtproc->prof_start = ts;
  98. mtproc->prof_end = 0;
  99. mtproc->isr_time = p->se.mtk_isr_time;
  100. mtproc->isr_time_init = p->se.mtk_isr_time;
  101. p->se.mtk_isr = NULL;
  102. p->se.mtk_isr_count = 0;
  103. mtproc->next = NULL;
  104. mt_task_times(p, &mtproc->utime_init, &mtproc->stime_init);
  105. strcpy(mtproc->comm, p->comm);
  106. if (mt_proc_head != NULL) {
  107. mt_proc_curr->next = mtproc;
  108. mt_proc_curr = mtproc;
  109. } else {
  110. mt_proc_head = mtproc;
  111. mt_proc_curr = mtproc;
  112. }
  113. }
  114. void save_mtproc_info(struct task_struct *p, unsigned long long ts)
  115. {
  116. struct mt_proc_struct *mtproc;
  117. unsigned long long prof_now_ts;
  118. mutex_lock(&mt_cputime_lock);
  119. if (0 == mtsched_enabled) {
  120. mutex_unlock(&mt_cputime_lock);
  121. return;
  122. }
  123. if (proc_count >= MAX_THREAD_COUNT) {
  124. mutex_unlock(&mt_cputime_lock);
  125. return;
  126. }
  127. mutex_unlock(&mt_cputime_lock);
  128. prof_now_ts = sched_clock();
  129. prof_dur_ts = prof_now_ts - prof_start_ts;
  130. do_div(prof_dur_ts, 1000000); /* put prof_dur_ts to ms */
  131. if (prof_dur_ts >= MAX_TIME * 1000) {
  132. mtsched_enabled = 0;
  133. mt_cputime_switch(2);
  134. return;
  135. }
  136. mtproc = kmalloc(sizeof(struct mt_proc_struct), GFP_KERNEL);
  137. if (!mtproc)
  138. return;
  139. memset(mtproc, 0, sizeof(struct mt_proc_struct));
  140. mutex_lock(&mt_cputime_lock);
  141. proc_count++;
  142. mtproc->pid = p->pid;
  143. mtproc->tgid = p->tgid;
  144. mtproc->index = proc_count;
  145. mtproc->cputime = p->se.sum_exec_runtime;
  146. mtproc->cputime_init = p->se.sum_exec_runtime;
  147. mtproc->isr_time = p->se.mtk_isr_time;
  148. mtproc->isr_time_init = p->se.mtk_isr_time;
  149. p->se.mtk_isr = NULL;
  150. p->se.mtk_isr_count = 0;
  151. mtproc->prof_start = ts;
  152. mtproc->prof_end = 0;
  153. mtproc->next = NULL;
  154. mt_task_times(p, &mtproc->utime_init, &mtproc->stime_init);
  155. strcpy(mtproc->comm, p->comm);
  156. if (mt_proc_head != NULL) {
  157. mt_proc_curr->next = mtproc;
  158. mt_proc_curr = mtproc;
  159. } else {
  160. mt_proc_head = mtproc;
  161. mt_proc_curr = mtproc;
  162. }
  163. mutex_unlock(&mt_cputime_lock);
  164. }
  165. void end_mtproc_info(struct task_struct *p)
  166. {
  167. struct mt_proc_struct *mtproc = NULL;
  168. mutex_lock(&mt_cputime_lock);
  169. mtproc = mt_proc_head;
  170. /* check profiling enable flag */
  171. if (0 == mtsched_enabled) {
  172. mutex_unlock(&mt_cputime_lock);
  173. return;
  174. }
  175. /* may waste time... */
  176. while (mtproc != NULL) {
  177. if (p->pid != mtproc->pid)
  178. mtproc = mtproc->next;
  179. else
  180. break;
  181. }
  182. if (mtproc == NULL) {
  183. mutex_unlock(&mt_cputime_lock);
  184. return;
  185. }
  186. mtproc->prof_end = sched_clock();
  187. /* update cputime */
  188. mtproc->cputime = p->se.sum_exec_runtime;
  189. mtproc->isr_time = p->se.mtk_isr_time;
  190. mtproc->isr_count = p->se.mtk_isr_count;
  191. mtproc->mtk_isr = p->se.mtk_isr;
  192. strcpy(mtproc->comm, p->comm);
  193. p->se.mtk_isr = NULL;
  194. mt_task_times(p, &mtproc->utime, &mtproc->stime);
  195. mtproc->utime = mtproc->utime - mtproc->utime_init;
  196. mtproc->stime = mtproc->stime - mtproc->stime_init;
  197. mutex_unlock(&mt_cputime_lock);
  198. }
  199. void set_mtprof_comm(char *comm, int pid)
  200. {
  201. struct mt_proc_struct *mtproc = NULL;
  202. mutex_lock(&mt_cputime_lock);
  203. mtproc = mt_proc_head;
  204. if (0 == mtsched_enabled) {
  205. mutex_unlock(&mt_cputime_lock);
  206. return;
  207. }
  208. while (mtproc != NULL) {
  209. if (pid != mtproc->pid)
  210. mtproc = mtproc->next;
  211. else
  212. break;
  213. }
  214. if (mtproc == NULL) {
  215. mutex_unlock(&mt_cputime_lock);
  216. return;
  217. }
  218. memset(mtproc->comm, 0, TASK_COMM_LEN);
  219. wmb(); /* need memory barrier */
  220. strlcpy(mtproc->comm, comm, TASK_COMM_LEN);
  221. mutex_unlock(&mt_cputime_lock);
  222. }
/*
 * Begin a profiling session: enable the profiling flag, snapshot the
 * per-cpu idle/iowait baselines, then create a record for every live
 * thread at a common start timestamp.
 */
void start_record_task(void)
{
	unsigned long long ts;
	int i = 0;
	struct task_struct *g, *p;
	unsigned long flags;

	mtsched_enabled = 1;
	prof_start_ts = sched_clock();
	for (i = 0; i < mt_cpu_num; i++) {
		mt_cpu_info_head[i].cpu_idletime_start = mtprof_get_cpu_idle(i);
		mt_cpu_info_head[i].cpu_iowait_start = mtprof_get_cpu_iowait(i);
	}
	ts = sched_clock();
	/* walk all threads with interrupts off; setup_mtproc_info()
	 * therefore allocates with GFP_ATOMIC */
	read_lock_irqsave(&tasklist_lock, flags);
	do_each_thread(g, p) {
		setup_mtproc_info(p, ts);
	} while_each_thread(g, p);
	read_unlock_irqrestore(&tasklist_lock, flags);
}
  242. void stop_record_task(void)
  243. {
  244. struct mt_proc_struct *mtproc = mt_proc_head;
  245. struct task_struct *tsk;
  246. unsigned long long cost_isrtime = 0;
  247. unsigned long long cost_cputime = 0;
  248. int i = 0;
  249. mtsched_enabled = 0;
  250. prof_end_ts = sched_clock();
  251. prof_dur_ts = prof_end_ts - prof_start_ts;
  252. do_div(prof_dur_ts, 1000000); /* put prof_dur_ts to ms */
  253. for (i = 0; i < mt_cpu_num; i++) {
  254. mt_cpu_info_head[i].cpu_idletime_end = mtprof_get_cpu_idle(i);
  255. if (mt_cpu_info_head[i].cpu_idletime_end <
  256. mt_cpu_info_head[i].cpu_idletime_start) {
  257. mt_cpu_info_head[i].cpu_idletime_end =
  258. mt_cpu_info_head[i].cpu_idletime_start;
  259. }
  260. mt_cpu_info_head[i].cpu_iowait_end = mtprof_get_cpu_iowait(i);
  261. if (mt_cpu_info_head[i].cpu_iowait_end <
  262. mt_cpu_info_head[i].cpu_iowait_start)
  263. mt_cpu_info_head[i].cpu_iowait_end =
  264. mt_cpu_info_head[i].cpu_iowait_start;
  265. }
  266. while (mtproc != NULL) {
  267. tsk = find_task_by_vpid(mtproc->pid);
  268. if (tsk != NULL) {
  269. mtproc->cputime = tsk->se.sum_exec_runtime;
  270. mtproc->isr_time = tsk->se.mtk_isr_time;
  271. mtproc->isr_count = tsk->se.mtk_isr_count;
  272. strcpy(mtproc->comm, tsk->comm);
  273. mt_task_times(tsk, &mtproc->utime, &mtproc->stime);
  274. mtproc->utime = mtproc->utime - mtproc->utime_init;
  275. mtproc->stime = mtproc->stime - mtproc->stime_init;
  276. cost_isrtime = mtproc->isr_time - mtproc->isr_time_init;
  277. mtproc->mtk_isr = tsk->se.mtk_isr;
  278. tsk->se.mtk_isr_count = 0;
  279. tsk->se.mtk_isr = NULL;
  280. }
  281. if (mtproc->cputime >=
  282. (mtproc->cputime_init + cost_isrtime)) {
  283. cost_cputime =
  284. mtproc->cputime - cost_isrtime
  285. - mtproc->cputime_init;
  286. mtproc->cost_cputime = cost_cputime;
  287. do_div(cost_cputime, prof_dur_ts);
  288. mtproc->cputime_percen_6 = cost_cputime;
  289. } else {
  290. mtproc->cost_cputime = 0;
  291. mtproc->cputime_percen_6 = 0;
  292. }
  293. mtproc = mtproc->next;
  294. }
  295. }
  296. void reset_record_task(void)
  297. {
  298. struct mt_proc_struct *mtproc = mt_proc_head;
  299. struct mt_proc_struct *mtproc_next;
  300. struct mtk_isr_info *mtk_isr_current, *mtk_isr_next;
  301. struct task_struct *idle;
  302. int i = 0;
  303. while (mtproc != NULL) {
  304. mtk_isr_current = mtproc->mtk_isr;
  305. while (mtk_isr_current != NULL) {
  306. mtk_isr_next = mtk_isr_current->next;
  307. if (mtk_isr_current->isr_name != NULL)
  308. kfree(mtk_isr_current->isr_name);
  309. kfree(mtk_isr_current);
  310. mtk_isr_current = mtk_isr_next;
  311. }
  312. mtproc_next = mtproc->next;
  313. kfree(mtproc);
  314. mtproc = mtproc_next;
  315. }
  316. proc_count = 0;
  317. prof_end_ts = 0;
  318. for (i = 0; i < mt_cpu_num; i++) {
  319. mt_cpu_info_head[i].cpu_idletime_start = 0;
  320. mt_cpu_info_head[i].cpu_idletime_end = 0;
  321. mt_cpu_info_head[i].cpu_iowait_start = 0;
  322. mt_cpu_info_head[i].cpu_iowait_end = 0;
  323. idle = idle_task(i);
  324. mtk_isr_current = idle->se.mtk_isr;
  325. while (mtk_isr_current != NULL) {
  326. mtk_isr_next = mtk_isr_current->next;
  327. if (mtk_isr_current->isr_name != NULL)
  328. kfree(mtk_isr_current->isr_name);
  329. kfree(mtk_isr_current);
  330. mtk_isr_current = mtk_isr_next;
  331. }
  332. idle->se.mtk_isr_time = 0;
  333. idle->se.mtk_isr_count = 0;
  334. idle->se.mtk_isr = NULL;
  335. }
  336. mt_proc_head = NULL;
  337. mt_proc_curr = NULL;
  338. }
  339. void mt_cputime_switch(int on)
  340. {
  341. mutex_lock(&mt_cputime_lock);
  342. if (mtsched_enabled == 1) {
  343. if (on == 0)
  344. stop_record_task();
  345. } else {
  346. if (on == 1) {
  347. reset_record_task();
  348. start_record_task();
  349. } else if (on == 2)
  350. reset_record_task();
  351. }
  352. mutex_unlock(&mt_cputime_lock);
  353. }
  354. #else /* CONFIG_MTPROF_CPUTIME */
/* No-op stubs so callers link when CONFIG_MTPROF_CPUTIME is disabled. */
void setup_mtproc_info(struct task_struct *p, unsigned long long ts)
{
}
void set_mtprof_comm(char *comm, int pid)
{
}
void start_record_task(void)
{
}
void stop_record_task(void)
{
}
void reset_record_task(void)
{
}
  370. #endif /* end of CONFIG_MTPROF_CPUTIME */