rq_stats.c

/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/hrtimer.h>
#include <linux/cpu.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/cpufreq.h>
#include <linux/kernel_stat.h>
#include <linux/tick.h>
#include <linux/suspend.h>
#include <linux/version.h>
#include <asm/smp_plat.h>
#include <trace/events/sched.h>

#include "rq_stats.h"

#define MAX_LONG_SIZE 24
#define DEFAULT_RQ_POLL_JIFFIES 1
#define DEFAULT_DEF_TIMER_JIFFIES 5
#define CPU_FREQ_VARIANT 0

#ifdef CONFIG_SCHED_HMP_PRIO_FILTER
static unsigned int heavy_task_prio = NICE_TO_PRIO(CONFIG_SCHED_HMP_PRIO_FILTER_VAL);
#define task_low_priority(prio) (((prio) >= heavy_task_prio) ? 1 : 0)
#endif

#ifdef CONFIG_MTK_SCHED_RQAVG_US
struct rq_data rq_info;
spinlock_t rq_lock;
#endif

/* struct notifier_block freq_policy; */
struct notifier_block freq_transition;
struct notifier_block cpu_hotplug;

static unsigned int heavy_task_threshold = 650; /* max=1023 */

struct cpu_load_data {
	cputime64_t prev_cpu_idle;
	cputime64_t prev_cpu_wall;
	cputime64_t prev_cpu_iowait;
	unsigned int avg_load_maxfreq;
	unsigned int samples;
	unsigned int window_size;
	unsigned int cur_freq;
	unsigned int policy_max;
	cpumask_var_t related_cpus;
	spinlock_t cpu_load_lock;
};

static DEFINE_PER_CPU(struct cpu_load_data, cpuload);

/* (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)) */
/*
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0))
#define RQSTATS_USE_CPU_IDLE_INTERNAL 1
#endif
*/

#if defined(RQSTATS_USE_CPU_IDLE_INTERNAL) || !defined(CONFIG_CPU_FREQ)
static inline u64 get_cpu_idle_time_jiffy(unsigned int cpu, u64 *wall)
{
	u64 idle_time;
	u64 cur_wall_time;
	u64 busy_time;

	cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());

	busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_IRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SOFTIRQ];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_STEAL];
	busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_NICE];

	idle_time = cur_wall_time - busy_time;
	if (wall)
		*wall = cputime_to_usecs(cur_wall_time);

	return cputime_to_usecs(idle_time);
}

static inline cputime64_t get_cpu_idle_time_internal(unsigned int cpu, cputime64_t *wall)
{
	u64 idle_time = get_cpu_idle_time_us(cpu, NULL);

	if (idle_time == -1ULL)
		idle_time = get_cpu_idle_time_jiffy(cpu, wall);
	else
		idle_time += get_cpu_iowait_time_us(cpu, wall);

	return idle_time;
}
#else /* !RQSTATS_USE_CPU_IDLE_INTERNAL && CONFIG_CPU_FREQ */
#include <linux/cpufreq.h>
#endif /* RQSTATS_USE_CPU_IDLE_INTERNAL || !CONFIG_CPU_FREQ */

static inline cputime64_t get_cpu_iowait_time(unsigned int cpu,
					      cputime64_t *wall)
{
	u64 iowait_time = get_cpu_iowait_time_us(cpu, wall);

	if (iowait_time == -1ULL)
		return 0;

	return iowait_time;
}
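
/*
 * Note on the bookkeeping below (explanatory comment, added for clarity):
 * each call to update_average_load() diffs the wall/idle/iowait counters
 * against the values saved in the per-CPU cpu_load_data, derives the busy
 * percentage for the elapsed interval as
 *
 *	cur_load = 100 * (wall_time - idle_time) / wall_time
 *
 * and folds it into avg_load_maxfreq, optionally rescaled by
 * freq / policy_max when use_maxfreq is set.
 */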
static int update_average_load(unsigned int freq, unsigned int cpu, bool use_maxfreq)
{
	struct cpu_load_data *pcpu = &per_cpu(cpuload, cpu);
	cputime64_t cur_wall_time, cur_idle_time, cur_iowait_time;
	unsigned int idle_time, wall_time, iowait_time;
	unsigned int cur_load, load_at_max_freq, prev_avg_load;
	cputime64_t prev_wall_time, prev_cpu_idle, prev_cpu_iowait;

#if defined(RQSTATS_USE_CPU_IDLE_INTERNAL) || !defined(CONFIG_CPU_FREQ)
	cur_idle_time = get_cpu_idle_time_internal(cpu, &cur_wall_time);
#else /* !RQSTATS_USE_CPU_IDLE_INTERNAL && CONFIG_CPU_FREQ */
	cur_idle_time = get_cpu_idle_time(cpu, &cur_wall_time, 0);
#endif /* RQSTATS_USE_CPU_IDLE_INTERNAL || !CONFIG_CPU_FREQ */
	cur_iowait_time = get_cpu_iowait_time(cpu, &cur_wall_time);

	prev_wall_time = pcpu->prev_cpu_wall;
	prev_cpu_idle = pcpu->prev_cpu_idle;
	prev_cpu_iowait = pcpu->prev_cpu_iowait;

	wall_time = (unsigned int) (cur_wall_time - pcpu->prev_cpu_wall);
	pcpu->prev_cpu_wall = cur_wall_time;

	idle_time = (unsigned int) (cur_idle_time - pcpu->prev_cpu_idle);
	pcpu->prev_cpu_idle = cur_idle_time;

	iowait_time = (unsigned int) (cur_iowait_time - pcpu->prev_cpu_iowait);
	pcpu->prev_cpu_iowait = cur_iowait_time;

	if (idle_time >= iowait_time)
		idle_time -= iowait_time;

	if (unlikely(!wall_time || wall_time < idle_time))
		return 0;

	if (freq)
		cur_load = 100 * (wall_time - idle_time) / wall_time;
	else
		cur_load = 0;

	/* Calculate the scaled load across CPU */
	if (cpu_online(cpu)) {
		if (use_maxfreq)
			load_at_max_freq = (cur_load * freq) / pcpu->policy_max;
		else
			load_at_max_freq = cur_load;
	} else
		load_at_max_freq = 0;

	prev_avg_load = pcpu->avg_load_maxfreq;
#if 1
	if (!pcpu->avg_load_maxfreq) {
		/* This is the first sample in this window */
		pcpu->avg_load_maxfreq = load_at_max_freq;
		pcpu->window_size = wall_time;
	} else {
		/*
		 * There is already a sample available in this window.
		 * Compute the weighted average with the previous entry, so
		 * that we get the precise weighted load.
		 */
		pcpu->avg_load_maxfreq =
			((pcpu->avg_load_maxfreq * pcpu->window_size) +
			 (load_at_max_freq * wall_time)) /
			(wall_time + pcpu->window_size);
		pcpu->window_size += wall_time;
	}
#else /* debug */
	pcpu->avg_load_maxfreq = load_at_max_freq;
	pcpu->window_size = wall_time;
#endif

	mt_sched_printf(sched_log,
		"[%s] cpu(%u) load:%u(%u/%u) wdz:%u wall:%u(%llu/%llu) idle: %u(%llu/%llu) iowait: %u(%llu/%llu)\n",
		__func__, cpu, pcpu->avg_load_maxfreq, load_at_max_freq, prev_avg_load, pcpu->window_size,
		wall_time, cur_wall_time, prev_wall_time,
		idle_time, cur_idle_time, prev_cpu_idle,
		iowait_time, cur_iowait_time, prev_cpu_iowait);

	return 0;
}
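
/*
 * Worked example of the window average above (illustrative numbers only):
 * with an existing average of 40 over a window of 10 time units and a new
 * sample of 80 over 5 units, the update yields
 * (40 * 10 + 80 * 5) / (10 + 5) = 53, and the window grows to 15 units.
 */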
#if 0
static unsigned int report_load_at_max_freq(bool reset)
{
	int cpu;
	struct cpu_load_data *pcpu;
	unsigned int total_load = 0;
	unsigned long flags;

	for_each_online_cpu(cpu) {
		pcpu = &per_cpu(cpuload, cpu);
		spin_lock_irqsave(&pcpu->cpu_load_lock, flags);
		update_average_load(pcpu->cur_freq, cpu, 0);
		total_load += pcpu->avg_load_maxfreq;
		if (reset)
			pcpu->avg_load_maxfreq = 0;
		spin_unlock_irqrestore(&pcpu->cpu_load_lock, flags);
	}
	return total_load;
}
#endif

unsigned int sched_get_percpu_load(int cpu, bool reset, bool use_maxfreq)
{
	struct cpu_load_data *pcpu;
	unsigned int load = 0;
	unsigned long flags;

#if 0
	if (!cpu_online(cpu))
		return 0;
#endif
	if (rq_info.init != 1)
		return 100;

	pcpu = &per_cpu(cpuload, cpu);
	spin_lock_irqsave(&pcpu->cpu_load_lock, flags);
	update_average_load(pcpu->cur_freq, cpu, use_maxfreq);
	load = pcpu->avg_load_maxfreq;
	if (reset)
		pcpu->avg_load_maxfreq = 0;
	spin_unlock_irqrestore(&pcpu->cpu_load_lock, flags);

	return load;
}
EXPORT_SYMBOL(sched_get_percpu_load);
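
/*
 * Illustrative usage sketch (hypothetical caller, not part of this driver):
 * a policy module could sample and reset the normalized load of a CPU with
 *
 *	unsigned int load = sched_get_percpu_load(cpu, true, false);
 *
 * where 'true' clears the accumulated window and 'false' skips the
 * max-frequency rescaling.
 */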
#define HMP_RATIO(v) ((v)*10/17)
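
/*
 * HMP_RATIO() scales the heavy-task threshold for big cores below; e.g. with
 * the default heavy_task_threshold of 650, HMP_RATIO(650) = 650 * 10 / 17 = 382
 * (integer division), so big cores use a lower heavy-task cut-off than little
 * cores.
 */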
/* #define DETECT_HTASK_HEAT */

#ifdef DETECT_HTASK_HEAT
#define MAX_HTASK_TEMPERATURE 10
static unsigned int htask_temperature;

static void __heat_refined(int *count)
{
	if (!arch_is_smp()) {
		if (*count) {
			htask_temperature += (htask_temperature < MAX_HTASK_TEMPERATURE) ? 1 : 0;
		} else {
			*count = (htask_temperature > 0) ? 1 : 0;
			htask_temperature -= (htask_temperature > 0) ? 1 : 0;
		}
	}
}
#else
static inline void __heat_refined(int *count) {}
#endif

#ifdef CONFIG_SCHED_HMP
static void __trace_out(int heavy, int cpu, struct task_struct *p)
{
#define TRACEBUF_LEN 128
	char tracebuf[TRACEBUF_LEN];

#ifdef CONFIG_ARCH_SCALE_INVARIANT_CPU_CAPACITY
	snprintf(tracebuf, TRACEBUF_LEN, " %s cpu=%d load=%4lu cpucap=%4lu/%4lu pid=%4d name=%s",
		 heavy ? "Y" : "N",
		 cpu, p->se.avg.loadwop_avg_contrib,
		 topology_cpu_capacity(cpu), topology_max_cpu_capacity(cpu),
		 p->pid, p->comm);
#else
	snprintf(tracebuf, TRACEBUF_LEN, " %s cpu=%d load=%4lu pid=%4d name=%s",
		 heavy ? "Y" : "N",
		 cpu, p->se.avg.loadwop_avg_contrib,
		 p->pid, p->comm);
#endif
	trace_sched_heavy_task(tracebuf);
	if (unlikely(heavy))
		trace_sched_task_entity_avg(5, p, &p->se.avg);
}
#endif

static unsigned int htask_statistic;

#ifdef CONFIG_ARCH_SCALE_INVARIANT_CPU_CAPACITY
#define OVER_L_TH(cpu) ((topology_cpu_capacity(cpu) >= topology_max_cpu_capacity(cpu)) ? 1 : 0)
#define OVER_B_TH(cpu) ((topology_cpu_capacity(cpu)*8 > topology_max_cpu_capacity(cpu)*5) ? 1 : 0)
#else
#define OVER_L_TH(cpu) (1)
#define OVER_B_TH(cpu) (1)
#endif

#ifdef CONFIG_SCHED_HMP
unsigned int sched_get_nr_heavy_task_by_threshold(unsigned int threshold)
{
	int cpu;
	struct task_struct *p;
	unsigned long flags;
	unsigned int count = 0;
	int is_heavy = 0;
	unsigned int hmp_threshold;

	if (rq_info.init != 1)
		return 0;

	for_each_online_cpu(cpu) {
		int bigcore = arch_cpu_is_big(cpu);

		hmp_threshold = bigcore ? HMP_RATIO(threshold) : threshold;
		raw_spin_lock_irqsave(&cpu_rq(cpu)->lock, flags);
		list_for_each_entry(p, &cpu_rq(cpu)->cfs_tasks, se.group_node) {
			is_heavy = 0;
#ifdef CONFIG_SCHED_HMP_PRIO_FILTER
			if (task_low_priority(p->prio))
				continue;
#endif
			if (p->se.avg.loadwop_avg_contrib >= hmp_threshold)
				is_heavy = (!bigcore && OVER_L_TH(cpu)) || (bigcore && OVER_B_TH(cpu));
			count += is_heavy ? 1 : 0;
			__trace_out(is_heavy, cpu, p);
		}
		raw_spin_unlock_irqrestore(&cpu_rq(cpu)->lock, flags);
	}

	__heat_refined(&count);

	if (count)
		htask_statistic++;
	return count;
}
EXPORT_SYMBOL(sched_get_nr_heavy_task_by_threshold);

unsigned int sched_get_nr_heavy_task(void)
{
	return sched_get_nr_heavy_task_by_threshold(heavy_task_threshold);
}
EXPORT_SYMBOL(sched_get_nr_heavy_task);
#else
unsigned int sched_get_nr_heavy_task_by_threshold(unsigned int threshold)
{
	return 0;
}
EXPORT_SYMBOL(sched_get_nr_heavy_task_by_threshold);

unsigned int sched_get_nr_heavy_task(void)
{
	return 0;
}
EXPORT_SYMBOL(sched_get_nr_heavy_task);
#endif
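
/*
 * Illustrative usage sketch (hypothetical caller, not part of this driver):
 * a hotplug or boost policy could poll the count of heavy tasks, for example
 *
 *	unsigned int nr_heavy = sched_get_nr_heavy_task();
 *	if (nr_heavy >= 2)
 *		request_more_big_cores();	// hypothetical helper
 *
 * sched_set_heavy_task_threshold() below adjusts the per-task load (0..1023)
 * at which a task is counted as heavy.
 */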
void sched_set_heavy_task_threshold(unsigned int val)
{
	heavy_task_threshold = val;
}
EXPORT_SYMBOL(sched_set_heavy_task_threshold);

#if 0
static int cpufreq_policy_handler(struct notifier_block *nb,
				  unsigned long event, void *data)
{
	int cpu = 0;
	struct cpufreq_policy *policy = data;
	unsigned long flags;

	if (event == CPUFREQ_START)
		return 0;

	if (event != CPUFREQ_INCOMPATIBLE)
		return 0;

	/* Make sure that all CPUs impacted by this policy are
	 * updated since we will only get a notification when the
	 * user explicitly changes the policy on a CPU.
	 */
	for_each_cpu(cpu, policy->cpus) {
		struct cpu_load_data *pcpu = &per_cpu(cpuload, cpu);

		spin_lock_irqsave(&pcpu->cpu_load_lock, flags);
		pcpu->policy_max = policy->cpuinfo.max_freq;
		spin_unlock_irqrestore(&pcpu->cpu_load_lock, flags);
	}
	return 0;
}
#endif

static int cpufreq_transition_handler(struct notifier_block *nb,
				      unsigned long val, void *data)
{
#if 1
	struct cpufreq_freqs *freqs = data;
	struct cpu_load_data *this_cpu = &per_cpu(cpuload, freqs->cpu);
	int j;
	unsigned long flags;

	if (rq_info.init != 1)
		return 0;

	switch (val) {
	case CPUFREQ_POSTCHANGE:
		for_each_cpu(j, this_cpu->related_cpus) {
			struct cpu_load_data *pcpu = &per_cpu(cpuload, j);

			/* flush the previous load at the old frequency */
			spin_lock_irqsave(&pcpu->cpu_load_lock, flags);
			if (cpu_online(j))
				update_average_load(freqs->old, freqs->cpu, 0);
			pcpu->cur_freq = freqs->new;
			spin_unlock_irqrestore(&pcpu->cpu_load_lock, flags);
		}
		break;
	}
#endif
	return 0;
}
static int cpu_hotplug_handler(struct notifier_block *nb,
			       unsigned long val, void *data)
{
#if 1
	unsigned int cpu = (unsigned long)data;
	struct cpu_load_data *this_cpu = &per_cpu(cpuload, cpu);
	unsigned long flags;

	if (rq_info.init != 1)
		return NOTIFY_OK;

	switch (val) {
	case CPU_UP_PREPARE:
		/* cpu_online()=0 here, count cpu offline period as idle */
		spin_lock_irqsave(&this_cpu->cpu_load_lock, flags);
		update_average_load(0, cpu, 0);
		spin_unlock_irqrestore(&this_cpu->cpu_load_lock, flags);
		break;
	case CPU_DOWN_PREPARE:
		/* cpu_online()=1 here, flush previous load */
		spin_lock_irqsave(&this_cpu->cpu_load_lock, flags);
		update_average_load(this_cpu->cur_freq, cpu, 0);
		spin_unlock_irqrestore(&this_cpu->cpu_load_lock, flags);
		break;
	}
#endif
	return NOTIFY_OK;
}

static int system_suspend_handler(struct notifier_block *nb,
				  unsigned long val, void *data)
{
	switch (val) {
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
	case PM_POST_RESTORE:
		rq_info.hotplug_disabled = 0;
		break;
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		rq_info.hotplug_disabled = 1;
		break;
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}

static ssize_t hotplug_disable_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
{
	unsigned int val = 0;

	val = rq_info.hotplug_disabled;
	return snprintf(buf, MAX_LONG_SIZE, "%d\n", val);
}
static struct kobj_attribute hotplug_disabled_attr = __ATTR_RO(hotplug_disable);

#ifdef CONFIG_MTK_SCHED_RQAVG_US_ENABLE_WQ
static void def_work_fn(struct work_struct *work)
{
	int64_t diff;

	diff = ktime_to_ns(ktime_get()) - rq_info.def_start_time;
	do_div(diff, 1000 * 1000);
	rq_info.def_interval = (unsigned int) diff;

	/* Notify polling threads on change of value */
	sysfs_notify(rq_info.kobj, NULL, "def_timer_ms");
}
#endif

static ssize_t run_queue_avg_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf)
{
	unsigned int val = 0;
	unsigned long flags = 0;

	spin_lock_irqsave(&rq_lock, flags);
	/* rq avg currently available only on one core */
	val = rq_info.rq_avg;
	rq_info.rq_avg = 0;
	spin_unlock_irqrestore(&rq_lock, flags);

	return snprintf(buf, PAGE_SIZE, "%d.%d\n", val/10, val%10);
}
static struct kobj_attribute run_queue_avg_attr = __ATTR_RO(run_queue_avg);
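
/*
 * Note (explanatory comment, added for clarity): rq_info.rq_avg holds the
 * average in tenths of a runnable task, so a stored value of 25 is reported
 * through the attribute above as "2.5".
 */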
static ssize_t show_run_queue_poll_ms(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	int ret = 0;
	unsigned long flags = 0;

	spin_lock_irqsave(&rq_lock, flags);
	ret = snprintf(buf, MAX_LONG_SIZE, "%u\n",
		       jiffies_to_msecs(rq_info.rq_poll_jiffies));
	spin_unlock_irqrestore(&rq_lock, flags);

	return ret;
}

static ssize_t store_run_queue_poll_ms(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t count)
{
	unsigned int val = 0;
	unsigned long flags = 0;
	static DEFINE_MUTEX(lock_poll_ms);

	mutex_lock(&lock_poll_ms);
	spin_lock_irqsave(&rq_lock, flags);
	if (0 != sscanf(buf, "%iu", &val))
		rq_info.rq_poll_jiffies = msecs_to_jiffies(val);
	spin_unlock_irqrestore(&rq_lock, flags);
	mutex_unlock(&lock_poll_ms);

	return count;
}

static struct kobj_attribute run_queue_poll_ms_attr =
	__ATTR(run_queue_poll_ms, S_IWUSR | S_IRUSR, show_run_queue_poll_ms,
	       store_run_queue_poll_ms);

static ssize_t show_def_timer_ms(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
	return snprintf(buf, MAX_LONG_SIZE, "%u\n", rq_info.def_interval);
}

static ssize_t store_def_timer_ms(struct kobject *kobj,
				  struct kobj_attribute *attr, const char *buf, size_t count)
{
	unsigned int val = 0;

	if (0 != sscanf(buf, "%iu", &val))
		rq_info.def_timer_jiffies = msecs_to_jiffies(val);

	rq_info.def_start_time = ktime_to_ns(ktime_get());
	return count;
}

static ssize_t store_heavy_task_threshold(struct kobject *kobj,
					  struct kobj_attribute *attr, const char *buf, size_t count)
{
	unsigned int val = 0;

	if (0 != sscanf(buf, "%iu", &val))
		sched_set_heavy_task_threshold(val);

	return count;
}

static struct kobj_attribute def_timer_ms_attr =
	__ATTR(def_timer_ms, S_IWUSR | S_IRUSR, show_def_timer_ms,
	       store_def_timer_ms);

static ssize_t show_cpu_normalized_load(struct kobject *kobj,
					struct kobj_attribute *attr, char *buf)
{
	int cpu;
	unsigned int len = 0;
	unsigned int load = 0;
	unsigned int max_len = 4096;

	/* len = snprintf(buf, MAX_LONG_SIZE, "%u\n", report_load_at_max_freq(0)); */
	for_each_possible_cpu(cpu) {
		/* reset cpu load
		load = sched_get_percpu_load(cpu, 1, 0); */
		/* not reset */
		load = sched_get_percpu_load(cpu, 0, 0);
		len += snprintf(buf+len, max_len-len, "cpu(%d)=%d\n", cpu, load);
#if 0
		u64 idle_time, wall_time, iowait_time;
		struct cpu_load_data *pcpu = &per_cpu(cpuload, cpu);

		idle_time = get_cpu_idle_time(cpu, &wall_time, 0);
		iowait_time = get_cpu_iowait_time(cpu, &wall_time);
		len += snprintf(buf+len, max_len-len, "curr idle=%u, io=%u, wall=%u\n",
				(unsigned int)idle_time,
				(unsigned int)iowait_time,
				(unsigned int)wall_time);
		len += snprintf(buf+len, max_len-len, "prev idle=%u, io=%u, wall=%u, l=%u, w=%u, f=%u m=%u, %s\n",
				(unsigned int)pcpu->prev_cpu_idle,
				(unsigned int)pcpu->prev_cpu_iowait,
				(unsigned int)pcpu->prev_cpu_wall,
				pcpu->avg_load_maxfreq,
				pcpu->window_size,
				pcpu->cur_freq,
				pcpu->policy_max,
				cpu_online(cpu) ? "on" : "off");
#endif
	}
	len += snprintf(buf+len, max_len-len, "htask_threshold=%d, current_htask#=%u, total_htask#=%d\n",
			heavy_task_threshold, sched_get_nr_heavy_task(), htask_statistic);

	return len;
}

static struct kobj_attribute cpu_normalized_load_attr =
	__ATTR(cpu_normalized_load, S_IWUSR | S_IRUSR, show_cpu_normalized_load,
	       store_heavy_task_threshold);

static struct attribute *rq_attrs[] = {
	&cpu_normalized_load_attr.attr,
	&def_timer_ms_attr.attr,
	&run_queue_avg_attr.attr,
	&run_queue_poll_ms_attr.attr,
	&hotplug_disabled_attr.attr,
	NULL,
};

static struct attribute_group rq_attr_group = {
	.attrs = rq_attrs,
};
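
/*
 * The group above is created under /sys/devices/system/cpu/cpu0/rq-stats/ by
 * init_rq_attribs() below, exposing run_queue_avg, run_queue_poll_ms,
 * def_timer_ms, cpu_normalized_load and hotplug_disable. Illustrative reads
 * from userspace (not part of this file):
 *
 *	cat /sys/devices/system/cpu/cpu0/rq-stats/run_queue_avg
 *	cat /sys/devices/system/cpu/cpu0/rq-stats/cpu_normalized_load
 */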
static int init_rq_attribs(void)
{
	int err;

	rq_info.rq_avg = 0;
	rq_info.attr_group = &rq_attr_group;

	/* Create /sys/devices/system/cpu/cpu0/rq-stats/... */
	rq_info.kobj = kobject_create_and_add("rq-stats",
					      &get_cpu_device(0)->kobj);
	if (!rq_info.kobj)
		return -ENOMEM;

	err = sysfs_create_group(rq_info.kobj, rq_info.attr_group);
	if (err)
		kobject_put(rq_info.kobj);
	else
		kobject_uevent(rq_info.kobj, KOBJ_ADD);

	return err;
}

static int __init rq_stats_init(void)
{
	int ret = 0;
	int i;
#if CPU_FREQ_VARIANT
	struct cpufreq_policy cpu_policy;
#endif

	/* Bail out if this is not an SMP target */
	/* FIX-ME: commented out to avoid an arm64 build error
	if (!is_smp()) {
		rq_info.init = 0;
		return -ENOSYS;
	}
	*/

#ifdef CONFIG_MTK_SCHED_RQAVG_US_ENABLE_WQ
	rq_wq = create_singlethread_workqueue("rq_stats");
	BUG_ON(!rq_wq);
	INIT_WORK(&rq_info.def_timer_work, def_work_fn);
#endif
	spin_lock_init(&rq_lock);
	rq_info.rq_poll_jiffies = DEFAULT_RQ_POLL_JIFFIES;
	rq_info.def_timer_jiffies = DEFAULT_DEF_TIMER_JIFFIES;
	rq_info.rq_poll_last_jiffy = 0;
	rq_info.def_timer_last_jiffy = 0;
	rq_info.hotplug_disabled = 0;
	ret = init_rq_attribs();

	for_each_possible_cpu(i) {
		struct cpu_load_data *pcpu = &per_cpu(cpuload, i);

		spin_lock_init(&pcpu->cpu_load_lock);
#if CPU_FREQ_VARIANT
		cpufreq_get_policy(&cpu_policy, i);
		pcpu->policy_max = cpu_policy.cpuinfo.max_freq;
		if (cpu_online(i))
			pcpu->cur_freq = cpufreq_get(i);
		cpumask_copy(pcpu->related_cpus, cpu_policy.cpus);
#else
		pcpu->policy_max = 1;
		pcpu->cur_freq = 1;
#endif
	}
	freq_transition.notifier_call = cpufreq_transition_handler;
	/* freq_policy.notifier_call = cpufreq_policy_handler; */
	cpu_hotplug.notifier_call = cpu_hotplug_handler;
#if CPU_FREQ_VARIANT
	cpufreq_register_notifier(&freq_transition, CPUFREQ_TRANSITION_NOTIFIER);
#endif
	/* cpufreq_register_notifier(&freq_policy, CPUFREQ_POLICY_NOTIFIER); */
	register_hotcpu_notifier(&cpu_hotplug);

	rq_info.init = 1;

	return ret;
}
late_initcall(rq_stats_init);

static int __init rq_stats_early_init(void)
{
	/* Bail out if this is not an SMP target */
	/* FIX-ME: commented out to avoid an arm64 build error
	if (!is_smp()) {
		rq_info.init = 0;
		return -ENOSYS;
	}
	*/
	pm_notifier(system_suspend_handler, 0);
	return 0;
}
core_initcall(rq_stats_early_init);