/*
 * drivers/cpufreq/cpufreq_stats.c
 *
 * Copyright (C) 2003-2004 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>.
 * (C) 2004 Zou Nan hai <nanhai.zou@intel.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
  11. #include <linux/cpu.h>
  12. #include <linux/cpufreq.h>
  13. #include <linux/module.h>
  14. #include <linux/slab.h>
  15. #include <linux/sort.h>
  16. #include <linux/of.h>
  17. #include <linux/sched.h>
  18. #include <linux/cputime.h>
/* Protects all of the stats tables and the per-state accounting below. */
static spinlock_t cpufreq_stats_lock;

/* Per-policy frequency statistics, exposed through sysfs. */
struct cpufreq_stats {
	unsigned int cpu;		/* CPU this table belongs to */
	unsigned int total_trans;	/* total number of frequency transitions */
	unsigned long long last_time;	/* jiffies64 stamp of the last update */
	unsigned int max_state;		/* number of allocated state slots */
	unsigned int state_num;		/* number of valid entries in freq_table */
	unsigned int last_index;	/* index of the current frequency */
	u64 *time_in_state;		/* jiffies spent in each state */
	unsigned int *freq_table;	/* frequency of each state (kHz) */
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	unsigned int *trans_table;	/* max_state x max_state transition counts */
#endif
};

/* Cumulative time-in-state, kept per possible CPU (survives policy churn). */
struct all_cpufreq_stats {
	unsigned int state_num;
	cputime64_t *time_in_state;
	unsigned int *freq_table;
};

/* Per-frequency current draw (read from the DT "current" property), per CPU. */
struct cpufreq_power_stats {
	unsigned int state_num;
	unsigned int *curr;
	unsigned int *freq_table;
};

/* Sorted union of every CPU's frequency table. */
struct all_freq_table {
	unsigned int *freq_table;
	unsigned int table_size;
};

static struct all_freq_table *all_freq_table;

static DEFINE_PER_CPU(struct all_cpufreq_stats *, all_cpufreq_stats);
static DEFINE_PER_CPU(struct cpufreq_stats *, cpufreq_stats_table);
static DEFINE_PER_CPU(struct cpufreq_power_stats *, cpufreq_power_stats);

struct cpufreq_stats_attribute {
	struct attribute attr;
	ssize_t(*show) (struct cpufreq_stats *, char *);
};
/*
 * Credit the time elapsed since the last update to the CPU's current
 * frequency state, in both the per-policy table and the cumulative
 * all_cpufreq_stats table.  Always returns 0, including when no stats
 * table exists for @cpu.
 */
static int cpufreq_stats_update(unsigned int cpu)
{
	struct cpufreq_stats *stat;
	struct all_cpufreq_stats *all_stat;
	unsigned long long cur_time;

	cur_time = get_jiffies_64();
	spin_lock(&cpufreq_stats_lock);
	stat = per_cpu(cpufreq_stats_table, cpu);
	all_stat = per_cpu(all_cpufreq_stats, cpu);
	if (!stat) {
		spin_unlock(&cpufreq_stats_lock);
		return 0;
	}
	if (stat->time_in_state) {
		stat->time_in_state[stat->last_index] +=
			cur_time - stat->last_time;
		/* all_stat may be absent if its allocation failed at creation */
		if (all_stat)
			all_stat->time_in_state[stat->last_index] +=
				cur_time - stat->last_time;
	}
	stat->last_time = cur_time;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}
  79. static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
  80. {
  81. struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
  82. if (!stat)
  83. return 0;
  84. return sprintf(buf, "%d\n",
  85. per_cpu(cpufreq_stats_table, stat->cpu)->total_trans);
  86. }
  87. static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
  88. {
  89. ssize_t len = 0;
  90. int i;
  91. struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
  92. if (!stat)
  93. return 0;
  94. cpufreq_stats_update(stat->cpu);
  95. for (i = 0; i < stat->state_num; i++) {
  96. len += sprintf(buf + len, "%u %llu\n", stat->freq_table[i],
  97. (unsigned long long)
  98. jiffies_64_to_clock_t(stat->time_in_state[i]));
  99. }
  100. return len;
  101. }
  102. static int get_index_all_cpufreq_stat(struct all_cpufreq_stats *all_stat,
  103. unsigned int freq)
  104. {
  105. int i;
  106. if (!all_stat)
  107. return -1;
  108. for (i = 0; i < all_stat->state_num; i++) {
  109. if (all_stat->freq_table[i] == freq)
  110. return i;
  111. }
  112. return -1;
  113. }
/*
 * Charge @task for @cputime spent at its CPU's current frequency, using
 * the per-frequency current draw from cpufreq_power_stats.  Silently a
 * no-op when either stats table is missing for the task's CPU.
 *
 * NOTE(review): the ULLONG_MAX comparison appears to be a sentinel that
 * exempts a task from power accounting — confirm against the
 * task_struct::cpu_power users elsewhere in this tree.
 */
void acct_update_power(struct task_struct *task, cputime_t cputime) {
	struct cpufreq_power_stats *powerstats;
	struct cpufreq_stats *stats;
	unsigned int cpu_num, curr;

	if (!task)
		return;

	cpu_num = task_cpu(task);
	powerstats = per_cpu(cpufreq_power_stats, cpu_num);
	stats = per_cpu(cpufreq_stats_table, cpu_num);
	if (!powerstats || !stats)
		return;

	/* Current draw at the CPU's present frequency state. */
	curr = powerstats->curr[stats->last_index];
	if (task->cpu_power != ULLONG_MAX)
		task->cpu_power += curr * cputime_to_usecs(cputime);
}
EXPORT_SYMBOL_GPL(acct_update_power);
  130. static ssize_t show_current_in_state(struct kobject *kobj,
  131. struct kobj_attribute *attr, char *buf)
  132. {
  133. ssize_t len = 0;
  134. unsigned int i, cpu;
  135. struct cpufreq_power_stats *powerstats;
  136. spin_lock(&cpufreq_stats_lock);
  137. for_each_possible_cpu(cpu) {
  138. powerstats = per_cpu(cpufreq_power_stats, cpu);
  139. if (!powerstats)
  140. continue;
  141. len += scnprintf(buf + len, PAGE_SIZE - len, "CPU%d:", cpu);
  142. for (i = 0; i < powerstats->state_num; i++)
  143. len += scnprintf(buf + len, PAGE_SIZE - len,
  144. "%d=%d ", powerstats->freq_table[i],
  145. powerstats->curr[i]);
  146. len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
  147. }
  148. spin_unlock(&cpufreq_stats_lock);
  149. return len;
  150. }
/*
 * sysfs (global): matrix of time-in-state with one column per possible
 * CPU and one row per frequency in the global all_freq_table.  Cells
 * for frequencies a CPU does not support print "N/A".
 */
static ssize_t show_all_time_in_state(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	ssize_t len = 0;
	unsigned int i, cpu, freq, index;
	struct all_cpufreq_stats *all_stat;
	struct cpufreq_policy *policy;

	len += scnprintf(buf + len, PAGE_SIZE - len, "freq\t\t");
	for_each_possible_cpu(cpu) {
		len += scnprintf(buf + len, PAGE_SIZE - len, "cpu%d\t\t", cpu);
		/* Bring online CPUs' accounting up to date before dumping. */
		if (cpu_online(cpu))
			cpufreq_stats_update(cpu);
	}

	if (!all_freq_table)
		goto out;
	for (i = 0; i < all_freq_table->table_size; i++) {
		freq = all_freq_table->freq_table[i];
		len += scnprintf(buf + len, PAGE_SIZE - len, "\n%u\t\t", freq);
		for_each_possible_cpu(cpu) {
			/* Takes a policy reference; skip CPUs without one. */
			policy = cpufreq_cpu_get(cpu);
			if (policy == NULL)
				continue;
			all_stat = per_cpu(all_cpufreq_stats, policy->cpu);
			index = get_index_all_cpufreq_stat(all_stat, freq);
			if (index != -1) {
				len += scnprintf(buf + len, PAGE_SIZE - len,
					"%llu\t\t", (unsigned long long)
					cputime64_to_clock_t(all_stat->time_in_state[index]));
			} else {
				len += scnprintf(buf + len, PAGE_SIZE - len,
					"N/A\t\t");
			}
			cpufreq_cpu_put(policy);
		}
	}

out:
	len += scnprintf(buf + len, PAGE_SIZE - len, "\n");
	return len;
}
  190. #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
/*
 * sysfs: From/To frequency-transition-count matrix.  Output is
 * hard-clamped to PAGE_SIZE: every snprintf() is bounds-checked and the
 * function returns exactly PAGE_SIZE once the buffer fills up.
 */
static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
{
	ssize_t len = 0;
	int i, j;
	struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);

	if (!stat)
		return 0;

	/* Bring accounting up to date before printing. */
	cpufreq_stats_update(stat->cpu);

	len += snprintf(buf + len, PAGE_SIZE - len, " From : To\n");
	len += snprintf(buf + len, PAGE_SIZE - len, " : ");
	/* Header row: one column per known frequency. */
	for (i = 0; i < stat->state_num; i++) {
		if (len >= PAGE_SIZE)
			break;
		len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
				stat->freq_table[i]);
	}
	if (len >= PAGE_SIZE)
		return PAGE_SIZE;
	len += snprintf(buf + len, PAGE_SIZE - len, "\n");

	for (i = 0; i < stat->state_num; i++) {
		if (len >= PAGE_SIZE)
			break;
		len += snprintf(buf + len, PAGE_SIZE - len, "%9u: ",
				stat->freq_table[i]);
		for (j = 0; j < stat->state_num; j++) {
			if (len >= PAGE_SIZE)
				break;
			/* trans_table is a flattened max_state x max_state matrix */
			len += snprintf(buf + len, PAGE_SIZE - len, "%9u ",
					stat->trans_table[i*stat->max_state+j]);
		}
		if (len >= PAGE_SIZE)
			break;
		len += snprintf(buf + len, PAGE_SIZE - len, "\n");
	}
	if (len >= PAGE_SIZE)
		return PAGE_SIZE;
	return len;
}
cpufreq_freq_attr_ro(trans_table);
#endif

cpufreq_freq_attr_ro(total_trans);
cpufreq_freq_attr_ro(time_in_state);

/* Per-policy attributes, grouped under <policy>/stats/ in sysfs. */
static struct attribute *default_attrs[] = {
	&total_trans.attr,
	&time_in_state.attr,
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	&trans_table.attr,
#endif
	NULL
};
static struct attribute_group stats_attr_group = {
	.attrs = default_attrs,
	.name = "stats"
};

/* Global (all-CPU) read-only attributes under the cpufreq kobject. */
static struct kobj_attribute _attr_all_time_in_state = __ATTR(all_time_in_state,
		0444, show_all_time_in_state, NULL);
static struct kobj_attribute _attr_current_in_state = __ATTR(current_in_state,
		0444, show_current_in_state, NULL);
  249. static int freq_table_get_index(struct cpufreq_stats *stat, unsigned int freq)
  250. {
  251. int index;
  252. for (index = 0; index < stat->max_state; index++)
  253. if (stat->freq_table[index] == freq)
  254. return index;
  255. return -1;
  256. }
  257. static void __cpufreq_stats_free_table(struct cpufreq_policy *policy)
  258. {
  259. struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table, policy->cpu);
  260. if (!stat)
  261. return;
  262. pr_debug("%s: Free stat table\n", __func__);
  263. sysfs_remove_group(&policy->kobj, &stats_attr_group);
  264. kfree(stat->time_in_state);
  265. kfree(stat);
  266. per_cpu(cpufreq_stats_table, policy->cpu) = NULL;
  267. }
  268. static void cpufreq_stats_free_table(unsigned int cpu)
  269. {
  270. struct cpufreq_policy *policy;
  271. policy = cpufreq_cpu_get(cpu);
  272. if (!policy)
  273. return;
  274. if (cpufreq_frequency_get_table(policy->cpu))
  275. __cpufreq_stats_free_table(policy);
  276. cpufreq_cpu_put(policy);
  277. }
  278. static void cpufreq_allstats_free(void)
  279. {
  280. int cpu;
  281. struct all_cpufreq_stats *all_stat;
  282. sysfs_remove_file(cpufreq_global_kobject,
  283. &_attr_all_time_in_state.attr);
  284. for_each_possible_cpu(cpu) {
  285. all_stat = per_cpu(all_cpufreq_stats, cpu);
  286. if (!all_stat)
  287. continue;
  288. kfree(all_stat->time_in_state);
  289. kfree(all_stat);
  290. per_cpu(all_cpufreq_stats, cpu) = NULL;
  291. }
  292. if (all_freq_table) {
  293. kfree(all_freq_table->freq_table);
  294. kfree(all_freq_table);
  295. all_freq_table = NULL;
  296. }
  297. }
  298. static void cpufreq_powerstats_free(void)
  299. {
  300. int cpu;
  301. struct cpufreq_power_stats *powerstats;
  302. sysfs_remove_file(cpufreq_global_kobject, &_attr_current_in_state.attr);
  303. for_each_possible_cpu(cpu) {
  304. powerstats = per_cpu(cpufreq_power_stats, cpu);
  305. if (!powerstats)
  306. continue;
  307. kfree(powerstats->curr);
  308. kfree(powerstats);
  309. per_cpu(cpufreq_power_stats, cpu) = NULL;
  310. }
  311. }
  312. static int __cpufreq_stats_create_table(struct cpufreq_policy *policy,
  313. struct cpufreq_frequency_table *table, int count)
  314. {
  315. unsigned int i, ret = 0;
  316. struct cpufreq_stats *stat;
  317. unsigned int alloc_size;
  318. unsigned int cpu = policy->cpu;
  319. struct cpufreq_frequency_table *pos;
  320. if (per_cpu(cpufreq_stats_table, cpu))
  321. return -EBUSY;
  322. stat = kzalloc(sizeof(*stat), GFP_KERNEL);
  323. if ((stat) == NULL)
  324. return -ENOMEM;
  325. ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
  326. if (ret)
  327. goto error_out;
  328. stat->cpu = cpu;
  329. per_cpu(cpufreq_stats_table, cpu) = stat;
  330. alloc_size = count * sizeof(int) + count * sizeof(u64);
  331. #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
  332. alloc_size += count * count * sizeof(int);
  333. #endif
  334. stat->max_state = count;
  335. stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
  336. if (!stat->time_in_state) {
  337. ret = -ENOMEM;
  338. goto error_alloc;
  339. }
  340. stat->freq_table = (unsigned int *)(stat->time_in_state + count);
  341. #ifdef CONFIG_CPU_FREQ_STAT_DETAILS
  342. stat->trans_table = stat->freq_table + count;
  343. #endif
  344. i = 0;
  345. cpufreq_for_each_valid_entry(pos, table)
  346. if (freq_table_get_index(stat, pos->frequency) == -1)
  347. stat->freq_table[i++] = pos->frequency;
  348. stat->state_num = i;
  349. spin_lock(&cpufreq_stats_lock);
  350. stat->last_time = get_jiffies_64();
  351. stat->last_index = freq_table_get_index(stat, policy->cur);
  352. spin_unlock(&cpufreq_stats_lock);
  353. return 0;
  354. error_alloc:
  355. sysfs_remove_group(&policy->kobj, &stats_attr_group);
  356. error_out:
  357. kfree(stat);
  358. per_cpu(cpufreq_stats_table, cpu) = NULL;
  359. return ret;
  360. }
  361. static void cpufreq_stats_update_policy_cpu(struct cpufreq_policy *policy)
  362. {
  363. struct cpufreq_stats *stat = per_cpu(cpufreq_stats_table,
  364. policy->last_cpu);
  365. pr_debug("Updating stats_table for new_cpu %u from last_cpu %u\n",
  366. policy->cpu, policy->last_cpu);
  367. per_cpu(cpufreq_stats_table, policy->cpu) = per_cpu(cpufreq_stats_table,
  368. policy->last_cpu);
  369. per_cpu(cpufreq_stats_table, policy->last_cpu) = NULL;
  370. stat->cpu = policy->cpu;
  371. }
  372. static void cpufreq_powerstats_create(unsigned int cpu,
  373. struct cpufreq_frequency_table *table, int count) {
  374. unsigned int alloc_size, i = 0, ret = 0;
  375. struct cpufreq_power_stats *powerstats;
  376. struct cpufreq_frequency_table *pos;
  377. struct device_node *cpu_node;
  378. char device_path[16];
  379. powerstats = kzalloc(sizeof(struct cpufreq_power_stats),
  380. GFP_KERNEL);
  381. if (!powerstats)
  382. return;
  383. /* Allocate memory for freq table per cpu as well as clockticks per
  384. * freq*/
  385. alloc_size = count * sizeof(unsigned int) +
  386. count * sizeof(unsigned int);
  387. powerstats->curr = kzalloc(alloc_size, GFP_KERNEL);
  388. if (!powerstats->curr) {
  389. kfree(powerstats);
  390. return;
  391. }
  392. powerstats->freq_table = powerstats->curr + count;
  393. spin_lock(&cpufreq_stats_lock);
  394. i = 0;
  395. cpufreq_for_each_valid_entry(pos, table)
  396. powerstats->freq_table[i++] = pos->frequency;
  397. powerstats->state_num = i;
  398. snprintf(device_path, sizeof(device_path), "/cpus/cpu@%d", cpu);
  399. cpu_node = of_find_node_by_path(device_path);
  400. if (cpu_node) {
  401. ret = of_property_read_u32_array(cpu_node, "current",
  402. powerstats->curr, count);
  403. if (ret) {
  404. kfree(powerstats->curr);
  405. kfree(powerstats);
  406. powerstats = NULL;
  407. }
  408. }
  409. per_cpu(cpufreq_power_stats, cpu) = powerstats;
  410. spin_unlock(&cpufreq_stats_lock);
  411. }
  412. static int compare_for_sort(const void *lhs_ptr, const void *rhs_ptr)
  413. {
  414. unsigned int lhs = *(const unsigned int *)(lhs_ptr);
  415. unsigned int rhs = *(const unsigned int *)(rhs_ptr);
  416. if (lhs < rhs)
  417. return -1;
  418. if (lhs > rhs)
  419. return 1;
  420. return 0;
  421. }
  422. static bool check_all_freq_table(unsigned int freq)
  423. {
  424. int i;
  425. for (i = 0; i < all_freq_table->table_size; i++) {
  426. if (freq == all_freq_table->freq_table[i])
  427. return true;
  428. }
  429. return false;
  430. }
  431. static void create_all_freq_table(void)
  432. {
  433. all_freq_table = kzalloc(sizeof(struct all_freq_table),
  434. GFP_KERNEL);
  435. if (!all_freq_table)
  436. pr_warn("could not allocate memory for all_freq_table\n");
  437. return;
  438. }
  439. static void add_all_freq_table(unsigned int freq)
  440. {
  441. unsigned int size;
  442. size = sizeof(unsigned int) * (all_freq_table->table_size + 1);
  443. all_freq_table->freq_table = krealloc(all_freq_table->freq_table,
  444. size, GFP_ATOMIC);
  445. if (IS_ERR(all_freq_table->freq_table)) {
  446. pr_warn("Could not reallocate memory for freq_table\n");
  447. all_freq_table->freq_table = NULL;
  448. return;
  449. }
  450. all_freq_table->freq_table[all_freq_table->table_size++] = freq;
  451. }
  452. static void cpufreq_allstats_create(unsigned int cpu,
  453. struct cpufreq_frequency_table *table, int count)
  454. {
  455. int i , j = 0;
  456. unsigned int alloc_size;
  457. struct all_cpufreq_stats *all_stat;
  458. bool sort_needed = false;
  459. all_stat = kzalloc(sizeof(struct all_cpufreq_stats),
  460. GFP_KERNEL);
  461. if (!all_stat) {
  462. pr_warn("Cannot allocate memory for cpufreq stats\n");
  463. return;
  464. }
  465. /*Allocate memory for freq table per cpu as well as clockticks per freq*/
  466. alloc_size = count * sizeof(int) + count * sizeof(cputime64_t);
  467. all_stat->time_in_state = kzalloc(alloc_size, GFP_KERNEL);
  468. if (!all_stat->time_in_state) {
  469. pr_warn("Cannot allocate memory for cpufreq time_in_state\n");
  470. kfree(all_stat);
  471. all_stat = NULL;
  472. return;
  473. }
  474. all_stat->freq_table = (unsigned int *)
  475. (all_stat->time_in_state + count);
  476. spin_lock(&cpufreq_stats_lock);
  477. for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) {
  478. unsigned int freq = table[i].frequency;
  479. if (freq == CPUFREQ_ENTRY_INVALID)
  480. continue;
  481. all_stat->freq_table[j++] = freq;
  482. if (all_freq_table && !check_all_freq_table(freq)) {
  483. add_all_freq_table(freq);
  484. sort_needed = true;
  485. }
  486. }
  487. if (sort_needed)
  488. sort(all_freq_table->freq_table, all_freq_table->table_size,
  489. sizeof(unsigned int), &compare_for_sort, NULL);
  490. all_stat->state_num = j;
  491. per_cpu(all_cpufreq_stats, cpu) = all_stat;
  492. spin_unlock(&cpufreq_stats_lock);
  493. }
  494. static void cpufreq_stats_create_table(unsigned int cpu)
  495. {
  496. struct cpufreq_policy *policy;
  497. struct cpufreq_frequency_table *table, *pos;
  498. int count = 0;
  499. /*
  500. * "likely(!policy)" because normally cpufreq_stats will be registered
  501. * before cpufreq driver
  502. */
  503. policy = cpufreq_cpu_get(cpu);
  504. if (likely(!policy))
  505. return;
  506. table = cpufreq_frequency_get_table(policy->cpu);
  507. if (likely(table)) {
  508. cpufreq_for_each_valid_entry(pos, table)
  509. count++;
  510. if (!per_cpu(all_cpufreq_stats, cpu))
  511. cpufreq_allstats_create(cpu, table, count);
  512. if (!per_cpu(cpufreq_power_stats, cpu))
  513. cpufreq_powerstats_create(cpu, table, count);
  514. __cpufreq_stats_create_table(policy, table, count);
  515. }
  516. cpufreq_cpu_put(policy);
  517. }
/*
 * Policy notifier: keeps stats tables in sync with policy lifecycle.
 * CPUFREQ_UPDATE_POLICY_CPU migrates an existing table to the policy's
 * new owner CPU; CPUFREQ_CREATE_POLICY/CPUFREQ_REMOVE_POLICY create
 * and destroy the per-policy table.  The cumulative and power tables
 * are created lazily here as well.
 */
static int cpufreq_stat_notifier_policy(struct notifier_block *nb,
		unsigned long val, void *data)
{
	int ret = 0, count = 0;
	struct cpufreq_policy *policy = data;
	struct cpufreq_frequency_table *table, *pos;
	unsigned int cpu_num, cpu = policy->cpu;

	if (val == CPUFREQ_UPDATE_POLICY_CPU) {
		cpufreq_stats_update_policy_cpu(policy);
		return 0;
	}

	table = cpufreq_frequency_get_table(cpu);
	if (!table)
		return 0;

	/* Count valid entries to size the allocations below. */
	cpufreq_for_each_valid_entry(pos, table)
		count++;

	if (!per_cpu(all_cpufreq_stats, cpu))
		cpufreq_allstats_create(cpu, table, count);

	/* Power tables are created for every possible CPU, not just this one. */
	for_each_possible_cpu(cpu_num) {
		if (!per_cpu(cpufreq_power_stats, cpu_num))
			cpufreq_powerstats_create(cpu_num, table, count);
	}

	if (val == CPUFREQ_CREATE_POLICY)
		ret = __cpufreq_stats_create_table(policy, table, count);
	else if (val == CPUFREQ_REMOVE_POLICY)
		__cpufreq_stats_free_table(policy);

	return ret;
}
/*
 * Transition notifier (POSTCHANGE only): credit elapsed time to the
 * previous frequency, then record the transition and move last_index
 * to the new frequency's state slot.
 */
static int cpufreq_stat_notifier_trans(struct notifier_block *nb,
		unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_stats *stat;
	int old_index, new_index;

	if (val != CPUFREQ_POSTCHANGE)
		return 0;

	stat = per_cpu(cpufreq_stats_table, freq->cpu);
	if (!stat)
		return 0;

	old_index = stat->last_index;
	new_index = freq_table_get_index(stat, freq->new);

	/* We can't do stat->time_in_state[-1]= .. */
	if (old_index == -1 || new_index == -1)
		return 0;

	/* Charge the time since the last update to the old frequency. */
	cpufreq_stats_update(freq->cpu);

	if (old_index == new_index)
		return 0;

	spin_lock(&cpufreq_stats_lock);
	stat->last_index = new_index;
#ifdef CONFIG_CPU_FREQ_STAT_DETAILS
	stat->trans_table[old_index * stat->max_state + new_index]++;
#endif
	stat->total_trans++;
	spin_unlock(&cpufreq_stats_lock);
	return 0;
}
/* Policy lifecycle events: create/move/remove stats tables. */
static struct notifier_block notifier_policy_block = {
	.notifier_call = cpufreq_stat_notifier_policy
};

/* Frequency transition events: time and transition accounting. */
static struct notifier_block notifier_trans_block = {
	.notifier_call = cpufreq_stat_notifier_trans
};
/*
 * Module init: register the policy notifier first, build tables for
 * CPUs whose policies already exist, then register the transition
 * notifier.  If the latter fails, everything registered so far is
 * unwound.  The two global sysfs files are best-effort: a creation
 * failure only warns and init still returns 0.
 */
static int __init cpufreq_stats_init(void)
{
	int ret;
	unsigned int cpu;

	spin_lock_init(&cpufreq_stats_lock);
	ret = cpufreq_register_notifier(&notifier_policy_block,
				CPUFREQ_POLICY_NOTIFIER);
	if (ret)
		return ret;

	for_each_online_cpu(cpu)
		cpufreq_stats_create_table(cpu);

	ret = cpufreq_register_notifier(&notifier_trans_block,
				CPUFREQ_TRANSITION_NOTIFIER);
	if (ret) {
		/* Unwind the policy notifier and any tables created above. */
		cpufreq_unregister_notifier(&notifier_policy_block,
				CPUFREQ_POLICY_NOTIFIER);
		for_each_online_cpu(cpu)
			cpufreq_stats_free_table(cpu);
		return ret;
	}

	create_all_freq_table();
	WARN_ON(cpufreq_get_global_kobject());
	ret = sysfs_create_file(cpufreq_global_kobject,
			&_attr_all_time_in_state.attr);
	if (ret)
		pr_warn("Cannot create sysfs file for cpufreq stats\n");

	ret = sysfs_create_file(cpufreq_global_kobject,
			&_attr_current_in_state.attr);
	if (ret)
		pr_warn("Cannot create sysfs file for cpufreq current stats\n");

	return 0;
}
/*
 * Module exit: unregister both notifiers before freeing any tables so
 * no callback can race with teardown, then free the per-CPU and global
 * state and drop the global kobject reference.
 */
static void __exit cpufreq_stats_exit(void)
{
	unsigned int cpu;

	cpufreq_unregister_notifier(&notifier_policy_block,
			CPUFREQ_POLICY_NOTIFIER);
	cpufreq_unregister_notifier(&notifier_trans_block,
			CPUFREQ_TRANSITION_NOTIFIER);
	for_each_online_cpu(cpu)
		cpufreq_stats_free_table(cpu);
	cpufreq_allstats_free();
	cpufreq_powerstats_free();
	cpufreq_put_global_kobject();
}
/* Module metadata and entry points. */
MODULE_AUTHOR("Zou Nan hai <nanhai.zou@intel.com>");
MODULE_DESCRIPTION("'cpufreq_stats' - A driver to export cpufreq stats "
		"through sysfs filesystem");
MODULE_LICENSE("GPL");
module_init(cpufreq_stats_init);
module_exit(cpufreq_stats_exit);