sched_avg.c

/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Scheduler hook for average runqueue determination
 */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/spinlock.h>
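
/*
 * Per-cpu accounting state: nr_prod_sum integrates nr_running over time
 * (task-nanoseconds) and iowait_prod_sum does the same for tasks blocked
 * in iowait; nr holds the current nr_running count and last_time marks
 * the start of the interval that is still open. nr_lock serializes
 * updates against the reader, and last_get_time remembers when the
 * averages were last drained.
 */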
static DEFINE_PER_CPU(u64, nr_prod_sum);
static DEFINE_PER_CPU(u64, last_time);
static DEFINE_PER_CPU(u64, nr);
static DEFINE_PER_CPU(unsigned long, iowait_prod_sum);
static DEFINE_PER_CPU(spinlock_t, nr_lock) = __SPIN_LOCK_UNLOCKED(nr_lock);
static u64 last_get_time;

/**
 * sched_get_nr_running_avg
 * @avg: Average nr_running value since the last poll, scaled by 100
 *	 to give up to two decimal places of accuracy.
 * @iowait_avg: Average iowait value since the last poll, scaled the
 *	 same way.
 *
 * Obtains the average nr_running value since the last poll.
 * This function may not be called concurrently with itself.
 */
void sched_get_nr_running_avg(int *avg, int *iowait_avg)
{
	int cpu;
	u64 curr_time = sched_clock();
	s64 diff = (s64) (curr_time - last_get_time);
	u64 tmp_avg = 0, tmp_iowait = 0, old_lgt;
	bool clk_faulty = false;
	u32 cpumask = 0;

	*avg = 0;
	*iowait_avg = 0;

	if (!diff)
		return;

	WARN(diff < 0, "[sched_get_nr_running_avg] time last:%llu curr:%llu ",
	     last_get_time, curr_time);

	old_lgt = last_get_time;
	last_get_time = curr_time;

	/* read and reset nr_running counts */
	for_each_possible_cpu(cpu) {
		unsigned long flags;

		spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags);
		/* flag CPUs whose clock appears to have gone backwards */
		if ((s64) (curr_time - per_cpu(last_time, cpu)) < 0) {
			clk_faulty = true;
			cpumask |= 1 << cpu;
		}
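
		/*
		 * Drain this cpu's window: nr_prod_sum already integrates
		 * nr_running over the closed sub-intervals, so only the
		 * still-open interval since last_time needs to be added
		 * before the accumulators are reset.
		 */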
		tmp_avg += per_cpu(nr_prod_sum, cpu);
		tmp_avg += per_cpu(nr, cpu) * (curr_time - per_cpu(last_time, cpu));
		tmp_iowait += per_cpu(iowait_prod_sum, cpu);
		tmp_iowait += nr_iowait_cpu(cpu) * (curr_time - per_cpu(last_time, cpu));

		per_cpu(last_time, cpu) = curr_time;
		per_cpu(nr_prod_sum, cpu) = 0;
		per_cpu(iowait_prod_sum, cpu) = 0;
		spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
	}

	/* if any clock went backwards, discard this sample entirely */
	if (clk_faulty) {
		*avg = 0;
		*iowait_avg = 0;
		pr_warn("[%s] **** CPU (0x%08x) clock may be unstable!\n",
			__func__, cpumask);
		return;
	}
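
	/*
	 * tmp_avg and tmp_iowait are integrals in task-nanoseconds;
	 * dividing by the elapsed time since the last poll yields the
	 * mean, and the factor of 100 keeps two decimal places in the
	 * integer result (e.g. an average of 2.37 tasks reads as 237).
	 */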
	*avg = (int)div64_u64(tmp_avg * 100, (u64) diff);
	*iowait_avg = (int)div64_u64(tmp_iowait * 100, (u64) diff);

	WARN(*avg < 0, "[sched_get_nr_running_avg] avg:%d(%llu/%lld), time last:%llu curr:%llu ",
	     *avg, tmp_avg, diff, old_lgt, curr_time);
	if (unlikely(*avg < 0))
		*avg = 0;

	WARN(*iowait_avg < 0, "[sched_get_nr_running_avg] iowait_avg:%d(%llu/%lld) time last:%llu curr:%llu ",
	     *iowait_avg, tmp_iowait, diff, old_lgt, curr_time);
	if (unlikely(*iowait_avg < 0))
		*iowait_avg = 0;
}
EXPORT_SYMBOL(sched_get_nr_running_avg);

/**
 * sched_update_nr_prod
 * @cpu: The CPU whose nr_running count is being updated.
 * @nr_running: The nr_running value for @cpu before this update.
 * @inc: Whether we are increasing (+1) or decreasing (-1) the count.
 *
 * Updates the per-cpu time-weighted average with the latest nr_running
 * value for the CPU.
 */
void sched_update_nr_prod(int cpu, unsigned long nr_running, int inc)
{
	s64 diff;
	u64 curr_time;
	unsigned long flags;

	spin_lock_irqsave(&per_cpu(nr_lock, cpu), flags);
	curr_time = sched_clock();
	diff = (s64) (curr_time - per_cpu(last_time, cpu));
	/* skip this update if the clock appears to have gone backwards */
	if (diff < 0) {
		spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
		return;
	}
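
	/*
	 * Charge the interval that just ended at the old nr_running
	 * value, then store the new count for the next interval; the
	 * accumulated products are drained by sched_get_nr_running_avg().
	 */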
	per_cpu(last_time, cpu) = curr_time;
	per_cpu(nr, cpu) = nr_running + inc;

	BUG_ON((s64) per_cpu(nr, cpu) < 0);

	per_cpu(nr_prod_sum, cpu) += nr_running * diff;
	per_cpu(iowait_prod_sum, cpu) += nr_iowait_cpu(cpu) * diff;
	spin_unlock_irqrestore(&per_cpu(nr_lock, cpu), flags);
}
EXPORT_SYMBOL(sched_update_nr_prod);
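
/*
 * Usage sketch (illustrative only; the callers named here are assumptions,
 * not part of this file): a load-polling consumer such as a cpufreq or
 * hotplug governor would periodically do
 *
 *	int avg, iowait_avg;
 *
 *	sched_get_nr_running_avg(&avg, &iowait_avg);
 *	if (avg > 200)		// more than 2.00 runnable tasks on average
 *		;		// e.g. bring another core online
 *
 * while the scheduler's enqueue/dequeue paths keep the products current,
 * calling sched_update_nr_prod(cpu, old_nr_running, 1) on enqueue and
 * sched_update_nr_prod(cpu, old_nr_running, -1) on dequeue.
 */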