cpufreq_interactive.c
/*
 * drivers/cpufreq/cpufreq_interactive.c
 *
 * Copyright (C) 2010 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * Author: Mike Chan (mike@android.com)
 *
 */

#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/rwsem.h>
#include <linux/sched.h>
#include <linux/sched/rt.h>
#include <linux/tick.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/slab.h>

#ifdef CONFIG_ARCH_MT6755
#include <asm/topology.h>
#include <../misc/mediatek/base/power/mt6755/mt_cpufreq.h>
unsigned int hispeed_freq_perf;
unsigned int min_sample_time_perf;
#endif

#define CREATE_TRACE_POINTS
#include <trace/events/cpufreq_interactive.h>

struct cpufreq_interactive_cpuinfo {
	struct timer_list cpu_timer;
	struct timer_list cpu_slack_timer;
	spinlock_t load_lock; /* protects the next 4 fields */
	u64 time_in_idle;
	u64 time_in_idle_timestamp;
	u64 cputime_speedadj;
	u64 cputime_speedadj_timestamp;
	struct cpufreq_policy *policy;
	struct cpufreq_frequency_table *freq_table;
	spinlock_t target_freq_lock; /* protects target_freq */
	unsigned int target_freq;
	unsigned int floor_freq;
	u64 pol_floor_val_time; /* policy floor_validate_time */
	u64 loc_floor_val_time; /* per-cpu floor_validate_time */
	u64 pol_hispeed_val_time; /* policy hispeed_validate_time */
	u64 loc_hispeed_val_time; /* per-cpu hispeed_validate_time */
	struct rw_semaphore enable_sem;
	int governor_enabled;
};

static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);

/* realtime thread handles frequency scaling */
static struct task_struct *speedchange_task;
static cpumask_t speedchange_cpumask;
static spinlock_t speedchange_cpumask_lock;
static struct mutex gov_lock;

/* Target load. Lower values result in higher CPU speeds. */
#define DEFAULT_TARGET_LOAD 90
static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};

#define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
#define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
static unsigned int default_above_hispeed_delay[] = {
	DEFAULT_ABOVE_HISPEED_DELAY };

struct cpufreq_interactive_tunables {
	int usage_count;
	/* Hi speed to bump to from lo speed when load burst (default max) */
	unsigned int hispeed_freq;
	/* Go to hi speed when CPU load at or above this value. */
#define DEFAULT_GO_HISPEED_LOAD 99
	unsigned long go_hispeed_load;
	/* Target load. Lower values result in higher CPU speeds. */
	spinlock_t target_loads_lock;
	unsigned int *target_loads;
	int ntarget_loads;
	/*
	 * The minimum amount of time to spend at a frequency before we can
	 * ramp down.
	 */
#define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
	unsigned long min_sample_time;
	/*
	 * The sample rate of the timer used to increase frequency
	 */
	unsigned long timer_rate;
	/*
	 * Wait this long before raising speed above hispeed, by default a
	 * single timer interval.
	 */
	spinlock_t above_hispeed_delay_lock;
	unsigned int *above_hispeed_delay;
	int nabove_hispeed_delay;
	/* Non-zero means indefinite speed boost active */
	int boost_val;
	/* Duration of a boost pulse in usecs */
	int boostpulse_duration_val;
	/* End time of boost pulse in ktime converted to usecs */
	u64 boostpulse_endtime;
	bool boosted;
	/*
	 * Max additional time to wait in idle, beyond timer_rate, at speeds
	 * above minimum before wakeup to reduce speed, or -1 if unnecessary.
	 */
#define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
	int timer_slack_val;
	bool io_is_busy;
};

/* For cases where we have single governor instance for system */
static struct cpufreq_interactive_tunables *common_tunables;

static struct attribute_group *get_sysfs_attr(void);
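
/*
 * Reschedule the per-CPU sampling timer and reset the idle/load
 * accounting baseline. This runs on the CPU being rescheduled: it reads
 * smp_processor_id() and uses pinned timers, so callers are the timer
 * handler itself and the idle-exit path on the local CPU.
 */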
static void cpufreq_interactive_timer_resched(
	struct cpufreq_interactive_cpuinfo *pcpu)
{
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned long expires;
	unsigned long flags;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(smp_processor_id(),
				  &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
	mod_timer_pinned(&pcpu->cpu_timer, expires);

	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
	}

	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}

/* The caller shall take enable_sem write semaphore to avoid any timer race.
 * The cpu_timer and cpu_slack_timer must be deactivated when calling this
 * function.
 */
static void cpufreq_interactive_timer_start(
	struct cpufreq_interactive_tunables *tunables, int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	unsigned long expires = jiffies +
		usecs_to_jiffies(tunables->timer_rate);
	unsigned long flags;

	pcpu->cpu_timer.expires = expires;
	add_timer_on(&pcpu->cpu_timer, cpu);
	if (tunables->timer_slack_val >= 0 &&
	    pcpu->target_freq > pcpu->policy->min) {
		expires += usecs_to_jiffies(tunables->timer_slack_val);
		pcpu->cpu_slack_timer.expires = expires;
		add_timer_on(&pcpu->cpu_slack_timer, cpu);
	}

	spin_lock_irqsave(&pcpu->load_lock, flags);
	pcpu->time_in_idle =
		get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
				  tunables->io_is_busy);
	pcpu->cputime_speedadj = 0;
	pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);
}
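
/*
 * target_loads and above_hispeed_delay are stored as flat arrays of
 * "value, freq, value, freq, ..." pairs: even indices hold values, odd
 * indices hold the frequency at which the next value takes effect. For
 * example, a target_loads array of {85, 1000000, 90} means "target load
 * 85 below 1 GHz, 90 at or above it". The lookups below step by 2 to
 * find the value bucket covering the given frequency.
 */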
static unsigned int freq_to_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
			freq >= tunables->above_hispeed_delay[i+1]; i += 2)
		;

	ret = tunables->above_hispeed_delay[i];
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}

static unsigned int freq_to_targetload(
	struct cpufreq_interactive_tunables *tunables, unsigned int freq)
{
	int i;
	unsigned int ret;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads - 1 &&
			freq >= tunables->target_loads[i+1]; i += 2)
		;

	ret = tunables->target_loads[i];
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

/*
 * If increasing frequencies never map to a lower target load then
 * choose_freq() will find the minimum frequency that does not exceed its
 * target load given the current load.
 */
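/*
 * Worked example (illustrative numbers): if the CPU was busy 90% of the
 * interval at 1000000 kHz, loadadjfreq is 0.9 * 1000000 * 100 = 90000000.
 * With a flat target load of 90, the first pass asks for the lowest table
 * frequency >= 90000000 / 90 = 1000000 kHz. The loop then re-evaluates
 * the target load at each candidate, narrowing [freqmin, freqmax] until
 * the chosen frequency repeats, which guarantees termination even when
 * target loads differ per frequency band.
 */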
static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
		unsigned int loadadjfreq)
{
	unsigned int freq = pcpu->policy->cur;
	unsigned int prevfreq, freqmin, freqmax;
	unsigned int tl;
	int index;

	freqmin = 0;
	freqmax = UINT_MAX;

	do {
		prevfreq = freq;
		tl = freq_to_targetload(pcpu->policy->governor_data, freq);

		/*
		 * Find the lowest frequency where the computed load is less
		 * than or equal to the target load.
		 */
		if (cpufreq_frequency_table_target(
			    pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
			    CPUFREQ_RELATION_L, &index))
			break;
		freq = pcpu->freq_table[index].frequency;

		if (freq > prevfreq) {
			/* The previous frequency is too low. */
			freqmin = prevfreq;

			if (freq >= freqmax) {
				/*
				 * Find the highest frequency that is less
				 * than freqmax.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmax - 1, CPUFREQ_RELATION_H,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				if (freq == freqmin) {
					/*
					 * The first frequency below freqmax
					 * has already been found to be too
					 * low. freqmax is the lowest speed
					 * we found that is fast enough.
					 */
					freq = freqmax;
					break;
				}
			}
		} else if (freq < prevfreq) {
			/* The previous frequency is high enough. */
			freqmax = prevfreq;

			if (freq <= freqmin) {
				/*
				 * Find the lowest frequency that is higher
				 * than freqmin.
				 */
				if (cpufreq_frequency_table_target(
					    pcpu->policy, pcpu->freq_table,
					    freqmin + 1, CPUFREQ_RELATION_L,
					    &index))
					break;
				freq = pcpu->freq_table[index].frequency;

				/*
				 * If freqmax is the first frequency above
				 * freqmin then we have already found that
				 * this speed is fast enough.
				 */
				if (freq == freqmax)
					break;
			}
		}

		/* If same frequency chosen as previous then done. */
	} while (freq != prevfreq);

	return freq;
}
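
/*
 * Sample idle vs. busy time since the last update and accumulate
 * cputime_speedadj, the sum of active_time * current_frequency. Dividing
 * that sum by wall time later yields a frequency-weighted load, so time
 * spent busy at a high speed counts for more than the same time at a low
 * speed.
 */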
static u64 update_load(int cpu)
{
	struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	u64 now;
	u64 now_idle;
	unsigned int delta_idle;
	unsigned int delta_time;
	u64 active_time;

	now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
	delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
	delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);

	if (delta_time <= delta_idle)
		active_time = 0;
	else
		active_time = delta_time - delta_idle;

	pcpu->cputime_speedadj += active_time * pcpu->policy->cur;

	pcpu->time_in_idle = now_idle;
	pcpu->time_in_idle_timestamp = now;
	return now;
}
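
/*
 * Per-CPU sampling timer. Computes the frequency-weighted load over the
 * last interval and picks a new target: jump to hispeed_freq on a load
 * burst or boost, otherwise ask choose_freq(); then applies the
 * above_hispeed_delay and min_sample_time/floor_freq hold-offs before
 * queueing the change for the speedchange task.
 */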
static void cpufreq_interactive_timer(unsigned long data)
{
	u64 now;
	unsigned int delta_time;
	u64 cputime_speedadj;
	int cpu_load;
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, data);
	struct cpufreq_interactive_tunables *tunables =
		pcpu->policy->governor_data;
	unsigned int new_freq;
	unsigned int loadadjfreq;
	unsigned int index;
	unsigned long flags;
	u64 max_fvtime;
#ifdef CONFIG_ARCH_MT6755
	int ppb_idx;
	/* Default, low power, just make, performance */
	int freq_idx[4] = {2, 6, 4, 0};
	int min_sample_t[4] = {80, 20, 20, 80};
#endif

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled)
		goto exit;

	spin_lock_irqsave(&pcpu->load_lock, flags);
	now = update_load(data);
	delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
	cputime_speedadj = pcpu->cputime_speedadj;
	spin_unlock_irqrestore(&pcpu->load_lock, flags);

	if (WARN_ON_ONCE(!delta_time))
		goto rearm;

	spin_lock_irqsave(&pcpu->target_freq_lock, flags);
	do_div(cputime_speedadj, delta_time);
	loadadjfreq = (unsigned int)cputime_speedadj * 100;
	cpu_load = loadadjfreq / pcpu->policy->cur;
	tunables->boosted = tunables->boost_val ||
		now < tunables->boostpulse_endtime;

#ifdef CONFIG_ARCH_MT6755
	ppb_idx = mt_cpufreq_get_ppb_state();
	/* Do not modify the settings if the L cluster is in default mode */
	if (ppb_idx == 0 && (arch_get_cluster_id(pcpu->policy->cpu) >= 1)) {
		tunables->hispeed_freq = pcpu->freq_table[0].frequency;
		tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
	} else {
		tunables->hispeed_freq =
			pcpu->freq_table[freq_idx[ppb_idx]].frequency;
		tunables->min_sample_time =
			min_sample_t[ppb_idx] * USEC_PER_MSEC;

		if (hispeed_freq_perf != 0)
			tunables->hispeed_freq = hispeed_freq_perf;
		if (min_sample_time_perf != 0)
			tunables->min_sample_time = min_sample_time_perf;
	}
#endif

	if (cpu_load >= tunables->go_hispeed_load || tunables->boosted) {
		if (pcpu->policy->cur < tunables->hispeed_freq) {
			new_freq = tunables->hispeed_freq;
		} else {
			new_freq = choose_freq(pcpu, loadadjfreq);

			if (new_freq < tunables->hispeed_freq)
				new_freq = tunables->hispeed_freq;
		}
	} else {
		new_freq = choose_freq(pcpu, loadadjfreq);
		if (new_freq > tunables->hispeed_freq &&
		    pcpu->policy->cur < tunables->hispeed_freq)
			new_freq = tunables->hispeed_freq;
	}

	if (pcpu->policy->cur >= tunables->hispeed_freq &&
	    new_freq > pcpu->policy->cur &&
	    now - pcpu->pol_hispeed_val_time <
	    freq_to_above_hispeed_delay(tunables, pcpu->policy->cur)) {
		trace_cpufreq_interactive_notyet(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	pcpu->loc_hispeed_val_time = now;

	if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
					   new_freq, CPUFREQ_RELATION_L,
					   &index)) {
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	new_freq = pcpu->freq_table[index].frequency;

	/*
	 * Do not scale below floor_freq unless we have been at or above the
	 * floor frequency for the minimum sample time since last validated.
	 */
	max_fvtime = max(pcpu->pol_floor_val_time, pcpu->loc_floor_val_time);
	if (new_freq < pcpu->floor_freq &&
	    pcpu->target_freq >= pcpu->policy->cur) {
		if (now - max_fvtime < tunables->min_sample_time) {
			trace_cpufreq_interactive_notyet(
				data, cpu_load, pcpu->target_freq,
				pcpu->policy->cur, new_freq);
			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
			goto rearm;
		}
	}

	/*
	 * Update the timestamp for checking whether speed has been held at
	 * or above the selected frequency for a minimum of min_sample_time,
	 * if not boosted to hispeed_freq. If boosted to hispeed_freq then we
	 * allow the speed to drop as soon as the boostpulse duration expires
	 * (or the indefinite boost is turned off).
	 */
	if (!tunables->boosted || new_freq > tunables->hispeed_freq) {
		pcpu->floor_freq = new_freq;
		if (pcpu->target_freq >= pcpu->policy->cur ||
		    new_freq >= pcpu->policy->cur)
			pcpu->loc_floor_val_time = now;
	}

	if (pcpu->target_freq == new_freq &&
	    pcpu->target_freq <= pcpu->policy->cur) {
		trace_cpufreq_interactive_already(
			data, cpu_load, pcpu->target_freq,
			pcpu->policy->cur, new_freq);
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
		goto rearm;
	}

	trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
					 pcpu->policy->cur, new_freq);

	pcpu->target_freq = new_freq;
	spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
	spin_lock_irqsave(&speedchange_cpumask_lock, flags);
	cpumask_set_cpu(data, &speedchange_cpumask);
	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
	wake_up_process(speedchange_task);

rearm:
	if (!timer_pending(&pcpu->cpu_timer))
		cpufreq_interactive_timer_resched(pcpu);

exit:
	up_read(&pcpu->enable_sem);
	return;
}
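
/*
 * On exit from idle, rearm the sampling timer; if the timer already
 * expired while this CPU slept (the cpu_timer is deferrable and does not
 * fire in idle), run the evaluation immediately instead of waiting
 * another interval.
 */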
static void cpufreq_interactive_idle_end(void)
{
	struct cpufreq_interactive_cpuinfo *pcpu =
		&per_cpu(cpuinfo, smp_processor_id());

	if (!down_read_trylock(&pcpu->enable_sem))
		return;
	if (!pcpu->governor_enabled) {
		up_read(&pcpu->enable_sem);
		return;
	}

	/* Arm the timer for 1-2 ticks later if not already. */
	if (!timer_pending(&pcpu->cpu_timer)) {
		cpufreq_interactive_timer_resched(pcpu);
	} else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
		del_timer(&pcpu->cpu_timer);
		del_timer(&pcpu->cpu_slack_timer);
		cpufreq_interactive_timer(smp_processor_id());
	}

	up_read(&pcpu->enable_sem);
}
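
/*
 * RT kthread that performs the actual frequency transitions. Timers only
 * mark a CPU in speedchange_cpumask and wake this task; it then drives
 * each flagged policy to the highest target_freq among its CPUs, so the
 * (possibly sleeping) cpufreq driver call happens in process context.
 */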
static int cpufreq_interactive_speedchange_task(void *data)
{
	unsigned int cpu;
	cpumask_t tmp_mask;
	unsigned long flags;
	struct cpufreq_interactive_cpuinfo *pcpu;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);
		spin_lock_irqsave(&speedchange_cpumask_lock, flags);

		if (cpumask_empty(&speedchange_cpumask)) {
			spin_unlock_irqrestore(&speedchange_cpumask_lock,
					       flags);
			schedule();

			if (kthread_should_stop())
				break;

			spin_lock_irqsave(&speedchange_cpumask_lock, flags);
		}

		set_current_state(TASK_RUNNING);
		tmp_mask = speedchange_cpumask;
		cpumask_clear(&speedchange_cpumask);
		spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);

		for_each_cpu(cpu, &tmp_mask) {
			unsigned int j;
			unsigned int max_freq = 0;
			struct cpufreq_interactive_cpuinfo *pjcpu;
			u64 hvt = ~0ULL, fvt = 0;

			pcpu = &per_cpu(cpuinfo, cpu);
			if (!down_read_trylock(&pcpu->enable_sem))
				continue;
			if (!pcpu->governor_enabled) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			for_each_cpu(j, pcpu->policy->cpus) {
				pjcpu = &per_cpu(cpuinfo, j);

				fvt = max(fvt, pjcpu->loc_floor_val_time);
				if (pjcpu->target_freq > max_freq) {
					max_freq = pjcpu->target_freq;
					hvt = pjcpu->loc_hispeed_val_time;
				} else if (pjcpu->target_freq == max_freq) {
					hvt = min(hvt,
						  pjcpu->loc_hispeed_val_time);
				}
			}
			for_each_cpu(j, pcpu->policy->cpus) {
				pjcpu = &per_cpu(cpuinfo, j);
				pjcpu->pol_floor_val_time = fvt;
			}

			if (max_freq != pcpu->policy->cur) {
				__cpufreq_driver_target(pcpu->policy,
							max_freq,
							CPUFREQ_RELATION_H);
				for_each_cpu(j, pcpu->policy->cpus) {
					pjcpu = &per_cpu(cpuinfo, j);
					pjcpu->pol_hispeed_val_time = hvt;
				}
			}
			trace_cpufreq_interactive_setspeed(cpu,
						pcpu->target_freq,
						pcpu->policy->cur);

			up_read(&pcpu->enable_sem);
		}
	}

	return 0;
}
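
/*
 * Force every online CPU governed by these tunables up to hispeed_freq
 * immediately. Called from the boost and boostpulse sysfs handlers; the
 * actual transition is still delegated to the speedchange task.
 */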
static void cpufreq_interactive_boost(
	struct cpufreq_interactive_tunables *tunables)
{
	int i;
	int anyboost = 0;
	unsigned long flags[2];
	struct cpufreq_interactive_cpuinfo *pcpu;

	tunables->boosted = true;

	spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);

	for_each_online_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);

		if (tunables != pcpu->policy->governor_data)
			continue;

		spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
		if (pcpu->target_freq < tunables->hispeed_freq) {
			pcpu->target_freq = tunables->hispeed_freq;
			cpumask_set_cpu(i, &speedchange_cpumask);
			pcpu->pol_hispeed_val_time =
				ktime_to_us(ktime_get());
			anyboost = 1;
		}
		spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
	}

	spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);

	if (anyboost)
		wake_up_process(speedchange_task);
}
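
/*
 * Frequency-transition notifier: on POSTCHANGE, fold the time spent at
 * the old frequency into every sibling CPU's load accounting so the next
 * sample is weighted against the speed that was actually in effect.
 */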
static int cpufreq_interactive_notifier(
	struct notifier_block *nb, unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;
	struct cpufreq_interactive_cpuinfo *pcpu;
	int cpu;
	unsigned long flags;

	if (val == CPUFREQ_POSTCHANGE) {
		pcpu = &per_cpu(cpuinfo, freq->cpu);
		if (!down_read_trylock(&pcpu->enable_sem))
			return 0;
		if (!pcpu->governor_enabled) {
			up_read(&pcpu->enable_sem);
			return 0;
		}

		for_each_cpu(cpu, pcpu->policy->cpus) {
			struct cpufreq_interactive_cpuinfo *pjcpu =
				&per_cpu(cpuinfo, cpu);

			if (cpu != freq->cpu) {
				if (!down_read_trylock(&pjcpu->enable_sem))
					continue;
				if (!pjcpu->governor_enabled) {
					up_read(&pjcpu->enable_sem);
					continue;
				}
			}
			spin_lock_irqsave(&pjcpu->load_lock, flags);
			update_load(cpu);
			spin_unlock_irqrestore(&pjcpu->load_lock, flags);
			if (cpu != freq->cpu)
				up_read(&pjcpu->enable_sem);
		}

		up_read(&pcpu->enable_sem);
	}
	return 0;
}

static struct notifier_block cpufreq_notifier_block = {
	.notifier_call = cpufreq_interactive_notifier,
};
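
/*
 * Parse a "value freq:value freq:value ..." string (as written to the
 * target_loads and above_hispeed_delay attributes) into a flat unsigned
 * int array. A valid string has an odd number of tokens; for example
 * "85 1000000:90" yields {85, 1000000, 90}.
 */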
static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
{
	const char *cp;
	int i;
	int ntokens = 1;
	unsigned int *tokenized_data;
	int err = -EINVAL;

	cp = buf;
	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	if (!(ntokens & 0x1))
		goto err;

	tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
	if (!tokenized_data) {
		err = -ENOMEM;
		goto err;
	}

	cp = buf;
	i = 0;
	while (i < ntokens) {
		if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
			goto err_kfree;

		cp = strpbrk(cp, " :");
		if (!cp)
			break;
		cp++;
	}

	if (i != ntokens)
		goto err_kfree;

	*num_tokens = ntokens;
	return tokenized_data;

err_kfree:
	kfree(tokenized_data);
err:
	return ERR_PTR(err);
}
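
/*
 * Example usage from userspace (the sysfs path shown assumes the
 * single-instance layout; with per-policy governors the attributes live
 * under each policy directory instead):
 *
 *   echo "85 1000000:90 1700000:99" > \
 *       /sys/devices/system/cpu/cpufreq/interactive/target_loads
 *
 * means: target load 85 below 1 GHz, 90 from 1 GHz to 1.7 GHz, and 99 at
 * or above 1.7 GHz.
 */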
static ssize_t show_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->target_loads_lock, flags);

	for (i = 0; i < tunables->ntarget_loads; i++)
		ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return ret;
}

static ssize_t store_target_loads(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_target_loads = NULL;
	unsigned long flags;

	new_target_loads = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_target_loads))
		return PTR_RET(new_target_loads);

	spin_lock_irqsave(&tunables->target_loads_lock, flags);
	if (tunables->target_loads != default_target_loads)
		kfree(tunables->target_loads);
	tunables->target_loads = new_target_loads;
	tunables->ntarget_loads = ntokens;
	spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
	return count;
}

static ssize_t show_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables, char *buf)
{
	int i;
	ssize_t ret = 0;
	unsigned long flags;

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);

	for (i = 0; i < tunables->nabove_hispeed_delay; i++)
		ret += sprintf(buf + ret, "%u%s",
			       tunables->above_hispeed_delay[i],
			       i & 0x1 ? ":" : " ");

	sprintf(buf + ret - 1, "\n");
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return ret;
}

static ssize_t store_above_hispeed_delay(
	struct cpufreq_interactive_tunables *tunables,
	const char *buf, size_t count)
{
	int ntokens;
	unsigned int *new_above_hispeed_delay = NULL;
	unsigned long flags;

	new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
	if (IS_ERR(new_above_hispeed_delay))
		return PTR_RET(new_above_hispeed_delay);

	spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
	if (tunables->above_hispeed_delay != default_above_hispeed_delay)
		kfree(tunables->above_hispeed_delay);
	tunables->above_hispeed_delay = new_above_hispeed_delay;
	tunables->nabove_hispeed_delay = ntokens;
	spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
	return count;
}

static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->hispeed_freq);
}

static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;
	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->hispeed_freq = val;
#ifdef CONFIG_ARCH_MT6755
	hispeed_freq_perf = val;
#endif
	return count;
}

static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
}

static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->go_hispeed_load = val;
	return count;
}

static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%lu\n", tunables->min_sample_time);
}

static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->min_sample_time = val;
#ifdef CONFIG_ARCH_MT6755
	min_sample_time_perf = val;
#endif
	return count;
}

static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%lu\n", tunables->timer_rate);
}
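
/*
 * timer_rate is stored rounded up to a whole number of jiffies. With
 * HZ=100, for example, writing 25000 (usecs) becomes 30000, since the
 * timer cannot fire between ticks.
 */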
static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val, val_round;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	val_round = jiffies_to_usecs(usecs_to_jiffies(val));
	if (val != val_round)
		pr_warn("timer_rate not aligned to jiffy. Rounded up to %lu\n",
			val_round);

	tunables->timer_rate = val_round;
	return count;
}

static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%d\n", tunables->timer_slack_val);
}

static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	long val;

	ret = kstrtol(buf, 10, &val);
	if (ret < 0)
		return ret;

	tunables->timer_slack_val = val;
	return count;
}

static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
			  char *buf)
{
	return sprintf(buf, "%d\n", tunables->boost_val);
}

static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
			   const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boost_val = val;

	if (tunables->boost_val) {
		trace_cpufreq_interactive_boost("on");
		if (!tunables->boosted)
			cpufreq_interactive_boost(tunables);
	} else {
		tunables->boostpulse_endtime = ktime_to_us(ktime_get());
		trace_cpufreq_interactive_unboost("off");
	}

	return count;
}

static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
				const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
		tunables->boostpulse_duration_val;
	trace_cpufreq_interactive_boost("pulse");
	if (!tunables->boosted)
		cpufreq_interactive_boost(tunables);
	return count;
}

static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, char *buf)
{
	return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
}

static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
		*tunables, const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;

	tunables->boostpulse_duration_val = val;
	return count;
}

static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		char *buf)
{
	return sprintf(buf, "%u\n", tunables->io_is_busy);
}

static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
		const char *buf, size_t count)
{
	int ret;
	unsigned long val;

	ret = kstrtoul(buf, 0, &val);
	if (ret < 0)
		return ret;
	tunables->io_is_busy = val;
	return count;
}

/*
 * Create show/store routines
 * - sys: One governor instance for complete SYSTEM
 * - pol: One governor instance per struct cpufreq_policy
 */
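/*
 * For example, show_store_gov_pol_sys(hispeed_freq) below expands to four
 * wrappers: show_hispeed_freq_gov_sys()/store_hispeed_freq_gov_sys(),
 * which operate on the shared common_tunables, and
 * show_hispeed_freq_gov_pol()/store_hispeed_freq_gov_pol(), which operate
 * on policy->governor_data.
 */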
#define show_gov_pol_sys(file_name)					\
static ssize_t show_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, char *buf)		\
{									\
	return show_##file_name(common_tunables, buf);			\
}									\
									\
static ssize_t show_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, char *buf)				\
{									\
	return show_##file_name(policy->governor_data, buf);		\
}

#define store_gov_pol_sys(file_name)					\
static ssize_t store_##file_name##_gov_sys				\
(struct kobject *kobj, struct attribute *attr, const char *buf,		\
	size_t count)							\
{									\
	return store_##file_name(common_tunables, buf, count);		\
}									\
									\
static ssize_t store_##file_name##_gov_pol				\
(struct cpufreq_policy *policy, const char *buf, size_t count)		\
{									\
	return store_##file_name(policy->governor_data, buf, count);	\
}

#define show_store_gov_pol_sys(file_name)				\
show_gov_pol_sys(file_name);						\
store_gov_pol_sys(file_name)

show_store_gov_pol_sys(target_loads);
show_store_gov_pol_sys(above_hispeed_delay);
show_store_gov_pol_sys(hispeed_freq);
show_store_gov_pol_sys(go_hispeed_load);
show_store_gov_pol_sys(min_sample_time);
show_store_gov_pol_sys(timer_rate);
show_store_gov_pol_sys(timer_slack);
show_store_gov_pol_sys(boost);
store_gov_pol_sys(boostpulse);
show_store_gov_pol_sys(boostpulse_duration);
show_store_gov_pol_sys(io_is_busy);

#define gov_sys_attr_rw(_name)						\
static struct global_attr _name##_gov_sys =				\
__ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)

#define gov_pol_attr_rw(_name)						\
static struct freq_attr _name##_gov_pol =				\
__ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)

#define gov_sys_pol_attr_rw(_name)					\
	gov_sys_attr_rw(_name);						\
	gov_pol_attr_rw(_name)

gov_sys_pol_attr_rw(target_loads);
gov_sys_pol_attr_rw(above_hispeed_delay);
gov_sys_pol_attr_rw(hispeed_freq);
gov_sys_pol_attr_rw(go_hispeed_load);
gov_sys_pol_attr_rw(min_sample_time);
gov_sys_pol_attr_rw(timer_rate);
gov_sys_pol_attr_rw(timer_slack);
gov_sys_pol_attr_rw(boost);
gov_sys_pol_attr_rw(boostpulse_duration);
gov_sys_pol_attr_rw(io_is_busy);

static struct global_attr boostpulse_gov_sys =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);

static struct freq_attr boostpulse_gov_pol =
	__ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);

/* One Governor instance for entire system */
static struct attribute *interactive_attributes_gov_sys[] = {
	&target_loads_gov_sys.attr,
	&above_hispeed_delay_gov_sys.attr,
	&hispeed_freq_gov_sys.attr,
	&go_hispeed_load_gov_sys.attr,
	&min_sample_time_gov_sys.attr,
	&timer_rate_gov_sys.attr,
	&timer_slack_gov_sys.attr,
	&boost_gov_sys.attr,
	&boostpulse_gov_sys.attr,
	&boostpulse_duration_gov_sys.attr,
	&io_is_busy_gov_sys.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_sys = {
	.attrs = interactive_attributes_gov_sys,
	.name = "interactive",
};

/* Per policy governor instance */
static struct attribute *interactive_attributes_gov_pol[] = {
	&target_loads_gov_pol.attr,
	&above_hispeed_delay_gov_pol.attr,
	&hispeed_freq_gov_pol.attr,
	&go_hispeed_load_gov_pol.attr,
	&min_sample_time_gov_pol.attr,
	&timer_rate_gov_pol.attr,
	&timer_slack_gov_pol.attr,
	&boost_gov_pol.attr,
	&boostpulse_gov_pol.attr,
	&boostpulse_duration_gov_pol.attr,
	&io_is_busy_gov_pol.attr,
	NULL,
};

static struct attribute_group interactive_attr_group_gov_pol = {
	.attrs = interactive_attributes_gov_pol,
	.name = "interactive",
};

static struct attribute_group *get_sysfs_attr(void)
{
	if (have_governor_per_policy())
		return &interactive_attr_group_gov_pol;
	else
		return &interactive_attr_group_gov_sys;
}

static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
					     unsigned long val,
					     void *data)
{
	if (val == IDLE_END)
		cpufreq_interactive_idle_end();

	return 0;
}

static struct notifier_block cpufreq_interactive_idle_nb = {
	.notifier_call = cpufreq_interactive_idle_notifier,
};
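
/*
 * Governor entry point. POLICY_INIT/POLICY_EXIT allocate and free the
 * tunables (shared via common_tunables unless the kernel has per-policy
 * governors) and register the sysfs group plus idle/transition notifiers;
 * START/STOP arm and tear down the per-CPU timers under enable_sem;
 * LIMITS clamps current and target frequencies into
 * [policy->min, policy->max].
 */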
static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
		unsigned int event)
{
	int rc;
	unsigned int j;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct cpufreq_frequency_table *freq_table;
	struct cpufreq_interactive_tunables *tunables;
	unsigned long flags;

	if (have_governor_per_policy())
		tunables = policy->governor_data;
	else
		tunables = common_tunables;

	WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));

	switch (event) {
	case CPUFREQ_GOV_POLICY_INIT:
		if (have_governor_per_policy()) {
			WARN_ON(tunables);
		} else if (tunables) {
			tunables->usage_count++;
			policy->governor_data = tunables;
			return 0;
		}

		tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
		if (!tunables) {
			pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
			return -ENOMEM;
		}

		tunables->usage_count = 1;
		tunables->above_hispeed_delay = default_above_hispeed_delay;
		tunables->nabove_hispeed_delay =
			ARRAY_SIZE(default_above_hispeed_delay);
		tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
		tunables->target_loads = default_target_loads;
		tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
		tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
		tunables->timer_rate = DEFAULT_TIMER_RATE;
		tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
		tunables->timer_slack_val = DEFAULT_TIMER_SLACK;

		spin_lock_init(&tunables->target_loads_lock);
		spin_lock_init(&tunables->above_hispeed_delay_lock);

		policy->governor_data = tunables;
		if (!have_governor_per_policy()) {
			common_tunables = tunables;
			WARN_ON(cpufreq_get_global_kobject());
		}

		rc = sysfs_create_group(get_governor_parent_kobj(policy),
				get_sysfs_attr());
		if (rc) {
			kfree(tunables);
			policy->governor_data = NULL;
			if (!have_governor_per_policy()) {
				common_tunables = NULL;
				cpufreq_put_global_kobject();
			}
			return rc;
		}

		if (!policy->governor->initialized) {
			idle_notifier_register(&cpufreq_interactive_idle_nb);
			cpufreq_register_notifier(&cpufreq_notifier_block,
					CPUFREQ_TRANSITION_NOTIFIER);
		}

		break;

	case CPUFREQ_GOV_POLICY_EXIT:
		if (!--tunables->usage_count) {
			if (policy->governor->initialized == 1) {
				cpufreq_unregister_notifier(&cpufreq_notifier_block,
						CPUFREQ_TRANSITION_NOTIFIER);
				idle_notifier_unregister(&cpufreq_interactive_idle_nb);
			}

			sysfs_remove_group(get_governor_parent_kobj(policy),
					get_sysfs_attr());

			if (!have_governor_per_policy())
				cpufreq_put_global_kobject();

			kfree(tunables);
			common_tunables = NULL;
		}

		policy->governor_data = NULL;
		break;

	case CPUFREQ_GOV_START:
		mutex_lock(&gov_lock);

		freq_table = cpufreq_frequency_get_table(policy->cpu);
		if (tunables) {
			if (!tunables->hispeed_freq)
				tunables->hispeed_freq = policy->max;
		}

		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			pcpu->policy = policy;
			pcpu->target_freq = policy->cur;
			pcpu->freq_table = freq_table;
			pcpu->floor_freq = pcpu->target_freq;
			pcpu->pol_floor_val_time =
				ktime_to_us(ktime_get());
			pcpu->loc_floor_val_time = pcpu->pol_floor_val_time;
			pcpu->pol_hispeed_val_time = pcpu->pol_floor_val_time;
			pcpu->loc_hispeed_val_time = pcpu->pol_floor_val_time;
			down_write(&pcpu->enable_sem);
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			cpufreq_interactive_timer_start(tunables, j);
			pcpu->governor_enabled = 1;
			up_write(&pcpu->enable_sem);
		}

		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_STOP:
		mutex_lock(&gov_lock);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);
			down_write(&pcpu->enable_sem);
			pcpu->governor_enabled = 0;
			del_timer_sync(&pcpu->cpu_timer);
			del_timer_sync(&pcpu->cpu_slack_timer);
			up_write(&pcpu->enable_sem);
		}

		mutex_unlock(&gov_lock);
		break;

	case CPUFREQ_GOV_LIMITS:
		if (policy->max < policy->cur)
			__cpufreq_driver_target(policy,
					policy->max, CPUFREQ_RELATION_H);
		else if (policy->min > policy->cur)
			__cpufreq_driver_target(policy,
					policy->min, CPUFREQ_RELATION_L);
		for_each_cpu(j, policy->cpus) {
			pcpu = &per_cpu(cpuinfo, j);

			down_read(&pcpu->enable_sem);
			if (pcpu->governor_enabled == 0) {
				up_read(&pcpu->enable_sem);
				continue;
			}

			spin_lock_irqsave(&pcpu->target_freq_lock, flags);
			if (policy->max < pcpu->target_freq)
				pcpu->target_freq = policy->max;
			else if (policy->min > pcpu->target_freq)
				pcpu->target_freq = policy->min;

			spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
			up_read(&pcpu->enable_sem);
		}
		break;
	}
	return 0;
}

#ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
static
#endif
struct cpufreq_governor cpufreq_gov_interactive = {
	.name = "interactive",
	.governor = cpufreq_governor_interactive,
	.max_transition_latency = 10000000,
	.owner = THIS_MODULE,
};

static void cpufreq_interactive_nop_timer(unsigned long data)
{
}

static int __init cpufreq_interactive_init(void)
{
	unsigned int i;
	struct cpufreq_interactive_cpuinfo *pcpu;
	struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };

	/* Initialize per-cpu timers */
	for_each_possible_cpu(i) {
		pcpu = &per_cpu(cpuinfo, i);
		init_timer_deferrable(&pcpu->cpu_timer);
		pcpu->cpu_timer.function = cpufreq_interactive_timer;
		pcpu->cpu_timer.data = i;
		init_timer(&pcpu->cpu_slack_timer);
		pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
		spin_lock_init(&pcpu->load_lock);
		spin_lock_init(&pcpu->target_freq_lock);
		init_rwsem(&pcpu->enable_sem);
	}

	spin_lock_init(&speedchange_cpumask_lock);
	mutex_init(&gov_lock);
	speedchange_task =
		kthread_create(cpufreq_interactive_speedchange_task, NULL,
			       "cfinteractive");
	if (IS_ERR(speedchange_task))
		return PTR_ERR(speedchange_task);

	sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
	get_task_struct(speedchange_task);

	/* NB: wake up so the thread does not look hung to the freezer */
	wake_up_process(speedchange_task);

	return cpufreq_register_governor(&cpufreq_gov_interactive);
}

#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
fs_initcall(cpufreq_interactive_init);
#else
module_init(cpufreq_interactive_init);
#endif

static void __exit cpufreq_interactive_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_interactive);
	kthread_stop(speedchange_task);
	put_task_struct(speedchange_task);
}

module_exit(cpufreq_interactive_exit);

MODULE_AUTHOR("Mike Chan <mike@android.com>");
MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
	"latency sensitive workloads");
MODULE_LICENSE("GPL");