- /*
- * drivers/cpufreq/cpufreq_interactive.c
- *
- * Copyright (C) 2010 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * Author: Mike Chan (mike@android.com)
- *
- */
- #include <linux/cpu.h>
- #include <linux/cpumask.h>
- #include <linux/cpufreq.h>
- #include <linux/module.h>
- #include <linux/moduleparam.h>
- #include <linux/rwsem.h>
- #include <linux/sched.h>
- #include <linux/sched/rt.h>
- #include <linux/tick.h>
- #include <linux/time.h>
- #include <linux/timer.h>
- #include <linux/workqueue.h>
- #include <linux/kthread.h>
- #include <linux/slab.h>
- #ifdef CONFIG_ARCH_MT6755
- #include <asm/topology.h>
- #include <../misc/mediatek/base/power/mt6755/mt_cpufreq.h>
- unsigned int hispeed_freq_perf = 0;
- unsigned int min_sample_time_perf = 0;
- #endif
- #define CREATE_TRACE_POINTS
- #include <trace/events/cpufreq_interactive.h>
- struct cpufreq_interactive_cpuinfo {
- struct timer_list cpu_timer;
- struct timer_list cpu_slack_timer;
- spinlock_t load_lock; /* protects the next 4 fields */
- u64 time_in_idle;
- u64 time_in_idle_timestamp;
- u64 cputime_speedadj;
- u64 cputime_speedadj_timestamp;
- struct cpufreq_policy *policy;
- struct cpufreq_frequency_table *freq_table;
- spinlock_t target_freq_lock; /* protects target freq */
- unsigned int target_freq;
- unsigned int floor_freq;
- u64 pol_floor_val_time; /* policy floor_validate_time */
- u64 loc_floor_val_time; /* per-cpu floor_validate_time */
- u64 pol_hispeed_val_time; /* policy hispeed_validate_time */
- u64 loc_hispeed_val_time; /* per-cpu hispeed_validate_time */
- struct rw_semaphore enable_sem;
- int governor_enabled;
- };
- static DEFINE_PER_CPU(struct cpufreq_interactive_cpuinfo, cpuinfo);
- /* realtime thread handles frequency scaling */
- static struct task_struct *speedchange_task;
- static cpumask_t speedchange_cpumask;
- static spinlock_t speedchange_cpumask_lock;
- static struct mutex gov_lock;
- /* Target load. Lower values result in higher CPU speeds. */
- #define DEFAULT_TARGET_LOAD 90
- static unsigned int default_target_loads[] = {DEFAULT_TARGET_LOAD};
- #define DEFAULT_TIMER_RATE (20 * USEC_PER_MSEC)
- #define DEFAULT_ABOVE_HISPEED_DELAY DEFAULT_TIMER_RATE
- static unsigned int default_above_hispeed_delay[] = {
- DEFAULT_ABOVE_HISPEED_DELAY };
- struct cpufreq_interactive_tunables {
- int usage_count;
- /* Hi speed to bump to from lo speed when load bursts (default max) */
- unsigned int hispeed_freq;
- /* Go to hi speed when CPU load at or above this value. */
- #define DEFAULT_GO_HISPEED_LOAD 99
- unsigned long go_hispeed_load;
- /* Target load. Lower values result in higher CPU speeds. */
- spinlock_t target_loads_lock;
- unsigned int *target_loads;
- int ntarget_loads;
- /*
- * The minimum amount of time to spend at a frequency before we can ramp
- * down.
- */
- #define DEFAULT_MIN_SAMPLE_TIME (80 * USEC_PER_MSEC)
- unsigned long min_sample_time;
- /*
- * The sample rate of the timer used to increase frequency
- */
- unsigned long timer_rate;
- /*
- * Wait this long before raising speed above hispeed, by default a
- * single timer interval.
- */
- spinlock_t above_hispeed_delay_lock;
- unsigned int *above_hispeed_delay;
- int nabove_hispeed_delay;
- /* Non-zero means indefinite speed boost active */
- int boost_val;
- /* Duration of a boost pulse in usecs */
- int boostpulse_duration_val;
- /* End time of boost pulse in ktime converted to usecs */
- u64 boostpulse_endtime;
- bool boosted;
- /*
- * Max additional time to wait in idle, beyond timer_rate, at speeds
- * above minimum before wakeup to reduce speed, or -1 if unnecessary.
- */
- #define DEFAULT_TIMER_SLACK (4 * DEFAULT_TIMER_RATE)
- int timer_slack_val;
- bool io_is_busy;
- };
- /* For cases where we have single governor instance for system */
- static struct cpufreq_interactive_tunables *common_tunables;
- static struct attribute_group *get_sysfs_attr(void);
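- /*
-  * Re-arm the sampling timer (and the optional slack timer) on the local
-  * CPU and reset the per-CPU load accounting window.
-  */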
- static void cpufreq_interactive_timer_resched(
- struct cpufreq_interactive_cpuinfo *pcpu)
- {
- struct cpufreq_interactive_tunables *tunables =
- pcpu->policy->governor_data;
- unsigned long expires;
- unsigned long flags;
- spin_lock_irqsave(&pcpu->load_lock, flags);
- pcpu->time_in_idle =
- get_cpu_idle_time(smp_processor_id(),
- &pcpu->time_in_idle_timestamp,
- tunables->io_is_busy);
- pcpu->cputime_speedadj = 0;
- pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
- expires = jiffies + usecs_to_jiffies(tunables->timer_rate);
- mod_timer_pinned(&pcpu->cpu_timer, expires);
- if (tunables->timer_slack_val >= 0 &&
- pcpu->target_freq > pcpu->policy->min) {
- expires += usecs_to_jiffies(tunables->timer_slack_val);
- mod_timer_pinned(&pcpu->cpu_slack_timer, expires);
- }
- spin_unlock_irqrestore(&pcpu->load_lock, flags);
- }
- /* The caller shall take enable_sem write semaphore to avoid any timer race.
- * The cpu_timer and cpu_slack_timer must be deactivated when calling this
- * function.
- */
- static void cpufreq_interactive_timer_start(
- struct cpufreq_interactive_tunables *tunables, int cpu)
- {
- struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
- unsigned long expires = jiffies +
- usecs_to_jiffies(tunables->timer_rate);
- unsigned long flags;
- pcpu->cpu_timer.expires = expires;
- add_timer_on(&pcpu->cpu_timer, cpu);
- if (tunables->timer_slack_val >= 0 &&
- pcpu->target_freq > pcpu->policy->min) {
- expires += usecs_to_jiffies(tunables->timer_slack_val);
- pcpu->cpu_slack_timer.expires = expires;
- add_timer_on(&pcpu->cpu_slack_timer, cpu);
- }
- spin_lock_irqsave(&pcpu->load_lock, flags);
- pcpu->time_in_idle =
- get_cpu_idle_time(cpu, &pcpu->time_in_idle_timestamp,
- tunables->io_is_busy);
- pcpu->cputime_speedadj = 0;
- pcpu->cputime_speedadj_timestamp = pcpu->time_in_idle_timestamp;
- spin_unlock_irqrestore(&pcpu->load_lock, flags);
- }
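- /* Return the above_hispeed_delay value that applies at frequency @freq. */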
- static unsigned int freq_to_above_hispeed_delay(
- struct cpufreq_interactive_tunables *tunables,
- unsigned int freq)
- {
- int i;
- unsigned int ret;
- unsigned long flags;
- spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
- for (i = 0; i < tunables->nabove_hispeed_delay - 1 &&
- freq >= tunables->above_hispeed_delay[i+1]; i += 2)
- ;
- ret = tunables->above_hispeed_delay[i];
- spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
- return ret;
- }
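- /* Return the target load that applies at frequency @freq. */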
- static unsigned int freq_to_targetload(
- struct cpufreq_interactive_tunables *tunables, unsigned int freq)
- {
- int i;
- unsigned int ret;
- unsigned long flags;
- spin_lock_irqsave(&tunables->target_loads_lock, flags);
- for (i = 0; i < tunables->ntarget_loads - 1 &&
- freq >= tunables->target_loads[i+1]; i += 2)
- ;
- ret = tunables->target_loads[i];
- spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
- return ret;
- }
- /*
- * If increasing frequencies never map to a lower target load then
- * choose_freq() will find the minimum frequency that does not exceed its
- * target load given the current load.
- */
- static unsigned int choose_freq(struct cpufreq_interactive_cpuinfo *pcpu,
- unsigned int loadadjfreq)
- {
- unsigned int freq = pcpu->policy->cur;
- unsigned int prevfreq, freqmin, freqmax;
- unsigned int tl;
- int index;
- freqmin = 0;
- freqmax = UINT_MAX;
- do {
- prevfreq = freq;
- tl = freq_to_targetload(pcpu->policy->governor_data, freq);
- /*
- * Find the lowest frequency where the computed load is less
- * than or equal to the target load.
- */
- if (cpufreq_frequency_table_target(
- pcpu->policy, pcpu->freq_table, loadadjfreq / tl,
- CPUFREQ_RELATION_L, &index))
- break;
- freq = pcpu->freq_table[index].frequency;
- if (freq > prevfreq) {
- /* The previous frequency is too low. */
- freqmin = prevfreq;
- if (freq >= freqmax) {
- /*
- * Find the highest frequency that is less
- * than freqmax.
- */
- if (cpufreq_frequency_table_target(
- pcpu->policy, pcpu->freq_table,
- freqmax - 1, CPUFREQ_RELATION_H,
- &index))
- break;
- freq = pcpu->freq_table[index].frequency;
- if (freq == freqmin) {
- /*
- * The first frequency below freqmax
- * has already been found to be too
- * low. freqmax is the lowest speed
- * we found that is fast enough.
- */
- freq = freqmax;
- break;
- }
- }
- } else if (freq < prevfreq) {
- /* The previous frequency is high enough. */
- freqmax = prevfreq;
- if (freq <= freqmin) {
- /*
- * Find the lowest frequency that is higher
- * than freqmin.
- */
- if (cpufreq_frequency_table_target(
- pcpu->policy, pcpu->freq_table,
- freqmin + 1, CPUFREQ_RELATION_L,
- &index))
- break;
- freq = pcpu->freq_table[index].frequency;
- /*
- * If freqmax is the first frequency above
- * freqmin then we have already found that
- * this speed is fast enough.
- */
- if (freq == freqmax)
- break;
- }
- }
- /* If same frequency chosen as previous then done. */
- } while (freq != prevfreq);
- return freq;
- }
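- /*
-  * Sample this CPU's idle time, accumulate the busy time weighted by the
-  * current frequency into cputime_speedadj, and return the current
-  * timestamp. Called with load_lock held.
-  */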
- static u64 update_load(int cpu)
- {
- struct cpufreq_interactive_cpuinfo *pcpu = &per_cpu(cpuinfo, cpu);
- struct cpufreq_interactive_tunables *tunables =
- pcpu->policy->governor_data;
- u64 now;
- u64 now_idle;
- unsigned int delta_idle;
- unsigned int delta_time;
- u64 active_time;
- now_idle = get_cpu_idle_time(cpu, &now, tunables->io_is_busy);
- delta_idle = (unsigned int)(now_idle - pcpu->time_in_idle);
- delta_time = (unsigned int)(now - pcpu->time_in_idle_timestamp);
- if (delta_time <= delta_idle)
- active_time = 0;
- else
- active_time = delta_time - delta_idle;
- pcpu->cputime_speedadj += active_time * pcpu->policy->cur;
- pcpu->time_in_idle = now_idle;
- pcpu->time_in_idle_timestamp = now;
- return now;
- }
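- /*
-  * Sampling timer handler: compute the CPU load over the last window,
-  * choose a new target frequency (applying hispeed/boost and the
-  * floor/min_sample_time rules), and wake the speedchange task if the
-  * target changed.
-  */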
- static void cpufreq_interactive_timer(unsigned long data)
- {
- u64 now;
- unsigned int delta_time;
- u64 cputime_speedadj;
- int cpu_load;
- struct cpufreq_interactive_cpuinfo *pcpu =
- &per_cpu(cpuinfo, data);
- struct cpufreq_interactive_tunables *tunables =
- pcpu->policy->governor_data;
- unsigned int new_freq;
- unsigned int loadadjfreq;
- unsigned int index;
- unsigned long flags;
- u64 max_fvtime;
- #ifdef CONFIG_ARCH_MT6755
- int ppb_idx;
- /* Default, low power, just make, performance */
- int freq_idx[4] = {2, 6, 4, 0};
- int min_sample_t[4] = {80, 20, 20, 80};
- #endif
- if (!down_read_trylock(&pcpu->enable_sem))
- return;
- if (!pcpu->governor_enabled)
- goto exit;
- spin_lock_irqsave(&pcpu->load_lock, flags);
- now = update_load(data);
- delta_time = (unsigned int)(now - pcpu->cputime_speedadj_timestamp);
- cputime_speedadj = pcpu->cputime_speedadj;
- spin_unlock_irqrestore(&pcpu->load_lock, flags);
- if (WARN_ON_ONCE(!delta_time))
- goto rearm;
- spin_lock_irqsave(&pcpu->target_freq_lock, flags);
- do_div(cputime_speedadj, delta_time);
- loadadjfreq = (unsigned int)cputime_speedadj * 100;
- cpu_load = loadadjfreq / pcpu->policy->cur;
- tunables->boosted = tunables->boost_val || now < tunables->boostpulse_endtime;
- #ifdef CONFIG_ARCH_MT6755
- ppb_idx = mt_cpufreq_get_ppb_state();
- /* Do not modify if L is in default mode */
- if (ppb_idx == 0 && (arch_get_cluster_id(pcpu->policy->cpu) >= 1)) {
- tunables->hispeed_freq = pcpu->freq_table[0].frequency;
- tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
- } else {
- tunables->hispeed_freq = pcpu->freq_table[freq_idx[ppb_idx]].frequency;
- tunables->min_sample_time = min_sample_t[ppb_idx] * USEC_PER_MSEC;
- if (hispeed_freq_perf != 0)
- tunables->hispeed_freq = hispeed_freq_perf;
- if (min_sample_time_perf != 0)
- tunables->min_sample_time = min_sample_time_perf;
- }
- #endif
- if (cpu_load >= tunables->go_hispeed_load || tunables->boosted) {
- if (pcpu->policy->cur < tunables->hispeed_freq) {
- new_freq = tunables->hispeed_freq;
- } else {
- new_freq = choose_freq(pcpu, loadadjfreq);
- if (new_freq < tunables->hispeed_freq)
- new_freq = tunables->hispeed_freq;
- }
- } else {
- new_freq = choose_freq(pcpu, loadadjfreq);
- if (new_freq > tunables->hispeed_freq &&
- pcpu->policy->cur < tunables->hispeed_freq)
- new_freq = tunables->hispeed_freq;
- }
- if (pcpu->policy->cur >= tunables->hispeed_freq &&
- new_freq > pcpu->policy->cur &&
- now - pcpu->pol_hispeed_val_time <
- freq_to_above_hispeed_delay(tunables, pcpu->policy->cur)) {
- trace_cpufreq_interactive_notyet(
- data, cpu_load, pcpu->target_freq,
- pcpu->policy->cur, new_freq);
- spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
- goto rearm;
- }
- pcpu->loc_hispeed_val_time = now;
- if (cpufreq_frequency_table_target(pcpu->policy, pcpu->freq_table,
- new_freq, CPUFREQ_RELATION_L,
- &index)) {
- spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
- goto rearm;
- }
- new_freq = pcpu->freq_table[index].frequency;
- /*
- * Do not scale below floor_freq unless we have been at or above the
- * floor frequency for the minimum sample time since last validated.
- */
- max_fvtime = max(pcpu->pol_floor_val_time, pcpu->loc_floor_val_time);
- if (new_freq < pcpu->floor_freq &&
- pcpu->target_freq >= pcpu->policy->cur) {
- if (now - max_fvtime < tunables->min_sample_time) {
- trace_cpufreq_interactive_notyet(
- data, cpu_load, pcpu->target_freq,
- pcpu->policy->cur, new_freq);
- spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
- goto rearm;
- }
- }
- /*
- * Update the timestamp for checking whether speed has been held at
- * or above the selected frequency for a minimum of min_sample_time,
- * if not boosted to hispeed_freq. If boosted to hispeed_freq then we
- * allow the speed to drop as soon as the boostpulse duration expires
- * (or the indefinite boost is turned off).
- */
- if (!tunables->boosted || new_freq > tunables->hispeed_freq) {
- pcpu->floor_freq = new_freq;
- if (pcpu->target_freq >= pcpu->policy->cur ||
- new_freq >= pcpu->policy->cur)
- pcpu->loc_floor_val_time = now;
- }
- if (pcpu->target_freq == new_freq &&
- pcpu->target_freq <= pcpu->policy->cur) {
- trace_cpufreq_interactive_already(
- data, cpu_load, pcpu->target_freq,
- pcpu->policy->cur, new_freq);
- spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
- goto rearm;
- }
- trace_cpufreq_interactive_target(data, cpu_load, pcpu->target_freq,
- pcpu->policy->cur, new_freq);
- pcpu->target_freq = new_freq;
- spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
- spin_lock_irqsave(&speedchange_cpumask_lock, flags);
- cpumask_set_cpu(data, &speedchange_cpumask);
- spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
- wake_up_process(speedchange_task);
- rearm:
- if (!timer_pending(&pcpu->cpu_timer))
- cpufreq_interactive_timer_resched(pcpu);
- exit:
- up_read(&pcpu->enable_sem);
- return;
- }
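- /* On idle exit, re-arm the sampling timer or run it now if it has expired. */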
- static void cpufreq_interactive_idle_end(void)
- {
- struct cpufreq_interactive_cpuinfo *pcpu =
- &per_cpu(cpuinfo, smp_processor_id());
- if (!down_read_trylock(&pcpu->enable_sem))
- return;
- if (!pcpu->governor_enabled) {
- up_read(&pcpu->enable_sem);
- return;
- }
- /* Arm the timer for 1-2 ticks later if not already. */
- if (!timer_pending(&pcpu->cpu_timer)) {
- cpufreq_interactive_timer_resched(pcpu);
- } else if (time_after_eq(jiffies, pcpu->cpu_timer.expires)) {
- del_timer(&pcpu->cpu_timer);
- del_timer(&pcpu->cpu_slack_timer);
- cpufreq_interactive_timer(smp_processor_id());
- }
- up_read(&pcpu->enable_sem);
- }
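- /*
-  * Realtime kthread: for each CPU flagged in speedchange_cpumask, drive the
-  * policy to the highest target_freq among its CPUs and propagate the
-  * policy-wide floor/hispeed validation times.
-  */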
- static int cpufreq_interactive_speedchange_task(void *data)
- {
- unsigned int cpu;
- cpumask_t tmp_mask;
- unsigned long flags;
- struct cpufreq_interactive_cpuinfo *pcpu;
- while (1) {
- set_current_state(TASK_INTERRUPTIBLE);
- spin_lock_irqsave(&speedchange_cpumask_lock, flags);
- if (cpumask_empty(&speedchange_cpumask)) {
- spin_unlock_irqrestore(&speedchange_cpumask_lock,
- flags);
- schedule();
- if (kthread_should_stop())
- break;
- spin_lock_irqsave(&speedchange_cpumask_lock, flags);
- }
- set_current_state(TASK_RUNNING);
- tmp_mask = speedchange_cpumask;
- cpumask_clear(&speedchange_cpumask);
- spin_unlock_irqrestore(&speedchange_cpumask_lock, flags);
- for_each_cpu(cpu, &tmp_mask) {
- unsigned int j;
- unsigned int max_freq = 0;
- struct cpufreq_interactive_cpuinfo *pjcpu;
- u64 hvt = ~0ULL, fvt = 0;
- pcpu = &per_cpu(cpuinfo, cpu);
- if (!down_read_trylock(&pcpu->enable_sem))
- continue;
- if (!pcpu->governor_enabled) {
- up_read(&pcpu->enable_sem);
- continue;
- }
- for_each_cpu(j, pcpu->policy->cpus) {
- pjcpu = &per_cpu(cpuinfo, j);
- fvt = max(fvt, pjcpu->loc_floor_val_time);
- if (pjcpu->target_freq > max_freq) {
- max_freq = pjcpu->target_freq;
- hvt = pjcpu->loc_hispeed_val_time;
- } else if (pjcpu->target_freq == max_freq) {
- hvt = min(hvt, pjcpu->loc_hispeed_val_time);
- }
- }
- for_each_cpu(j, pcpu->policy->cpus) {
- pjcpu = &per_cpu(cpuinfo, j);
- pjcpu->pol_floor_val_time = fvt;
- }
- if (max_freq != pcpu->policy->cur) {
- __cpufreq_driver_target(pcpu->policy,
- max_freq,
- CPUFREQ_RELATION_H);
- for_each_cpu(j, pcpu->policy->cpus) {
- pjcpu = &per_cpu(cpuinfo, j);
- pjcpu->pol_hispeed_val_time = hvt;
- }
- }
- trace_cpufreq_interactive_setspeed(cpu,
- pcpu->target_freq,
- pcpu->policy->cur);
- up_read(&pcpu->enable_sem);
- }
- }
- return 0;
- }
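- /* Raise every online CPU using these tunables to at least hispeed_freq. */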
- static void cpufreq_interactive_boost(struct cpufreq_interactive_tunables *tunables)
- {
- int i;
- int anyboost = 0;
- unsigned long flags[2];
- struct cpufreq_interactive_cpuinfo *pcpu;
- tunables->boosted = true;
- spin_lock_irqsave(&speedchange_cpumask_lock, flags[0]);
- for_each_online_cpu(i) {
- pcpu = &per_cpu(cpuinfo, i);
- if (tunables != pcpu->policy->governor_data)
- continue;
- spin_lock_irqsave(&pcpu->target_freq_lock, flags[1]);
- if (pcpu->target_freq < tunables->hispeed_freq) {
- pcpu->target_freq = tunables->hispeed_freq;
- cpumask_set_cpu(i, &speedchange_cpumask);
- pcpu->pol_hispeed_val_time =
- ktime_to_us(ktime_get());
- anyboost = 1;
- }
- spin_unlock_irqrestore(&pcpu->target_freq_lock, flags[1]);
- }
- spin_unlock_irqrestore(&speedchange_cpumask_lock, flags[0]);
- if (anyboost)
- wake_up_process(speedchange_task);
- }
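- /*
-  * Frequency-transition notifier: on POSTCHANGE, fold the time spent at the
-  * old frequency into the load accounting of every CPU in the policy.
-  */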
- static int cpufreq_interactive_notifier(
- struct notifier_block *nb, unsigned long val, void *data)
- {
- struct cpufreq_freqs *freq = data;
- struct cpufreq_interactive_cpuinfo *pcpu;
- int cpu;
- unsigned long flags;
- if (val == CPUFREQ_POSTCHANGE) {
- pcpu = &per_cpu(cpuinfo, freq->cpu);
- if (!down_read_trylock(&pcpu->enable_sem))
- return 0;
- if (!pcpu->governor_enabled) {
- up_read(&pcpu->enable_sem);
- return 0;
- }
- for_each_cpu(cpu, pcpu->policy->cpus) {
- struct cpufreq_interactive_cpuinfo *pjcpu =
- &per_cpu(cpuinfo, cpu);
- if (cpu != freq->cpu) {
- if (!down_read_trylock(&pjcpu->enable_sem))
- continue;
- if (!pjcpu->governor_enabled) {
- up_read(&pjcpu->enable_sem);
- continue;
- }
- }
- spin_lock_irqsave(&pjcpu->load_lock, flags);
- update_load(cpu);
- spin_unlock_irqrestore(&pjcpu->load_lock, flags);
- if (cpu != freq->cpu)
- up_read(&pjcpu->enable_sem);
- }
- up_read(&pcpu->enable_sem);
- }
- return 0;
- }
- static struct notifier_block cpufreq_notifier_block = {
- .notifier_call = cpufreq_interactive_notifier,
- };
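- /*
-  * Parse a string of unsigned ints separated by spaces or colons, e.g.
-  * "85 1000000:90 1500000:95". An odd number of tokens is required; returns
-  * a kmalloc'd array or an ERR_PTR on failure.
-  */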
- static unsigned int *get_tokenized_data(const char *buf, int *num_tokens)
- {
- const char *cp;
- int i;
- int ntokens = 1;
- unsigned int *tokenized_data;
- int err = -EINVAL;
- cp = buf;
- while ((cp = strpbrk(cp + 1, " :")))
- ntokens++;
- if (!(ntokens & 0x1))
- goto err;
- tokenized_data = kmalloc(ntokens * sizeof(unsigned int), GFP_KERNEL);
- if (!tokenized_data) {
- err = -ENOMEM;
- goto err;
- }
- cp = buf;
- i = 0;
- while (i < ntokens) {
- if (sscanf(cp, "%u", &tokenized_data[i++]) != 1)
- goto err_kfree;
- cp = strpbrk(cp, " :");
- if (!cp)
- break;
- cp++;
- }
- if (i != ntokens)
- goto err_kfree;
- *num_tokens = ntokens;
- return tokenized_data;
- err_kfree:
- kfree(tokenized_data);
- err:
- return ERR_PTR(err);
- }
- static ssize_t show_target_loads(
- struct cpufreq_interactive_tunables *tunables,
- char *buf)
- {
- int i;
- ssize_t ret = 0;
- unsigned long flags;
- spin_lock_irqsave(&tunables->target_loads_lock, flags);
- for (i = 0; i < tunables->ntarget_loads; i++)
- ret += sprintf(buf + ret, "%u%s", tunables->target_loads[i],
- i & 0x1 ? ":" : " ");
- sprintf(buf + ret - 1, "\n");
- spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
- return ret;
- }
- static ssize_t store_target_loads(
- struct cpufreq_interactive_tunables *tunables,
- const char *buf, size_t count)
- {
- int ntokens;
- unsigned int *new_target_loads = NULL;
- unsigned long flags;
- new_target_loads = get_tokenized_data(buf, &ntokens);
- if (IS_ERR(new_target_loads))
- return PTR_RET(new_target_loads);
- spin_lock_irqsave(&tunables->target_loads_lock, flags);
- if (tunables->target_loads != default_target_loads)
- kfree(tunables->target_loads);
- tunables->target_loads = new_target_loads;
- tunables->ntarget_loads = ntokens;
- spin_unlock_irqrestore(&tunables->target_loads_lock, flags);
- return count;
- }
- static ssize_t show_above_hispeed_delay(
- struct cpufreq_interactive_tunables *tunables, char *buf)
- {
- int i;
- ssize_t ret = 0;
- unsigned long flags;
- spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
- for (i = 0; i < tunables->nabove_hispeed_delay; i++)
- ret += sprintf(buf + ret, "%u%s",
- tunables->above_hispeed_delay[i],
- i & 0x1 ? ":" : " ");
- sprintf(buf + ret - 1, "\n");
- spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
- return ret;
- }
- static ssize_t store_above_hispeed_delay(
- struct cpufreq_interactive_tunables *tunables,
- const char *buf, size_t count)
- {
- int ntokens;
- unsigned int *new_above_hispeed_delay = NULL;
- unsigned long flags;
- new_above_hispeed_delay = get_tokenized_data(buf, &ntokens);
- if (IS_ERR(new_above_hispeed_delay))
- return PTR_RET(new_above_hispeed_delay);
- spin_lock_irqsave(&tunables->above_hispeed_delay_lock, flags);
- if (tunables->above_hispeed_delay != default_above_hispeed_delay)
- kfree(tunables->above_hispeed_delay);
- tunables->above_hispeed_delay = new_above_hispeed_delay;
- tunables->nabove_hispeed_delay = ntokens;
- spin_unlock_irqrestore(&tunables->above_hispeed_delay_lock, flags);
- return count;
- }
- static ssize_t show_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
- char *buf)
- {
- return sprintf(buf, "%u\n", tunables->hispeed_freq);
- }
- static ssize_t store_hispeed_freq(struct cpufreq_interactive_tunables *tunables,
- const char *buf, size_t count)
- {
- int ret;
- unsigned long val;
- ret = kstrtoul(buf, 0, &val);
- if (ret < 0)
- return ret;
- tunables->hispeed_freq = val;
- #ifdef CONFIG_ARCH_MT6755
- hispeed_freq_perf = val;
- #endif
- return count;
- }
- static ssize_t show_go_hispeed_load(struct cpufreq_interactive_tunables
- *tunables, char *buf)
- {
- return sprintf(buf, "%lu\n", tunables->go_hispeed_load);
- }
- static ssize_t store_go_hispeed_load(struct cpufreq_interactive_tunables
- *tunables, const char *buf, size_t count)
- {
- int ret;
- unsigned long val;
- ret = kstrtoul(buf, 0, &val);
- if (ret < 0)
- return ret;
- tunables->go_hispeed_load = val;
- return count;
- }
- static ssize_t show_min_sample_time(struct cpufreq_interactive_tunables
- *tunables, char *buf)
- {
- return sprintf(buf, "%lu\n", tunables->min_sample_time);
- }
- static ssize_t store_min_sample_time(struct cpufreq_interactive_tunables
- *tunables, const char *buf, size_t count)
- {
- int ret;
- unsigned long val;
- ret = kstrtoul(buf, 0, &val);
- if (ret < 0)
- return ret;
- tunables->min_sample_time = val;
- #ifdef CONFIG_ARCH_MT6755
- min_sample_time_perf = val;
- #endif
- return count;
- }
- static ssize_t show_timer_rate(struct cpufreq_interactive_tunables *tunables,
- char *buf)
- {
- return sprintf(buf, "%lu\n", tunables->timer_rate);
- }
- static ssize_t store_timer_rate(struct cpufreq_interactive_tunables *tunables,
- const char *buf, size_t count)
- {
- int ret;
- unsigned long val, val_round;
- ret = kstrtoul(buf, 0, &val);
- if (ret < 0)
- return ret;
- val_round = jiffies_to_usecs(usecs_to_jiffies(val));
- if (val != val_round)
- pr_warn("timer_rate not aligned to jiffy. Rounded up to %lu\n",
- val_round);
- tunables->timer_rate = val_round;
- return count;
- }
- static ssize_t show_timer_slack(struct cpufreq_interactive_tunables *tunables,
- char *buf)
- {
- return sprintf(buf, "%d\n", tunables->timer_slack_val);
- }
- static ssize_t store_timer_slack(struct cpufreq_interactive_tunables *tunables,
- const char *buf, size_t count)
- {
- int ret;
- long val;
- ret = kstrtol(buf, 10, &val);
- if (ret < 0)
- return ret;
- tunables->timer_slack_val = val;
- return count;
- }
- static ssize_t show_boost(struct cpufreq_interactive_tunables *tunables,
- char *buf)
- {
- return sprintf(buf, "%d\n", tunables->boost_val);
- }
- static ssize_t store_boost(struct cpufreq_interactive_tunables *tunables,
- const char *buf, size_t count)
- {
- int ret;
- unsigned long val;
- ret = kstrtoul(buf, 0, &val);
- if (ret < 0)
- return ret;
- tunables->boost_val = val;
- if (tunables->boost_val) {
- trace_cpufreq_interactive_boost("on");
- if (!tunables->boosted)
- cpufreq_interactive_boost(tunables);
- } else {
- tunables->boostpulse_endtime = ktime_to_us(ktime_get());
- trace_cpufreq_interactive_unboost("off");
- }
- return count;
- }
- static ssize_t store_boostpulse(struct cpufreq_interactive_tunables *tunables,
- const char *buf, size_t count)
- {
- int ret;
- unsigned long val;
- ret = kstrtoul(buf, 0, &val);
- if (ret < 0)
- return ret;
- tunables->boostpulse_endtime = ktime_to_us(ktime_get()) +
- tunables->boostpulse_duration_val;
- trace_cpufreq_interactive_boost("pulse");
- if (!tunables->boosted)
- cpufreq_interactive_boost(tunables);
- return count;
- }
- static ssize_t show_boostpulse_duration(struct cpufreq_interactive_tunables
- *tunables, char *buf)
- {
- return sprintf(buf, "%d\n", tunables->boostpulse_duration_val);
- }
- static ssize_t store_boostpulse_duration(struct cpufreq_interactive_tunables
- *tunables, const char *buf, size_t count)
- {
- int ret;
- unsigned long val;
- ret = kstrtoul(buf, 0, &val);
- if (ret < 0)
- return ret;
- tunables->boostpulse_duration_val = val;
- return count;
- }
- static ssize_t show_io_is_busy(struct cpufreq_interactive_tunables *tunables,
- char *buf)
- {
- return sprintf(buf, "%u\n", tunables->io_is_busy);
- }
- static ssize_t store_io_is_busy(struct cpufreq_interactive_tunables *tunables,
- const char *buf, size_t count)
- {
- int ret;
- unsigned long val;
- ret = kstrtoul(buf, 0, &val);
- if (ret < 0)
- return ret;
- tunables->io_is_busy = val;
- return count;
- }
- /*
- * Create show/store routines
- * - sys: One governor instance for complete SYSTEM
- * - pol: One governor instance per struct cpufreq_policy
- */
- #define show_gov_pol_sys(file_name) \
- static ssize_t show_##file_name##_gov_sys \
- (struct kobject *kobj, struct attribute *attr, char *buf) \
- { \
- return show_##file_name(common_tunables, buf); \
- } \
- \
- static ssize_t show_##file_name##_gov_pol \
- (struct cpufreq_policy *policy, char *buf) \
- { \
- return show_##file_name(policy->governor_data, buf); \
- }
- #define store_gov_pol_sys(file_name) \
- static ssize_t store_##file_name##_gov_sys \
- (struct kobject *kobj, struct attribute *attr, const char *buf, \
- size_t count) \
- { \
- return store_##file_name(common_tunables, buf, count); \
- } \
- \
- static ssize_t store_##file_name##_gov_pol \
- (struct cpufreq_policy *policy, const char *buf, size_t count) \
- { \
- return store_##file_name(policy->governor_data, buf, count); \
- }
- #define show_store_gov_pol_sys(file_name) \
- show_gov_pol_sys(file_name); \
- store_gov_pol_sys(file_name)
- show_store_gov_pol_sys(target_loads);
- show_store_gov_pol_sys(above_hispeed_delay);
- show_store_gov_pol_sys(hispeed_freq);
- show_store_gov_pol_sys(go_hispeed_load);
- show_store_gov_pol_sys(min_sample_time);
- show_store_gov_pol_sys(timer_rate);
- show_store_gov_pol_sys(timer_slack);
- show_store_gov_pol_sys(boost);
- store_gov_pol_sys(boostpulse);
- show_store_gov_pol_sys(boostpulse_duration);
- show_store_gov_pol_sys(io_is_busy);
- #define gov_sys_attr_rw(_name) \
- static struct global_attr _name##_gov_sys = \
- __ATTR(_name, 0644, show_##_name##_gov_sys, store_##_name##_gov_sys)
- #define gov_pol_attr_rw(_name) \
- static struct freq_attr _name##_gov_pol = \
- __ATTR(_name, 0644, show_##_name##_gov_pol, store_##_name##_gov_pol)
- #define gov_sys_pol_attr_rw(_name) \
- gov_sys_attr_rw(_name); \
- gov_pol_attr_rw(_name)
- gov_sys_pol_attr_rw(target_loads);
- gov_sys_pol_attr_rw(above_hispeed_delay);
- gov_sys_pol_attr_rw(hispeed_freq);
- gov_sys_pol_attr_rw(go_hispeed_load);
- gov_sys_pol_attr_rw(min_sample_time);
- gov_sys_pol_attr_rw(timer_rate);
- gov_sys_pol_attr_rw(timer_slack);
- gov_sys_pol_attr_rw(boost);
- gov_sys_pol_attr_rw(boostpulse_duration);
- gov_sys_pol_attr_rw(io_is_busy);
- static struct global_attr boostpulse_gov_sys =
- __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_sys);
- static struct freq_attr boostpulse_gov_pol =
- __ATTR(boostpulse, 0200, NULL, store_boostpulse_gov_pol);
- /* One Governor instance for entire system */
- static struct attribute *interactive_attributes_gov_sys[] = {
- &target_loads_gov_sys.attr,
- &above_hispeed_delay_gov_sys.attr,
- &hispeed_freq_gov_sys.attr,
- &go_hispeed_load_gov_sys.attr,
- &min_sample_time_gov_sys.attr,
- &timer_rate_gov_sys.attr,
- &timer_slack_gov_sys.attr,
- &boost_gov_sys.attr,
- &boostpulse_gov_sys.attr,
- &boostpulse_duration_gov_sys.attr,
- &io_is_busy_gov_sys.attr,
- NULL,
- };
- static struct attribute_group interactive_attr_group_gov_sys = {
- .attrs = interactive_attributes_gov_sys,
- .name = "interactive",
- };
- /* Per policy governor instance */
- static struct attribute *interactive_attributes_gov_pol[] = {
- &target_loads_gov_pol.attr,
- &above_hispeed_delay_gov_pol.attr,
- &hispeed_freq_gov_pol.attr,
- &go_hispeed_load_gov_pol.attr,
- &min_sample_time_gov_pol.attr,
- &timer_rate_gov_pol.attr,
- &timer_slack_gov_pol.attr,
- &boost_gov_pol.attr,
- &boostpulse_gov_pol.attr,
- &boostpulse_duration_gov_pol.attr,
- &io_is_busy_gov_pol.attr,
- NULL,
- };
- static struct attribute_group interactive_attr_group_gov_pol = {
- .attrs = interactive_attributes_gov_pol,
- .name = "interactive",
- };
- static struct attribute_group *get_sysfs_attr(void)
- {
- if (have_governor_per_policy())
- return &interactive_attr_group_gov_pol;
- else
- return &interactive_attr_group_gov_sys;
- }
- static int cpufreq_interactive_idle_notifier(struct notifier_block *nb,
- unsigned long val,
- void *data)
- {
- if (val == IDLE_END)
- cpufreq_interactive_idle_end();
- return 0;
- }
- static struct notifier_block cpufreq_interactive_idle_nb = {
- .notifier_call = cpufreq_interactive_idle_notifier,
- };
- static int cpufreq_governor_interactive(struct cpufreq_policy *policy,
- unsigned int event)
- {
- int rc;
- unsigned int j;
- struct cpufreq_interactive_cpuinfo *pcpu;
- struct cpufreq_frequency_table *freq_table;
- struct cpufreq_interactive_tunables *tunables;
- unsigned long flags;
- if (have_governor_per_policy())
- tunables = policy->governor_data;
- else
- tunables = common_tunables;
- WARN_ON(!tunables && (event != CPUFREQ_GOV_POLICY_INIT));
- switch (event) {
- case CPUFREQ_GOV_POLICY_INIT:
- if (have_governor_per_policy()) {
- WARN_ON(tunables);
- } else if (tunables) {
- tunables->usage_count++;
- policy->governor_data = tunables;
- return 0;
- }
- tunables = kzalloc(sizeof(*tunables), GFP_KERNEL);
- if (!tunables) {
- pr_err("%s: POLICY_INIT: kzalloc failed\n", __func__);
- return -ENOMEM;
- }
- tunables->usage_count = 1;
- tunables->above_hispeed_delay = default_above_hispeed_delay;
- tunables->nabove_hispeed_delay =
- ARRAY_SIZE(default_above_hispeed_delay);
- tunables->go_hispeed_load = DEFAULT_GO_HISPEED_LOAD;
- tunables->target_loads = default_target_loads;
- tunables->ntarget_loads = ARRAY_SIZE(default_target_loads);
- tunables->min_sample_time = DEFAULT_MIN_SAMPLE_TIME;
- tunables->timer_rate = DEFAULT_TIMER_RATE;
- tunables->boostpulse_duration_val = DEFAULT_MIN_SAMPLE_TIME;
- tunables->timer_slack_val = DEFAULT_TIMER_SLACK;
- spin_lock_init(&tunables->target_loads_lock);
- spin_lock_init(&tunables->above_hispeed_delay_lock);
- policy->governor_data = tunables;
- if (!have_governor_per_policy()) {
- common_tunables = tunables;
- WARN_ON(cpufreq_get_global_kobject());
- }
- rc = sysfs_create_group(get_governor_parent_kobj(policy),
- get_sysfs_attr());
- if (rc) {
- kfree(tunables);
- policy->governor_data = NULL;
- if (!have_governor_per_policy()) {
- common_tunables = NULL;
- cpufreq_put_global_kobject();
- }
- return rc;
- }
- if (!policy->governor->initialized) {
- idle_notifier_register(&cpufreq_interactive_idle_nb);
- cpufreq_register_notifier(&cpufreq_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
- }
- break;
- case CPUFREQ_GOV_POLICY_EXIT:
- if (!--tunables->usage_count) {
- if (policy->governor->initialized == 1) {
- cpufreq_unregister_notifier(&cpufreq_notifier_block,
- CPUFREQ_TRANSITION_NOTIFIER);
- idle_notifier_unregister(&cpufreq_interactive_idle_nb);
- }
- sysfs_remove_group(get_governor_parent_kobj(policy),
- get_sysfs_attr());
- if (!have_governor_per_policy())
- cpufreq_put_global_kobject();
- kfree(tunables);
- common_tunables = NULL;
- }
- policy->governor_data = NULL;
- break;
- case CPUFREQ_GOV_START:
- mutex_lock(&gov_lock);
- freq_table = cpufreq_frequency_get_table(policy->cpu);
- if (tunables) {
- if (!tunables->hispeed_freq)
- tunables->hispeed_freq = policy->max;
- }
- for_each_cpu(j, policy->cpus) {
- pcpu = &per_cpu(cpuinfo, j);
- pcpu->policy = policy;
- pcpu->target_freq = policy->cur;
- pcpu->freq_table = freq_table;
- pcpu->floor_freq = pcpu->target_freq;
- pcpu->pol_floor_val_time =
- ktime_to_us(ktime_get());
- pcpu->loc_floor_val_time = pcpu->pol_floor_val_time;
- pcpu->pol_hispeed_val_time = pcpu->pol_floor_val_time;
- pcpu->loc_hispeed_val_time = pcpu->pol_floor_val_time;
- down_write(&pcpu->enable_sem);
- del_timer_sync(&pcpu->cpu_timer);
- del_timer_sync(&pcpu->cpu_slack_timer);
- cpufreq_interactive_timer_start(tunables, j);
- pcpu->governor_enabled = 1;
- up_write(&pcpu->enable_sem);
- }
- mutex_unlock(&gov_lock);
- break;
- case CPUFREQ_GOV_STOP:
- mutex_lock(&gov_lock);
- for_each_cpu(j, policy->cpus) {
- pcpu = &per_cpu(cpuinfo, j);
- down_write(&pcpu->enable_sem);
- pcpu->governor_enabled = 0;
- del_timer_sync(&pcpu->cpu_timer);
- del_timer_sync(&pcpu->cpu_slack_timer);
- up_write(&pcpu->enable_sem);
- }
- mutex_unlock(&gov_lock);
- break;
- case CPUFREQ_GOV_LIMITS:
- if (policy->max < policy->cur)
- __cpufreq_driver_target(policy,
- policy->max, CPUFREQ_RELATION_H);
- else if (policy->min > policy->cur)
- __cpufreq_driver_target(policy,
- policy->min, CPUFREQ_RELATION_L);
- for_each_cpu(j, policy->cpus) {
- pcpu = &per_cpu(cpuinfo, j);
- down_read(&pcpu->enable_sem);
- if (pcpu->governor_enabled == 0) {
- up_read(&pcpu->enable_sem);
- continue;
- }
- spin_lock_irqsave(&pcpu->target_freq_lock, flags);
- if (policy->max < pcpu->target_freq)
- pcpu->target_freq = policy->max;
- else if (policy->min > pcpu->target_freq)
- pcpu->target_freq = policy->min;
- spin_unlock_irqrestore(&pcpu->target_freq_lock, flags);
- up_read(&pcpu->enable_sem);
- }
- break;
- }
- return 0;
- }
- #ifndef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
- static
- #endif
- struct cpufreq_governor cpufreq_gov_interactive = {
- .name = "interactive",
- .governor = cpufreq_governor_interactive,
- .max_transition_latency = 10000000,
- .owner = THIS_MODULE,
- };
- static void cpufreq_interactive_nop_timer(unsigned long data)
- {
- }
- static int __init cpufreq_interactive_init(void)
- {
- unsigned int i;
- struct cpufreq_interactive_cpuinfo *pcpu;
- struct sched_param param = { .sched_priority = MAX_RT_PRIO-1 };
- /* Initialize per-cpu timers */
- for_each_possible_cpu(i) {
- pcpu = &per_cpu(cpuinfo, i);
- init_timer_deferrable(&pcpu->cpu_timer);
- pcpu->cpu_timer.function = cpufreq_interactive_timer;
- pcpu->cpu_timer.data = i;
- init_timer(&pcpu->cpu_slack_timer);
- pcpu->cpu_slack_timer.function = cpufreq_interactive_nop_timer;
- spin_lock_init(&pcpu->load_lock);
- spin_lock_init(&pcpu->target_freq_lock);
- init_rwsem(&pcpu->enable_sem);
- }
- spin_lock_init(&speedchange_cpumask_lock);
- mutex_init(&gov_lock);
- speedchange_task =
- kthread_create(cpufreq_interactive_speedchange_task, NULL,
- "cfinteractive");
- if (IS_ERR(speedchange_task))
- return PTR_ERR(speedchange_task);
- sched_setscheduler_nocheck(speedchange_task, SCHED_FIFO, &param);
- get_task_struct(speedchange_task);
- /* NB: wake up so the thread does not look hung to the freezer */
- wake_up_process(speedchange_task);
- return cpufreq_register_governor(&cpufreq_gov_interactive);
- }
- #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE
- fs_initcall(cpufreq_interactive_init);
- #else
- module_init(cpufreq_interactive_init);
- #endif
- static void __exit cpufreq_interactive_exit(void)
- {
- cpufreq_unregister_governor(&cpufreq_gov_interactive);
- kthread_stop(speedchange_task);
- put_task_struct(speedchange_task);
- }
- module_exit(cpufreq_interactive_exit);
- MODULE_AUTHOR("Mike Chan <mike@android.com>");
- MODULE_DESCRIPTION("'cpufreq_interactive' - A cpufreq governor for "
- "Latency sensitive workloads");
- MODULE_LICENSE("GPL");