diff --git a/kernel/sched/walt/cpufreq_walt.c b/kernel/sched/walt/cpufreq_walt.c
index e9cb1659ca67..0268e75bdf18 100644
--- a/kernel/sched/walt/cpufreq_walt.c
+++ b/kernel/sched/walt/cpufreq_walt.c
@@ -1046,6 +1046,7 @@ static void waltgov_tunables_restore(struct cpufreq_policy *policy)
 	tunables->target_load_shift = cached->target_load_shift;
 }
 
+bool waltgov_disabled = true;
 static int waltgov_init(struct cpufreq_policy *policy)
 {
 	struct waltgov_policy *wg_policy;
@@ -1164,6 +1165,7 @@ static int waltgov_start(struct cpufreq_policy *policy)
 		waltgov_add_callback(cpu, &wg_cpu->cb, waltgov_update_freq);
 	}
 
+	waltgov_disabled = false;
 	return 0;
 }
 
@@ -1181,6 +1183,8 @@ static void waltgov_stop(struct cpufreq_policy *policy)
 		irq_work_sync(&wg_policy->irq_work);
 		kthread_cancel_work_sync(&wg_policy->work);
 	}
+
+	waltgov_disabled = true;
 }
 
 static void waltgov_limits(struct cpufreq_policy *policy)
diff --git a/kernel/sched/walt/walt.h b/kernel/sched/walt/walt.h
index 0d17e6590be7..b68fcbe31873 100644
--- a/kernel/sched/walt/walt.h
+++ b/kernel/sched/walt/walt.h
@@ -43,6 +43,7 @@
 #define MAX_MARGIN_LEVELS (MAX_CLUSTERS - 1)
 
 extern bool walt_disabled;
+extern bool waltgov_disabled;
 
 enum task_event {
 	PUT_PREV_TASK = 0,
@@ -1126,11 +1127,14 @@ static inline bool has_internal_freq_limit_changed(struct walt_sched_cluster *cl
 	int i;
 
 	internal_freq = cluster->walt_internal_freq_limit;
-
 	cluster->walt_internal_freq_limit = cluster->max_freq;
-	for (i = 0; i < MAX_FREQ_CAP; i++)
-		cluster->walt_internal_freq_limit = min(fmax_cap[i][cluster->id],
+
+	if (likely(!waltgov_disabled)) {
+		for (i = 0; i < MAX_FREQ_CAP; i++)
+			cluster->walt_internal_freq_limit = min(fmax_cap[i][cluster->id],
 				cluster->walt_internal_freq_limit);
+	}
+
 	return cluster->walt_internal_freq_limit != internal_freq;
 }
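
For reference, the sketch below mirrors the gated logic of has_internal_freq_limit_changed() as a standalone userspace program so the effect of the new waltgov_disabled flag can be exercised in isolation. It is a minimal illustration, not kernel code: the struct layout, the MAX_FREQ_CAP/MAX_CLUSTERS values, the fmax_cap[] contents and the min_u() helper are assumptions made for the example, and the kernel-only likely() hint is dropped. Only the waltgov_disabled gating reflects the patch itself.

/*
 * Standalone sketch of the gating added by this patch: while the governor
 * is stopped, has_internal_freq_limit_changed() skips the fmax_cap[]
 * clamping and leaves the cluster limit at max_freq.
 *
 * All definitions below are illustrative stand-ins for the real WALT ones.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_FREQ_CAP	3	/* assumed number of cap sources */
#define MAX_CLUSTERS	2	/* assumed number of clusters */

/* Set false in waltgov_start(), true in waltgov_stop() (and at init). */
static bool waltgov_disabled = true;

/* Assumed per-cap, per-cluster frequency caps (kHz). */
static unsigned int fmax_cap[MAX_FREQ_CAP][MAX_CLUSTERS] = {
	{ 1800000, 2800000 },
	{ 1500000, 2400000 },
	{ 2000000, 3000000 },
};

struct walt_sched_cluster {
	int		id;
	unsigned int	max_freq;
	unsigned int	walt_internal_freq_limit;
};

/* Plain helper standing in for the kernel's min() macro. */
static inline unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

static bool has_internal_freq_limit_changed(struct walt_sched_cluster *cluster)
{
	unsigned int internal_freq;
	int i;

	internal_freq = cluster->walt_internal_freq_limit;
	cluster->walt_internal_freq_limit = cluster->max_freq;

	/* Only honour the fmax caps while the governor is running. */
	if (!waltgov_disabled) {
		for (i = 0; i < MAX_FREQ_CAP; i++)
			cluster->walt_internal_freq_limit =
				min_u(fmax_cap[i][cluster->id],
				      cluster->walt_internal_freq_limit);
	}

	return cluster->walt_internal_freq_limit != internal_freq;
}

int main(void)
{
	struct walt_sched_cluster c = {
		.id = 0,
		.max_freq = 2200000,
		.walt_internal_freq_limit = 2200000,
	};

	/* Governor stopped: limit stays at max_freq, nothing changes. */
	printf("disabled: changed=%d limit=%u\n",
	       has_internal_freq_limit_changed(&c), c.walt_internal_freq_limit);

	/* Governor running: caps apply, limit drops to the smallest cap. */
	waltgov_disabled = false;
	printf("enabled:  changed=%d limit=%u\n",
	       has_internal_freq_limit_changed(&c), c.walt_internal_freq_limit);

	return 0;
}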