diff --git a/include/linux/sched/walt.h b/include/linux/sched/walt.h
index 6da2d917db7f..7666b91d5dcf 100644
--- a/include/linux/sched/walt.h
+++ b/include/linux/sched/walt.h
@@ -201,10 +201,10 @@ struct notifier_block;
 extern void core_ctl_notifier_register(struct notifier_block *n);
 extern void core_ctl_notifier_unregister(struct notifier_block *n);
 extern int core_ctl_set_boost(bool boost);
-extern void walt_set_cpus_taken(struct cpumask *set);
-extern void walt_unset_cpus_taken(struct cpumask *unset);
+extern int walt_set_cpus_taken(struct cpumask *set);
+extern int walt_unset_cpus_taken(struct cpumask *unset);
 extern cpumask_t walt_get_cpus_taken(void);
-extern void walt_get_cpus_in_state1(struct cpumask *cpus);
+extern int walt_get_cpus_in_state1(struct cpumask *cpus);

 extern int walt_pause_cpus(struct cpumask *cpus, enum pause_client client);
 extern int walt_resume_cpus(struct cpumask *cpus, enum pause_client client);
diff --git a/kernel/sched/walt/boost.c b/kernel/sched/walt/boost.c
index 2b82b34cceec..bd834124c5d2 100644
--- a/kernel/sched/walt/boost.c
+++ b/kernel/sched/walt/boost.c
@@ -279,6 +279,9 @@ int sched_set_boost(int type)
 {
 	int ret = 0;

+	if (unlikely(walt_disabled))
+		return -EAGAIN;
+
 	mutex_lock(&boost_mutex);
 	if (verify_boost_params(type))
 		_sched_set_boost(type);
diff --git a/kernel/sched/walt/core_ctl.c b/kernel/sched/walt/core_ctl.c
index bb4cb6e6fe05..1276c55e0a49 100644
--- a/kernel/sched/walt/core_ctl.c
+++ b/kernel/sched/walt/core_ctl.c
@@ -1208,6 +1208,9 @@ int core_ctl_set_boost(bool boost)
 	int ret = 0;
 	bool boost_state_changed = false;

+	if (unlikely(walt_disabled))
+		return -EAGAIN;
+
 	if (unlikely(!initialized))
 		return 0;
diff --git a/kernel/sched/walt/cpufreq_walt.c b/kernel/sched/walt/cpufreq_walt.c
index 0268e75bdf18..d67b2fae7864 100644
--- a/kernel/sched/walt/cpufreq_walt.c
+++ b/kernel/sched/walt/cpufreq_walt.c
@@ -769,12 +769,17 @@ int cpufreq_walt_set_adaptive_freq(unsigned int cpu, unsigned int adaptive_low_f
 				   unsigned int adaptive_high_freq)
 {
 	struct waltgov_cpu *wg_cpu = &per_cpu(waltgov_cpu, cpu);
-	struct waltgov_policy *wg_policy = wg_cpu->wg_policy;
-	struct cpufreq_policy *policy = wg_policy->policy;
+	struct waltgov_policy *wg_policy;
+	struct cpufreq_policy *policy;
+
+	if (unlikely(walt_disabled))
+		return -EAGAIN;

 	if (!cpu_possible(cpu))
 		return -EFAULT;

+	wg_policy = wg_cpu->wg_policy;
+	policy = wg_policy->policy;
 	if (policy->min <= adaptive_low_freq && policy->max >= adaptive_high_freq) {
 		wg_policy->tunables->adaptive_low_freq_kernel = adaptive_low_freq;
 		wg_policy->tunables->adaptive_high_freq_kernel = adaptive_high_freq;
@@ -799,11 +804,15 @@ int cpufreq_walt_get_adaptive_freq(unsigned int cpu, unsigned int *adaptive_low_
 				   unsigned int *adaptive_high_freq)
 {
 	struct waltgov_cpu *wg_cpu = &per_cpu(waltgov_cpu, cpu);
-	struct waltgov_policy *wg_policy = wg_cpu->wg_policy;
+	struct waltgov_policy *wg_policy;
+
+	if (unlikely(walt_disabled))
+		return -EAGAIN;

 	if (!cpu_possible(cpu))
 		return -EFAULT;

+	wg_policy = wg_cpu->wg_policy;
 	if (adaptive_low_freq && adaptive_high_freq) {
 		*adaptive_low_freq = get_adaptive_low_freq(wg_policy);
 		*adaptive_high_freq = get_adaptive_high_freq(wg_policy);
@@ -825,11 +834,15 @@ EXPORT_SYMBOL_GPL(cpufreq_walt_get_adaptive_freq);
 int cpufreq_walt_reset_adaptive_freq(unsigned int cpu)
 {
 	struct waltgov_cpu *wg_cpu = &per_cpu(waltgov_cpu, cpu);
-	struct waltgov_policy *wg_policy = wg_cpu->wg_policy;
+	struct waltgov_policy *wg_policy;
+
+	if (unlikely(walt_disabled))
+		return -EAGAIN;

 	if (!cpu_possible(cpu))
 		return -EFAULT;

+	wg_policy = wg_cpu->wg_policy;
 	wg_policy->tunables->adaptive_low_freq_kernel = 0;
 	wg_policy->tunables->adaptive_high_freq_kernel = 0;
diff --git a/kernel/sched/walt/sched_avg.c b/kernel/sched/walt/sched_avg.c
index 49b38df02025..a5b7c9efb39b 100644
--- a/kernel/sched/walt/sched_avg.c
+++ b/kernel/sched/walt/sched_avg.c
@@ -77,6 +77,9 @@ struct sched_avg_stats *sched_get_nr_running_avg(void)
 	bool any_hyst_time = false;
 	struct walt_sched_cluster *cluster;

+	if (unlikely(walt_disabled))
+		return NULL;
+
 	if (!period)
 		goto done;
@@ -323,6 +326,9 @@ int sched_lpm_disallowed_time(int cpu, u64 *timeout)
 	u64 now = sched_clock();
 	u64 bias_end_time = atomic64_read(&per_cpu(busy_hyst_end_time, cpu));

+	if (unlikely(walt_disabled))
+		return -EAGAIN;
+
 	if (unlikely(is_reserved(cpu))) {
 		*timeout = 10 * NSEC_PER_MSEC;
 		return 0; /* shallowest c-state */
diff --git a/kernel/sched/walt/walt.c b/kernel/sched/walt/walt.c
index c12f087733fe..99c2273f30a7 100644
--- a/kernel/sched/walt/walt.c
+++ b/kernel/sched/walt/walt.c
@@ -113,6 +113,9 @@ int set_task_boost(int boost, u64 period)
 {
 	struct walt_task_struct *wts = (struct walt_task_struct *) current->android_vendor_data1;

+	if (unlikely(walt_disabled))
+		return -EAGAIN;
+
 	if (boost < TASK_BOOST_NONE || boost >= TASK_BOOST_END)
 		return -EINVAL;
 	if (boost) {
@@ -5236,25 +5239,32 @@ static void walt_do_sched_yield(void *unused, struct rq *rq)
 	per_cpu(rt_task_arrival_time, cpu_of(rq)) = 0;
 }

-void walt_set_cpus_taken(struct cpumask *set)
+int walt_set_cpus_taken(struct cpumask *set)
 {
 	unsigned long flags;
 	int cpu;

+	if (unlikely(walt_disabled))
+		return -EAGAIN;
+
 	spin_lock_irqsave(&cpus_taken_lock, flags);
 	for_each_cpu(cpu, set) {
 		per_cpu(cpus_taken_refcount, cpu)++;
 	}
 	cpumask_or(&walt_cpus_taken_mask, &walt_cpus_taken_mask, set);
 	spin_unlock_irqrestore(&cpus_taken_lock, flags);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(walt_set_cpus_taken);

-void walt_unset_cpus_taken(struct cpumask *unset)
+int walt_unset_cpus_taken(struct cpumask *unset)
 {
 	unsigned long flags;
 	int cpu;

+	if (unlikely(walt_disabled))
+		return -EAGAIN;
+
 	spin_lock_irqsave(&cpus_taken_lock, flags);
 	for_each_cpu(cpu, unset) {
 		if (per_cpu(cpus_taken_refcount, cpu) >= 1)
@@ -5263,6 +5273,7 @@ void walt_unset_cpus_taken(struct cpumask *unset)
 			cpumask_clear_cpu(cpu, &walt_cpus_taken_mask);
 	}
 	spin_unlock_irqrestore(&cpus_taken_lock, flags);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(walt_unset_cpus_taken);
@@ -5272,10 +5283,14 @@ cpumask_t walt_get_cpus_taken(void)
 {
 	return walt_cpus_taken_mask;
 }
 EXPORT_SYMBOL_GPL(walt_get_cpus_taken);

-void walt_get_cpus_in_state1(struct cpumask *cpus)
+int walt_get_cpus_in_state1(struct cpumask *cpus)
 {
+	if (unlikely(walt_disabled))
+		return -EAGAIN;
+
 	cpumask_or(cpus, cpu_partial_halt_mask, &sched_cluster[0]->cpus);
 	cpumask_andnot(cpus, cpus, cpu_halt_mask);
+	return 0;
 }
 EXPORT_SYMBOL_GPL(walt_get_cpus_in_state1);
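---

Note for reviewers (not part of the patch): since walt_set_cpus_taken(), walt_unset_cpus_taken() and walt_get_cpus_in_state1() now return int, callers that previously ignored the void return should check for -EAGAIN, which every touched entry point returns while WALT is disabled. A minimal sketch of such a caller is below; the helper name my_mark_cpus_taken() and the choice of CPUs are illustrative assumptions, not part of this change.

	#include <linux/errno.h>
	#include <linux/cpumask.h>
	#include <linux/sched/walt.h>

	/* Hypothetical vendor-module helper: mark CPUs 2 and 3 as taken,
	 * tolerating the case where WALT is disabled.
	 */
	static int my_mark_cpus_taken(void)
	{
		cpumask_t mask;
		int ret;

		cpumask_clear(&mask);
		cpumask_set_cpu(2, &mask);
		cpumask_set_cpu(3, &mask);

		ret = walt_set_cpus_taken(&mask);
		if (ret == -EAGAIN)
			return ret;	/* WALT disabled, nothing recorded; caller may retry later */

		return ret;
	}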