firmware/br-ext-chip-allwinner/board/v83x/kernel/patches/00000-drivers_cpufreq_cpufr...

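This patch wires scheduler-facing frequency-invariance hooks and per-frequency time accounting into drivers/cpufreq/cpufreq.c of the V83x BSP kernel. It adds the <linux/cpufreq_times.h> hooks (cpufreq_times_record_transition(), cpufreq_times_create_policy()), exports cpufreq_driver_is_slow(), and maintains per-CPU freq_scale / max_freq_scale / min_freq_scale values that the scheduler side can query through cpufreq_scale_freq_capacity() and its min/max counterparts. The scale values are refreshed from cpufreq_freq_transition_begin() and cpufreq_set_policy(), and the patch also emits trace_cpu_capacity() and trace_cpu_frequency_limits() events.

All scale values use the same fixed-point form, scale = (freq << SCHED_CAPACITY_SHIFT) / max_freq, so a CPU running at half of its maximum frequency reports half of SCHED_CAPACITY_SCALE. A minimal standalone sketch of that arithmetic (not part of the patch; the frequencies are illustrative, not taken from this board; SCHED_CAPACITY_SHIFT is 10 in mainline kernels, i.e. SCHED_CAPACITY_SCALE is 1024):

	#include <stdio.h>

	#define SCHED_CAPACITY_SHIFT 10		/* 10 in mainline kernels */

	int main(void)
	{
		unsigned long max_freq = 1200000;	/* kHz, hypothetical OPP */
		unsigned long cur_freq = 600000;	/* kHz, hypothetical OPP */

		/* Same expression as scale_freq_capacity() in the diff below. */
		unsigned long scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;

		printf("freq_scale = %lu / 1024\n", scale);	/* prints 512 */
		return 0;
	}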

diff -drupN a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
--- a/drivers/cpufreq/cpufreq.c 2018-08-06 17:23:04.000000000 +0300
+++ b/drivers/cpufreq/cpufreq.c 2022-06-12 05:28:14.000000000 +0300
@@ -19,6 +19,7 @@
#include <linux/cpu.h>
#include <linux/cpufreq.h>
+#include <linux/cpufreq_times.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/init.h>
@@ -29,6 +30,9 @@
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/tick.h>
+#ifdef CONFIG_SMP
+#include <linux/sched.h>
+#endif
#include <trace/events/power.h>

static LIST_HEAD(cpufreq_policy_list);
@@ -117,6 +121,12 @@ bool have_governor_per_policy(void)
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);

+bool cpufreq_driver_is_slow(void)
+{
+ return !(cpufreq_driver->flags & CPUFREQ_DRIVER_FAST);
+}
+EXPORT_SYMBOL_GPL(cpufreq_driver_is_slow);
+
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
if (have_governor_per_policy())
@@ -301,6 +311,92 @@ static void adjust_jiffies(unsigned long
#endif
}

+/*********************************************************************
+ * FREQUENCY INVARIANT CPU CAPACITY *
+ *********************************************************************/
+
+static DEFINE_PER_CPU(unsigned long, freq_scale) = SCHED_CAPACITY_SCALE;
+static DEFINE_PER_CPU(unsigned long, max_freq_cpu);
+static DEFINE_PER_CPU(unsigned long, max_freq_scale) = SCHED_CAPACITY_SCALE;
+static DEFINE_PER_CPU(unsigned long, min_freq_scale);
+
+static void
+scale_freq_capacity(const cpumask_t *cpus, unsigned long cur_freq,
+ unsigned long max_freq)
+{
+ unsigned long scale = (cur_freq << SCHED_CAPACITY_SHIFT) / max_freq;
+ int cpu;
+
+ for_each_cpu(cpu, cpus) {
+ per_cpu(freq_scale, cpu) = scale;
+ per_cpu(max_freq_cpu, cpu) = max_freq;
+ }
+
+ pr_debug("cpus %*pbl cur freq/max freq %lu/%lu kHz freq scale %lu\n",
+ cpumask_pr_args(cpus), cur_freq, max_freq, scale);
+}
+
+unsigned long cpufreq_scale_freq_capacity(struct sched_domain *sd, int cpu)
+{
+ return per_cpu(freq_scale, cpu);
+}
+
+static void
+scale_max_freq_capacity(const cpumask_t *cpus, unsigned long policy_max_freq)
+{
+ unsigned long scale, max_freq;
+ int cpu = cpumask_first(cpus);
+
+ if (cpu >= nr_cpu_ids)
+ return;
+
+ max_freq = per_cpu(max_freq_cpu, cpu);
+
+ if (!max_freq)
+ return;
+
+ scale = (policy_max_freq << SCHED_CAPACITY_SHIFT) / max_freq;
+
+ for_each_cpu(cpu, cpus)
+ per_cpu(max_freq_scale, cpu) = scale;
+
+ pr_debug("cpus %*pbl policy max freq/max freq %lu/%lu kHz max freq scale %lu\n",
+ cpumask_pr_args(cpus), policy_max_freq, max_freq, scale);
+}
+
+unsigned long cpufreq_scale_max_freq_capacity(struct sched_domain *sd, int cpu)
+{
+ return per_cpu(max_freq_scale, cpu);
+}
+
+static void
+scale_min_freq_capacity(const cpumask_t *cpus, unsigned long policy_min_freq)
+{
+ unsigned long scale, max_freq;
+ int cpu = cpumask_first(cpus);
+
+ if (cpu >= nr_cpu_ids)
+ return;
+
+ max_freq = per_cpu(max_freq_cpu, cpu);
+
+ if (!max_freq)
+ return;
+
+ scale = (policy_min_freq << SCHED_CAPACITY_SHIFT) / max_freq;
+
+ for_each_cpu(cpu, cpus)
+ per_cpu(min_freq_scale, cpu) = scale;
+
+ pr_debug("cpus %*pbl policy min freq/max freq %lu/%lu kHz min freq scale %lu\n",
+ cpumask_pr_args(cpus), policy_min_freq, max_freq, scale);
+}
+
+unsigned long cpufreq_scale_min_freq_capacity(struct sched_domain *sd, int cpu)
+{
+ return per_cpu(min_freq_scale, cpu);
+}
+
static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs, unsigned int state)
{
@@ -339,6 +435,7 @@ static void __cpufreq_notify_transition(
(unsigned long)freqs->new, (unsigned long)freqs->cpu);
trace_cpu_frequency(freqs->new, freqs->cpu);
cpufreq_stats_record_transition(policy, freqs->new);
+ cpufreq_times_record_transition(freqs);
srcu_notifier_call_chain(&cpufreq_transition_notifier_list,
CPUFREQ_POSTCHANGE, freqs);
if (likely(policy) && likely(policy->cpu == freqs->cpu))
@@ -378,6 +475,9 @@ static void cpufreq_notify_post_transiti
void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
struct cpufreq_freqs *freqs)
{
+#ifdef CONFIG_SMP
+ int cpu;
#endif

/*
* Catch double invocations of _begin() which lead to self-deadlock.
@@ -405,6 +505,12 @@ wait:

spin_unlock(&policy->transition_lock);

+ scale_freq_capacity(policy->cpus, freqs->new, policy->cpuinfo.max_freq);
+#ifdef CONFIG_SMP
+ for_each_cpu(cpu, policy->cpus)
+ trace_cpu_capacity(capacity_curr_of(cpu), cpu);
+#endif
+
cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
}
EXPORT_SYMBOL_GPL(cpufreq_freq_transition_begin);
@@ -1257,6 +1363,7 @@ static int cpufreq_online(unsigned int c
goto out_exit_policy;

cpufreq_stats_create_table(policy);
+ cpufreq_times_create_policy(policy);
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_CREATE_POLICY, policy);

@@ -2201,8 +2308,12 @@ static int cpufreq_set_policy(struct cpu
blocking_notifier_call_chain(&cpufreq_policy_notifier_list,
CPUFREQ_NOTIFY, new_policy);

+ scale_max_freq_capacity(policy->cpus, policy->max);
+ scale_min_freq_capacity(policy->cpus, policy->min);
+
policy->min = new_policy->min;
policy->max = new_policy->max;
+ trace_cpu_frequency_limits(policy->max, policy->min, policy->cpu);

policy->cached_target_freq = UINT_MAX;