mirror of https://github.com/OpenIPC/firmware.git
85 lines
3.6 KiB
Diff
diff -drupN a/include/linux/cpufreq.h b/include/linux/cpufreq.h
--- a/include/linux/cpufreq.h	2018-08-06 17:23:04.000000000 +0300
+++ b/include/linux/cpufreq.h	2022-06-12 05:28:14.000000000 +0300
@@ -120,6 +120,14 @@ struct cpufreq_policy {
 	bool fast_switch_possible;
 	bool fast_switch_enabled;
 
+	/*
+	 * Preferred average time interval between consecutive invocations of
+	 * the driver to set the frequency for this policy. To be set by the
+	 * scaling driver (0, which is the default, means no preference).
+	 */
+	unsigned int up_transition_delay_us;
+	unsigned int down_transition_delay_us;
+
 	/* Cached frequency lookup from cpufreq_driver_resolve_freq. */
 	unsigned int cached_target_freq;
 	int cached_resolved_idx;
@@ -177,6 +185,7 @@ u64 get_cpu_idle_time(unsigned int cpu,
 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
 int cpufreq_update_policy(unsigned int cpu);
 bool have_governor_per_policy(void);
+bool cpufreq_driver_is_slow(void);
 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy);
 void cpufreq_enable_fast_switch(struct cpufreq_policy *policy);
 void cpufreq_disable_fast_switch(struct cpufreq_policy *policy);
@@ -359,6 +368,14 @@ struct cpufreq_driver {
  */
 #define CPUFREQ_NEED_INITIAL_FREQ_CHECK (1 << 5)
 
+/*
+ * Indicates that it is safe to call cpufreq_driver_target from
+ * non-interruptable context in scheduler hot paths. Drivers must
+ * opt-in to this flag, as the safe default is that they might sleep
+ * or be too slow for hot path use.
+ */
+#define CPUFREQ_DRIVER_FAST (1 << 6)
+
 int cpufreq_register_driver(struct cpufreq_driver *driver_data);
 int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
 
@@ -553,6 +570,32 @@ struct governor_attr {
 	ssize_t (*store)(struct gov_attr_set *attr_set, const char *buf,
 			 size_t count);
 };
+/* CPUFREQ DEFAULT GOVERNOR */
+/*
+ * Performance governor is fallback governor if any other gov failed to auto
+ * load due latency restrictions
+ */
+#ifdef CONFIG_CPU_FREQ_GOV_PERFORMANCE
+extern struct cpufreq_governor cpufreq_gov_performance;
+#endif
+#ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
+#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_performance)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE)
+extern struct cpufreq_governor cpufreq_gov_powersave;
+#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_powersave)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE)
+extern struct cpufreq_governor cpufreq_gov_userspace;
+#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_userspace)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND)
+extern struct cpufreq_governor cpufreq_gov_ondemand;
+#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_ondemand)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE)
+extern struct cpufreq_governor cpufreq_gov_conservative;
+#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_conservative)
+#elif defined(CONFIG_CPU_FREQ_DEFAULT_GOV_SCHED)
+extern struct cpufreq_governor cpufreq_gov_sched;
+#define CPUFREQ_DEFAULT_GOVERNOR (&cpufreq_gov_sched)
+#endif
 
 /*********************************************************************
  * FREQUENCY TABLE HELPERS *
@@ -886,4 +929,9 @@ unsigned int cpufreq_generic_get(unsigne
 int cpufreq_generic_init(struct cpufreq_policy *policy,
 		struct cpufreq_frequency_table *table,
 		unsigned int transition_latency);
+
+struct sched_domain;
+unsigned long cpufreq_scale_freq_capacity(struct sched_domain *sd, int cpu);
+unsigned long cpufreq_scale_max_freq_capacity(struct sched_domain *sd, int cpu);
+unsigned long cpufreq_scale_min_freq_capacity(struct sched_domain *sd, int cpu);
 #endif /* _LINUX_CPUFREQ_H */