mirror of https://github.com/OpenIPC/firmware.git
1263 lines
35 KiB
Diff
diff -drupN a/drivers/cpufreq/sunxi-iks-cpufreq.c b/drivers/cpufreq/sunxi-iks-cpufreq.c
--- a/drivers/cpufreq/sunxi-iks-cpufreq.c 1970-01-01 03:00:00.000000000 +0300
+++ b/drivers/cpufreq/sunxi-iks-cpufreq.c 2022-06-12 05:28:14.000000000 +0300
@@ -0,0 +1,1258 @@
+/*
+ * linux/drivers/cpufreq/sunxi-iks-cpufreq.c
+ *
+ * Copyright(c) 2013-2015 Allwinnertech Co., Ltd.
+ * http://www.allwinnertech.com
+ *
+ * Author: sunny <sunny@allwinnertech.com>
+ *
+ * allwinner sunxi iks cpufreq driver.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/clk.h>
+#include <linux/cpu.h>
+#include <linux/cpufreq.h>
+#include <linux/cpumask.h>
+#include <linux/export.h>
+#include <linux/mutex.h>
+//#include <linux/opp.h>
+#include <linux/slab.h>
+#include <linux/topology.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <asm/bL_switcher.h>
+#include <linux/arisc/arisc.h>
+#include <linux/regulator/consumer.h>
+#include <linux/sunxi-sid.h>
+#include <linux/delay.h>
+#include <asm/cacheflush.h>
+#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
+#include <linux/hrtimer.h>
+#endif
+
+#include "sunxi-iks-cpufreq.h"
+
+#ifdef CONFIG_DEBUG_FS
+static unsigned long long c0_set_time_usecs;
+static unsigned long long c0_get_time_usecs;
+static unsigned long long c1_set_time_usecs;
+static unsigned long long c1_get_time_usecs;
+#endif
+
+#if defined(CONFIG_ARCH_SUN8IW6P1) || defined(CONFIG_ARCH_SUN8IW9P1) || defined(CONFIG_ARCH_SUN8IW17P1)
+#define PLL1_CLK "pll_cpu0"
+#define PLL2_CLK "pll_cpu1"
+
+#define CLUSTER0_CLK "cluster0"
+#define CLUSTER1_CLK "cluster1"
+#define ARISC_DVFS_VF_TABLE_MAX (16)
+
+static struct cpufreq_frequency_table sunxi_freq_table_ca7[] = {
+ {0, 2016 * 1000},
+ {0, 1800 * 1000},
+ {0, 1608 * 1000},
+ {0, 1200 * 1000},
+ {0, 1128 * 1000},
+ {0, 1008 * 1000},
+ {0, 912 * 1000},
+ {0, 864 * 1000},
+ {0, 720 * 1000},
+ {0, 600 * 1000},
+ {0, 480 * 1000},
+ {0, CPUFREQ_TABLE_END},
+};
+typedef struct arisc_freq_voltage {
+ u32 freq;
+ u32 voltage;
+ u32 axi_div;
+} arisc_freq_voltage_t;
+
+static struct arisc_freq_voltage arisc_vf_table[2][ARISC_DVFS_VF_TABLE_MAX] = {
+ /*
+ * cpu0 vdd is 1.20v if cpu freq is (600Mhz, 1008Mhz]
+ * cpu0 vdd is 1.20v if cpu freq is (420Mhz, 600Mhz]
+ * cpu0 vdd is 1.20v if cpu freq is (360Mhz, 420Mhz]
+ * cpu0 vdd is 1.20v if cpu freq is (300Mhz, 360Mhz]
+ * cpu0 vdd is 1.20v if cpu freq is (240Mhz, 300Mhz]
+ * cpu0 vdd is 1.20v if cpu freq is (120Mhz, 240Mhz]
+ * cpu0 vdd is 1.20v if cpu freq is (60Mhz, 120Mhz]
+ * cpu0 vdd is 1.20v if cpu freq is (0Mhz, 60Mhz]
+ */
+ {
+ /* freq voltage axi_div */
+ {900000000, 1200, 3},
+ {600000000, 1200, 3},
+ {420000000, 1200, 3},
+ {360000000, 1200, 3},
+ {300000000, 1200, 3},
+ {240000000, 1200, 3},
+ {120000000, 1200, 3},
+ {60000000, 1200, 3},
+ {0, 1200, 3},
+ {0, 1200, 3},
+ {0, 1200, 3},
+ {0, 1200, 3},
+ {0, 1200, 3},
+ {0, 1200, 3},
+ {0, 1200, 3},
+ {0, 1200, 3},
+ },
+
+ /*
+ * cpu0 vdd is 1.20v if cpu freq is (600Mhz, 1008Mhz]
+ * cpu0 vdd is 1.20v if cpu freq is (420Mhz, 600Mhz]
+ * cpu0 vdd is 1.20v if cpu freq is (360Mhz, 420Mhz]
+ * cpu0 vdd is 1.20v if cpu freq is (300Mhz, 360Mhz]
+ * cpu0 vdd is 1.20v if cpu freq is (240Mhz, 300Mhz]
+ * cpu0 vdd is 1.20v if cpu freq is (120Mhz, 240Mhz]
+ * cpu0 vdd is 1.20v if cpu freq is (60Mhz, 120Mhz]
+ * cpu0 vdd is 1.20v if cpu freq is (0Mhz, 60Mhz]
+ */
+ {
+ /* freq voltage axi_div */
+ {900000000, 1200, 3},
+ {600000000, 1200, 3},
+ {420000000, 1200, 3},
+ {360000000, 1200, 3},
+ {300000000, 1200, 3},
+ {240000000, 1200, 3},
+ {120000000, 1200, 3},
+ {60000000, 1200, 3},
+ {0, 1200, 3},
+ {0, 1200, 3},
+ {0, 1200, 3},
+ {0, 1200, 3},
+ {0, 1200, 3},
+ {0, 1200, 3},
+ {0, 1200, 3},
+ {0, 1200, 3},
+ },
+};
+
+#define sunxi_freq_table_ca15 sunxi_freq_table_ca7
+
+/*
+ * Notice:
+ * The definition of the minimum frequency should be a valid value
+ * in the frequency table; otherwise, there may be some power efficiency
+ * lost in the interactive governor when the cpufreq_interactive_idle_start
+ * tries to check the frequency status:
+ * if (pcpu->target_freq != pcpu->policy->min) {}
+ * the target_freq will never be equal to policy->min, so the timer
+ * will wake up the cpu frequently
+ */
+/* config the maximum frequency of sunxi core */
+#define SUNXI_CPUFREQ_L_MAX (2016000000)
+/* config the minimum frequency of sunxi core */
+#define SUNXI_CPUFREQ_L_MIN (480000000)
+/* config the maximum frequency of sunxi core */
+#define SUNXI_CPUFREQ_B_MAX SUNXI_CPUFREQ_L_MAX
+/* config the minimum frequency of sunxi core */
+#define SUNXI_CPUFREQ_B_MIN SUNXI_CPUFREQ_L_MIN
+
+#else
+
+static struct cpufreq_frequency_table sunxi_freq_table_ca7[] = {
+ {0, 1200 * 1000},
+ {0, 1104 * 1000},
+ {0, 1008 * 1000},
+ {0, 912 * 1000},
+ {0, 816 * 1000},
+ {0, 720 * 1000},
+ {0, 600 * 1000},
+ {0, 480 * 1000},
+ {0, CPUFREQ_TABLE_END},
+};
+
+static struct cpufreq_frequency_table sunxi_freq_table_ca15[] = {
+ {0, 1800 * 1000},
+ {0, 1704 * 1000},
+ {0, 1608 * 1000},
+ {0, 1512 * 1000},
+ {0, 1416 * 1000},
+ {0, 1320 * 1000},
+ {0, 1200 * 1000},
+ {0, 1104 * 1000},
+ {0, 1008 * 1000},
+ {0, 912 * 1000},
+ {0, 816 * 1000},
+ {0, 720 * 1000},
+ {0, 600 * 1000},
+ {0, CPUFREQ_TABLE_END},
+};
+
+/*
+ * Notice:
+ * The definition of the minimum frequency should be a valid value
+ * in the frequency table; otherwise, there may be some power efficiency
+ * lost in the interactive governor when the cpufreq_interactive_idle_start
+ * tries to check the frequency status:
+ * if (pcpu->target_freq != pcpu->policy->min) {}
+ * the target_freq will never be equal to policy->min, so the timer
+ * will wake up the cpu frequently
+ */
+/* config the maximum frequency of sunxi core */
+#define SUNXI_CPUFREQ_L_MAX (1200000000)
+/* config the minimum frequency of sunxi core */
+#define SUNXI_CPUFREQ_L_MIN (480000000)
+/* config the maximum frequency of sunxi core */
+#define SUNXI_CPUFREQ_B_MAX (1800000000)
+/* config the minimum frequency of sunxi core */
+#define SUNXI_CPUFREQ_B_MIN (600000000)
+
+#endif
+
+static unsigned int l_freq_max = SUNXI_CPUFREQ_L_MAX / 1000;
+static unsigned int l_freq_boot = SUNXI_CPUFREQ_L_MAX / 1000;
+static unsigned int l_freq_ext = SUNXI_CPUFREQ_L_MAX / 1000;
+static unsigned int l_freq_min = SUNXI_CPUFREQ_L_MIN / 1000;
+static unsigned int b_freq_max = SUNXI_CPUFREQ_B_MAX / 1000;
+static unsigned int b_freq_boot = SUNXI_CPUFREQ_B_MAX / 1000;
+static unsigned int b_freq_ext = SUNXI_CPUFREQ_B_MAX / 1000;
+static unsigned int b_freq_min = SUNXI_CPUFREQ_B_MIN / 1000;
+
+#ifdef CONFIG_BL_SWITCHER
+bool bL_switching_enabled;
+#endif
+
+int sunxi_dvfs_debug;
+int sunxi_boot_freq_lock;
+
+static struct clk *clk_pll1; /* pll1 clock handler */
+static struct clk *clk_pll2; /* pll2 clock handler */
+static struct clk *cluster_clk[MAX_CLUSTERS];
+static unsigned int cluster_pll[MAX_CLUSTERS];
+static struct cpufreq_frequency_table *freq_table[MAX_CLUSTERS + 1];
+static struct regulator *cpu_vdd[MAX_CLUSTERS]; /* cpu vdd handler */
+
+static DEFINE_PER_CPU(unsigned int, physical_cluster);
+static DEFINE_PER_CPU(unsigned int, cpu_last_req_freq);
+
+static struct mutex cluster_lock[MAX_CLUSTERS];
+static unsigned int sunxi_cpufreq_set_rate(u32 cpu, u32 old_cluster,
+ u32 new_cluster, u32 rate);
+
+static unsigned int find_cluster_maxfreq(int cluster)
+{
+ int j;
+ u32 max_freq = 0, cpu_freq;
+
+ for_each_online_cpu(j) {
+ cpu_freq = per_cpu(cpu_last_req_freq, j);
+ if ((cluster == per_cpu(physical_cluster, j)) && (max_freq < cpu_freq))
+ max_freq = cpu_freq;
+ }
+
+ if (unlikely(sunxi_dvfs_debug))
+ CPUFREQ_DBG("%s: cluster:%d, max freq:%d\n", __func__, cluster, max_freq);
+
+ return max_freq;
+}
+
+/*
+ * get the current cpu vdd;
+ * return: cpu vdd, in mV;
+ */
+static int sunxi_cpufreq_getvolt(unsigned int cpu)
+{
+ u32 cur_cluster = per_cpu(physical_cluster, cpu);
+
+ return regulator_get_voltage(cpu_vdd[cur_cluster]) / 1000;
+}
+
+
+static unsigned int sunxi_clk_get_cpu_rate(unsigned int cpu)
+{
+ u32 cur_cluster = per_cpu(physical_cluster, cpu), rate;
+ u32 other_cluster_cpu_num = 0;
+
+#ifdef CONFIG_SCHED_SMP_DCMP
+ u32 cpu_id;
+ u32 other_cluster = MAX_CLUSTERS;
+ u32 other_cluster_cpu = nr_cpu_ids;
+ u32 other_cluster_rate;
+ u32 tmp_cluster;
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+ ktime_t calltime = ktime_set(0, 0), delta, rettime;
+#endif
+
+#ifdef CONFIG_SCHED_SMP_DCMP
+ for_each_online_cpu(cpu_id) {
+ tmp_cluster = cpu_to_cluster(cpu_id);
+ if (tmp_cluster != cur_cluster) {
+ other_cluster_cpu_num++;
+ if (other_cluster == MAX_CLUSTERS) {
+ other_cluster_cpu = cpu_id;
+ other_cluster = tmp_cluster;
+ }
+ }
+ }
+#endif
+
+ if (other_cluster_cpu_num) {
+ mutex_lock(&cluster_lock[A15_CLUSTER]);
+ mutex_lock(&cluster_lock[A7_CLUSTER]);
+ } else {
+ mutex_lock(&cluster_lock[cur_cluster]);
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ calltime = ktime_get();
+#endif
+
+ if (cur_cluster == A7_CLUSTER)
+ clk_get_rate(clk_pll1);
+ else if (cur_cluster == A15_CLUSTER)
+ clk_get_rate(clk_pll2);
+
+ rate = clk_get_rate(cluster_clk[cur_cluster]) / 1000;
+
+ /* For switcher we use virtual A15 clock rates */
+ if (is_bL_switching_enabled())
+ rate = VIRT_FREQ(cur_cluster, rate);
+
+#ifdef CONFIG_SCHED_SMP_DCMP
+ if (other_cluster_cpu_num) {
+ if (other_cluster == A7_CLUSTER)
+ clk_get_rate(clk_pll1);
+ else if (other_cluster == A15_CLUSTER)
+ clk_get_rate(clk_pll2);
+
+ other_cluster_rate = clk_get_rate(cluster_clk[other_cluster]);
+ other_cluster_rate /= 1000;
+ /* For switcher we use virtual A15 clock rates */
+ if (is_bL_switching_enabled())
+ other_cluster_rate = VIRT_FREQ(other_cluster,
+ other_cluster_rate);
+ }
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ if (cur_cluster == A7_CLUSTER)
+ c0_get_time_usecs = ktime_to_ns(delta) >> 10;
+ else if (cur_cluster == A15_CLUSTER)
+ c1_get_time_usecs = ktime_to_ns(delta) >> 10;
+#endif
+
+ if (unlikely(sunxi_dvfs_debug))
+ CPUFREQ_DBG("cpu:%d, cur_cluster:%d, cur_freq:%d\n",
+ cpu, cur_cluster, rate);
+
+#ifdef CONFIG_SCHED_SMP_DCMP
+ if (other_cluster_cpu_num) {
+ mutex_unlock(&cluster_lock[A7_CLUSTER]);
+ mutex_unlock(&cluster_lock[A15_CLUSTER]);
+ if (other_cluster_rate != rate) {
+ sunxi_cpufreq_set_rate(other_cluster_cpu,
+ other_cluster, other_cluster, rate);
+ }
+ } else
+#endif
+ mutex_unlock(&cluster_lock[cur_cluster]);
+
+ return rate;
+
+}
+
+#ifdef CONFIG_SUNXI_ARISC
+static int clk_set_fail_notify(void *arg)
+{
+ CPUFREQ_ERR("%s: cluster: %d\n", __func__, (u32)arg);
+
+ /* maybe should do others */
+
+ return 0;
+}
+#endif
+
+static int sunxi_clk_set_cpu_rate(unsigned int cluster, unsigned int cpu, unsigned int rate)
+{
+
+#ifdef CONFIG_SUNXI_CPUFREQ_ASYN
+ unsigned long timeout;
+#endif
+ int ret;
+#ifdef CONFIG_DEBUG_FS
+ ktime_t calltime = ktime_set(0, 0), delta, rettime;
+#endif
+
+ if (unlikely(sunxi_dvfs_debug))
+ CPUFREQ_DBG("cpu:%d, cluster:%d, set freq:%u\n", cpu, cluster, rate);
+
+#ifdef CONFIG_DEBUG_FS
+ calltime = ktime_get();
+#endif
+
+#ifndef CONFIG_SUNXI_ARISC
+ ret = 0;
+#else
+#ifdef CONFIG_SUNXI_CPUFREQ_ASYN
+ ret = arisc_dvfs_set_cpufreq(rate, cluster_pll[cluster],
+ ARISC_MESSAGE_ATTR_ASYN,
+ clk_set_fail_notify, (void *)cluster);
+ /* CPUS max latency for cpu freq */
+ timeout = 15;
+
+ while (timeout-- && ((A7_CLUSTER == cluster ? clk_get_rate(clk_pll1) : clk_get_rate(clk_pll2)) != rate*1000))
+ msleep(1);
+ if ((A7_CLUSTER == cluster ? clk_get_rate(clk_pll1) : clk_get_rate(clk_pll2)) != rate*1000)
+ ret = -1;
+#else
+ ret = arisc_dvfs_set_cpufreq(rate, cluster_pll[cluster],
+ ARISC_MESSAGE_ATTR_SOFTSYN,
+ clk_set_fail_notify, (void *)cluster);
+#endif
+#endif
+
+#ifdef CONFIG_DEBUG_FS
+ rettime = ktime_get();
+ delta = ktime_sub(rettime, calltime);
+ if (cluster == A7_CLUSTER)
+ c0_set_time_usecs = ktime_to_ns(delta) >> 10;
+ else if (cluster == A15_CLUSTER)
+ c1_set_time_usecs = ktime_to_ns(delta) >> 10;
+#endif
+
+ return ret;
+}
+
+static unsigned int sunxi_cpufreq_set_rate(u32 cpu, u32 old_cluster,
+ u32 new_cluster, u32 rate)
+{
+ u32 new_rate, prev_rate;
+ int ret;
+
+ mutex_lock(&cluster_lock[new_cluster]);
+
+ prev_rate = per_cpu(cpu_last_req_freq, cpu);
+ per_cpu(cpu_last_req_freq, cpu) = rate;
+ per_cpu(physical_cluster, cpu) = new_cluster;
+
+ if (is_bL_switching_enabled()) {
+ new_rate = find_cluster_maxfreq(new_cluster);
+ new_rate = ACTUAL_FREQ(new_cluster, new_rate);
+ } else
+ new_rate = rate;
+
+ if (unlikely(sunxi_dvfs_debug))
+ CPUFREQ_DBG("cpu:%d, old cluster:%d, new cluster:%d, target freq:%d\n",
+ cpu, old_cluster, new_cluster, new_rate);
+ ret = sunxi_clk_set_cpu_rate(new_cluster, cpu, new_rate);
+ if (WARN_ON(ret)) {
+ CPUFREQ_ERR("clk_set_rate failed:%d, new cluster:%d\n", ret, new_cluster);
+ per_cpu(cpu_last_req_freq, cpu) = prev_rate;
+ per_cpu(physical_cluster, cpu) = old_cluster;
+
+ mutex_unlock(&cluster_lock[new_cluster]);
+
+ return ret;
+ }
+ mutex_unlock(&cluster_lock[new_cluster]);
+
+ if (is_bL_switching_enabled()) {
+ /* Recalc freq for old cluster when switching clusters */
+ if (old_cluster != new_cluster) {
+ if (unlikely(sunxi_dvfs_debug))
+ CPUFREQ_DBG("cpu:%d, switch from cluster-%d to cluster-%d\n",
+ cpu, old_cluster, new_cluster);
+
+ bL_switch_request(cpu, new_cluster);
+
+ mutex_lock(&cluster_lock[old_cluster]);
+ /* Set freq of old cluster if there are cpus left on it */
+ new_rate = find_cluster_maxfreq(old_cluster);
+ new_rate = ACTUAL_FREQ(old_cluster, new_rate);
+ if (new_rate) {
+ if (unlikely(sunxi_dvfs_debug))
+ CPUFREQ_DBG("Updating rate of old cluster:%d, to freq:%d\n",
+ old_cluster, new_rate);
+
+ if (sunxi_clk_set_cpu_rate(old_cluster, cpu, new_rate))
+ CPUFREQ_ERR("clk_set_rate failed: %d, old cluster:%d\n",
+ ret, old_cluster);
+ }
+ mutex_unlock(&cluster_lock[old_cluster]);
+ }
+ }
+
+ return 0;
+}
+
+/* Validate policy frequency range */
+static int sunxi_cpufreq_verify_policy(struct cpufreq_policy *policy)
+{
+ u32 cur_cluster = cpu_to_cluster(policy->cpu);
+
+ return cpufreq_frequency_table_verify(policy, freq_table[cur_cluster]);
+}
+
+/* Set clock frequency */
+static int sunxi_cpufreq_set_target_index(struct cpufreq_policy *policy,
+ unsigned int index)
+{
+ struct cpufreq_freqs freqs;
+ u32 cpu = policy->cpu;
+ u32 target_freq;
+ u32 cur_cluster, new_cluster, actual_cluster;
+ int ret = 0;
+#ifdef CONFIG_SCHED_SMP_DCMP
+ u32 i, other_cluster;
+#endif
+
+ cur_cluster = cpu_to_cluster(cpu);
+ new_cluster = actual_cluster = per_cpu(physical_cluster, cpu);
+
+ freqs.old = sunxi_clk_get_cpu_rate(cpu);
+
+ if (unlikely(sunxi_dvfs_debug))
+ CPUFREQ_DBG("request frequency is %u\n", target_freq);
+
+#if 0
+ if (unlikely(sunxi_boot_freq_lock)) {
+ boot_freq = cur_cluster == A7_CLUSTER ? l_freq_boot : b_freq_boot;
+ target_freq = target_freq > boot_freq ? boot_freq : target_freq;
+ }
+#endif
+
+ freqs.new = freq_table[cur_cluster][index].frequency;
+ if (freqs.old == freqs.new)
+ return 0;
+
+ if (unlikely(sunxi_dvfs_debug))
+ CPUFREQ_DBG("target frequency find is %u, entry %u\n", freqs.new, index);
+
+ if (is_bL_switching_enabled()) {
+ if ((actual_cluster == A15_CLUSTER) &&
+ (freqs.new < SUNXI_BL_SWITCH_THRESHOLD))
+ new_cluster = A7_CLUSTER;
+ else if ((actual_cluster == A7_CLUSTER) &&
+ (freqs.new > SUNXI_BL_SWITCH_THRESHOLD))
+ new_cluster = A15_CLUSTER;
+ }
+
+ ret = sunxi_cpufreq_set_rate(cpu, actual_cluster, new_cluster, freqs.new);
+#ifdef CONFIG_SCHED_SMP_DCMP
+ for_each_online_cpu(i) {
+ other_cluster = cpu_to_cluster(i);
+ if (other_cluster != actual_cluster) {
+ ret = sunxi_cpufreq_set_rate(i, other_cluster, other_cluster, freqs.new);
+ break;
+ }
+ }
+#endif
+ if (ret)
+ return ret;
+
+ policy->cur = freqs.new;
+
+ if (unlikely(sunxi_dvfs_debug))
+ CPUFREQ_DBG("DVFS done! Freq[%uMHz] Volt[%umv] ok\n\n",
+ sunxi_clk_get_cpu_rate(cpu) / 1000,
+ sunxi_cpufreq_getvolt(cpu));
+
+ return ret;
+}
+
+static inline u32 get_table_count(struct cpufreq_frequency_table *table)
+{
+ int count;
+
+ for (count = 0; table[count].frequency != CPUFREQ_TABLE_END; count++)
+ ;
+
+ return count;
+}
+
+
+static int merge_cluster_tables(void)
+{
+ int i, j, k = 0, count = 1;
+ struct cpufreq_frequency_table *table;
+
+ for (i = 0; i < MAX_CLUSTERS; i++)
+ count += get_table_count(freq_table[i]);
+
+ table = kzalloc(sizeof(*table) * count, GFP_KERNEL);
+ if (!table)
+ return -ENOMEM;
+
+ freq_table[MAX_CLUSTERS] = table;
+
+ /* Add in reverse order to get freqs in increasing order */
+ for (i = MAX_CLUSTERS - 1; i >= 0; i--) {
+ for (j = 0; freq_table[i][j].frequency != CPUFREQ_TABLE_END; j++) {
+ table[k].frequency = VIRT_FREQ(i, freq_table[i][j].frequency);
+ CPUFREQ_DBG("%s: index: %d, freq: %d\n", __func__, k, table[k].frequency);
+ k++;
+ }
+ }
+
+ // table[k].index = k;
+ table[k].frequency = CPUFREQ_TABLE_END;
+
+ CPUFREQ_DBG("%s: End, table: %p, count: %d\n", __func__, table, k);
+
+ return 0;
+}
+
+/* get the cpufreq table from dts
+ */
+#ifdef CONFIG_ARCH_SUN8IW17P1
+static int init_cpufreq_table_dt(void)
+{
+ struct device_node *np;
+ const struct property *prop;
+ int cnt, i;
+ const __be32 *val;
+
+ np = of_find_node_by_path("/cpus/cpu@0");
+ if (!np) {
+ CPUFREQ_ERR("No cpu node found\n");
+ return -ENODEV;
+ }
+
+ prop = of_find_property(np, "cpufreq_tbl", NULL);
+ if (!prop || !prop->value) {
+ CPUFREQ_ERR("Invalid cpufreq_tbl\n");
+ return -ENODEV;
+ }
+
+ cnt = prop->length / sizeof(u32);
+ val = prop->value;
+ for (i = 0; i < cnt; i++) {
+ // sunxi_freq_table_ca7[i].index = i;
+ sunxi_freq_table_ca7[i].frequency = be32_to_cpup(val++);
+
+ // sunxi_freq_table_ca15[i].index = i;
+ sunxi_freq_table_ca15[i].frequency =
+ sunxi_freq_table_ca7[i].frequency;
+ }
+
+ // sunxi_freq_table_ca7[i].index = i;
+ // sunxi_freq_table_ca15[i].index = i;
+ sunxi_freq_table_ca15[i].frequency = CPUFREQ_TABLE_END;
+ sunxi_freq_table_ca7[i].frequency = CPUFREQ_TABLE_END;
+
+ return 0;
+}
+#endif
+
+/*
+ * init cpu max/min frequency from dt;
+ * return: 0 - init cpu max/min succeeded, !0 - init cpu max/min failed;
+ */
+static int __init_freq_dt(char *tbl_name)
+{
+ int ret = 0;
+ struct device_node *main_np;
+ struct device_node *sub_np;
+
+ main_np = of_find_node_by_path("/dvfs_table");
+ if (!main_np) {
+ CPUFREQ_ERR("No dvfs table node found\n");
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ sub_np = of_find_compatible_node(main_np, NULL, tbl_name);
+ if (!sub_np) {
+ CPUFREQ_ERR("No %s node found\n", tbl_name);
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ if (of_property_read_u32(sub_np, "L_max_freq", &l_freq_max)) {
+ CPUFREQ_ERR("get cpu l_max frequency from dt failed\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (of_property_read_u32(sub_np, "L_min_freq", &l_freq_min)) {
+ CPUFREQ_ERR("get cpu l_min frequency from dt failed\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (of_property_read_u32(sub_np, "L_extremity_freq", &l_freq_ext))
+ l_freq_ext = l_freq_max;
+
+ if (of_property_read_u32(sub_np, "L_boot_freq", &l_freq_boot))
+ l_freq_boot = l_freq_max;
+ else
+ sunxi_boot_freq_lock = 1;
+
+ if (of_property_read_u32(sub_np, "B_max_freq", &b_freq_max)) {
+ CPUFREQ_ERR("get cpu b_max frequency from dt failed\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (of_property_read_u32(sub_np, "B_min_freq", &b_freq_min)) {
+ CPUFREQ_ERR("get cpu b_min frequency from dt failed\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (of_property_read_u32(sub_np, "B_extremity_freq", &b_freq_ext))
+ b_freq_ext = b_freq_max;
+
+ if (of_property_read_u32(sub_np, "B_boot_freq", &b_freq_boot))
+ b_freq_boot = b_freq_max;
+
+ if (l_freq_max > SUNXI_CPUFREQ_L_MAX || l_freq_max < SUNXI_CPUFREQ_L_MIN
+ || l_freq_min < SUNXI_CPUFREQ_L_MIN || l_freq_min > SUNXI_CPUFREQ_L_MAX) {
+ CPUFREQ_ERR("l_cpu max or min frequency from sysconfig is more than range\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (l_freq_min > l_freq_max) {
+ CPUFREQ_ERR("l_cpu min frequency can not be more than l_cpu max frequency\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (l_freq_ext < l_freq_max) {
+ CPUFREQ_ERR("l_cpu ext frequency can not be less than l_cpu max frequency\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (l_freq_boot > l_freq_max || l_freq_boot < l_freq_min) {
+ CPUFREQ_ERR("l_cpu boot frequency invalid\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (b_freq_max > SUNXI_CPUFREQ_B_MAX || b_freq_max < SUNXI_CPUFREQ_B_MIN
+ || b_freq_min < SUNXI_CPUFREQ_B_MIN || b_freq_min > SUNXI_CPUFREQ_B_MAX) {
+ CPUFREQ_ERR("b_cpu max or min frequency from sysconfig is more than range\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (b_freq_min > b_freq_max) {
+ CPUFREQ_ERR("b_cpu min frequency can not be more than b_cpu max frequency\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (b_freq_ext < b_freq_max) {
+ CPUFREQ_ERR("b_cpu ext frequency can not be less than b_cpu max frequency\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (b_freq_boot > b_freq_max || b_freq_boot < b_freq_min) {
+ CPUFREQ_ERR("b_cpu boot frequency invalid\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ l_freq_max /= 1000;
+ l_freq_min /= 1000;
+ l_freq_ext /= 1000;
+ l_freq_boot /= 1000;
+ b_freq_max /= 1000;
+ b_freq_min /= 1000;
+ b_freq_ext /= 1000;
+ b_freq_boot /= 1000;
+
+ return 0;
+
+fail:
+ /* use default cpu max/min frequency */
+ l_freq_max = SUNXI_CPUFREQ_L_MAX / 1000;
+ l_freq_min = SUNXI_CPUFREQ_L_MIN / 1000;
+ l_freq_ext = l_freq_max;
+ l_freq_boot = l_freq_max;
+ b_freq_max = SUNXI_CPUFREQ_B_MAX / 1000;
+ b_freq_min = SUNXI_CPUFREQ_B_MIN / 1000;
+ b_freq_ext = b_freq_max;
+ b_freq_boot = b_freq_max;
+
+ return ret;
+}
+
+static int sunxi_get_vf_table(void)
+{
+ int cluster = 0;
+ int index = 0;
+ struct device_node *dvfs_main_np;
+ struct device_node *dvfs_sub_np;
+ unsigned int table_count;
+ char cluster_name[2] = {'L', 'B'};
+ int vf_table_type = 0;
+ int vf_table_size[2] = {0, 0};
+ char vf_table_main_key[64];
+ char vf_table_sub_key[64];
+
+ dvfs_main_np = of_find_node_by_path("/dvfs_table");
+ if (!dvfs_main_np) {
+ CPUFREQ_ERR("No dvfs table node found\n");
+ return -ENODEV;
+ }
+
+ if (of_property_read_u32(dvfs_main_np, "vf_table_count",
+ &table_count)) {
+ CPUFREQ_ERR("get vf_table_count failed\n");
+ return -EINVAL;
+ }
+
+ if (table_count == 1) {
+ CPUFREQ_DBG("%s: support only one vf_table\n", __func__);
+ vf_table_type = 0;
+ } else
+ vf_table_type = sunxi_get_soc_bin();
+
+ sprintf(vf_table_main_key, "%s%d", "allwinner,vf_table",
+ vf_table_type);
+
+ CPUFREQ_DBG("%s: vf table type [%d=%s]\n", __func__, vf_table_type,
+ vf_table_main_key);
+
+ /* parse system config v-f table information */
+ for (cluster = 0; cluster < 1; cluster++) {
+ sprintf(vf_table_sub_key, "%c_LV_count",
+ cluster_name[cluster]);
+ dvfs_sub_np = of_find_compatible_node(dvfs_main_np, NULL,
+ vf_table_main_key);
+ if (!dvfs_sub_np) {
+ CPUFREQ_ERR("No %s node found\n", vf_table_main_key);
+ return -ENODEV;
+ }
+
+ if (of_property_read_u32(dvfs_sub_np, vf_table_sub_key,
+ &vf_table_size[cluster])) {
+ CPUFREQ_ERR("get %s failed\n", vf_table_sub_key);
+ return -EINVAL;
+ }
+
+ for (index = 0; index < vf_table_size[cluster]; index++) {
+ sprintf(vf_table_sub_key, "%c_LV%d_freq",
+ cluster_name[cluster], index + 1);
+ if (of_property_read_u32(dvfs_sub_np, vf_table_sub_key,
+ &arisc_vf_table[cluster][index].freq)) {
+ CPUFREQ_ERR("get %s failed\n", vf_table_sub_key);
+ return -EINVAL;
+ }
+
+ CPUFREQ_DBG("%s: freq [%s-%d=%d]\n", __func__,
+ vf_table_sub_key, index,
+ arisc_vf_table[cluster][index].freq);
+
+ sprintf(vf_table_sub_key, "%c_LV%d_volt",
+ cluster_name[cluster], index + 1);
+ if (of_property_read_u32(dvfs_sub_np, vf_table_sub_key,
+ &arisc_vf_table[cluster][index].voltage)) {
+ CPUFREQ_ERR("get %s failed\n", vf_table_sub_key);
+ return -EINVAL;
+ }
+
+ CPUFREQ_DBG("%s: volt [%s-%d=%d]\n", __func__,
+ vf_table_sub_key, index,
+ arisc_vf_table[cluster][index].voltage);
+ }
+ }
+
+ return index;
+
+}
+
+static int sunxi_set_vf_table(u32 num)
+{
+ void *kvir = NULL;
+
+
+ kvir =
+ kmalloc(num * sizeof(struct cpufreq_frequency_table), GFP_KERNEL);
+ if (kvir == NULL) {
+ CPUFREQ_ERR("kmalloc error for transmiting vf table\n");
+ return -1;
+ }
+ memcpy((void *)kvir, (void *)(&arisc_vf_table[0]),
+ num * sizeof(struct cpufreq_frequency_table));
+ __dma_flush_area((void *)kvir, num * sizeof(struct cpufreq_frequency_table));
+
+ arisc_dvfs_cfg_vf_table(0, num, virt_to_phys(kvir));
+
+ kfree(kvir);
+ kvir = NULL;
+
+ return 0;
+}
+
+
+static int sunxi_cpufreq_cpu_init(struct cpufreq_policy *policy)
+{
+ u32 cur_cluster = cpu_to_cluster(policy->cpu);
+ u32 cpu;
+
+ /* set cpu freq-table */
+ if (is_bL_switching_enabled()) {
+ /* bL switcher uses the merged freq table */
+ cpufreq_table_validate_and_show(policy, freq_table[MAX_CLUSTERS]);
+ } else {
+ /* HMP uses the per-cluster freq table */
+ cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]);
+ }
+
+ /* set the target cluster of cpu */
+ if (cur_cluster < MAX_CLUSTERS) {
+ /* set cpu masks */
+#ifdef CONFIG_SCHED_SMP_DCMP
+ cpumask_copy(policy->cpus, cpu_possible_mask);
+#else
+ cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu));
+#endif
+ /* set core sibling */
+ for_each_cpu(cpu, topology_core_cpumask(policy->cpu)) {
+ per_cpu(physical_cluster, cpu) = cur_cluster;
+ }
+ } else {
+ /* Assumption: during init, we are always running on A7 */
+ per_cpu(physical_cluster, policy->cpu) = A7_CLUSTER;
+ }
+ /* initialize current cpu freq */
+ policy->cur = sunxi_clk_get_cpu_rate(policy->cpu);
+ per_cpu(cpu_last_req_freq, policy->cpu) = policy->cur;
+
+ if (unlikely(sunxi_dvfs_debug))
+ CPUFREQ_DBG("cpu:%d, cluster:%d, init freq:%d\n", policy->cpu, cur_cluster, policy->cur);
+
+ /* set the policy min and max freq */
+ if (is_bL_switching_enabled()) {
+ /* bL switcher uses the merged freq table */
+ cpufreq_frequency_table_cpuinfo(policy, freq_table[MAX_CLUSTERS]);
+ } else {
+ /* HMP uses the per-cluster freq table */
+ if (cur_cluster == A7_CLUSTER) {
+ policy->min = policy->cpuinfo.min_freq = l_freq_min;
+ policy->cpuinfo.max_freq = l_freq_ext;
+ policy->max = l_freq_max;
+ // policy->cpuinfo.boot_freq = l_freq_boot;
+ } else if (cur_cluster == A15_CLUSTER) {
+ policy->min = policy->cpuinfo.min_freq = b_freq_min;
+ policy->cpuinfo.max_freq = b_freq_ext;
+ policy->max = b_freq_max;
+ // policy->cpuinfo.boot_freq = b_freq_boot;
+ }
+ }
+
+ /* set the transition latency value */
+ policy->cpuinfo.transition_latency = SUNXI_FREQTRANS_LATENCY;
+
+ return 0;
+}
+
+/* Export freq_table to sysfs */
+static struct freq_attr *sunxi_cpufreq_attr[] = {
+ &cpufreq_freq_attr_scaling_available_freqs,
+ NULL,
+};
+
+static struct cpufreq_driver sunxi_cpufreq_driver = {
+ .name = "sunxi-iks",
+ .flags = CPUFREQ_STICKY,
+ .verify = sunxi_cpufreq_verify_policy,
+ .target_index = sunxi_cpufreq_set_target_index,
+ .get = sunxi_clk_get_cpu_rate,
+ .init = sunxi_cpufreq_cpu_init,
+ // .have_governor_per_policy = true,
+ .attr = sunxi_cpufreq_attr,
+};
+
+static int sunxi_cpufreq_switcher_notifier(struct notifier_block *nfb,
+ unsigned long action, void *_arg)
+{
+ pr_info("%s: action:%lu\n", __func__, action);
+
+ switch (action) {
+ case BL_NOTIFY_PRE_ENABLE:
+ case BL_NOTIFY_PRE_DISABLE:
+ cpufreq_unregister_driver(&sunxi_cpufreq_driver);
+ break;
+
+ case BL_NOTIFY_POST_ENABLE:
+ set_switching_enabled(true);
+ cpufreq_register_driver(&sunxi_cpufreq_driver);
+ break;
+
+ case BL_NOTIFY_POST_DISABLE:
+ set_switching_enabled(false);
+ cpufreq_register_driver(&sunxi_cpufreq_driver);
+ break;
+
+ default:
+ return NOTIFY_DONE;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block sunxi_switcher_notifier = {
+ .notifier_call = sunxi_cpufreq_switcher_notifier,
+};
+
+static int __init sunxi_cpufreq_init(void)
+{
+ int ret, i;
+ char vftbl_name[20] = {0};
+ int vf_table_type = 0;
+ unsigned int vf_table_count = 0;
+ int num;
+ struct device_node *np;
+
+ ret = bL_switcher_get_enabled();
+ set_switching_enabled(ret);
+
+ /* get pll1 pll2 clock handle */
+ clk_pll1 = clk_get(NULL, PLL1_CLK);
+ if (IS_ERR_OR_NULL(clk_pll1))
+ pr_err("get clk_pll1 clk handle err or null\n");
+
+ clk_pll2 = clk_get(NULL, PLL2_CLK);
+
+ if (IS_ERR_OR_NULL(clk_pll2))
+ pr_err("get clk_pll2 clk handle err or null\n");
+
+ /* get cluster clock handle */
+ cluster_clk[A7_CLUSTER] = clk_get(NULL, CLUSTER0_CLK);
+ if (IS_ERR_OR_NULL(cluster_clk[A7_CLUSTER]))
+ pr_err("get cluster_clk[%d] clk handle err or null\n", A7_CLUSTER);
+
+ cluster_clk[A15_CLUSTER] = clk_get(NULL, CLUSTER1_CLK);
+ if (IS_ERR_OR_NULL(cluster_clk[A15_CLUSTER]))
+ pr_err("get cluster_clk[%d] clk handle err or null\n", A15_CLUSTER);
+
+ pr_info("cluster_clk[%d] = %lu\n", A7_CLUSTER, clk_get_rate(cluster_clk[A7_CLUSTER]));
+ pr_info("cluster_clk[%d] = %lu\n", A15_CLUSTER, clk_get_rate(cluster_clk[A15_CLUSTER]));
+
+#ifdef CONFIG_ARCH_SUN8IW17P1
+ init_cpufreq_table_dt();
+#endif
+ /* initialize cpufreq table and merge ca7/ca15 table */
+ freq_table[A7_CLUSTER] = sunxi_freq_table_ca7;
+ freq_table[A15_CLUSTER] = sunxi_freq_table_ca15;
+ merge_cluster_tables();
+
+ /* initialize ca7 and ca15 cluster pll number */
+ cluster_pll[A7_CLUSTER] = ARISC_DVFS_PLL1;
+ cluster_pll[A15_CLUSTER] = ARISC_DVFS_PLL2;
+
+ cpu_vdd[A7_CLUSTER] = regulator_get(NULL, SUNXI_CPUFREQ_C0_CPUVDD);
+ if (IS_ERR(cpu_vdd[A7_CLUSTER])) {
+ CPUFREQ_ERR("%s: could not get Cluster0 cpu vdd\n", __func__);
+ return -ENOENT;
+ }
+
+ cpu_vdd[A15_CLUSTER] = regulator_get(NULL, SUNXI_CPUFREQ_C1_CPUVDD);
+ if (IS_ERR(cpu_vdd[A15_CLUSTER])) {
+ regulator_put(cpu_vdd[A7_CLUSTER]);
+ CPUFREQ_ERR("%s: could not get Cluster1 cpu vdd\n", __func__);
+ return -ENOENT;
+ }
+
+ np = of_find_node_by_path("/dvfs_table");
+ if (!np) {
+ CPUFREQ_ERR("No dvfs table node found\n");
+ return -ENODEV;
+ }
+
+ if (of_property_read_u32(np, "vf_table_count", &vf_table_count)) {
+ CPUFREQ_ERR("get vf_table_count failed\n");
+ return -EINVAL;
+ }
+
+ if (vf_table_count == 1) {
+ CPUFREQ_DBG("support only one vf_table\n");
+ vf_table_type = 0;
+ } else
+ vf_table_type = sunxi_get_soc_bin();
+
+ sprintf(vftbl_name, "%s%d", "allwinner,vf_table", vf_table_type);
+
+ /* init cpu extreme frequency from dts */
+ if (__init_freq_dt(vftbl_name)) {
+ CPUFREQ_ERR("%s, use default cpu max/min frequency, l_max: %uMHz, l_min: %uMHz, b_max: %uMHz, b_min: %uMHz\n",
+ __func__, l_freq_max/1000, l_freq_min/1000, b_freq_max/1000, b_freq_min/1000);
+ } else {
+ CPUFREQ_DBG("%s, get cpu frequency from sysconfig, l_max: %uMHz, l_min: %uMHz, b_max: %uMHz, b_min: %uMHz\n",
+ __func__, l_freq_max/1000, l_freq_min/1000, b_freq_max/1000, b_freq_min/1000);
+ }
+
+ for (i = 0; i < MAX_CLUSTERS; i++)
+ mutex_init(&cluster_lock[i]);
+
+ num = sunxi_get_vf_table();
+ sunxi_set_vf_table(num);
+
+ ret = cpufreq_register_driver(&sunxi_cpufreq_driver);
+ if (ret) {
+ CPUFREQ_ERR("sunxi register cpufreq driver fail, err: %d\n", ret);
+ } else {
+ CPUFREQ_DBG("sunxi register cpufreq driver succeed\n");
+ ret = bL_switcher_register_notifier(&sunxi_switcher_notifier);
+ if (ret) {
+ CPUFREQ_ERR("sunxi register bL notifier fail, err: %d\n", ret);
+ cpufreq_unregister_driver(&sunxi_cpufreq_driver);
+ } else {
+ CPUFREQ_DBG("sunxi register bL notifier succeed\n");
+ }
+ }
+
+ bL_switcher_put_enabled();
+
+ pr_debug("%s: done!\n", __func__);
+ return ret;
+}
+module_init(sunxi_cpufreq_init);
+
+static void __exit sunxi_cpufreq_exit(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_CLUSTERS; i++)
+ mutex_destroy(&cluster_lock[i]);
+
+ clk_put(clk_pll1);
+ clk_put(clk_pll2);
+ clk_put(cluster_clk[A7_CLUSTER]);
+ clk_put(cluster_clk[A15_CLUSTER]);
+ regulator_put(cpu_vdd[A7_CLUSTER]);
+ regulator_put(cpu_vdd[A15_CLUSTER]);
+ bL_switcher_get_enabled();
+ bL_switcher_unregister_notifier(&sunxi_switcher_notifier);
+ cpufreq_unregister_driver(&sunxi_cpufreq_driver);
+ bL_switcher_put_enabled();
+ CPUFREQ_DBG("%s: done!\n", __func__);
+}
+module_exit(sunxi_cpufreq_exit);
+
+#ifdef CONFIG_DEBUG_FS
+static struct dentry *debugfs_cpufreq_root;
+
+static int get_c0_time_show(struct seq_file *s, void *data)
+{
+ seq_printf(s, "%llu\n", c0_get_time_usecs);
+ return 0;
+}
+
+static int get_c0_time_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, get_c0_time_show, inode->i_private);
+}
+
+static const struct file_operations get_c0_time_fops = {
+ .open = get_c0_time_open,
+ .read = seq_read,
+};
+
+static int set_c0_time_show(struct seq_file *s, void *data)
+{
+ seq_printf(s, "%llu\n", c0_set_time_usecs);
+ return 0;
+}
+
+static int set_c0_time_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, set_c0_time_show, inode->i_private);
+}
+
+static const struct file_operations set_c0_time_fops = {
+ .open = set_c0_time_open,
+ .read = seq_read,
+};
+
+static int get_c1_time_show(struct seq_file *s, void *data)
+{
+ seq_printf(s, "%llu\n", c1_get_time_usecs);
+ return 0;
+}
+
+static int get_c1_time_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, get_c1_time_show, inode->i_private);
+}
+
+static const struct file_operations get_c1_time_fops = {
+ .open = get_c1_time_open,
+ .read = seq_read,
+};
+
+static int set_c1_time_show(struct seq_file *s, void *data)
+{
+ seq_printf(s, "%llu\n", c1_set_time_usecs);
+ return 0;
+}
+
+static int set_c1_time_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, set_c1_time_show, inode->i_private);
+}
+
+static const struct file_operations set_c1_time_fops = {
+ .open = set_c1_time_open,
+ .read = seq_read,
+};
+
+static int __init debug_init(void)
+{
+ int err = 0;
+
+ debugfs_cpufreq_root = debugfs_create_dir("cpufreq", 0);
+ if (!debugfs_cpufreq_root)
+ return -ENOMEM;
+
+ if (!debugfs_create_file("c0_get_time", 0444, debugfs_cpufreq_root, NULL, &get_c0_time_fops)) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (!debugfs_create_file("c0_set_time", 0444, debugfs_cpufreq_root, NULL, &set_c0_time_fops)) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (!debugfs_create_file("c1_get_time", 0444, debugfs_cpufreq_root, NULL, &get_c1_time_fops)) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ if (!debugfs_create_file("c1_set_time", 0444, debugfs_cpufreq_root, NULL, &set_c1_time_fops)) {
+ err = -ENOMEM;
+ goto out;
+ }
+
+ return 0;
+
+out:
+ debugfs_remove_recursive(debugfs_cpufreq_root);
+ return err;
+}
+
+static void __exit debug_exit(void)
+{
+ debugfs_remove_recursive(debugfs_cpufreq_root);
+}
+
+late_initcall(debug_init);
+module_exit(debug_exit);
+#endif /* CONFIG_DEBUG_FS */
+
+MODULE_AUTHOR("sunny <sunny@allwinnertech.com>");
+MODULE_DESCRIPTION("sunxi iks cpufreq driver");
+MODULE_LICENSE("GPL");