/*
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * http://www.samsung.com
 *
 * arch/arm/mach-sunxi/mcpm-sunxi.c
 *
 * Based on arch/arm/mach-vexpress/dcscb.c
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/arm-cci.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/smp.h>
#include <linux/syscore_ops.h>
#include <linux/arisc/arisc.h>
#include <linux/arisc/arisc-notifier.h>
#include <linux/arm-smccc.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/cp15.h>
#include <asm/mcpm.h>
#include <asm/smp_plat.h>
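
/*
 * Register blocks used by this MCPM backend (offsets below are relative to
 * the base mapped for each block from the device tree):
 *   - CPUXCFG (one instance per cluster): core/cluster soft resets, cluster
 *     control registers and the CPU/L2 WFI status register.
 *   - CPUSCFG: per-cluster power-on resets, per-core power clamps and the
 *     power-off gating register.
 *   - RTC: holds the secondary-CPU soft entry address register.
 */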
#define CLUSTER_CPU_STATUS		(0x80)
#define CPU_RST_CTRL			(0x00)
#define CLUSTER_CTRL0			(0x10)
#define CLUSTER_CTRL1			(0x14)
#define CLUSTER_PWRON_RST(cluster)	(0x40 + (cluster) * 0x40)
#define CPU_PWR_CLAMP(cluster, cpu)	(0x50 + ((cluster) * 16 + (cpu)) * 0x4)
#define CLUSTER_PWROFF_GATING(cluster)	(0x44 + (cluster) * 0x40)
#define CPU_SOFT_ENTRY_REG0		(0x1bc)

#define CLUSTER_0			(0)
#define CLUSTER_1			(1)
#define MAX_CLUSTERS			(2)
#define CORES_PER_CLUSTER		(3)

#define SUN8I_CPU_IS_WFI_MODE(cluster, cpu) \
	(readl(sunxi_cpuxcfg_base[cluster] + CLUSTER_CPU_STATUS) & \
	 (1 << (16 + (cpu))))
#define SUN8I_L2CACHE_IS_WFI_MODE(cluster) \
	(readl(sunxi_cpuxcfg_base[cluster] + CLUSTER_CPU_STATUS) & (1 << 0))

/* cluster power-up frequency, in kHz */
#define SUN8I_C0_CLUSTER_PWRUP_FREQ	(600000)
#define SUN8I_C1_CLUSTER_PWRUP_FREQ	(600000)
static unsigned int cluster_powerup_freq[MAX_CLUSTERS];

/* force-enable the arisc DVFS support path in this file */
#define CONFIG_MCPM_WITH_ARISC_DVFS_SUPPORT

#ifdef CONFIG_MCPM_WITH_ARISC_DVFS_SUPPORT
static bool arisc_ready;
#define is_arisc_ready()	(arisc_ready)
#define set_arisc_ready(x)	(arisc_ready = (x))
static unsigned int cluster_pll[MAX_CLUSTERS];
#endif

static void __iomem *sunxi_cpuxcfg_base[2];
static void __iomem *sunxi_cpuscfg_base;
static void __iomem *sunxi_rtc_base;

int sunxi_smc_set_cpu_off(void);
extern int __init sun8i_cci_init(void);
extern void sun8i_power_up_setup(unsigned int affinity_level);

/* write the secondary-CPU boot entry into the RTC soft entry register */
static inline void sunxi_set_secondary_entry(void *entry)
{
	writel((u32)entry, sunxi_rtc_base + CPU_SOFT_ENTRY_REG0);
}

cpumask_t cpu_power_up_state_mask;
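
/*
 * sun8i_cpu_power_switch_set() - open or close the power switch of one core.
 *
 * Powering a core up steps the CPU power clamp register through the
 * 0xFE -> 0xF8 -> 0xE0 -> 0xC0 -> 0x80 -> 0x00 sequence with short delays so
 * the power rail ramps up gradually; powering down writes 0xFF in one step.
 * Both directions poll the register until the written value is observed.
 */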
static int sun8i_cpu_power_switch_set(unsigned int cluster,
				      unsigned int cpu, bool enable)
{
	if (enable) {
		if (readl(sunxi_cpuscfg_base + CPU_PWR_CLAMP(cluster, cpu))
				== 0x00) {
			pr_debug("%s: power switch enable already\n", __func__);
			return 0;
		}
		/* de-activate the cpu power clamp step by step */
		writel(0xFE, sunxi_cpuscfg_base + CPU_PWR_CLAMP(cluster, cpu));
		udelay(20);
		writel(0xF8, sunxi_cpuscfg_base + CPU_PWR_CLAMP(cluster, cpu));
		udelay(10);
		writel(0xE0, sunxi_cpuscfg_base + CPU_PWR_CLAMP(cluster, cpu));
		udelay(10);
		writel(0xc0, sunxi_cpuscfg_base + CPU_PWR_CLAMP(cluster, cpu));
		udelay(10);
		writel(0x80, sunxi_cpuscfg_base + CPU_PWR_CLAMP(cluster, cpu));
		udelay(10);
		writel(0x00, sunxi_cpuscfg_base + CPU_PWR_CLAMP(cluster, cpu));
		udelay(20);
		while (readl(sunxi_cpuscfg_base
				+ CPU_PWR_CLAMP(cluster, cpu)) != 0x00)
			;
	} else {
		if (readl(sunxi_cpuscfg_base + CPU_PWR_CLAMP(cluster, cpu))
				== 0xFF) {
			pr_debug("%s: power switch disable already\n", __func__);
			return 0;
		}
		/* activate the cpu power clamp */
		writel(0xFF, sunxi_cpuscfg_base + CPU_PWR_CLAMP(cluster, cpu));
		udelay(30);
		while (readl(sunxi_cpuscfg_base + CPU_PWR_CLAMP(cluster, cpu))
				!= 0xFF)
			;
	}

	return 0;
}
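
/*
 * sun8i_cluster_power_set() - power a whole cluster up or down.
 *
 * Power-up: assert the core, power-on and cluster-level resets, drive
 * L2RSTDISABLE low, ask arisc to bring up the cluster clock/voltage, toggle
 * ACINACTM around clearing the cluster power-off gating, then release the
 * cluster-level resets (the per-core resets stay asserted until
 * sun8i_cpu_power_set() runs for each core).
 *
 * Power-down: assert ACINACTM, wait for the L2 to reach WFI, assert all
 * resets, enable power-off gating, disable every core power switch and
 * finally ask arisc to gate the cluster clock and power domain.
 */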
static int sun8i_cluster_power_set(unsigned int cluster, bool enable)
{
	unsigned int value;
	int i;

#ifdef CONFIG_MCPM_WITH_ARISC_DVFS_SUPPORT
	/* cluster operations must wait until arisc is ready */
	if (!is_arisc_ready()) {
		pr_err("%s: arisc not ready, can't power-up/down cluster\n",
			__func__);
		return -EINVAL;
	}
#endif

	if (enable) {
		pr_debug("sun8i power up cluster-%d\n", cluster);

		/* assert cluster cores resets */
		value = readl(sunxi_cpuxcfg_base[cluster] + CPU_RST_CTRL);
		value &= (~(0x7<<0));	/* Core Reset */
		writel(value, sunxi_cpuxcfg_base[cluster] + CPU_RST_CTRL);
		udelay(10);

		/* assert cluster cores power-on reset */
		value = readl(sunxi_cpuscfg_base + CLUSTER_PWRON_RST(cluster));
		value &= (~(0x7));
		writel(value, sunxi_cpuscfg_base + CLUSTER_PWRON_RST(cluster));
		udelay(10);

		/* assert cluster resets */
		value = readl(sunxi_cpuxcfg_base[cluster] + CPU_RST_CTRL);
		value &= (~(0x1<<24));	/* SOC DBG Reset */
		value &= (~(0x7<<20));	/* ETM Reset */
		value &= (~(0x7<<16));	/* Debug Reset */
		value &= (~(0x1<<8));	/* L2 Cache Reset */
		writel(value, sunxi_cpuxcfg_base[cluster] + CPU_RST_CTRL);
		udelay(10);

		/* set L2RSTDISABLE low */
		value = readl(sunxi_cpuxcfg_base[cluster] + CLUSTER_CTRL0);
		value &= (~(0x1<<4));
		writel(value, sunxi_cpuxcfg_base[cluster] + CLUSTER_CTRL0);

#ifdef CONFIG_MCPM_WITH_ARISC_DVFS_SUPPORT
		/* notify arisc to power-up the cluster */
		arisc_dvfs_set_cpufreq(cluster_powerup_freq[cluster],
			cluster_pll[cluster], ARISC_MESSAGE_ATTR_SOFTSYN,
			NULL, NULL);
		mdelay(1);
#endif

		/* assert ACINACTM (ACE master interface inactive) */
		value = readl(sunxi_cpuxcfg_base[cluster] + CLUSTER_CTRL1);
		value |= (1<<0);
		writel(value, sunxi_cpuxcfg_base[cluster] + CLUSTER_CTRL1);

		/* clear cluster power-off gating */
		value = readl(sunxi_cpuscfg_base
				+ CLUSTER_PWROFF_GATING(cluster));
		value &= (~(0x1<<4));
		writel(value, sunxi_cpuscfg_base
				+ CLUSTER_PWROFF_GATING(cluster));
		udelay(20);

		/* de-assert ACINACTM (ACE master interface active again) */
		value = readl(sunxi_cpuxcfg_base[cluster] + CLUSTER_CTRL1);
		value &= (~(1<<0));
		writel(value, sunxi_cpuxcfg_base[cluster] + CLUSTER_CTRL1);

		/* de-assert cluster resets */
		value = readl(sunxi_cpuxcfg_base[cluster] + CPU_RST_CTRL);
		value |= (0x1<<24);	/* SOC DBG Reset */
		value |= (0x7<<20);	/* ETM Reset */
		value |= (0x7<<16);	/* Debug Reset */
		value |= (0x1<<8);	/* L2 Cache Reset */
		writel(value, sunxi_cpuxcfg_base[cluster] + CPU_RST_CTRL);
		udelay(20);

		pr_debug("sun8i power up cluster-%d ok\n", cluster);
	} else {
		pr_debug("sun8i power down cluster-%d\n", cluster);

		/* assert ACINACTM (ACE master interface inactive) */
		value = readl(sunxi_cpuxcfg_base[cluster] + CLUSTER_CTRL1);
		value |= (1<<0);
		writel(value, sunxi_cpuxcfg_base[cluster] + CLUSTER_CTRL1);

		/* TODO: add a timeout here to avoid a potential dead loop */
		while (1) {
			if (SUN8I_L2CACHE_IS_WFI_MODE(cluster))
				break;
		}

		/* assert cluster cores resets */
		value = readl(sunxi_cpuxcfg_base[cluster] + CPU_RST_CTRL);
		value &= (~(0x7<<0));	/* Core Reset */
		writel(value, sunxi_cpuxcfg_base[cluster] + CPU_RST_CTRL);
		udelay(10);

		/* assert cluster cores power-on reset */
		value = readl(sunxi_cpuscfg_base + CLUSTER_PWRON_RST(cluster));
		value &= (~(0x7));
		writel(value, sunxi_cpuscfg_base + CLUSTER_PWRON_RST(cluster));
		udelay(10);

		/* assert cluster resets */
		value = readl(sunxi_cpuxcfg_base[cluster] + CPU_RST_CTRL);
		value &= (~(0x1<<24));	/* SOC DBG Reset */
		value &= (~(0x7<<20));	/* ETM Reset */
		value &= (~(0x7<<16));	/* Debug Reset */
		value &= (~(0x1<<8));	/* L2 Cache Reset */
		writel(value, sunxi_cpuxcfg_base[cluster] + CPU_RST_CTRL);
		udelay(10);

		/* enable cluster and cores power-off gating */
		value = readl(sunxi_cpuscfg_base
				+ CLUSTER_PWROFF_GATING(cluster));
		value |= (1<<4);
		value |= (0x7<<0);
		writel(value, sunxi_cpuscfg_base
				+ CLUSTER_PWROFF_GATING(cluster));
		udelay(20);

		/* disable cluster cores power switch */
		for (i = 0; i < CORES_PER_CLUSTER; i++)
			sun8i_cpu_power_switch_set(cluster, i, 0);

#ifdef CONFIG_MCPM_WITH_ARISC_DVFS_SUPPORT
		/*
		 * notify arisc to power-down the cluster; arisc will disable
		 * the cluster clock and power off the cpu power domain.
		 */
		arisc_dvfs_set_cpufreq(0, cluster_pll[cluster],
			ARISC_MESSAGE_ATTR_SOFTSYN, NULL, NULL);
#endif

		pr_debug("sun8i power down cluster-%d ok\n", cluster);
	}

	return 0;
}
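
/*
 * sun8i_cpu_power_set() - power one core of a cluster up or down.
 *
 * Power-up: assert the core and power-on resets, hold L1RSTDISABLE low,
 * enable the core power switch, clear the core power-off gating, then
 * release both resets so the core starts executing from the soft entry
 * address.  Power-down: enable power-off gating and disable the power
 * switch.  cpu_power_up_state_mask tracks which cores are powered up.
 */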
int sun8i_cpu_power_set(unsigned int cluster, unsigned int cpu, bool enable)
{
	unsigned int value;

	if (enable) {
		/* power-up cpu core process */
		pr_debug("sun8i power up cluster-%d cpu-%d\n", cluster, cpu);
		cpumask_set_cpu(cluster * CORES_PER_CLUSTER + cpu,
				&cpu_power_up_state_mask);

		/* assert cpu core reset */
		value = readl(sunxi_cpuxcfg_base[cluster] + CPU_RST_CTRL);
		value &= (~(1<<cpu));
		writel(value, sunxi_cpuxcfg_base[cluster] + CPU_RST_CTRL);
		udelay(10);

		/* assert cpu power-on reset */
		value = readl(sunxi_cpuscfg_base + CLUSTER_PWRON_RST(cluster));
		value &= (~(1<<cpu));
		writel(value, sunxi_cpuscfg_base + CLUSTER_PWRON_RST(cluster));
		udelay(10);

		/* hold L1RSTDISABLE low */
		value = readl(sunxi_cpuxcfg_base[cluster] + CLUSTER_CTRL0);
		value &= ~(0x1<<cpu);
		writel(value, sunxi_cpuxcfg_base[cluster] + CLUSTER_CTRL0);

		/* enable the cpu power switch */
		sun8i_cpu_power_switch_set(cluster, cpu, 1);

		/* clear power-off gating */
		value = readl(sunxi_cpuscfg_base
				+ CLUSTER_PWROFF_GATING(cluster));
		value &= (~(0x1<<cpu));
		writel(value, sunxi_cpuscfg_base
				+ CLUSTER_PWROFF_GATING(cluster));
		udelay(20);

		/* de-assert cpu power-on reset */
		value = readl(sunxi_cpuscfg_base + CLUSTER_PWRON_RST(cluster));
		value |= ((1<<cpu));
		writel(value, sunxi_cpuscfg_base + CLUSTER_PWRON_RST(cluster));
		udelay(10);

		/* de-assert core reset */
		value = readl(sunxi_cpuxcfg_base[cluster] + CPU_RST_CTRL);
		value |= (1<<cpu);
		writel(value, sunxi_cpuxcfg_base[cluster] + CPU_RST_CTRL);
		udelay(10);

		pr_debug("sun8i power up cluster-%d cpu-%d ok\n", cluster, cpu);
	} else {
		/* power-down cpu core process */
		pr_debug("sun8i power down cluster-%d cpu-%d\n", cluster, cpu);

		/* enable cpu power-off gating */
		value = readl(sunxi_cpuscfg_base
				+ CLUSTER_PWROFF_GATING(cluster));
		value |= (0x1 << cpu);
		writel(value, sunxi_cpuscfg_base
				+ CLUSTER_PWROFF_GATING(cluster));
		udelay(20);

		/* disable the cpu power switch */
		sun8i_cpu_power_switch_set(cluster, cpu, 0);
		cpumask_clear_cpu(cluster * CORES_PER_CLUSTER + cpu,
				&cpu_power_up_state_mask);

		pr_debug("sun8i power down cluster-%d cpu-%d ok\n",
			cluster, cpu);
	}

	return 0;
}
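
/*
 * MCPM backend callbacks, registered via mcpm_platform_register() below.
 * They validate the cpu/cluster indices and delegate the actual register
 * sequences to the sun8i_* helpers above.
 */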
static int sunxi_cpu_powerup(unsigned int cpu, unsigned int cluster)
{
	if (cpu >= CORES_PER_CLUSTER || cluster >= MAX_CLUSTERS)
		return -EINVAL;

	pr_debug("%s: cluster %u cpu %u\n", __func__, cluster, cpu);
	sun8i_cpu_power_set(cluster, cpu, 1);

	return 0;
}

static int sunxi_cluster_powerup(unsigned int cluster)
{
	if (cluster >= MAX_CLUSTERS)
		return -EINVAL;

	pr_debug("%s: cluster %u\n", __func__, cluster);

	return sun8i_cluster_power_set(cluster, 1);
}

static void sunxi_cpu_powerdown_prepare(unsigned int cpu, unsigned int cluster)
{
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
}

static void sunxi_cluster_powerdown_prepare(unsigned int cluster)
{
	pr_debug("%s: cluster %u\n", __func__, cluster);
	/* sun8i_cluster_power_set(cluster, 0); */
}
static void sunxi_cpu_cache_disable(void)
{
	pr_debug("%s: cpu cache disable.\n", __func__);
#ifdef CONFIG_TEE
	sunxi_smc_set_cpu_off();
#endif
	/*
	 * Flush the local CPU cache.
	 *
	 * Cluster1/Cluster0 can hit in the cache with SCTLR.C=0, so we
	 * don't need a preliminary flush here for those CPUs.  At least,
	 * that's the theory -- without the extra flush, Linux explodes on
	 * RTSM (maybe not needed anymore, to be investigated).
	 */
	flush_cache_louis();
	set_cr(get_cr() & ~CR_C);
	flush_cache_louis();

	/* Disable local coherency by clearing the ACTLR "SMP" bit: */
	set_auxcr(get_auxcr() & ~(1 << 6));
}

static void sunxi_cluster_cache_disable(void)
{
	pr_debug("%s: cluster cache disable.\n", __func__);
#ifdef CONFIG_TEE
	sunxi_smc_set_cpu_off();
#endif
	/*
	 * Flush all cache levels for this cluster.
	 *
	 * Cluster1/Cluster0 can hit in the cache with SCTLR.C=0, so we
	 * don't need a preliminary flush here for those CPUs.  At least,
	 * that's the theory -- without the extra flush, Linux explodes on
	 * RTSM (maybe not needed anymore, to be investigated).
	 */
	flush_cache_all();
	set_cr(get_cr() & ~CR_C);
	flush_cache_all();

	/*
	 * This is a harmless no-op.  On platforms with a real
	 * outer cache this might either be needed or not,
	 * depending on where the outer cache sits.
	 */
	outer_flush_all();

	/* Disable local coherency by clearing the ACTLR "SMP" bit: */
	set_auxcr(get_auxcr() & ~(1 << 6));
}
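
/*
 * A cluster is considered idle when all three cores report WFI in the
 * CLUSTER_CPU_STATUS register (bits 16-18) and the L2 complex reports
 * WFI as well (bit 0).
 */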
static int sun8i_cluster_power_status(unsigned int cluster)
{
	int status = 0;
	unsigned int value;

	/*
	 * cluster WFI status: all cpu cores in WFI mode and the L2 cache
	 * in WFI status as well.
	 */
	value = readl(sunxi_cpuxcfg_base[cluster] + CLUSTER_CPU_STATUS);
	if ((((value >> 16) & 0x7) == 0x7) && ((value & 0x1) == 0x1))
		status = 1;

	return status;
}

extern bool mcpm_cluster_unused(unsigned int cluster);
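
/*
 * Called by the MCPM core after a CPU has been asked to power down: spin
 * until the core reports WFI, cut its power, and if it was the last user
 * of an otherwise idle cluster, power the whole cluster down too.
 */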
static int sunxi_wait_for_powerdown(unsigned int cpu, unsigned int cluster)
{
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);

	while (1) {
		if (SUN8I_CPU_IS_WFI_MODE(cluster, cpu))
			break;
	}

	sun8i_cpu_power_set(cluster, cpu, 0);

	if ((mcpm_cluster_unused(cluster)) &&
	    (sun8i_cluster_power_status(cluster)))
		sun8i_cluster_power_set(cluster, 0);

	return 0;
}

static void sunxi_cpu_is_up(unsigned int cpu, unsigned int cluster)
{
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
}
static const struct mcpm_platform_ops sunxi_power_ops = {
	.cpu_powerup		= sunxi_cpu_powerup,
	.cluster_powerup	= sunxi_cluster_powerup,
	.cpu_powerdown_prepare	= sunxi_cpu_powerdown_prepare,
	.cluster_powerdown_prepare = sunxi_cluster_powerdown_prepare,
	.cpu_cache_disable	= sunxi_cpu_cache_disable,
	.cluster_cache_disable	= sunxi_cluster_cache_disable,
	.wait_for_powerdown	= sunxi_wait_for_powerdown,
	.cpu_is_up		= sunxi_cpu_is_up,
};
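
/*
 * The non-boot CPUs cannot be powered up until the arisc management core
 * has finished its own initialization, since cluster power sequencing is
 * delegated to it.  This notifier marks arisc as ready and then brings the
 * remaining present CPUs online, up to setup_max_cpus.
 */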
#ifdef CONFIG_MCPM_WITH_ARISC_DVFS_SUPPORT
static int sun8iw17_arisc_notify_call(struct notifier_block *nfb,
			unsigned long action, void *parg)
{
	unsigned int mpidr, cpu, cluster;

	mpidr = read_cpuid_mpidr();
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	switch (action) {
	case ARISC_INIT_READY:
		/* arisc ready now */
		set_arisc_ready(1);
#if 0
		/* power-off cluster-1 first */
		sun8i_cluster_power_set((cluster == CLUSTER_0)
				? CLUSTER_1 : CLUSTER_0, 0);
#endif
		/* power-up off-line cpus */
		for_each_present_cpu(cpu) {
			if (num_online_cpus() >= setup_max_cpus)
				break;
			if (!cpu_online(cpu))
				cpu_up(cpu);
		}
		break;
	}

	return NOTIFY_OK;
}

static struct notifier_block sun8iw17_arisc_notifier = {
	.notifier_call = sun8iw17_arisc_notify_call,
};
#endif
static const struct of_device_id sunxi_dt_mcpm_match[] = {
	{ .compatible = "allwinner,sun8iw17p1" },
	{},
};
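
/*
 * When OP-TEE controls the secure side (CONFIG_TEE), the non-secure kernel
 * cannot program the secondary boot entry or shut a core down directly;
 * instead it issues platform-specific fast SMC calls (TEE_SET_CPU_ENTRY /
 * TEE_SET_CPU_OFF) to the trusted OS.
 */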
#define TEE_SET_CPU_ENTRY		0xFFFF0001
#define TEE_SET_CPU_OFF			0xFFFF0002
#define OPTEE_SMC_FUNCID_PLATFORM	100

#ifndef OPTEE_SMC_STD_CALL_VAL
#define OPTEE_SMC_STD_CALL_VAL(func_num) \
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_STD_CALL, ARM_SMCCC_SMC_32, \
			   ARM_SMCCC_OWNER_TRUSTED_OS, (func_num))
#endif

#ifndef OPTEE_SMC_FAST_CALL_VAL
#define OPTEE_SMC_FAST_CALL_VAL(func_num) \
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, ARM_SMCCC_SMC_32, \
			   ARM_SMCCC_OWNER_TRUSTED_OS, (func_num))
#endif

#define OPTEE_SMC_PLATFORM_CMD \
	OPTEE_SMC_FAST_CALL_VAL(OPTEE_SMC_FUNCID_PLATFORM)
int sunxi_smc_set_cpu_entry(phys_addr_t entry)
{
	struct arm_smccc_res res;

	arm_smccc_smc(OPTEE_SMC_PLATFORM_CMD,
			TEE_SET_CPU_ENTRY, entry, 0, 0, 0, 0, 0, &res);

	return res.a0;
}

int sunxi_smc_set_cpu_off(void)
{
	struct arm_smccc_res res;

	arm_smccc_smc(OPTEE_SMC_PLATFORM_CMD,
			TEE_SET_CPU_OFF, 0, 0, 0, 0, 0, 0, &res);

	return res.a0;
}

void sun8i_set_secondary_entry(unsigned long boot_addr)
{
#if defined(CONFIG_TEE)
	sunxi_smc_set_cpu_entry(boot_addr);
#else
	sunxi_set_secondary_entry((void *)boot_addr);
#endif
}

static void sunxi_mcpm_setup_entry_point(void)
{
	/* set the sun8i platform non-boot cpu startup entry point */
	sun8i_set_secondary_entry(virt_to_phys(mcpm_entry_point));
}

static struct syscore_ops sunxi_mcpm_syscore_ops = {
	.resume = sunxi_mcpm_setup_entry_point,
};
static void __init sun8iw17_mcpm_boot_cpu_init(void)
{
	unsigned int mpidr, cpu, cluster;

	mpidr = read_cpuid_mpidr();
	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	pr_debug("%s: cpu %u cluster %u\n", __func__, cpu, cluster);
	BUG_ON(cpu >= CORES_PER_CLUSTER || cluster >= MAX_CLUSTERS);

	cpumask_clear(&cpu_power_up_state_mask);
	cpumask_set_cpu(cluster * CORES_PER_CLUSTER + cpu,
			&cpu_power_up_state_mask);
}
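
/*
 * Fix up the logical-to-physical CPU map: logical CPUs 0-2 are cluster 0
 * cores 0-2 (MPIDR 0x000-0x002) and logical CPUs 3-5 are cluster 1 cores
 * 0-2 (MPIDR 0x100-0x102).
 */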
void sun8iw17_mcpm_cpu_map_init(void)
{
	cpu_logical_map(0) = 0x000;
	cpu_logical_map(1) = 0x001;
	cpu_logical_map(2) = 0x002;
	cpu_logical_map(3) = 0x100;
	cpu_logical_map(4) = 0x101;
	cpu_logical_map(5) = 0x102;
}
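
/*
 * Map the register blocks needed by this backend from the device tree:
 * the two per-cluster "allwinner,sunxi-cpucfg" nodes, the shared
 * "allwinner,cpuscfg" node and the "allwinner,sunxi-rtc" node that holds
 * the secondary soft entry register.
 */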
static void sunxi_mcpm_mem_source_request(void)
{
	struct device_node *np;
	struct device_node *npp;

	np = of_find_compatible_node(NULL, NULL, "allwinner,sunxi-cpucfg");
	if (!np) {
		pr_err("Cannot find sunxi_cpucfg device tree node\n");
		return;
	}
	sunxi_cpuxcfg_base[0] = of_iomap(np, 0);
	if (!sunxi_cpuxcfg_base[0])
		pr_err("sunxi_cpuxcfg_base iomap failed (cluster 0)\n");

	/* the second node with the same compatible is cluster 1 */
	npp = of_find_compatible_node(np, NULL, "allwinner,sunxi-cpucfg");
	if (!npp) {
		pr_err("Cannot find sunxi_cpucfg device tree node\n");
		return;
	}
	sunxi_cpuxcfg_base[1] = of_iomap(npp, 0);
	if (!sunxi_cpuxcfg_base[1])
		pr_err("sunxi_cpuxcfg_base iomap failed (cluster 1)\n");
	of_node_put(np);
	of_node_put(npp);

	np = of_find_compatible_node(NULL, NULL, "allwinner,cpuscfg");
	if (!np) {
		pr_warn("Cannot find sunxi_cpuscfg device tree node\n");
		return;
	}
	sunxi_cpuscfg_base = of_iomap(np, 0);
	if (!sunxi_cpuscfg_base)
		pr_err("sunxi_cpuscfg iomap failed\n");
	of_node_put(np);

	np = of_find_compatible_node(NULL, NULL, "allwinner,sunxi-rtc");
	if (!np) {
		pr_err("Cannot find sunxi_rtc device tree node\n");
		return;
	}
	sunxi_rtc_base = of_iomap(np, 0);
	if (!sunxi_rtc_base)
		pr_err("sunxi_rtc iomap failed\n");
	of_node_put(np);
}
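
/*
 * Early init: map the needed register blocks, register the MCPM platform
 * ops and sync infrastructure, run the MCPM loopback to switch the boot
 * CPU onto the CCI, install the MCPM SMP ops, program the secondary entry
 * point and hook up the arisc-ready notifier and resume syscore ops.
 */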
static int __init sunxi_mcpm_init(void)
{
	struct device_node *node;
	int ret;

	node = of_find_matching_node(NULL, sunxi_dt_mcpm_match);
	if (!node)
		return -ENODEV;
	of_node_put(node);

#ifdef CONFIG_SUN8I_CCI
	sun8i_cci_init();
#endif

	sunxi_mcpm_mem_source_request();
	sun8iw17_mcpm_boot_cpu_init();

	ret = mcpm_platform_register(&sunxi_power_ops);
	if (!ret)
		ret = mcpm_sync_init(sun8i_power_up_setup);
#ifdef CONFIG_ARM_CPU_SUSPEND
	if (!ret)
		ret = mcpm_loopback(sunxi_cluster_cache_disable); /* turn on the CCI */
#endif
	if (ret)
		return ret;

	mcpm_smp_set_ops();
	pr_info("SUNXI MCPM support installed\n");

	sunxi_mcpm_setup_entry_point();

	/* initialize cluster0 and cluster1 power-up frequency to the default */
	cluster_powerup_freq[CLUSTER_0] = SUN8I_C0_CLUSTER_PWRUP_FREQ;
	cluster_powerup_freq[CLUSTER_1] = SUN8I_C1_CLUSTER_PWRUP_FREQ;

#ifdef CONFIG_MCPM_WITH_ARISC_DVFS_SUPPORT
	/* initialize cluster0 and cluster1 pll numbers */
	cluster_pll[CLUSTER_0] = ARISC_DVFS_PLL1;
	cluster_pll[CLUSTER_1] = ARISC_DVFS_PLL2;
	/* register arisc ready notifier */
	arisc_register_notifier(&sun8iw17_arisc_notifier);
#endif

	register_syscore_ops(&sunxi_mcpm_syscore_ops);
	sun8iw17_mcpm_cpu_map_init();

	return ret;
}
early_initcall(sunxi_mcpm_init);