Diffstat (limited to 'arch/arm/cpu/armv7')
-rw-r--r--   arch/arm/cpu/armv7/Makefile                      |   5
-rw-r--r--   arch/arm/cpu/armv7/am33xx/board.c                |  11
-rw-r--r--   arch/arm/cpu/armv7/am33xx/clock_am33xx.c         |   8
-rw-r--r--   arch/arm/cpu/armv7/am33xx/sys_info.c             |  57
-rw-r--r--   arch/arm/cpu/armv7/mx6/clock.c                   |  40
-rw-r--r--   arch/arm/cpu/armv7/nonsec_virt.S                 | 208
-rw-r--r--   arch/arm/cpu/armv7/omap-common/boot-common.c     |   3
-rw-r--r--   arch/arm/cpu/armv7/omap-common/clocks-common.c   |   7
-rw-r--r--   arch/arm/cpu/armv7/omap5/prcm-regs.c             |  12
-rw-r--r--   arch/arm/cpu/armv7/socfpga/Makefile              |   2
-rw-r--r--   arch/arm/cpu/armv7/socfpga/spl.c                 |   6
-rw-r--r--   arch/arm/cpu/armv7/socfpga/system_manager.c      |  27
-rw-r--r--   arch/arm/cpu/armv7/virt-v7.c                     | 173
13 files changed, 532 insertions, 27 deletions
diff --git a/arch/arm/cpu/armv7/Makefile b/arch/arm/cpu/armv7/Makefile
index b723e22a5..ee4b02183 100644
--- a/arch/arm/cpu/armv7/Makefile
+++ b/arch/arm/cpu/armv7/Makefile
@@ -20,6 +20,11 @@ ifneq ($(CONFIG_AM43XX)$(CONFIG_AM33XX)$(CONFIG_OMAP44XX)$(CONFIG_OMAP54XX)$(CON
 SOBJS	+= lowlevel_init.o
 endif
 
+ifneq ($(CONFIG_ARMV7_NONSEC)$(CONFIG_ARMV7_VIRT),)
+SOBJS	+= nonsec_virt.o
+COBJS	+= virt-v7.o
+endif
+
 SRCS	:= $(START:.o=.S) $(COBJS:.o=.c)
 OBJS	:= $(addprefix $(obj),$(COBJS) $(SOBJS))
 START	:= $(addprefix $(obj),$(START))
diff --git a/arch/arm/cpu/armv7/am33xx/board.c b/arch/arm/cpu/armv7/am33xx/board.c
index 2ea3d698f..a31bf40e5 100644
--- a/arch/arm/cpu/armv7/am33xx/board.c
+++ b/arch/arm/cpu/armv7/am33xx/board.c
@@ -27,6 +27,7 @@
 #include <miiphy.h>
 #include <cpsw.h>
 #include <asm/errno.h>
+#include <linux/compiler.h>
 #include <linux/usb/ch9.h>
 #include <linux/usb/gadget.h>
 #include <linux/usb/musb.h>
@@ -137,6 +138,16 @@ int arch_misc_init(void)
 }
 
 #if defined(CONFIG_SPL_BUILD) || defined(CONFIG_NOR_BOOT)
+/*
+ * This function is the place to do per-board things such as ramp up the
+ * MPU clock frequency.
+ */
+__weak void am33xx_spl_board_init(void)
+{
+	do_setup_dpll(&dpll_core_regs, &dpll_core_opp100);
+	do_setup_dpll(&dpll_mpu_regs, &dpll_mpu_opp100);
+}
+
 static void rtc32k_enable(void)
 {
 	struct rtc_regs *rtc = (struct rtc_regs *)RTC_BASE;
diff --git a/arch/arm/cpu/armv7/am33xx/clock_am33xx.c b/arch/arm/cpu/armv7/am33xx/clock_am33xx.c
index e5f287b33..fabe2595a 100644
--- a/arch/arm/cpu/armv7/am33xx/clock_am33xx.c
+++ b/arch/arm/cpu/armv7/am33xx/clock_am33xx.c
@@ -51,10 +51,14 @@ const struct dpll_regs dpll_ddr_regs = {
 	.cm_div_m2_dpll = CM_WKUP + 0xA0,
 };
 
-const struct dpll_params dpll_mpu = {
+struct dpll_params dpll_mpu_opp100 = {
 		CONFIG_SYS_MPUCLK, OSC-1, 1, -1, -1, -1, -1};
-const struct dpll_params dpll_core = {
+const struct dpll_params dpll_core_opp100 = {
 		1000, OSC-1, -1, -1, 10, 8, 4};
+const struct dpll_params dpll_mpu = {
+		MPUPLL_M_300, OSC-1, 1, -1, -1, -1, -1};
+const struct dpll_params dpll_core = {
+		50, OSC-1, -1, -1, 1, 1, 1};
 const struct dpll_params dpll_per = {
 		960, OSC-1, 5, -1, -1, -1, -1};
 
diff --git a/arch/arm/cpu/armv7/am33xx/sys_info.c b/arch/arm/cpu/armv7/am33xx/sys_info.c
index 63afaaa32..50eb598ff 100644
--- a/arch/arm/cpu/armv7/am33xx/sys_info.c
+++ b/arch/arm/cpu/armv7/am33xx/sys_info.c
@@ -17,6 +17,7 @@
 #include <asm/arch/sys_proto.h>
 #include <asm/arch/cpu.h>
 #include <asm/arch/clock.h>
+#include <power/tps65910.h>
 
 struct ctrl_stat *cstat = (struct ctrl_stat *)CTRL_BASE;
 
@@ -119,3 +120,59 @@ int print_cpuinfo(void)
 	return 0;
 }
 #endif	/* CONFIG_DISPLAY_CPUINFO */
+
+#ifdef CONFIG_AM33XX
+int am335x_get_efuse_mpu_max_freq(struct ctrl_dev *cdev)
+{
+	int sil_rev;
+
+	sil_rev = readl(&cdev->deviceid) >> 28;
+
+	if (sil_rev == 1)
+		/* PG 2.0, efuse may not be set. */
+		return MPUPLL_M_800;
+	else if (sil_rev >= 2) {
+		/* Check what the efuse says our max speed is. */
+		int efuse_arm_mpu_max_freq;
+		efuse_arm_mpu_max_freq = readl(&cdev->efuse_sma);
+		switch ((efuse_arm_mpu_max_freq & DEVICE_ID_MASK)) {
+		case AM335X_ZCZ_1000:
+			return MPUPLL_M_1000;
+		case AM335X_ZCZ_800:
+			return MPUPLL_M_800;
+		case AM335X_ZCZ_720:
+			return MPUPLL_M_720;
+		case AM335X_ZCZ_600:
+		case AM335X_ZCE_600:
+			return MPUPLL_M_600;
+		case AM335X_ZCZ_300:
+		case AM335X_ZCE_300:
+			return MPUPLL_M_300;
+		}
+	}
+
+	/* PG 1.0 or otherwise unknown, use the PG1.0 max */
+	return MPUPLL_M_720;
+}
+
+int am335x_get_tps65910_mpu_vdd(int sil_rev, int frequency)
+{
+	/* For PG2.1 and later, we have one set of values. */
+	if (sil_rev >= 2) {
+		switch (frequency) {
+		case MPUPLL_M_1000:
+			return TPS65910_OP_REG_SEL_1_3_2_5;
+		case MPUPLL_M_800:
+			return TPS65910_OP_REG_SEL_1_2_6;
+		case MPUPLL_M_720:
+			return TPS65910_OP_REG_SEL_1_2_0;
+		case MPUPLL_M_600:
+		case MPUPLL_M_300:
+			return TPS65910_OP_REG_SEL_1_1_3;
+		}
+	}
+
+	/* Default to PG1.0/PG2.0 values. */
+	return TPS65910_OP_REG_SEL_1_1_3;
+}
+#endif
diff --git a/arch/arm/cpu/armv7/mx6/clock.c b/arch/arm/cpu/armv7/mx6/clock.c
index 7efb0d209..df1167860 100644
--- a/arch/arm/cpu/armv7/mx6/clock.c
+++ b/arch/arm/cpu/armv7/mx6/clock.c
@@ -228,13 +228,13 @@ static u32 get_axi_clk(void)
 
 static u32 get_emi_slow_clk(void)
 {
-	u32 emi_clk_sel, emi_slow_pof, cscmr1, root_freq = 0;
+	u32 emi_clk_sel, emi_slow_podf, cscmr1, root_freq = 0;
 
 	cscmr1 = __raw_readl(&imx_ccm->cscmr1);
 	emi_clk_sel = cscmr1 & MXC_CCM_CSCMR1_ACLK_EMI_SLOW_MASK;
 	emi_clk_sel >>= MXC_CCM_CSCMR1_ACLK_EMI_SLOW_OFFSET;
-	emi_slow_pof = cscmr1 & MXC_CCM_CSCMR1_ACLK_EMI_SLOW_PODF_MASK;
-	emi_slow_pof >>= MXC_CCM_CSCMR1_ACLK_EMI_PODF_OFFSET;
+	emi_slow_podf = cscmr1 & MXC_CCM_CSCMR1_ACLK_EMI_SLOW_PODF_MASK;
+	emi_slow_podf >>= MXC_CCM_CSCMR1_ACLK_EMI_SLOW_PODF_OFFSET;
 
 	switch (emi_clk_sel) {
 	case 0:
@@ -251,7 +251,7 @@ static u32 get_emi_slow_clk(void)
 		break;
 	}
 
-	return root_freq / (emi_slow_pof + 1);
+	return root_freq / (emi_slow_podf + 1);
 }
 
 #ifdef CONFIG_MX6SL
@@ -282,6 +282,36 @@ static u32 get_mmdc_ch0_clk(void)
 
 	return freq / (podf + 1);
 }
+
+int enable_fec_anatop_clock(void)
+{
+	u32 reg = 0;
+	s32 timeout = 100000;
+
+	struct anatop_regs __iomem *anatop =
+		(struct anatop_regs __iomem *)ANATOP_BASE_ADDR;
+
+	reg = readl(&anatop->pll_enet);
+	if ((reg & BM_ANADIG_PLL_ENET_POWERDOWN) ||
+	    (!(reg & BM_ANADIG_PLL_ENET_LOCK))) {
+		reg &= ~BM_ANADIG_PLL_ENET_POWERDOWN;
+		writel(reg, &anatop->pll_enet);
+		while (timeout--) {
+			if (readl(&anatop->pll_enet) & BM_ANADIG_PLL_ENET_LOCK)
+				break;
+		}
+		if (timeout < 0)
+			return -ETIMEDOUT;
+	}
+
+	/* Enable FEC clock */
+	reg |= BM_ANADIG_PLL_ENET_ENABLE;
+	reg &= ~BM_ANADIG_PLL_ENET_BYPASS;
+	writel(reg, &anatop->pll_enet);
+
+	return 0;
+}
+
 #else
 static u32 get_mmdc_ch0_clk(void)
 {
@@ -457,7 +487,7 @@ void enable_ipu_clock(void)
 	struct mxc_ccm_reg *mxc_ccm = (struct mxc_ccm_reg *)CCM_BASE_ADDR;
 	int reg;
 	reg = readl(&mxc_ccm->CCGR3);
-	reg |= MXC_CCM_CCGR3_IPU1_IPU_DI0_OFFSET;
+	reg |= MXC_CCM_CCGR3_IPU1_IPU_MASK;
 	writel(reg, &mxc_ccm->CCGR3);
 }
 /***************************************************/
diff --git a/arch/arm/cpu/armv7/nonsec_virt.S b/arch/arm/cpu/armv7/nonsec_virt.S
new file mode 100644
index 000000000..358348ffa
--- /dev/null
+++ b/arch/arm/cpu/armv7/nonsec_virt.S
@@ -0,0 +1,208 @@
+/*
+ * code for switching cores into non-secure state and into HYP mode
+ *
+ * Copyright (c) 2013	Andre Przywara <andre.przywara@linaro.org>
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <config.h>
+#include <linux/linkage.h>
+#include <asm/gic.h>
+#include <asm/armv7.h>
+
+.arch_extension sec
+.arch_extension virt
+
+/* the vector table for secure state and HYP mode */
+_monitor_vectors:
+	.word 0	/* reset */
+	.word 0	/* undef */
+	adr pc, _secure_monitor
+	.word 0
+	.word 0
+	adr pc, _hyp_trap
+	.word 0
+	.word 0
+
+/*
+ * secure monitor handler
+ * U-boot calls this "software interrupt" in start.S
+ * This is executed on a "smc" instruction, we use a "smc #0" to switch
+ * to non-secure state.
+ * We use only r0 and r1 here, due to constraints in the caller.
+ */
+	.align	5
+_secure_monitor:
+	mrc	p15, 0, r1, c1, c1, 0		@ read SCR
+	bic	r1, r1, #0x4e			@ clear IRQ, FIQ, EA, nET bits
+	orr	r1, r1, #0x31			@ enable NS, AW, FW bits
+
+#ifdef CONFIG_ARMV7_VIRT
+	mrc	p15, 0, r0, c0, c1, 1		@ read ID_PFR1
+	and	r0, r0, #CPUID_ARM_VIRT_MASK	@ mask virtualization bits
+	cmp	r0, #(1 << CPUID_ARM_VIRT_SHIFT)
+	orreq	r1, r1, #0x100			@ allow HVC instruction
+#endif
+
+	mcr	p15, 0, r1, c1, c1, 0		@ write SCR (with NS bit set)
+
+#ifdef CONFIG_ARMV7_VIRT
+	mrceq	p15, 0, r0, c12, c0, 1		@ get MVBAR value
+	mcreq	p15, 4, r0, c12, c0, 0		@ write HVBAR
+#endif
+
+	movs	pc, lr				@ return to non-secure SVC
+
+_hyp_trap:
+	mrs	lr, elr_hyp	@ for older asm: .byte 0x00, 0xe3, 0x0e, 0xe1
+	mov	pc, lr				@ do no switch modes, but
+						@ return to caller
+
+/*
+ * Secondary CPUs start here and call the code for the core specific parts
+ * of the non-secure and HYP mode transition. The GIC distributor specific
+ * code has already been executed by a C function before.
+ * Then they go back to wfi and wait to be woken up by the kernel again.
+ */
+ENTRY(_smp_pen)
+	mrs	r0, cpsr
+	orr	r0, r0, #0xc0
+	msr	cpsr, r0			@ disable interrupts
+	ldr	r1, =_start
+	mcr	p15, 0, r1, c12, c0, 0		@ set VBAR
+
+	bl	_nonsec_init
+	mov	r12, r0				@ save GICC address
+#ifdef CONFIG_ARMV7_VIRT
+	bl	_switch_to_hyp
+#endif
+
+	ldr	r1, [r12, #GICC_IAR]		@ acknowledge IPI
+	str	r1, [r12, #GICC_EOIR]		@ signal end of interrupt
+
+	adr	r0, _smp_pen			@ do not use this address again
+	b	smp_waitloop			@ wait for IPIs, board specific
+ENDPROC(_smp_pen)

+/*
+ * Switch a core to non-secure state.
+ *
+ *  1. initialize the GIC per-core interface
+ *  2. allow coprocessor access in non-secure modes
+ *  3. switch the cpu mode (by calling "smc #0")
+ *
+ * Called from smp_pen by secondary cores and directly by the BSP.
+ * Do not assume that the stack is available and only use registers
+ * r0-r3 and r12.
+ *
+ * PERIPHBASE is used to get the GIC address. This could be 40 bits long,
+ * though, but we check this in C before calling this function.
+ */
+ENTRY(_nonsec_init)
+#ifdef CONFIG_ARM_GIC_BASE_ADDRESS
+	ldr	r2, =CONFIG_ARM_GIC_BASE_ADDRESS
+#else
+	mrc	p15, 4, r2, c15, c0, 0		@ read CBAR
+	bfc	r2, #0, #15			@ clear reserved bits
+#endif
+	add	r3, r2, #GIC_DIST_OFFSET	@ GIC dist i/f offset
+	mvn	r1, #0				@ all bits to 1
+	str	r1, [r3, #GICD_IGROUPRn]	@ allow private interrupts
+
+	mrc	p15, 0, r0, c0, c0, 0		@ read MIDR
+	ldr	r1, =MIDR_PRIMARY_PART_MASK
+	and	r0, r0, r1			@ mask out variant and revision
+
+	ldr	r1, =MIDR_CORTEX_A7_R0P0 & MIDR_PRIMARY_PART_MASK
+	cmp	r0, r1				@ check for Cortex-A7
+
+	ldr	r1, =MIDR_CORTEX_A15_R0P0 & MIDR_PRIMARY_PART_MASK
+	cmpne	r0, r1				@ check for Cortex-A15
+
+	movne	r1, #GIC_CPU_OFFSET_A9		@ GIC CPU offset for A9
+	moveq	r1, #GIC_CPU_OFFSET_A15		@ GIC CPU offset for A15/A7
+	add	r3, r2, r1			@ r3 = GIC CPU i/f addr
+
+	mov	r1, #1				@ set GICC_CTLR[enable]
+	str	r1, [r3, #GICC_CTLR]		@ and clear all other bits
+	mov	r1, #0xff
+	str	r1, [r3, #GICC_PMR]		@ set priority mask register
+
+	movw	r1, #0x3fff
+	movt	r1, #0x0006
+	mcr	p15, 0, r1, c1, c1, 2		@ NSACR = all copros to non-sec
+
+/* The CNTFRQ register of the generic timer needs to be
+ * programmed in secure state. Some primary bootloaders / firmware
+ * omit this, so if the frequency is provided in the configuration,
+ * we do this here instead.
+ * But first check if we have the generic timer.
+ */
+#ifdef CONFIG_SYS_CLK_FREQ
+	mrc	p15, 0, r0, c0, c1, 1		@ read ID_PFR1
+	and	r0, r0, #CPUID_ARM_GENTIMER_MASK	@ mask arch timer bits
+	cmp	r0, #(1 << CPUID_ARM_GENTIMER_SHIFT)
+	ldreq	r1, =CONFIG_SYS_CLK_FREQ
+	mcreq	p15, 0, r1, c14, c0, 0		@ write CNTFRQ
+#endif
+
+	adr	r1, _monitor_vectors
+	mcr	p15, 0, r1, c12, c0, 1		@ set MVBAR to secure vectors
+
+	mrc	p15, 0, ip, c12, c0, 0		@ save secure copy of VBAR
+
+	isb
+	smc	#0				@ call into MONITOR mode
+
+	mcr	p15, 0, ip, c12, c0, 0		@ write non-secure copy of VBAR
+
+	mov	r1, #1
+	str	r1, [r3, #GICC_CTLR]		@ enable non-secure CPU i/f
+	add	r2, r2, #GIC_DIST_OFFSET
+	str	r1, [r2, #GICD_CTLR]		@ allow private interrupts
+
+	mov	r0, r3				@ return GICC address
+
+	bx	lr
+ENDPROC(_nonsec_init)
+
+#ifdef CONFIG_SMP_PEN_ADDR
+/* void __weak smp_waitloop(unsigned previous_address); */
+ENTRY(smp_waitloop)
+	wfi
+	ldr	r1, =CONFIG_SMP_PEN_ADDR	@ load start address
+	ldr	r1, [r1]
+	cmp	r0, r1			@ make sure we dont execute this code
+	beq	smp_waitloop		@ again (due to a spurious wakeup)
+	mov	pc, r1
ENDPROC(smp_waitloop)
+.weak smp_waitloop
+#endif
+
+ENTRY(_switch_to_hyp)
+	mov	r0, lr
+	mov	r1, sp				@ save SVC copy of LR and SP
+	isb
+	hvc	#0			@ for older asm: .byte 0x70, 0x00, 0x40, 0xe1
+	mov	sp, r1
+	mov	lr, r0				@ restore SVC copy of LR and SP
+
+	bx	lr
+ENDPROC(_switch_to_hyp)
diff --git a/arch/arm/cpu/armv7/omap-common/boot-common.c b/arch/arm/cpu/armv7/omap-common/boot-common.c
index 6b4772b68..0ffa03ac0 100644
--- a/arch/arm/cpu/armv7/omap-common/boot-common.c
+++ b/arch/arm/cpu/armv7/omap-common/boot-common.c
@@ -76,6 +76,9 @@ void spl_board_init(void)
 #if defined(CONFIG_AM33XX) && defined(CONFIG_SPL_MUSB_NEW_SUPPORT)
 	arch_misc_init();
 #endif
+#ifdef CONFIG_AM33XX
+	am33xx_spl_board_init();
+#endif
 }
 
 int board_mmc_init(bd_t *bis)
diff --git a/arch/arm/cpu/armv7/omap-common/clocks-common.c b/arch/arm/cpu/armv7/omap-common/clocks-common.c
index 758059407..ab0c5680f 100644
--- a/arch/arm/cpu/armv7/omap-common/clocks-common.c
+++ b/arch/arm/cpu/armv7/omap-common/clocks-common.c
@@ -589,13 +589,6 @@ void scale_vcores(struct vcores_data const *vcores)
 	val = optimize_vcore_voltage(&vcores->iva);
 	do_scale_vcore(vcores->iva.addr, val,
 		       vcores->iva.pmic);
-
-	if (emif_sdram_type() == EMIF_SDRAM_TYPE_DDR3) {
-		/* Configure LDO SRAM "magic" bits */
-		writel(2, (*prcm)->prm_sldo_core_setup);
-		writel(2, (*prcm)->prm_sldo_mpu_setup);
-		writel(2, (*prcm)->prm_sldo_mm_setup);
-	}
 }
 
 static inline void enable_clock_domain(u32 const clkctrl_reg, u32 enable_mode)
diff --git a/arch/arm/cpu/armv7/omap5/prcm-regs.c b/arch/arm/cpu/armv7/omap5/prcm-regs.c
index 579818d55..5a3d52c11 100644
--- a/arch/arm/cpu/armv7/omap5/prcm-regs.c
+++ b/arch/arm/cpu/armv7/omap5/prcm-regs.c
@@ -286,12 +286,6 @@ struct prcm_regs const omap5_es1_prcm = {
 	.prm_vc_val_bypass = 0x4ae07ba0,
 	.prm_vc_cfg_i2c_mode = 0x4ae07bb4,
 	.prm_vc_cfg_i2c_clk = 0x4ae07bb8,
-	.prm_sldo_core_setup = 0x4ae07bc4,
-	.prm_sldo_core_ctrl = 0x4ae07bc8,
-	.prm_sldo_mpu_setup = 0x4ae07bcc,
-	.prm_sldo_mpu_ctrl = 0x4ae07bd0,
-	.prm_sldo_mm_setup = 0x4ae07bd4,
-	.prm_sldo_mm_ctrl = 0x4ae07bd8,
 
 	/* SCRM stuff, used by some boards */
 	.scrm_auxclk0 = 0x4ae0a310,
@@ -735,12 +729,6 @@ struct prcm_regs const omap5_es2_prcm = {
 
 	.prm_vc_cfg_i2c_mode = 0x4ae07cb4,
 	.prm_vc_cfg_i2c_clk = 0x4ae07cb8,
-	.prm_sldo_core_setup = 0x4ae07cc4,
-	.prm_sldo_core_ctrl = 0x4ae07cc8,
-	.prm_sldo_mpu_setup = 0x4ae07ccc,
-	.prm_sldo_mpu_ctrl = 0x4ae07cd0,
-	.prm_sldo_mm_setup = 0x4ae07cd4,
-	.prm_sldo_mm_ctrl = 0x4ae07cd8,
 	.prm_abbldo_mpu_setup = 0x4ae07cdc,
 	.prm_abbldo_mpu_ctrl = 0x4ae07ce0,
 
diff --git a/arch/arm/cpu/armv7/socfpga/Makefile b/arch/arm/cpu/armv7/socfpga/Makefile
index 5024fc55e..0859e443d 100644
--- a/arch/arm/cpu/armv7/socfpga/Makefile
+++ b/arch/arm/cpu/armv7/socfpga/Makefile
@@ -13,7 +13,7 @@ include $(TOPDIR)/config.mk
 LIB	= $(obj)lib$(SOC).o
 
 SOBJS	:= lowlevel_init.o
-COBJS-y	:= misc.o timer.o reset_manager.o
+COBJS-y	:= misc.o timer.o reset_manager.o system_manager.o
 COBJS-$(CONFIG_SPL_BUILD) += spl.o
 
 COBJS	:= $(COBJS-y)
diff --git a/arch/arm/cpu/armv7/socfpga/spl.c b/arch/arm/cpu/armv7/socfpga/spl.c
index 2b9be28c2..74bceab18 100644
--- a/arch/arm/cpu/armv7/socfpga/spl.c
+++ b/arch/arm/cpu/armv7/socfpga/spl.c
@@ -12,6 +12,7 @@
 #include <image.h>
 #include <asm/arch/reset_manager.h>
 #include <spl.h>
+#include <asm/arch/system_manager.h>
 
 DECLARE_GLOBAL_DATA_PTR;
 
@@ -25,6 +26,11 @@ u32 spl_boot_device(void)
  */
 void spl_board_init(void)
 {
+#ifndef CONFIG_SOCFPGA_VIRTUAL_TARGET
+	/* configure the pin muxing through system manager */
+	sysmgr_pinmux_init();
+#endif /* CONFIG_SOCFPGA_VIRTUAL_TARGET */
+
 	/* de-assert reset for peripherals and bridges based on handoff */
 	reset_deassert_peripherals_handoff();
 
diff --git a/arch/arm/cpu/armv7/socfpga/system_manager.c b/arch/arm/cpu/armv7/socfpga/system_manager.c
new file mode 100644
index 000000000..d96521ba0
--- /dev/null
+++ b/arch/arm/cpu/armv7/socfpga/system_manager.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2013 Altera Corporation <www.altera.com>
+ *
+ * SPDX-License-Identifier:	GPL-2.0+
+ */
+
+#include <common.h>
+#include <asm/io.h>
+#include <asm/arch/system_manager.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+/*
+ * Configure all the pin muxes
+ */
+void sysmgr_pinmux_init(void)
+{
+	unsigned long offset = CONFIG_SYSMGR_PINMUXGRP_OFFSET;
+
+	const unsigned long *pval = sys_mgr_init_table;
+	unsigned long i;
+
+	for (i = 0; i < ARRAY_SIZE(sys_mgr_init_table);
+	     i++, offset += sizeof(unsigned long)) {
+		writel(*pval++, (SOCFPGA_SYSMGR_ADDRESS + offset));
+	}
+}
diff --git a/arch/arm/cpu/armv7/virt-v7.c b/arch/arm/cpu/armv7/virt-v7.c
new file mode 100644
index 000000000..6de7fe781
--- /dev/null
+++ b/arch/arm/cpu/armv7/virt-v7.c
@@ -0,0 +1,173 @@
+/*
+ * (C) Copyright 2013
+ * Andre Przywara, Linaro
+ *
+ * Routines to transition ARMv7 processors from secure into non-secure state
+ * and from non-secure SVC into HYP mode
+ * needed to enable ARMv7 virtualization for current hypervisors
+ *
+ * See file CREDITS for list of people who contributed to this
+ * project.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+
+#include <common.h>
+#include <asm/armv7.h>
+#include <asm/gic.h>
+#include <asm/io.h>
+
+unsigned long gic_dist_addr;
+
+static unsigned int read_cpsr(void)
+{
+	unsigned int reg;
+
+	asm volatile ("mrs %0, cpsr\n" : "=r" (reg));
+	return reg;
+}
+
+static unsigned int read_id_pfr1(void)
+{
+	unsigned int reg;
+
+	asm("mrc p15, 0, %0, c0, c1, 1\n" : "=r"(reg));
+	return reg;
+}
+
+static unsigned long get_gicd_base_address(void)
+{
+#ifdef CONFIG_ARM_GIC_BASE_ADDRESS
+	return CONFIG_ARM_GIC_BASE_ADDRESS + GIC_DIST_OFFSET;
+#else
+	unsigned midr;
+	unsigned periphbase;
+
+	/* check whether we are an Cortex-A15 or A7.
+	 * The actual HYP switch should work with all CPUs supporting
+	 * the virtualization extension, but we need the GIC address,
+	 * which we know only for sure for those two CPUs.
+	 */
+	asm("mrc p15, 0, %0, c0, c0, 0\n" : "=r"(midr));
+	switch (midr & MIDR_PRIMARY_PART_MASK) {
+	case MIDR_CORTEX_A9_R0P1:
+	case MIDR_CORTEX_A15_R0P0:
+	case MIDR_CORTEX_A7_R0P0:
+		break;
+	default:
+		printf("nonsec: could not determine GIC address.\n");
+		return -1;
+	}
+
+	/* get the GIC base address from the CBAR register */
+	asm("mrc p15, 4, %0, c15, c0, 0\n" : "=r" (periphbase));
+
+	/* the PERIPHBASE can be mapped above 4 GB (lower 8 bits used to
+	 * encode this). Bail out here since we cannot access this without
+	 * enabling paging.
+	 */
+	if ((periphbase & 0xff) != 0) {
+		printf("nonsec: PERIPHBASE is above 4 GB, no access.\n");
+		return -1;
+	}
+
+	return (periphbase & CBAR_MASK) + GIC_DIST_OFFSET;
+#endif
+}
+
+static void kick_secondary_cpus_gic(unsigned long gicdaddr)
+{
+	/* kick all CPUs (except this one) by writing to GICD_SGIR */
+	writel(1U << 24, gicdaddr + GICD_SGIR);
+}
+
+void __weak smp_kick_all_cpus(void)
+{
+	kick_secondary_cpus_gic(gic_dist_addr);
+}
+
+int armv7_switch_hyp(void)
+{
+	unsigned int reg;
+
+	/* check whether we are in HYP mode already */
+	if ((read_cpsr() & 0x1f) == 0x1a) {
+		debug("CPU already in HYP mode\n");
+		return 0;
+	}
+
+	/* check whether the CPU supports the virtualization extensions */
+	reg = read_id_pfr1();
+	if ((reg & CPUID_ARM_VIRT_MASK) != 1 << CPUID_ARM_VIRT_SHIFT) {
+		printf("HYP mode: Virtualization extensions not implemented.\n");
+		return -1;
+	}
+
+	/* call the HYP switching code on this CPU also */
+	_switch_to_hyp();
+
+	if ((read_cpsr() & 0x1F) != 0x1a) {
+		printf("HYP mode: switch not successful.\n");
+		return -1;
+	}
+
+	return 0;
+}
+
+int armv7_switch_nonsec(void)
+{
+	unsigned int reg;
+	unsigned itlinesnr, i;
+
+	/* check whether the CPU supports the security extensions */
+	reg = read_id_pfr1();
+	if ((reg & 0xF0) == 0) {
+		printf("nonsec: Security extensions not implemented.\n");
+		return -1;
+	}
+
+	/* the SCR register will be set directly in the monitor mode handler,
+	 * according to the spec one should not tinker with it in secure state
+	 * in SVC mode. Do not try to read it once in non-secure state,
+	 * any access to it will trap.
+	 */
+
+	gic_dist_addr = get_gicd_base_address();
+	if (gic_dist_addr == -1)
+		return -1;
+
+	/* enable the GIC distributor */
+	writel(readl(gic_dist_addr + GICD_CTLR) | 0x03,
+	       gic_dist_addr + GICD_CTLR);
+
+	/* TYPER[4:0] contains an encoded number of available interrupts */
+	itlinesnr = readl(gic_dist_addr + GICD_TYPER) & 0x1f;
+
+	/* set all bits in the GIC group registers to one to allow access
+	 * from non-secure state. The first 32 interrupts are private per
+	 * CPU and will be set later when enabling the GIC for each core
+	 */
+	for (i = 1; i <= itlinesnr; i++)
+		writel((unsigned)-1, gic_dist_addr + GICD_IGROUPRn + 4 * i);
+
+	smp_set_core_boot_addr((unsigned long)_smp_pen, -1);
+	smp_kick_all_cpus();
+
+	/* call the non-sec switching code on this CPU also */
+	_nonsec_init();
+
+	return 0;
+}
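
The diff above adds the non-secure/HYP switching code and wires it into the build, but it does not show a caller. Below is a minimal sketch of how platform code might invoke the new entry points. The functions armv7_switch_nonsec() and armv7_switch_hyp() and the CONFIG_ARMV7_NONSEC/CONFIG_ARMV7_VIRT symbols come from this diff; the arch_preboot_os() hook used as the call site, and the assumption that the prototypes are exported through <asm/armv7.h>, are illustrative guesses and not part of the patch.

/*
 * Illustrative sketch only -- not part of this diff.
 * Shows one possible way to trigger the non-secure/HYP switch late in
 * the boot sequence, just before handing control to the OS.
 */
#include <common.h>
#include <asm/armv7.h>

void arch_preboot_os(void)	/* hypothetical call site before booting the OS */
{
#if defined(CONFIG_ARMV7_NONSEC) || defined(CONFIG_ARMV7_VIRT)
	/*
	 * Move this core to non-secure state; armv7_switch_nonsec() also
	 * parks the secondary cores in _smp_pen so they make the same
	 * transition before the kernel wakes them up.
	 */
	if (armv7_switch_nonsec() != 0)
		return;		/* stay in secure SVC if the switch fails */
#endif
#ifdef CONFIG_ARMV7_VIRT
	/* additionally enter HYP mode so a hypervisor-aware kernel can use it */
	armv7_switch_hyp();
#endif
}

Because armv7_switch_nonsec() already handles the GIC setup and the secondary-core pen, a board port mainly has to pick a suitable point late in boot to call it; the sketch above assumes such a hook exists.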