diff options
| author | Aneesh V <aneesh@ti.com> | 2011-06-16 23:30:47 +0000 | 
|---|---|---|
| committer | Albert ARIBAUD <albert.u.boot@aribaud.net> | 2011-07-04 10:55:25 +0200 | 
| commit | 2c451f7831208741d0ff7ca6046cffcd9ee49def (patch) | |
| tree | ec885d6ce9bc97eca3128e83e9af35c5b063ffe1 /arch | |
| parent | 4c93da7c392737f2036130c240e2b4bea773d703 (diff) | |
| download | olio-uboot-2014.01-2c451f7831208741d0ff7ca6046cffcd9ee49def.tar.xz olio-uboot-2014.01-2c451f7831208741d0ff7ca6046cffcd9ee49def.zip | |
armv7: cache maintenance operations for armv7
- Add a framework for layered cache maintenance
	- separate out SOC specific outer cache maintenance from
	  maintenance of caches known to CPU
- Add generic ARMv7 cache maintenance operations that affect all
  caches known to ARMv7 CPUs. For instance in Cortex-A8 these
  operations will affect both L1 and L2 caches. In Cortex-A9
  these will affect only L1 cache
- D-cache operations supported:
	- Invalidate entire D-cache
	- Invalidate D-cache range
	- Flush(clean & invalidate) entire D-cache
	- Flush D-cache range
- I-cache operations supported:
	- Invalidate entire I-cache
- Add maintenance functions for TLB, branch predictor array etc.
- Enable -march=armv7-a so that armv7 assembly instructions can be
  used
Signed-off-by: Aneesh V <aneesh@ti.com>
Diffstat (limited to 'arch')
| -rw-r--r-- | arch/arm/cpu/armv7/Makefile | 2 | ||||
| -rw-r--r-- | arch/arm/cpu/armv7/cache_v7.c | 394 | ||||
| -rw-r--r-- | arch/arm/include/asm/armv7.h | 67 | ||||
| -rw-r--r-- | arch/arm/include/asm/utils.h | 56 | 
4 files changed, 518 insertions, 1 deletions
| diff --git a/arch/arm/cpu/armv7/Makefile b/arch/arm/cpu/armv7/Makefile index 8c0e91500..299792ac5 100644 --- a/arch/arm/cpu/armv7/Makefile +++ b/arch/arm/cpu/armv7/Makefile @@ -26,7 +26,7 @@ include $(TOPDIR)/config.mk  LIB	= $(obj)lib$(CPU).o  START	:= start.o -COBJS	:= cpu.o +COBJS	:= cpu.o cache_v7.o  COBJS  += syslib.o  SRCS	:= $(START:.o=.S) $(COBJS:.o=.c) diff --git a/arch/arm/cpu/armv7/cache_v7.c b/arch/arm/cpu/armv7/cache_v7.c new file mode 100644 index 000000000..3e1e1bf87 --- /dev/null +++ b/arch/arm/cpu/armv7/cache_v7.c @@ -0,0 +1,394 @@ +/* + * (C) Copyright 2010 + * Texas Instruments, <www.ti.com> + * Aneesh V <aneesh@ti.com> + * + * See file CREDITS for list of people who contributed to this + * project. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ +#include <linux/types.h> +#include <common.h> +#include <asm/armv7.h> +#include <asm/utils.h> + +#define ARMV7_DCACHE_INVAL_ALL		1 +#define ARMV7_DCACHE_CLEAN_INVAL_ALL	2 +#define ARMV7_DCACHE_INVAL_RANGE	3 +#define ARMV7_DCACHE_CLEAN_INVAL_RANGE	4 + +#ifndef CONFIG_SYS_DCACHE_OFF +/* + * Write the level and type you want to Cache Size Selection Register(CSSELR) + * to get size details from Current Cache Size ID Register(CCSIDR) + */ +static void set_csselr(u32 level, u32 type) +{	u32 csselr = level << 1 | type; + +	/* Write to Cache Size Selection Register(CSSELR) */ +	asm volatile ("mcr p15, 2, %0, c0, c0, 0" : : "r" (csselr)); +} + +static u32 get_ccsidr(void) +{ +	u32 ccsidr; + +	/* Read current CP15 Cache Size ID Register */ +	asm volatile ("mrc p15, 1, %0, c0, c0, 0" : "=r" (ccsidr)); +	return ccsidr; +} + +static u32 get_clidr(void) +{ +	u32 clidr; + +	/* Read current CP15 Cache Level ID Register */ +	asm volatile ("mrc p15,1,%0,c0,c0,1" : "=r" (clidr)); +	return clidr; +} + +static void v7_inval_dcache_level_setway(u32 level, u32 num_sets, +					 u32 num_ways, u32 way_shift, +					 u32 log2_line_len) +{ +	int way, set, setway; + +	/* +	 * For optimal assembly code: +	 *	a. count down +	 *	b. 
have bigger loop inside +	 */ +	for (way = num_ways - 1; way >= 0 ; way--) { +		for (set = num_sets - 1; set >= 0; set--) { +			setway = (level << 1) | (set << log2_line_len) | +				 (way << way_shift); +			/* Invalidate data/unified cache line by set/way */ +			asm volatile ("	mcr p15, 0, %0, c7, c6, 2" +					: : "r" (setway)); +		} +	} +	/* DMB to make sure the operation is complete */ +	CP15DMB; +} + +static void v7_clean_inval_dcache_level_setway(u32 level, u32 num_sets, +					       u32 num_ways, u32 way_shift, +					       u32 log2_line_len) +{ +	int way, set, setway; + +	/* +	 * For optimal assembly code: +	 *	a. count down +	 *	b. have bigger loop inside +	 */ +	for (way = num_ways - 1; way >= 0 ; way--) { +		for (set = num_sets - 1; set >= 0; set--) { +			setway = (level << 1) | (set << log2_line_len) | +				 (way << way_shift); +			/* +			 * Clean & Invalidate data/unified +			 * cache line by set/way +			 */ +			asm volatile ("	mcr p15, 0, %0, c7, c14, 2" +					: : "r" (setway)); +		} +	} +	/* DMB to make sure the operation is complete */ +	CP15DMB; +} + +static void v7_maint_dcache_level_setway(u32 level, u32 operation) +{ +	u32 ccsidr; +	u32 num_sets, num_ways, log2_line_len, log2_num_ways; +	u32 way_shift; + +	set_csselr(level, ARMV7_CSSELR_IND_DATA_UNIFIED); + +	ccsidr = get_ccsidr(); + +	log2_line_len = ((ccsidr & CCSIDR_LINE_SIZE_MASK) >> +				CCSIDR_LINE_SIZE_OFFSET) + 2; +	/* Converting from words to bytes */ +	log2_line_len += 2; + +	num_ways  = ((ccsidr & CCSIDR_ASSOCIATIVITY_MASK) >> +			CCSIDR_ASSOCIATIVITY_OFFSET) + 1; +	num_sets  = ((ccsidr & CCSIDR_NUM_SETS_MASK) >> +			CCSIDR_NUM_SETS_OFFSET) + 1; +	/* +	 * According to ARMv7 ARM number of sets and number of ways need +	 * not be a power of 2 +	 */ +	log2_num_ways = log_2_n_round_up(num_ways); + +	way_shift = (32 - log2_num_ways); +	if (operation == ARMV7_DCACHE_INVAL_ALL) { +		v7_inval_dcache_level_setway(level, num_sets, num_ways, +				      way_shift, log2_line_len); +	} else if 
(operation == ARMV7_DCACHE_CLEAN_INVAL_ALL) { +		v7_clean_inval_dcache_level_setway(level, num_sets, num_ways, +						   way_shift, log2_line_len); +	} +} + +static void v7_maint_dcache_all(u32 operation) +{ +	u32 level, cache_type, level_start_bit = 0; + +	u32 clidr = get_clidr(); + +	for (level = 0; level < 7; level++) { +		cache_type = (clidr >> level_start_bit) & 0x7; +		if ((cache_type == ARMV7_CLIDR_CTYPE_DATA_ONLY) || +		    (cache_type == ARMV7_CLIDR_CTYPE_INSTRUCTION_DATA) || +		    (cache_type == ARMV7_CLIDR_CTYPE_UNIFIED)) +			v7_maint_dcache_level_setway(level, operation); +		level_start_bit += 3; +	} +} + +static void v7_dcache_clean_inval_range(u32 start, +					u32 stop, u32 line_len) +{ +	u32 mva; + +	/* Align start to cache line boundary */ +	start &= ~(line_len - 1); +	for (mva = start; mva < stop; mva = mva + line_len) { +		/* DCCIMVAC - Clean & Invalidate data cache by MVA to PoC */ +		asm volatile ("mcr p15, 0, %0, c7, c14, 1" : : "r" (mva)); +	} +} + +static void v7_dcache_inval_range(u32 start, u32 stop, u32 line_len) +{ +	u32 mva; + +	/* +	 * If start address is not aligned to cache-line flush the first +	 * line to prevent affecting somebody else's buffer +	 */ +	if (start & (line_len - 1)) { +		v7_dcache_clean_inval_range(start, start + 1, line_len); +		/* move to next cache line */ +		start = (start + line_len - 1) & ~(line_len - 1); +	} + +	/* +	 * If stop address is not aligned to cache-line flush the last +	 * line to prevent affecting somebody else's buffer +	 */ +	if (stop & (line_len - 1)) { +		v7_dcache_clean_inval_range(stop, stop + 1, line_len); +		/* align to the beginning of this cache line */ +		stop &= ~(line_len - 1); +	} + +	for (mva = start; mva < stop; mva = mva + line_len) { +		/* DCIMVAC - Invalidate data cache by MVA to PoC */ +		asm volatile ("mcr p15, 0, %0, c7, c6, 1" : : "r" (mva)); +	} +} + +static void v7_dcache_maint_range(u32 start, u32 stop, u32 range_op) +{ +	u32 line_len, ccsidr; + +	ccsidr = get_ccsidr(); +	
line_len = ((ccsidr & CCSIDR_LINE_SIZE_MASK) >> +			CCSIDR_LINE_SIZE_OFFSET) + 2; +	/* Converting from words to bytes */ +	line_len += 2; +	/* converting from log2(linelen) to linelen */ +	line_len = 1 << line_len; + +	switch (range_op) { +	case ARMV7_DCACHE_CLEAN_INVAL_RANGE: +		v7_dcache_clean_inval_range(start, stop, line_len); +		break; +	case ARMV7_DCACHE_INVAL_RANGE: +		v7_dcache_inval_range(start, stop, line_len); +		break; +	} + +	/* DMB to make sure the operation is complete */ +	CP15DMB; +} + +/* Invalidate TLB */ +static void v7_inval_tlb(void) +{ +	/* Invalidate entire unified TLB */ +	asm volatile ("mcr p15, 0, %0, c8, c7, 0" : : "r" (0)); +	/* Invalidate entire data TLB */ +	asm volatile ("mcr p15, 0, %0, c8, c6, 0" : : "r" (0)); +	/* Invalidate entire instruction TLB */ +	asm volatile ("mcr p15, 0, %0, c8, c5, 0" : : "r" (0)); +	/* Full system DSB - make sure that the invalidation is complete */ +	CP15DSB; +	/* Full system ISB - make sure the instruction stream sees it */ +	CP15ISB; +} + +void invalidate_dcache_all(void) +{ +	v7_maint_dcache_all(ARMV7_DCACHE_INVAL_ALL); + +	v7_outer_cache_inval_all(); +} + +/* + * Performs a clean & invalidation of the entire data cache + * at all levels + */ +void flush_dcache_all(void) +{ +	v7_maint_dcache_all(ARMV7_DCACHE_CLEAN_INVAL_ALL); + +	v7_outer_cache_flush_all(); +} + +/* + * Invalidates range in all levels of D-cache/unified cache used: + * Affects the range [start, stop - 1] + */ +void invalidate_dcache_range(unsigned long start, unsigned long stop) +{ + +	v7_dcache_maint_range(start, stop, ARMV7_DCACHE_INVAL_RANGE); + +	v7_outer_cache_inval_range(start, stop); +} + +/* + * Flush range(clean & invalidate) from all levels of D-cache/unified + * cache used: + * Affects the range [start, stop - 1] + */ +void flush_dcache_range(unsigned long start, unsigned long stop) +{ +	v7_dcache_maint_range(start, stop, ARMV7_DCACHE_CLEAN_INVAL_RANGE); + +	v7_outer_cache_flush_range(start, stop); +} + +void 
arm_init_before_mmu(void) +{ +	v7_outer_cache_enable(); +	invalidate_dcache_all(); +	v7_inval_tlb(); +} + +/* + * Flush range from all levels of d-cache/unified-cache used: + * Affects the range [start, start + size - 1] + */ +void  flush_cache(unsigned long start, unsigned long size) +{ +	flush_dcache_range(start, start + size); +} +#else /* #ifndef CONFIG_SYS_DCACHE_OFF */ +void invalidate_dcache_all(void) +{ +} + +void flush_dcache_all(void) +{ +} + +void invalidate_dcache_range(unsigned long start, unsigned long stop) +{ +} + +void flush_dcache_range(unsigned long start, unsigned long stop) +{ +} + +void arm_init_before_mmu(void) +{ +} + +void  flush_cache(unsigned long start, unsigned long size) +{ +} +#endif /* #ifndef CONFIG_SYS_DCACHE_OFF */ + +#ifndef CONFIG_SYS_ICACHE_OFF +/* Invalidate entire I-cache and branch predictor array */ +void invalidate_icache_all(void) +{ +	/* +	 * Invalidate all instruction caches to PoU. +	 * Also flushes branch target cache. +	 */ +	asm volatile ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0)); + +	/* Invalidate entire branch predictor array */ +	asm volatile ("mcr p15, 0, %0, c7, c5, 6" : : "r" (0)); + +	/* Full system DSB - make sure that the invalidation is complete */ +	CP15DSB; + +	/* ISB - make sure the instruction stream sees it */ +	CP15ISB; +} +#else +void invalidate_icache_all(void) +{ +} +#endif + +/* + * Stub implementations for outer cache operations + */ +void __v7_outer_cache_enable(void) +{ +} +void v7_outer_cache_enable(void) +	__attribute__((weak, alias("__v7_outer_cache_enable"))); + +void __v7_outer_cache_disable(void) +{ +} +void v7_outer_cache_disable(void) +	__attribute__((weak, alias("__v7_outer_cache_disable"))); + +void __v7_outer_cache_flush_all(void) +{ +} +void v7_outer_cache_flush_all(void) +	__attribute__((weak, alias("__v7_outer_cache_flush_all"))); + +void __v7_outer_cache_inval_all(void) +{ +} +void v7_outer_cache_inval_all(void) +	__attribute__((weak, alias("__v7_outer_cache_inval_all"))); + 
+void __v7_outer_cache_flush_range(u32 start, u32 end) +{ +} +void v7_outer_cache_flush_range(u32 start, u32 end) +	__attribute__((weak, alias("__v7_outer_cache_flush_range"))); + +void __v7_outer_cache_inval_range(u32 start, u32 end) +{ +} +void v7_outer_cache_inval_range(u32 start, u32 end) +	__attribute__((weak, alias("__v7_outer_cache_inval_range"))); diff --git a/arch/arm/include/asm/armv7.h b/arch/arm/include/asm/armv7.h new file mode 100644 index 000000000..88b9c62dd --- /dev/null +++ b/arch/arm/include/asm/armv7.h @@ -0,0 +1,67 @@ +/* + * (C) Copyright 2010 + * Texas Instruments, <www.ti.com> + * Aneesh V <aneesh@ti.com> + * + * See file CREDITS for list of people who contributed to this + * project. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ +#ifndef ARMV7_H +#define ARMV7_H +#include <linux/types.h> + +/* CCSIDR */ +#define CCSIDR_LINE_SIZE_OFFSET		0 +#define CCSIDR_LINE_SIZE_MASK		0x7 +#define CCSIDR_ASSOCIATIVITY_OFFSET	3 +#define CCSIDR_ASSOCIATIVITY_MASK	(0x3FF << 3) +#define CCSIDR_NUM_SETS_OFFSET		13 +#define CCSIDR_NUM_SETS_MASK		(0x7FFF << 13) + +/* + * Values for InD field in CSSELR + * Selects the type of cache + */ +#define ARMV7_CSSELR_IND_DATA_UNIFIED	0 +#define ARMV7_CSSELR_IND_INSTRUCTION	1 + +/* Values for Ctype fields in CLIDR */ +#define ARMV7_CLIDR_CTYPE_NO_CACHE		0 +#define ARMV7_CLIDR_CTYPE_INSTRUCTION_ONLY	1 +#define ARMV7_CLIDR_CTYPE_DATA_ONLY		2 +#define ARMV7_CLIDR_CTYPE_INSTRUCTION_DATA	3 +#define ARMV7_CLIDR_CTYPE_UNIFIED		4 + +/* + * CP15 Barrier instructions + * Please note that we have separate barrier instructions in ARMv7 + * However, we use the CP15 based instructions because we use + * -march=armv5 in U-Boot + */ +#define CP15ISB	asm volatile ("mcr     p15, 0, %0, c7, c5, 4" : : "r" (0)) +#define CP15DSB	asm volatile ("mcr     p15, 0, %0, c7, c10, 4" : : "r" (0)) +#define CP15DMB	asm volatile ("mcr     p15, 0, %0, c7, c10, 5" : : "r" (0)) + +void v7_outer_cache_enable(void); +void v7_outer_cache_disable(void); +void v7_outer_cache_flush_all(void); +void v7_outer_cache_inval_all(void); +void v7_outer_cache_flush_range(u32 start, u32 end); +void v7_outer_cache_inval_range(u32 start, u32 end); + +#endif diff --git a/arch/arm/include/asm/utils.h b/arch/arm/include/asm/utils.h new file mode 100644 index 000000000..828b86cb3 --- /dev/null +++ b/arch/arm/include/asm/utils.h @@ -0,0 +1,56 @@ +/* + * (C) Copyright 2010 + * Texas Instruments, <www.ti.com> + * Aneesh V <aneesh@ti.com> + * + * See file CREDITS for list of people who contributed to this + * 
project. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation; either version 2 of + * the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, + * MA 02111-1307 USA + */ +#ifndef _UTILS_H_ +#define _UTILS_H_ + +static inline s32 log_2_n_round_up(u32 n) +{ +	s32 log2n = -1; +	u32 temp = n; + +	while (temp) { +		log2n++; +		temp >>= 1; +	} + +	if (n & (n - 1)) +		return log2n + 1; /* not power of 2 - round up */ +	else +		return log2n; /* power of 2 */ +} + +static inline s32 log_2_n_round_down(u32 n) +{ +	s32 log2n = -1; +	u32 temp = n; + +	while (temp) { +		log2n++; +		temp >>= 1; +	} + +	return log2n; +} + +#endif |