-rw-r--r--  arch/powerpc/kernel/misc_32.S                                     |  23
-rw-r--r--  arch/ppc/kernel/misc.S                                            |   4
-rw-r--r--  arch/ppc/kernel/ppc_ksyms.c                                       |   1
-rw-r--r--  arch/ppc/platforms/prep_setup.c                                   |   9
-rw-r--r--  include/asm-powerpc/cache.h                                       |  40
-rw-r--r--  include/asm-powerpc/cacheflush.h (renamed from include/asm-ppc64/cacheflush.h) |  52
-rw-r--r--  include/asm-powerpc/reg.h                                         |   6
-rw-r--r--  include/asm-powerpc/reg_8xx.h (renamed from include/asm-ppc/cache.h) |  50
-rw-r--r--  include/asm-ppc/cacheflush.h                                      |  49
-rw-r--r--  include/asm-ppc64/cache.h                                         |  36
10 files changed, 98 insertions, 172 deletions
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 3bedb532aed..f6d84a75ed2 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -519,7 +519,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
  *
  * flush_icache_range(unsigned long start, unsigned long stop)
  */
-_GLOBAL(flush_icache_range)
+_GLOBAL(__flush_icache_range)
 BEGIN_FTR_SECTION
 	blr				/* for 601, do nothing */
 END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
@@ -607,27 +607,6 @@ _GLOBAL(invalidate_dcache_range)
 	sync				/* wait for dcbi's to get to ram */
 	blr
 
-#ifdef CONFIG_NOT_COHERENT_CACHE
-/*
- * 40x cores have 8K or 16K dcache and 32 byte line size.
- * 44x has a 32K dcache and 32 byte line size.
- * 8xx has 1, 2, 4, 8K variants.
- * For now, cover the worst case of the 44x.
- * Must be called with external interrupts disabled.
- */
-#define CACHE_NWAYS	64
-#define CACHE_NLINES	16
-
-_GLOBAL(flush_dcache_all)
-	li	r4, (2 * CACHE_NWAYS * CACHE_NLINES)
-	mtctr	r4
-	lis     r5, KERNELBASE@h
-1:	lwz	r3, 0(r5)		/* Load one word from every line */
-	addi	r5, r5, L1_CACHE_BYTES
-	bdnz    1b
-	blr
-#endif /* CONFIG_NOT_COHERENT_CACHE */
-
 /*
  * Flush a particular page from the data cache to RAM.
  * Note: this is necessary because the instruction cache does *not*
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S
index ae6af29938a..5e61124581d 100644
--- a/arch/ppc/kernel/misc.S
+++ b/arch/ppc/kernel/misc.S
@@ -497,9 +497,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
  * and invalidate the corresponding instruction cache blocks.
  * This is a no-op on the 601.
  *
- * flush_icache_range(unsigned long start, unsigned long stop)
+ * __flush_icache_range(unsigned long start, unsigned long stop)
  */
-_GLOBAL(flush_icache_range)
+_GLOBAL(__flush_icache_range)
 BEGIN_FTR_SECTION
 	blr				/* for 601, do nothing */
 END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c
index 6550de73a85..307077f1493 100644
--- a/arch/ppc/kernel/ppc_ksyms.c
+++ b/arch/ppc/kernel/ppc_ksyms.c
@@ -175,6 +175,7 @@ EXPORT_SYMBOL(pci_bus_to_phys);
 #endif /* CONFIG_PCI */
 
 #ifdef CONFIG_NOT_COHERENT_CACHE
+extern void flush_dcache_all(void);
 EXPORT_SYMBOL(flush_dcache_all);
 #endif
 
diff --git a/arch/ppc/platforms/prep_setup.c b/arch/ppc/platforms/prep_setup.c
index 067d7d53b81..4415748071d 100644
--- a/arch/ppc/platforms/prep_setup.c
+++ b/arch/ppc/platforms/prep_setup.c
@@ -61,6 +61,15 @@
 #include <asm/pci-bridge.h>
 #include <asm/todc.h>
 
+/* prep registers for L2 */
+#define CACHECRBA       0x80000823      /* Cache configuration register address */
+#define L2CACHE_MASK	0x03	/* Mask for 2 L2 Cache bits */
+#define L2CACHE_512KB	0x00	/* 512KB */
+#define L2CACHE_256KB	0x01	/* 256KB */
+#define L2CACHE_1MB	0x02	/* 1MB */
+#define L2CACHE_NONE	0x03	/* NONE */
+#define L2CACHE_PARITY  0x08    /* Mask for L2 Cache Parity Protected bit */
+
 TODC_ALLOC();
 
 unsigned char ucSystemType;
diff --git a/include/asm-powerpc/cache.h b/include/asm-powerpc/cache.h
new file mode 100644
index 00000000000..26ce502e76e
--- /dev/null
+++ b/include/asm-powerpc/cache.h
@@ -0,0 +1,40 @@
+#ifndef _ASM_POWERPC_CACHE_H
+#define _ASM_POWERPC_CACHE_H
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+
+/* bytes per L1 cache line */
+#if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
+#define L1_CACHE_SHIFT		4
+#define MAX_COPY_PREFETCH	1
+#elif defined(CONFIG_PPC32)
+#define L1_CACHE_SHIFT		5
+#define MAX_COPY_PREFETCH	4
+#else /* CONFIG_PPC64 */
+#define L1_CACHE_SHIFT		7
+#endif
+
+#define	L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+
+#define	SMP_CACHE_BYTES		L1_CACHE_BYTES
+#define L1_CACHE_SHIFT_MAX	7 /* largest L1 which this arch supports */
+
+#if defined(__powerpc64__) && !defined(__ASSEMBLY__)
+struct ppc64_caches {
+	u32	dsize;			/* L1 d-cache size */
+	u32	dline_size;		/* L1 d-cache line size	*/
+	u32	log_dline_size;
+	u32	dlines_per_page;
+	u32	isize;			/* L1 i-cache size */
+	u32	iline_size;		/* L1 i-cache line size	*/
+	u32	log_iline_size;
+	u32	ilines_per_page;
+};
+
+extern struct ppc64_caches ppc64_caches;
+#endif /* __powerpc64__ && ! __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_CACHE_H */
diff --git a/include/asm-ppc64/cacheflush.h b/include/asm-powerpc/cacheflush.h
index ffbc08be8e5..8a740c88d93 100644
--- a/include/asm-ppc64/cacheflush.h
+++ b/include/asm-powerpc/cacheflush.h
@@ -1,13 +1,20 @@
-#ifndef _PPC64_CACHEFLUSH_H
-#define _PPC64_CACHEFLUSH_H
+/*
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License
+ *  as published by the Free Software Foundation; either version
+ *  2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_POWERPC_CACHEFLUSH_H
+#define _ASM_POWERPC_CACHEFLUSH_H
+
+#ifdef __KERNEL__
 
 #include <linux/mm.h>
 #include <asm/cputable.h>
 
 /*
- * No cache flushing is required when address mappings are
- * changed, because the caches on PowerPCs are physically
- * addressed.
+ * No cache flushing is required when address mappings are changed,
+ * because the caches on PowerPCs are physically addressed.
  */
 #define flush_cache_all()			do { } while (0)
 #define flush_cache_mm(mm)			do { } while (0)
@@ -22,27 +29,40 @@ extern void flush_dcache_page(struct page *page);
 #define flush_dcache_mmap_unlock(mapping)	do { } while (0)
 
 extern void __flush_icache_range(unsigned long, unsigned long);
+static inline void flush_icache_range(unsigned long start, unsigned long stop)
+{
+	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
+		__flush_icache_range(start, stop);
+}
+
 extern void flush_icache_user_range(struct vm_area_struct *vma,
 				    struct page *page, unsigned long addr,
 				    int len);
+extern void __flush_dcache_icache(void *page_va);
+extern void flush_dcache_icache_page(struct page *page);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE)
+extern void __flush_dcache_icache_phys(unsigned long physaddr);
+#endif /* CONFIG_PPC32 && !CONFIG_BOOKE */
 
 extern void flush_dcache_range(unsigned long start, unsigned long stop);
-extern void flush_dcache_phys_range(unsigned long start, unsigned long stop);
+#ifdef CONFIG_PPC32
+extern void clean_dcache_range(unsigned long start, unsigned long stop);
+extern void invalidate_dcache_range(unsigned long start, unsigned long stop);
+#endif /* CONFIG_PPC32 */
+#ifdef CONFIG_PPC64
 extern void flush_inval_dcache_range(unsigned long start, unsigned long stop);
+extern void flush_dcache_phys_range(unsigned long start, unsigned long stop);
+#endif
 
 #define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-do { memcpy(dst, src, len); \
-     flush_icache_user_range(vma, page, vaddr, len); \
-} while (0)
+	do { \
+		memcpy(dst, src, len); \
+		flush_icache_user_range(vma, page, vaddr, len); \
+	} while (0)
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
 	memcpy(dst, src, len)
 
-extern void __flush_dcache_icache(void *page_va);
-static inline void flush_icache_range(unsigned long start, unsigned long stop)
-{
-	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
-		__flush_icache_range(start, stop);
-}
+#endif /* __KERNEL__ */
 
-#endif /* _PPC64_CACHEFLUSH_H */
+#endif /* _ASM_POWERPC_CACHEFLUSH_H */
 
diff --git a/include/asm-powerpc/reg.h b/include/asm-powerpc/reg.h
index 489cf4c99c2..ef121f4f0ba 100644
--- a/include/asm-powerpc/reg.h
+++ b/include/asm-powerpc/reg.h
@@ -16,7 +16,11 @@
 /* Pickup Book E specific registers. */
 #if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
 #include <asm/reg_booke.h>
-#endif
+#endif /* CONFIG_BOOKE || CONFIG_40x */
+
+#ifdef CONFIG_8xx
+#include <asm/reg_8xx.h>
+#endif /* CONFIG_8xx */
 
 #define MSR_SF_LG	63              /* Enable 64 bit mode */
 #define MSR_ISF_LG	61              /* Interrupt 64b mode valid on 630 */
diff --git a/include/asm-ppc/cache.h b/include/asm-powerpc/reg_8xx.h
index 7a157d0f4b5..e8ea346b21d 100644
--- a/include/asm-ppc/cache.h
+++ b/include/asm-powerpc/reg_8xx.h
@@ -1,49 +1,9 @@
 /*
- * include/asm-ppc/cache.h
+ * Contains register definitions common to PowerPC 8xx CPUs.  Notice
  */
-#ifdef __KERNEL__
-#ifndef __ARCH_PPC_CACHE_H
-#define __ARCH_PPC_CACHE_H
+#ifndef _ASM_POWERPC_REG_8xx_H
+#define _ASM_POWERPC_REG_8xx_H
 
-#include <linux/config.h>
-
-/* bytes per L1 cache line */
-#if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
-#define L1_CACHE_SHIFT	4
-#define MAX_COPY_PREFETCH	1
-#elif defined(CONFIG_PPC64BRIDGE)
-#define L1_CACHE_SHIFT	7
-#define MAX_COPY_PREFETCH	1
-#else
-#define L1_CACHE_SHIFT	5
-#define MAX_COPY_PREFETCH	4
-#endif
-
-#define	L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
-
-#define	SMP_CACHE_BYTES L1_CACHE_BYTES
-#define L1_CACHE_SHIFT_MAX 7	/* largest L1 which this arch supports */
-
-#define	L1_CACHE_ALIGN(x)       (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
-#define	L1_CACHE_PAGES		8
-
-#ifndef __ASSEMBLY__
-extern void clean_dcache_range(unsigned long start, unsigned long stop);
-extern void flush_dcache_range(unsigned long start, unsigned long stop);
-extern void invalidate_dcache_range(unsigned long start, unsigned long stop);
-extern void flush_dcache_all(void);
-#endif /* __ASSEMBLY__ */
-
-/* prep registers for L2 */
-#define CACHECRBA       0x80000823      /* Cache configuration register address */
-#define L2CACHE_MASK	0x03	/* Mask for 2 L2 Cache bits */
-#define L2CACHE_512KB	0x00	/* 512KB */
-#define L2CACHE_256KB	0x01	/* 256KB */
-#define L2CACHE_1MB	0x02	/* 1MB */
-#define L2CACHE_NONE	0x03	/* NONE */
-#define L2CACHE_PARITY  0x08    /* Mask for L2 Cache Parity Protected bit */
-
-#ifdef CONFIG_8xx
 /* Cache control on the MPC8xx is provided through some additional
  * special purpose registers.
  */
@@ -78,7 +38,5 @@ extern void flush_dcache_all(void);
 #define DC_DFWT		0x40000000	/* Data cache is forced write through */
 #define DC_LES		0x20000000	/* Caches are little endian mode */
 
-#endif /* CONFIG_8xx */
-#endif
-#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_REG_8xx_H */
 
diff --git a/include/asm-ppc/cacheflush.h b/include/asm-ppc/cacheflush.h
deleted file mode 100644
index 6a243efb331..00000000000
--- a/include/asm-ppc/cacheflush.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * include/asm-ppc/cacheflush.h
- *
- *  This program is free software; you can redistribute it and/or
- *  modify it under the terms of the GNU General Public License
- *  as published by the Free Software Foundation; either version
- *  2 of the License, or (at your option) any later version.
- */
-#ifdef __KERNEL__
-#ifndef _PPC_CACHEFLUSH_H
-#define _PPC_CACHEFLUSH_H
-
-#include <linux/mm.h>
-
-/*
- * No cache flushing is required when address mappings are
- * changed, because the caches on PowerPCs are physically
- * addressed.  -- paulus
- * Also, when SMP we use the coherency (M) bit of the
- * BATs and PTEs.  -- Cort
- */
-#define flush_cache_all()		do { } while (0)
-#define flush_cache_mm(mm)		do { } while (0)
-#define flush_cache_range(vma, a, b)	do { } while (0)
-#define flush_cache_page(vma, p, pfn)	do { } while (0)
-#define flush_icache_page(vma, page)	do { } while (0)
-#define flush_cache_vmap(start, end)	do { } while (0)
-#define flush_cache_vunmap(start, end)	do { } while (0)
-
-extern void flush_dcache_page(struct page *page);
-#define flush_dcache_mmap_lock(mapping)		do { } while (0)
-#define flush_dcache_mmap_unlock(mapping)	do { } while (0)
-
-extern void flush_icache_range(unsigned long, unsigned long);
-extern void flush_icache_user_range(struct vm_area_struct *vma,
-		struct page *page, unsigned long addr, int len);
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-do { memcpy(dst, src, len); \
-     flush_icache_user_range(vma, page, vaddr, len); \
-} while (0)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
-	memcpy(dst, src, len)
-
-extern void __flush_dcache_icache(void *page_va);
-extern void __flush_dcache_icache_phys(unsigned long physaddr);
-extern void flush_dcache_icache_page(struct page *page);
-#endif /* _PPC_CACHEFLUSH_H */
-#endif /* __KERNEL__ */
diff --git a/include/asm-ppc64/cache.h b/include/asm-ppc64/cache.h
deleted file mode 100644
index 92140a7efbd..00000000000
--- a/include/asm-ppc64/cache.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#ifndef __ARCH_PPC64_CACHE_H
-#define __ARCH_PPC64_CACHE_H
-
-#include <asm/types.h>
-
-/* bytes per L1 cache line */
-#define L1_CACHE_SHIFT	7
-#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
-
-#define SMP_CACHE_BYTES L1_CACHE_BYTES
-#define L1_CACHE_SHIFT_MAX 7	/* largest L1 which this arch supports */
-
-#ifndef __ASSEMBLY__
-
-struct ppc64_caches {
-	u32	dsize;			/* L1 d-cache size */
-	u32	dline_size;		/* L1 d-cache line size	*/
-	u32	log_dline_size;
-	u32	dlines_per_page;
-	u32	isize;			/* L1 i-cache size */
-	u32	iline_size;		/* L1 i-cache line size	*/
-	u32	log_iline_size;
-	u32	ilines_per_page;
-};
-
-extern struct ppc64_caches ppc64_caches;
-
-#endif
-
-#endif
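
The central interface change in this diff is that the assembly routine is now exported as __flush_icache_range(), and callers reach it through a static inline flush_icache_range() in the merged include/asm-powerpc/cacheflush.h that skips the flush entirely on CPUs advertising CPU_FTR_COHERENT_ICACHE. Below is a minimal standalone sketch of that wrapper pattern; cpu_has_feature(), CPU_FTR_COHERENT_ICACHE and __flush_icache_range() are the kernel's names, but the stub bodies and the cur_cpu_features variable are illustrative stand-ins, not the real cputable implementation.

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for the kernel's CPU feature machinery. */
#define CPU_FTR_COHERENT_ICACHE  0x00000001u
static uint32_t cur_cpu_features;	/* the kernel reads this from cputable */

static int cpu_has_feature(uint32_t feature)
{
	return (cur_cpu_features & feature) != 0;
}

/* Stand-in for the assembly routine _GLOBAL(__flush_icache_range). */
static void __flush_icache_range(unsigned long start, unsigned long stop)
{
	printf("flushing i-cache lines for 0x%lx..0x%lx\n", start, stop);
}

/* Same shape as the new inline in include/asm-powerpc/cacheflush.h. */
static inline void flush_icache_range(unsigned long start, unsigned long stop)
{
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		__flush_icache_range(start, stop);
}

int main(void)
{
	cur_cpu_features = 0;				/* i-cache does not snoop stores */
	flush_icache_range(0x1000, 0x2000);		/* slow path runs */

	cur_cpu_features = CPU_FTR_COHERENT_ICACHE;	/* coherent i-cache */
	flush_icache_range(0x1000, 0x2000);		/* wrapper does nothing */
	return 0;
}

Code that modifies instructions keeps calling flush_icache_range() as before; only parts whose instruction cache does not snoop the data cache pay for the real flush.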
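The flush_dcache_all block deleted from the merged arch/powerpc copy of misc_32.S flushed the data cache by displacement: it loads one word from every cache-line-sized step across twice the worst-case cache size, so every line previously held in the d-cache is evicted and written back (arch/ppc still exports flush_dcache_all, which is why ppc_ksyms.c now carries its own extern declaration after the prototype left cache.h). A rough C rendering of the same loop, with the constants copied from the deleted assembly and a local buffer standing in for the KERNELBASE region the real code walks, might look like this:

#include <stddef.h>

/* Constants from the deleted assembly: cover the worst case noted in its
 * comment (44x: 32K d-cache, 32-byte lines). */
#define CACHE_NWAYS	64
#define CACHE_NLINES	16
#define L1_CACHE_BYTES	32

/* A static buffer stands in for the KERNELBASE-based region the assembly
 * reads; the real routine must also run with external interrupts disabled. */
static unsigned long flush_region[2 * CACHE_NWAYS * CACHE_NLINES *
				  L1_CACHE_BYTES / sizeof(unsigned long)];

void flush_dcache_all_by_displacement(void)	/* hypothetical name */
{
	volatile unsigned long *p = flush_region;
	size_t i;

	/* Load one word from every line over twice the cache size, displacing
	 * (and thereby writing back) whatever the d-cache held before. */
	for (i = 0; i < 2 * CACHE_NWAYS * CACHE_NLINES; i++) {
		(void)*p;					/* load one word from this line */
		p += L1_CACHE_BYTES / sizeof(unsigned long);	/* step to the next cache line */
	}
}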