 arch/powerpc/kernel/vmlinux.lds.S |    2 +-
 arch/x86/kernel/init_task.c       |    2 +-
 include/asm-generic/vmlinux.lds.h |    2 +-
 include/linux/cache.h             |    2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index dcd01c82e70..3229c062216 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -231,7 +231,7 @@ SECTIONS
 		PAGE_ALIGNED_DATA(PAGE_SIZE)
 	}
 
-	.data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) {
+	.data..cacheline_aligned : AT(ADDR(.data..cacheline_aligned) - LOAD_OFFSET) {
 		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
 	}
 
diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
index 3a54dcb9cd0..43e9ccf4494 100644
--- a/arch/x86/kernel/init_task.c
+++ b/arch/x86/kernel/init_task.c
@@ -34,7 +34,7 @@ EXPORT_SYMBOL(init_task);
 /*
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
  * no more per-task TSS's. The TSS size is kept cacheline-aligned
- * so they are allowed to end up in the .data.cacheline_aligned
+ * so they are allowed to end up in the .data..cacheline_aligned
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 67e652068e0..78450aaab9e 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -189,7 +189,7 @@
 #define CACHELINE_ALIGNED_DATA(align)					\
 	. = ALIGN(align);						\
-	*(.data.cacheline_aligned)
+	*(.data..cacheline_aligned)
 
 #define INIT_TASK_DATA(align)						\
 	. = ALIGN(align);						\
diff --git a/include/linux/cache.h b/include/linux/cache.h
index 97e24881c4c..4c570653ab8 100644
--- a/include/linux/cache.h
+++ b/include/linux/cache.h
@@ -31,7 +31,7 @@
 #ifndef __cacheline_aligned
 #define __cacheline_aligned					\
   __attribute__((__aligned__(SMP_CACHE_BYTES),			\
-		 __section__(".data.cacheline_aligned")))
+		 __section__(".data..cacheline_aligned")))
 #endif /* __cacheline_aligned */
 
 #ifndef __cacheline_aligned_in_smp
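
For context (not part of the commit): below is a minimal sketch of how the
__cacheline_aligned attribute from include/linux/cache.h is typically used,
assuming an SMP kernel build. With this patch the marked object is emitted
into .data..cacheline_aligned instead of .data.cacheline_aligned, keeping the
name out of the .data.<symbol> namespace that gcc's -fdata-sections would use,
while the linker still collects it via CACHELINE_ALIGNED_DATA() in the
vmlinux.lds.h macro above. The struct name and fields are hypothetical.

#include <linux/cache.h>	/* __cacheline_aligned, SMP_CACHE_BYTES */

/*
 * Hypothetical example: a frequently written, CPU-shared stats block.
 * On SMP, __cacheline_aligned expands to
 *   __attribute__((__aligned__(SMP_CACHE_BYTES),
 *                  __section__(".data..cacheline_aligned")))
 * so the object starts on a cache-line boundary and is grouped with the
 * other cacheline-aligned objects gathered by *(.data..cacheline_aligned)
 * in the linker script, avoiding false sharing with unrelated data.
 */
static struct demo_stats {
	unsigned long hits;
	unsigned long misses;
} demo_stats __cacheline_aligned;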