Diffstat (limited to 'arch/ia64/kernel')
-rw-r--r--  arch/ia64/kernel/Makefile               |   1
-rw-r--r--  arch/ia64/kernel/asm-offsets.c          |   2
-rw-r--r--  arch/ia64/kernel/cpufreq/Kconfig        |  29
-rw-r--r--  arch/ia64/kernel/cpufreq/Makefile       |   2
-rw-r--r--  arch/ia64/kernel/cpufreq/acpi-cpufreq.c | 437
-rw-r--r--  arch/ia64/kernel/entry.S                |  16
-rw-r--r--  arch/ia64/kernel/fsys.S                 |  53
-rw-r--r--  arch/ia64/kernel/head.S                 |   4
-rw-r--r--  arch/ia64/kernel/iosapic.c              |  34
-rw-r--r--  arch/ia64/kernel/irq.c                  |   8
-rw-r--r--  arch/ia64/kernel/ivt.S                  |   8
-rw-r--r--  arch/ia64/kernel/kprobes.c              |   8
-rw-r--r--  arch/ia64/kernel/mca.c                  |  37
-rw-r--r--  arch/ia64/kernel/mca_drv.c              |   2
-rw-r--r--  arch/ia64/kernel/minstate.h             |   2
-rw-r--r--  arch/ia64/kernel/palinfo.c              | 561
-rw-r--r--  arch/ia64/kernel/perfmon.c              |  19
-rw-r--r--  arch/ia64/kernel/process.c              |  96
-rw-r--r--  arch/ia64/kernel/salinfo.c              |  57
-rw-r--r--  arch/ia64/kernel/setup.c                |   2
-rw-r--r--  arch/ia64/kernel/signal.c               |  19
-rw-r--r--  arch/ia64/kernel/smpboot.c              |   2
-rw-r--r--  arch/ia64/kernel/sys_ia64.c             |  35
-rw-r--r--  arch/ia64/kernel/time.c                 |   5
-rw-r--r--  arch/ia64/kernel/traps.c                |   2
25 files changed, 402 insertions(+), 1039 deletions(-)
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile index d959c84904b..20678a9ed11 100644 --- a/arch/ia64/kernel/Makefile +++ b/arch/ia64/kernel/Makefile @@ -23,7 +23,6 @@ obj-$(CONFIG_SMP)		+= smp.o smpboot.o  obj-$(CONFIG_NUMA)		+= numa.o  obj-$(CONFIG_PERFMON)		+= perfmon_default_smpl.o  obj-$(CONFIG_IA64_CYCLONE)	+= cyclone.o -obj-$(CONFIG_CPU_FREQ)		+= cpufreq/  obj-$(CONFIG_IA64_MCA_RECOVERY)	+= mca_recovery.o  obj-$(CONFIG_KPROBES)		+= kprobes.o jprobes.o  obj-$(CONFIG_DYNAMIC_FTRACE)	+= ftrace.o diff --git a/arch/ia64/kernel/asm-offsets.c b/arch/ia64/kernel/asm-offsets.c index a48bd9a9927..46c9e300731 100644 --- a/arch/ia64/kernel/asm-offsets.c +++ b/arch/ia64/kernel/asm-offsets.c @@ -41,7 +41,7 @@ void foo(void)  	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));  	DEFINE(TI_CPU, offsetof(struct thread_info, cpu));  	DEFINE(TI_PRE_COUNT, offsetof(struct thread_info, preempt_count)); -#ifdef CONFIG_VIRT_CPU_ACCOUNTING +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE  	DEFINE(TI_AC_STAMP, offsetof(struct thread_info, ac_stamp));  	DEFINE(TI_AC_LEAVE, offsetof(struct thread_info, ac_leave));  	DEFINE(TI_AC_STIME, offsetof(struct thread_info, ac_stime)); diff --git a/arch/ia64/kernel/cpufreq/Kconfig b/arch/ia64/kernel/cpufreq/Kconfig deleted file mode 100644 index 2d9d5279b98..00000000000 --- a/arch/ia64/kernel/cpufreq/Kconfig +++ /dev/null @@ -1,29 +0,0 @@ - -# -# CPU Frequency scaling -# - -menu "CPU Frequency scaling" - -source "drivers/cpufreq/Kconfig" - -if CPU_FREQ - -comment "CPUFreq processor drivers" - -config IA64_ACPI_CPUFREQ -	tristate "ACPI Processor P-States driver" -	select CPU_FREQ_TABLE -	depends on ACPI_PROCESSOR -	help -	This driver adds a CPUFreq driver which utilizes the ACPI -	Processor Performance States. - -	For details, take a look at <file:Documentation/cpu-freq/>. - -	If in doubt, say N. - -endif   # CPU_FREQ - -endmenu - diff --git a/arch/ia64/kernel/cpufreq/Makefile b/arch/ia64/kernel/cpufreq/Makefile deleted file mode 100644 index 4838f2a57c7..00000000000 --- a/arch/ia64/kernel/cpufreq/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -obj-$(CONFIG_IA64_ACPI_CPUFREQ)		+= acpi-cpufreq.o - diff --git a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c deleted file mode 100644 index f09b174244d..00000000000 --- a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c +++ /dev/null @@ -1,437 +0,0 @@ -/* - * arch/ia64/kernel/cpufreq/acpi-cpufreq.c - * This file provides the ACPI based P-state support. This - * module works with generic cpufreq infrastructure. 
Most of - * the code is based on i386 version - * (arch/i386/kernel/cpu/cpufreq/acpi-cpufreq.c) - * - * Copyright (C) 2005 Intel Corp - *      Venkatesh Pallipadi <venkatesh.pallipadi@intel.com> - */ - -#include <linux/kernel.h> -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/init.h> -#include <linux/cpufreq.h> -#include <linux/proc_fs.h> -#include <linux/seq_file.h> -#include <asm/io.h> -#include <asm/uaccess.h> -#include <asm/pal.h> - -#include <linux/acpi.h> -#include <acpi/processor.h> - -MODULE_AUTHOR("Venkatesh Pallipadi"); -MODULE_DESCRIPTION("ACPI Processor P-States Driver"); -MODULE_LICENSE("GPL"); - - -struct cpufreq_acpi_io { -	struct acpi_processor_performance	acpi_data; -	struct cpufreq_frequency_table		*freq_table; -	unsigned int				resume; -}; - -static struct cpufreq_acpi_io	*acpi_io_data[NR_CPUS]; - -static struct cpufreq_driver acpi_cpufreq_driver; - - -static int -processor_set_pstate ( -	u32	value) -{ -	s64 retval; - -	pr_debug("processor_set_pstate\n"); - -	retval = ia64_pal_set_pstate((u64)value); - -	if (retval) { -		pr_debug("Failed to set freq to 0x%x, with error 0x%lx\n", -		        value, retval); -		return -ENODEV; -	} -	return (int)retval; -} - - -static int -processor_get_pstate ( -	u32	*value) -{ -	u64	pstate_index = 0; -	s64 	retval; - -	pr_debug("processor_get_pstate\n"); - -	retval = ia64_pal_get_pstate(&pstate_index, -	                             PAL_GET_PSTATE_TYPE_INSTANT); -	*value = (u32) pstate_index; - -	if (retval) -		pr_debug("Failed to get current freq with " -			"error 0x%lx, idx 0x%x\n", retval, *value); - -	return (int)retval; -} - - -/* To be used only after data->acpi_data is initialized */ -static unsigned -extract_clock ( -	struct cpufreq_acpi_io *data, -	unsigned value, -	unsigned int cpu) -{ -	unsigned long i; - -	pr_debug("extract_clock\n"); - -	for (i = 0; i < data->acpi_data.state_count; i++) { -		if (value == data->acpi_data.states[i].status) -			return data->acpi_data.states[i].core_frequency; -	} -	return data->acpi_data.states[i-1].core_frequency; -} - - -static unsigned int -processor_get_freq ( -	struct cpufreq_acpi_io	*data, -	unsigned int		cpu) -{ -	int			ret = 0; -	u32			value = 0; -	cpumask_t		saved_mask; -	unsigned long 		clock_freq; - -	pr_debug("processor_get_freq\n"); - -	saved_mask = current->cpus_allowed; -	set_cpus_allowed_ptr(current, cpumask_of(cpu)); -	if (smp_processor_id() != cpu) -		goto migrate_end; - -	/* processor_get_pstate gets the instantaneous frequency */ -	ret = processor_get_pstate(&value); - -	if (ret) { -		set_cpus_allowed_ptr(current, &saved_mask); -		printk(KERN_WARNING "get performance failed with error %d\n", -		       ret); -		ret = 0; -		goto migrate_end; -	} -	clock_freq = extract_clock(data, value, cpu); -	ret = (clock_freq*1000); - -migrate_end: -	set_cpus_allowed_ptr(current, &saved_mask); -	return ret; -} - - -static int -processor_set_freq ( -	struct cpufreq_acpi_io	*data, -	unsigned int		cpu, -	int			state) -{ -	int			ret = 0; -	u32			value = 0; -	struct cpufreq_freqs    cpufreq_freqs; -	cpumask_t		saved_mask; -	int			retval; - -	pr_debug("processor_set_freq\n"); - -	saved_mask = current->cpus_allowed; -	set_cpus_allowed_ptr(current, cpumask_of(cpu)); -	if (smp_processor_id() != cpu) { -		retval = -EAGAIN; -		goto migrate_end; -	} - -	if (state == data->acpi_data.state) { -		if (unlikely(data->resume)) { -			pr_debug("Called after resume, resetting to P%d\n", state); -			data->resume = 0; -		} else { -			pr_debug("Already at target state (P%d)\n", state); -			retval 
= 0; -			goto migrate_end; -		} -	} - -	pr_debug("Transitioning from P%d to P%d\n", -		data->acpi_data.state, state); - -	/* cpufreq frequency struct */ -	cpufreq_freqs.cpu = cpu; -	cpufreq_freqs.old = data->freq_table[data->acpi_data.state].frequency; -	cpufreq_freqs.new = data->freq_table[state].frequency; - -	/* notify cpufreq */ -	cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE); - -	/* -	 * First we write the target state's 'control' value to the -	 * control_register. -	 */ - -	value = (u32) data->acpi_data.states[state].control; - -	pr_debug("Transitioning to state: 0x%08x\n", value); - -	ret = processor_set_pstate(value); -	if (ret) { -		unsigned int tmp = cpufreq_freqs.new; -		cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE); -		cpufreq_freqs.new = cpufreq_freqs.old; -		cpufreq_freqs.old = tmp; -		cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_PRECHANGE); -		cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE); -		printk(KERN_WARNING "Transition failed with error %d\n", ret); -		retval = -ENODEV; -		goto migrate_end; -	} - -	cpufreq_notify_transition(&cpufreq_freqs, CPUFREQ_POSTCHANGE); - -	data->acpi_data.state = state; - -	retval = 0; - -migrate_end: -	set_cpus_allowed_ptr(current, &saved_mask); -	return (retval); -} - - -static unsigned int -acpi_cpufreq_get ( -	unsigned int		cpu) -{ -	struct cpufreq_acpi_io *data = acpi_io_data[cpu]; - -	pr_debug("acpi_cpufreq_get\n"); - -	return processor_get_freq(data, cpu); -} - - -static int -acpi_cpufreq_target ( -	struct cpufreq_policy   *policy, -	unsigned int target_freq, -	unsigned int relation) -{ -	struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; -	unsigned int next_state = 0; -	unsigned int result = 0; - -	pr_debug("acpi_cpufreq_setpolicy\n"); - -	result = cpufreq_frequency_table_target(policy, -			data->freq_table, target_freq, relation, &next_state); -	if (result) -		return (result); - -	result = processor_set_freq(data, policy->cpu, next_state); - -	return (result); -} - - -static int -acpi_cpufreq_verify ( -	struct cpufreq_policy   *policy) -{ -	unsigned int result = 0; -	struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; - -	pr_debug("acpi_cpufreq_verify\n"); - -	result = cpufreq_frequency_table_verify(policy, -			data->freq_table); - -	return (result); -} - - -static int -acpi_cpufreq_cpu_init ( -	struct cpufreq_policy   *policy) -{ -	unsigned int		i; -	unsigned int		cpu = policy->cpu; -	struct cpufreq_acpi_io	*data; -	unsigned int		result = 0; - -	pr_debug("acpi_cpufreq_cpu_init\n"); - -	data = kzalloc(sizeof(struct cpufreq_acpi_io), GFP_KERNEL); -	if (!data) -		return (-ENOMEM); - -	acpi_io_data[cpu] = data; - -	result = acpi_processor_register_performance(&data->acpi_data, cpu); - -	if (result) -		goto err_free; - -	/* capability check */ -	if (data->acpi_data.state_count <= 1) { -		pr_debug("No P-States\n"); -		result = -ENODEV; -		goto err_unreg; -	} - -	if ((data->acpi_data.control_register.space_id != -					ACPI_ADR_SPACE_FIXED_HARDWARE) || -	    (data->acpi_data.status_register.space_id != -					ACPI_ADR_SPACE_FIXED_HARDWARE)) { -		pr_debug("Unsupported address space [%d, %d]\n", -			(u32) (data->acpi_data.control_register.space_id), -			(u32) (data->acpi_data.status_register.space_id)); -		result = -ENODEV; -		goto err_unreg; -	} - -	/* alloc freq_table */ -	data->freq_table = kmalloc(sizeof(struct cpufreq_frequency_table) * -	                           (data->acpi_data.state_count + 1), -	                           GFP_KERNEL); -	if (!data->freq_table) { -		result 
= -ENOMEM; -		goto err_unreg; -	} - -	/* detect transition latency */ -	policy->cpuinfo.transition_latency = 0; -	for (i=0; i<data->acpi_data.state_count; i++) { -		if ((data->acpi_data.states[i].transition_latency * 1000) > -		    policy->cpuinfo.transition_latency) { -			policy->cpuinfo.transition_latency = -			    data->acpi_data.states[i].transition_latency * 1000; -		} -	} -	policy->cur = processor_get_freq(data, policy->cpu); - -	/* table init */ -	for (i = 0; i <= data->acpi_data.state_count; i++) -	{ -		data->freq_table[i].index = i; -		if (i < data->acpi_data.state_count) { -			data->freq_table[i].frequency = -			      data->acpi_data.states[i].core_frequency * 1000; -		} else { -			data->freq_table[i].frequency = CPUFREQ_TABLE_END; -		} -	} - -	result = cpufreq_frequency_table_cpuinfo(policy, data->freq_table); -	if (result) { -		goto err_freqfree; -	} - -	/* notify BIOS that we exist */ -	acpi_processor_notify_smm(THIS_MODULE); - -	printk(KERN_INFO "acpi-cpufreq: CPU%u - ACPI performance management " -	       "activated.\n", cpu); - -	for (i = 0; i < data->acpi_data.state_count; i++) -		pr_debug("     %cP%d: %d MHz, %d mW, %d uS, %d uS, 0x%x 0x%x\n", -			(i == data->acpi_data.state?'*':' '), i, -			(u32) data->acpi_data.states[i].core_frequency, -			(u32) data->acpi_data.states[i].power, -			(u32) data->acpi_data.states[i].transition_latency, -			(u32) data->acpi_data.states[i].bus_master_latency, -			(u32) data->acpi_data.states[i].status, -			(u32) data->acpi_data.states[i].control); - -	cpufreq_frequency_table_get_attr(data->freq_table, policy->cpu); - -	/* the first call to ->target() should result in us actually -	 * writing something to the appropriate registers. */ -	data->resume = 1; - -	return (result); - - err_freqfree: -	kfree(data->freq_table); - err_unreg: -	acpi_processor_unregister_performance(&data->acpi_data, cpu); - err_free: -	kfree(data); -	acpi_io_data[cpu] = NULL; - -	return (result); -} - - -static int -acpi_cpufreq_cpu_exit ( -	struct cpufreq_policy   *policy) -{ -	struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; - -	pr_debug("acpi_cpufreq_cpu_exit\n"); - -	if (data) { -		cpufreq_frequency_table_put_attr(policy->cpu); -		acpi_io_data[policy->cpu] = NULL; -		acpi_processor_unregister_performance(&data->acpi_data, -		                                      policy->cpu); -		kfree(data); -	} - -	return (0); -} - - -static struct freq_attr* acpi_cpufreq_attr[] = { -	&cpufreq_freq_attr_scaling_available_freqs, -	NULL, -}; - - -static struct cpufreq_driver acpi_cpufreq_driver = { -	.verify 	= acpi_cpufreq_verify, -	.target 	= acpi_cpufreq_target, -	.get 		= acpi_cpufreq_get, -	.init		= acpi_cpufreq_cpu_init, -	.exit		= acpi_cpufreq_cpu_exit, -	.name		= "acpi-cpufreq", -	.owner		= THIS_MODULE, -	.attr           = acpi_cpufreq_attr, -}; - - -static int __init -acpi_cpufreq_init (void) -{ -	pr_debug("acpi_cpufreq_init\n"); - - 	return cpufreq_register_driver(&acpi_cpufreq_driver); -} - - -static void __exit -acpi_cpufreq_exit (void) -{ -	pr_debug("acpi_cpufreq_exit\n"); - -	cpufreq_unregister_driver(&acpi_cpufreq_driver); -	return; -} - - -late_initcall(acpi_cpufreq_init); -module_exit(acpi_cpufreq_exit); - diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S index 6bfd8429ee0..7a53530f22c 100644 --- a/arch/ia64/kernel/entry.S +++ b/arch/ia64/kernel/entry.S @@ -724,7 +724,7 @@ GLOBAL_ENTRY(__paravirt_leave_syscall)  #endif  .global __paravirt_work_processed_syscall;  __paravirt_work_processed_syscall: -#ifdef CONFIG_VIRT_CPU_ACCOUNTING +#ifdef 
CONFIG_VIRT_CPU_ACCOUNTING_NATIVE  	adds r2=PT(LOADRS)+16,r12  	MOV_FROM_ITC(pUStk, p9, r22, r19)	// fetch time at leave  	adds r18=TI_FLAGS+IA64_TASK_SIZE,r13 @@ -762,7 +762,7 @@ __paravirt_work_processed_syscall:  	ld8 r29=[r2],16		// M0|1 load cr.ipsr  	ld8 r28=[r3],16		// M0|1 load cr.iip -#ifdef CONFIG_VIRT_CPU_ACCOUNTING +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE  (pUStk) add r14=TI_AC_LEAVE+IA64_TASK_SIZE,r13  	;;  	ld8 r30=[r2],16		// M0|1 load cr.ifs @@ -793,7 +793,7 @@ __paravirt_work_processed_syscall:  	ld8.fill r1=[r3],16			// M0|1 load r1  (pUStk) mov r17=1				// A  	;; -#ifdef CONFIG_VIRT_CPU_ACCOUNTING +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE  (pUStk) st1 [r15]=r17				// M2|3  #else  (pUStk) st1 [r14]=r17				// M2|3 @@ -813,7 +813,7 @@ __paravirt_work_processed_syscall:  	shr.u r18=r19,16		// I0|1 get byte size of existing "dirty" partition  	COVER				// B    add current frame into dirty partition & set cr.ifs  	;; -#ifdef CONFIG_VIRT_CPU_ACCOUNTING +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE  	mov r19=ar.bsp			// M2   get new backing store pointer  	st8 [r14]=r22			// M	save time at leave  	mov f10=f0			// F    clear f10 @@ -948,7 +948,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel)  	adds r16=PT(CR_IPSR)+16,r12  	adds r17=PT(CR_IIP)+16,r12 -#ifdef CONFIG_VIRT_CPU_ACCOUNTING +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE  	.pred.rel.mutex pUStk,pKStk  	MOV_FROM_PSR(pKStk, r22, r29)	// M2 read PSR now that interrupts are disabled  	MOV_FROM_ITC(pUStk, p9, r22, r29)	// M  fetch time at leave @@ -981,7 +981,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel)  	;;  	ld8.fill r12=[r16],16  	ld8.fill r13=[r17],16 -#ifdef CONFIG_VIRT_CPU_ACCOUNTING +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE  (pUStk)	adds r3=TI_AC_LEAVE+IA64_TASK_SIZE,r18  #else  (pUStk)	adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18 @@ -989,7 +989,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel)  	;;  	ld8 r20=[r16],16	// ar.fpsr  	ld8.fill r15=[r17],16 -#ifdef CONFIG_VIRT_CPU_ACCOUNTING +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE  (pUStk)	adds r18=IA64_TASK_THREAD_ON_USTACK_OFFSET,r18	// deferred  #endif  	;; @@ -997,7 +997,7 @@ GLOBAL_ENTRY(__paravirt_leave_kernel)  	ld8.fill r2=[r17]  (pUStk)	mov r17=1  	;; -#ifdef CONFIG_VIRT_CPU_ACCOUNTING +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE  	//  mmi_ :  ld8 st1 shr;;         mmi_ : st8 st1 shr;;  	//  mib  :  mov add br        ->  mib  : ld8 add br  	//  bbb_ :  br  nop cover;;       mbb_ : mov br  cover;; diff --git a/arch/ia64/kernel/fsys.S b/arch/ia64/kernel/fsys.S index e662f178b99..abc6dee3799 100644 --- a/arch/ia64/kernel/fsys.S +++ b/arch/ia64/kernel/fsys.S @@ -90,53 +90,6 @@ ENTRY(fsys_getpid)  	FSYS_RETURN  END(fsys_getpid) -ENTRY(fsys_getppid) -	.prologue -	.altrp b6 -	.body -	add r17=IA64_TASK_GROUP_LEADER_OFFSET,r16 -	;; -	ld8 r17=[r17]				// r17 = current->group_leader -	add r9=TI_FLAGS+IA64_TASK_SIZE,r16 -	;; - -	ld4 r9=[r9] -	add r17=IA64_TASK_REAL_PARENT_OFFSET,r17 // r17 = ¤t->group_leader->real_parent -	;; -	and r9=TIF_ALLWORK_MASK,r9 - -1:	ld8 r18=[r17]				// r18 = current->group_leader->real_parent -	;; -	cmp.ne p8,p0=0,r9 -	add r8=IA64_TASK_TGID_OFFSET,r18	// r8 = ¤t->group_leader->real_parent->tgid -	;; - -	/* -	 * The .acq is needed to ensure that the read of tgid has returned its data before -	 * we re-check "real_parent". -	 */ -	ld4.acq r8=[r8]				// r8 = current->group_leader->real_parent->tgid -#ifdef CONFIG_SMP -	/* -	 * Re-read current->group_leader->real_parent. 
-	 */ -	ld8 r19=[r17]				// r19 = current->group_leader->real_parent -(p8)	br.spnt.many fsys_fallback_syscall -	;; -	cmp.ne p6,p0=r18,r19			// did real_parent change? -	mov r19=0			// i must not leak kernel bits... -(p6)	br.cond.spnt.few 1b			// yes -> redo the read of tgid and the check -	;; -	mov r17=0			// i must not leak kernel bits... -	mov r18=0			// i must not leak kernel bits... -#else -	mov r17=0			// i must not leak kernel bits... -	mov r18=0			// i must not leak kernel bits... -	mov r19=0			// i must not leak kernel bits... -#endif -	FSYS_RETURN -END(fsys_getppid) -  ENTRY(fsys_set_tid_address)  	.prologue  	.altrp b6 @@ -529,7 +482,7 @@ GLOBAL_ENTRY(paravirt_fsys_bubble_down)  	nop.i 0  	;;  	mov ar.rsc=0				// M2   set enforced lazy mode, pl 0, LE, loadrs=0 -#ifdef CONFIG_VIRT_CPU_ACCOUNTING +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE  	MOV_FROM_ITC(p0, p6, r30, r23)		// M    get cycle for accounting  #else  	nop.m 0 @@ -555,7 +508,7 @@ GLOBAL_ENTRY(paravirt_fsys_bubble_down)  	cmp.ne pKStk,pUStk=r0,r0		// A    set pKStk <- 0, pUStk <- 1  	br.call.sptk.many b7=ia64_syscall_setup	// B  	;; -#ifdef CONFIG_VIRT_CPU_ACCOUNTING +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE  	// mov.m r30=ar.itc is called in advance  	add r16=TI_AC_STAMP+IA64_TASK_SIZE,r2  	add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r2 @@ -614,7 +567,7 @@ paravirt_fsyscall_table:  	data8 0				// chown  	data8 0				// lseek		// 1040  	data8 fsys_getpid		// getpid -	data8 fsys_getppid		// getppid +	data8 0				// getppid  	data8 0				// mount  	data8 0				// umount  	data8 0				// setuid		// 1045 diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S index 4738ff7bd66..9be4e497f3d 100644 --- a/arch/ia64/kernel/head.S +++ b/arch/ia64/kernel/head.S @@ -1073,7 +1073,7 @@ END(ia64_native_sched_clock)  sched_clock = ia64_native_sched_clock  #endif -#ifdef CONFIG_VIRT_CPU_ACCOUNTING +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE  GLOBAL_ENTRY(cycle_to_cputime)  	alloc r16=ar.pfs,1,0,0,0  	addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0 @@ -1091,7 +1091,7 @@ GLOBAL_ENTRY(cycle_to_cputime)  	shrp r8=r9,r8,IA64_NSEC_PER_CYC_SHIFT  	br.ret.sptk.many rp  END(cycle_to_cputime) -#endif /* CONFIG_VIRT_CPU_ACCOUNTING */ +#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */  #ifdef CONFIG_IA64_BRL_EMU diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c index ee33c3aaa2f..19f107be734 100644 --- a/arch/ia64/kernel/iosapic.c +++ b/arch/ia64/kernel/iosapic.c @@ -76,7 +76,7 @@   *	PCI pin -> global system interrupt (GSI) -> IA-64 vector <-> IRQ   *   * Note: The term "IRQ" is loosely used everywhere in Linux kernel to - * describeinterrupts.  Now we use "IRQ" only for Linux IRQ's.  ISA IRQ + * describe interrupts.  Now we use "IRQ" only for Linux IRQ's.  ISA IRQ   * (isa_irq) is the only exception in this source code.   
*/ @@ -1010,6 +1010,26 @@ iosapic_check_gsi_range (unsigned int gsi_base, unsigned int ver)  	return 0;  } +static int +iosapic_delete_rte(unsigned int irq, unsigned int gsi) +{ +	struct iosapic_rte_info *rte, *temp; + +	list_for_each_entry_safe(rte, temp, &iosapic_intr_info[irq].rtes, +								rte_list) { +		if (rte->iosapic->gsi_base + rte->rte_index == gsi) { +			if (rte->refcnt) +				return -EBUSY; + +			list_del(&rte->rte_list); +			kfree(rte); +			return 0; +		} +	} + +	return -EINVAL; +} +  int iosapic_init(unsigned long phys_addr, unsigned int gsi_base)  {  	int num_rte, err, index; @@ -1069,7 +1089,7 @@ int iosapic_init(unsigned long phys_addr, unsigned int gsi_base)  int iosapic_remove(unsigned int gsi_base)  { -	int index, err = 0; +	int i, irq, index, err = 0;  	unsigned long flags;  	spin_lock_irqsave(&iosapic_lock, flags); @@ -1087,6 +1107,16 @@ int iosapic_remove(unsigned int gsi_base)  		goto out;  	} +	for (i = gsi_base; i < gsi_base + iosapic_lists[index].num_rte; i++) { +		irq = __gsi_to_irq(i); +		if (irq < 0) +			continue; + +		err = iosapic_delete_rte(irq, i); +		if (err) +			goto out; +	} +  	iounmap(iosapic_lists[index].addr);  	iosapic_free(index);   out: diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c index ad69606613e..f2c41828113 100644 --- a/arch/ia64/kernel/irq.c +++ b/arch/ia64/kernel/irq.c @@ -23,6 +23,8 @@  #include <linux/interrupt.h>  #include <linux/kernel_stat.h> +#include <asm/mca.h> +  /*   * 'what should we do if we get a hw irq event on an illegal vector'.   * each architecture has to answer this themselves. @@ -83,6 +85,12 @@ bool is_affinity_mask_valid(const struct cpumask *cpumask)  #endif /* CONFIG_SMP */ +int __init arch_early_irq_init(void) +{ +	ia64_mca_irq_init(); +	return 0; +} +  #ifdef CONFIG_HOTPLUG_CPU  unsigned int vectors_in_migration[NR_IRQS]; diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S index fa25689fc45..689ffcaa284 100644 --- a/arch/ia64/kernel/ivt.S +++ b/arch/ia64/kernel/ivt.S @@ -784,7 +784,7 @@ ENTRY(break_fault)  (p8)	adds r28=16,r28				// A    switch cr.iip to next bundle  (p9)	adds r8=1,r8				// A    increment ei to next slot -#ifdef CONFIG_VIRT_CPU_ACCOUNTING +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE  	;;  	mov b6=r30				// I0   setup syscall handler branch reg early  #else @@ -801,7 +801,7 @@ ENTRY(break_fault)  	//  ///////////////////////////////////////////////////////////////////////  	st1 [r16]=r0				// M2|3 clear current->thread.on_ustack flag -#ifdef CONFIG_VIRT_CPU_ACCOUNTING +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE  	MOV_FROM_ITC(p0, p14, r30, r18)		// M    get cycle for accounting  #else  	mov b6=r30				// I0   setup syscall handler branch reg early @@ -817,7 +817,7 @@ ENTRY(break_fault)  	cmp.eq p14,p0=r9,r0			// A    are syscalls being traced/audited?  	br.call.sptk.many b7=ia64_syscall_setup	// B  1: -#ifdef CONFIG_VIRT_CPU_ACCOUNTING +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE  	// mov.m r30=ar.itc is called in advance, and r13 is current  	add r16=TI_AC_STAMP+IA64_TASK_SIZE,r13	// A  	add r17=TI_AC_LEAVE+IA64_TASK_SIZE,r13	// A @@ -1043,7 +1043,7 @@ END(ia64_syscall_setup)  	DBG_FAULT(16)  	FAULT(16) -#if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(__IA64_ASM_PARAVIRTUALIZED_NATIVE) +#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(__IA64_ASM_PARAVIRTUALIZED_NATIVE)  	/*  	 * There is no particular reason for this code to be here, other than  	 * that there happens to be space here that would go unused otherwise. 
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c index 7026b29e277..f8280a766a7 100644 --- a/arch/ia64/kernel/kprobes.c +++ b/arch/ia64/kernel/kprobes.c @@ -423,7 +423,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)  {  	struct kretprobe_instance *ri = NULL;  	struct hlist_head *head, empty_rp; -	struct hlist_node *node, *tmp; +	struct hlist_node *tmp;  	unsigned long flags, orig_ret_address = 0;  	unsigned long trampoline_address =  		((struct fnptr *)kretprobe_trampoline)->ip; @@ -444,7 +444,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)  	 *       real return address, and all the rest will point to  	 *       kretprobe_trampoline  	 */ -	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { +	hlist_for_each_entry_safe(ri, tmp, head, hlist) {  		if (ri->task != current)  			/* another task is sharing our hash bucket */  			continue; @@ -461,7 +461,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)  	regs->cr_iip = orig_ret_address; -	hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { +	hlist_for_each_entry_safe(ri, tmp, head, hlist) {  		if (ri->task != current)  			/* another task is sharing our hash bucket */  			continue; @@ -487,7 +487,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)  	kretprobe_hash_unlock(current, &flags);  	preempt_enable_no_resched(); -	hlist_for_each_entry_safe(ri, node, tmp, &empty_rp, hlist) { +	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {  		hlist_del(&ri->hlist);  		kfree(ri);  	} diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 65bf9cd3904..d7396dbb07b 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -2074,22 +2074,16 @@ ia64_mca_init(void)  	printk(KERN_INFO "MCA related initialization done\n");  } +  /* - * ia64_mca_late_init - * - *	Opportunity to setup things that require initialization later - *	than ia64_mca_init.  Setup a timer to poll for CPEs if the - *	platform doesn't support an interrupt driven mechanism. - * - *  Inputs  :   None - *  Outputs :   Status + * These pieces cannot be done in ia64_mca_init() because it is called before + * early_irq_init() which would wipe out our percpu irq registrations. But we + * cannot leave them until ia64_mca_late_init() because by then all the other + * processors have been brought online and have set their own CMC vectors to + * point at a non-existant action. Called from arch_early_irq_init().   */ -static int __init -ia64_mca_late_init(void) +void __init ia64_mca_irq_init(void)  { -	if (!mca_init) -		return 0; -  	/*  	 *  Configure the CMCI/P vector and handler. Interrupts for CMC are  	 *  per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c). @@ -2108,6 +2102,23 @@ ia64_mca_late_init(void)  	/* Setup the CPEI/P handler */  	register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction);  #endif +} + +/* + * ia64_mca_late_init + * + *	Opportunity to setup things that require initialization later + *	than ia64_mca_init.  Setup a timer to poll for CPEs if the + *	platform doesn't support an interrupt driven mechanism. 
+ * + *  Inputs  :   None + *  Outputs :   Status + */ +static int __init +ia64_mca_late_init(void) +{ +	if (!mca_init) +		return 0;  	register_hotcpu_notifier(&mca_cpu_notifier); diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c index 9392e021c93..94f8bf777af 100644 --- a/arch/ia64/kernel/mca_drv.c +++ b/arch/ia64/kernel/mca_drv.c @@ -349,7 +349,7 @@ init_record_index_pools(void)  	/* - 3 - */  	slidx_pool.max_idx = (rec_max_size/sect_min_size) * 2 + 1; -	slidx_pool.buffer = (slidx_list_t *) +	slidx_pool.buffer =  		kmalloc(slidx_pool.max_idx * sizeof(slidx_list_t), GFP_KERNEL);  	return slidx_pool.buffer ? 0 : -ENOMEM; diff --git a/arch/ia64/kernel/minstate.h b/arch/ia64/kernel/minstate.h index d56753a1163..cc82a7d744c 100644 --- a/arch/ia64/kernel/minstate.h +++ b/arch/ia64/kernel/minstate.h @@ -4,7 +4,7 @@  #include "entry.h"  #include "paravirt_inst.h" -#ifdef CONFIG_VIRT_CPU_ACCOUNTING +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE  /* read ar.itc in advance, and use it before leaving bank 0 */  #define ACCOUNT_GET_STAMP				\  (pUStk) mov.m r20=ar.itc; diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c index 77597e5ea60..2b3c2d79256 100644 --- a/arch/ia64/kernel/palinfo.c +++ b/arch/ia64/kernel/palinfo.c @@ -22,6 +22,7 @@  #include <linux/errno.h>  #include <linux/init.h>  #include <linux/proc_fs.h> +#include <linux/seq_file.h>  #include <linux/mm.h>  #include <linux/module.h>  #include <linux/efi.h> @@ -41,7 +42,7 @@ MODULE_LICENSE("GPL");  #define PALINFO_VERSION "0.5" -typedef int (*palinfo_func_t)(char*); +typedef int (*palinfo_func_t)(struct seq_file *);  typedef struct {  	const char		*name;		/* name of the proc entry */ @@ -54,7 +55,7 @@ typedef struct {   *  A bunch of string array to get pretty printing   */ -static char *cache_types[] = { +static const char *cache_types[] = {  	"",			/* not used */  	"Instruction",  	"Data", @@ -122,19 +123,16 @@ static const char *mem_attrib[]={   *	- a pointer to the end of the buffer   *   */ -static char * -bitvector_process(char *p, u64 vector) +static void bitvector_process(struct seq_file *m, u64 vector)  {  	int i,j; -	const char *units[]={ "", "K", "M", "G", "T" }; +	static const char *units[]={ "", "K", "M", "G", "T" };  	for (i=0, j=0; i < 64; i++ , j=i/10) { -		if (vector & 0x1) { -			p += sprintf(p, "%d%s ", 1 << (i-j*10), units[j]); -		} +		if (vector & 0x1) +			seq_printf(m, "%d%s ", 1 << (i-j*10), units[j]);  		vector >>= 1;  	} -	return p;  }  /* @@ -149,8 +147,7 @@ bitvector_process(char *p, u64 vector)   *	- a pointer to the end of the buffer   *   */ -static char * -bitregister_process(char *p, u64 *reg_info, int max) +static void bitregister_process(struct seq_file *m, u64 *reg_info, int max)  {  	int i, begin, skip = 0;  	u64 value = reg_info[0]; @@ -163,9 +160,9 @@ bitregister_process(char *p, u64 *reg_info, int max)  		if ((value & 0x1) == 0 && skip == 0) {  			if (begin  <= i - 2) -				p += sprintf(p, "%d-%d ", begin, i-1); +				seq_printf(m, "%d-%d ", begin, i-1);  			else -				p += sprintf(p, "%d ", i-1); +				seq_printf(m, "%d ", i-1);  			skip  = 1;  			begin = -1;  		} else if ((value & 0x1) && skip == 1) { @@ -176,19 +173,15 @@ bitregister_process(char *p, u64 *reg_info, int max)  	}  	if (begin > -1) {  		if (begin < 127) -			p += sprintf(p, "%d-127", begin); +			seq_printf(m, "%d-127", begin);  		else -			p += sprintf(p, "127"); +			seq_puts(m, "127");  	} - -	return p;  } -static int -power_info(char *page) +static int power_info(struct seq_file *m)  {  	s64 status; -	char *p 
= page;  	u64 halt_info_buffer[8];  	pal_power_mgmt_info_u_t *halt_info =(pal_power_mgmt_info_u_t *)halt_info_buffer;  	int i; @@ -198,26 +191,25 @@ power_info(char *page)  	for (i=0; i < 8 ; i++ ) {  		if (halt_info[i].pal_power_mgmt_info_s.im == 1) { -			p += sprintf(p,	"Power level %d:\n" -				     "\tentry_latency       : %d cycles\n" -				     "\texit_latency        : %d cycles\n" -				     "\tpower consumption   : %d mW\n" -				     "\tCache+TLB coherency : %s\n", i, -				     halt_info[i].pal_power_mgmt_info_s.entry_latency, -				     halt_info[i].pal_power_mgmt_info_s.exit_latency, -				     halt_info[i].pal_power_mgmt_info_s.power_consumption, -				     halt_info[i].pal_power_mgmt_info_s.co ? "Yes" : "No"); +			seq_printf(m, +				   "Power level %d:\n" +				   "\tentry_latency       : %d cycles\n" +				   "\texit_latency        : %d cycles\n" +				   "\tpower consumption   : %d mW\n" +				   "\tCache+TLB coherency : %s\n", i, +				   halt_info[i].pal_power_mgmt_info_s.entry_latency, +				   halt_info[i].pal_power_mgmt_info_s.exit_latency, +				   halt_info[i].pal_power_mgmt_info_s.power_consumption, +				   halt_info[i].pal_power_mgmt_info_s.co ? "Yes" : "No");  		} else { -			p += sprintf(p,"Power level %d: not implemented\n",i); +			seq_printf(m,"Power level %d: not implemented\n", i);  		}  	} -	return p - page; +	return 0;  } -static int -cache_info(char *page) +static int cache_info(struct seq_file *m)  { -	char *p = page;  	unsigned long i, levels, unique_caches;  	pal_cache_config_info_t cci;  	int j, k; @@ -228,73 +220,74 @@ cache_info(char *page)  		return 0;  	} -	p += sprintf(p, "Cache levels  : %ld\nUnique caches : %ld\n\n", levels, unique_caches); +	seq_printf(m, "Cache levels  : %ld\nUnique caches : %ld\n\n", +		   levels, unique_caches);  	for (i=0; i < levels; i++) { -  		for (j=2; j >0 ; j--) { -  			/* even without unification some level may not be present */ -			if ((status=ia64_pal_cache_config_info(i,j, &cci)) != 0) { +			if ((status=ia64_pal_cache_config_info(i,j, &cci)) != 0)  				continue; -			} -			p += sprintf(p, -				     "%s Cache level %lu:\n" -				     "\tSize           : %u bytes\n" -				     "\tAttributes     : ", -				     cache_types[j+cci.pcci_unified], i+1, -				     cci.pcci_cache_size); -			if (cci.pcci_unified) p += sprintf(p, "Unified "); +			seq_printf(m, +				   "%s Cache level %lu:\n" +				   "\tSize           : %u bytes\n" +				   "\tAttributes     : ", +				   cache_types[j+cci.pcci_unified], i+1, +				   cci.pcci_cache_size); + +			if (cci.pcci_unified) +				seq_puts(m, "Unified "); -			p += sprintf(p, "%s\n", cache_mattrib[cci.pcci_cache_attr]); +			seq_printf(m, "%s\n", cache_mattrib[cci.pcci_cache_attr]); -			p += sprintf(p, -				     "\tAssociativity  : %d\n" -				     "\tLine size      : %d bytes\n" -				     "\tStride         : %d bytes\n", -				     cci.pcci_assoc, 1<<cci.pcci_line_size, 1<<cci.pcci_stride); +			seq_printf(m, +				   "\tAssociativity  : %d\n" +				   "\tLine size      : %d bytes\n" +				   "\tStride         : %d bytes\n", +				   cci.pcci_assoc, +				   1<<cci.pcci_line_size, +				   1<<cci.pcci_stride);  			if (j == 1) -				p += sprintf(p, "\tStore latency  : N/A\n"); +				seq_puts(m, "\tStore latency  : N/A\n");  			else -				p += sprintf(p, "\tStore latency  : %d cycle(s)\n", -						cci.pcci_st_latency); +				seq_printf(m, "\tStore latency  : %d cycle(s)\n", +					   cci.pcci_st_latency); -			p += sprintf(p, -				     "\tLoad latency   : %d cycle(s)\n" -				     "\tStore hints    : ", cci.pcci_ld_latency); +		
	seq_printf(m, +				   "\tLoad latency   : %d cycle(s)\n" +				   "\tStore hints    : ", cci.pcci_ld_latency);  			for(k=0; k < 8; k++ ) {  				if ( cci.pcci_st_hints & 0x1) -					p += sprintf(p, "[%s]", cache_st_hints[k]); +					seq_printf(m, "[%s]", cache_st_hints[k]);  				cci.pcci_st_hints >>=1;  			} -			p += sprintf(p, "\n\tLoad hints     : "); +			seq_puts(m, "\n\tLoad hints     : ");  			for(k=0; k < 8; k++ ) {  				if (cci.pcci_ld_hints & 0x1) -					p += sprintf(p, "[%s]", cache_ld_hints[k]); +					seq_printf(m, "[%s]", cache_ld_hints[k]);  				cci.pcci_ld_hints >>=1;  			} -			p += sprintf(p, -				     "\n\tAlias boundary : %d byte(s)\n" -				     "\tTag LSB        : %d\n" -				     "\tTag MSB        : %d\n", -				     1<<cci.pcci_alias_boundary, cci.pcci_tag_lsb, -				     cci.pcci_tag_msb); +			seq_printf(m, +				   "\n\tAlias boundary : %d byte(s)\n" +				   "\tTag LSB        : %d\n" +				   "\tTag MSB        : %d\n", +				   1<<cci.pcci_alias_boundary, cci.pcci_tag_lsb, +				   cci.pcci_tag_msb);  			/* when unified, data(j=2) is enough */ -			if (cci.pcci_unified) break; +			if (cci.pcci_unified) +				break;  		}  	} -	return p - page; +	return 0;  } -static int -vm_info(char *page) +static int vm_info(struct seq_file *m)  { -	char *p = page;  	u64 tr_pages =0, vw_pages=0, tc_pages;  	u64 attrib;  	pal_vm_info_1_u_t vm_info_1; @@ -309,7 +302,7 @@ vm_info(char *page)  		printk(KERN_ERR "ia64_pal_vm_summary=%ld\n", status);  	} else { -		p += sprintf(p, +		seq_printf(m,  		     "Physical Address Space         : %d bits\n"  		     "Virtual Address Space          : %d bits\n"  		     "Protection Key Registers(PKR)  : %d\n" @@ -324,49 +317,49 @@ vm_info(char *page)  		     vm_info_1.pal_vm_info_1_s.hash_tag_id,  		     vm_info_2.pal_vm_info_2_s.rid_size);  		if (vm_info_2.pal_vm_info_2_s.max_purges == PAL_MAX_PURGES) -			p += sprintf(p, "unlimited\n"); +			seq_puts(m, "unlimited\n");  		else -			p += sprintf(p, "%d\n", +			seq_printf(m, "%d\n",  		     		vm_info_2.pal_vm_info_2_s.max_purges ?  				vm_info_2.pal_vm_info_2_s.max_purges : 1);  	}  	if (ia64_pal_mem_attrib(&attrib) == 0) { -		p += sprintf(p, "Supported memory attributes    : "); +		seq_puts(m, "Supported memory attributes    : ");  		sep = "";  		for (i = 0; i < 8; i++) {  			if (attrib & (1 << i)) { -				p += sprintf(p, "%s%s", sep, mem_attrib[i]); +				seq_printf(m, "%s%s", sep, mem_attrib[i]);  				sep = ", ";  			}  		} -		p += sprintf(p, "\n"); +		seq_putc(m, '\n');  	}  	if ((status = ia64_pal_vm_page_size(&tr_pages, &vw_pages)) !=0) {  		printk(KERN_ERR "ia64_pal_vm_page_size=%ld\n", status);  	} else { -		p += sprintf(p, -			     "\nTLB walker                     : %simplemented\n" -			     "Number of DTR                  : %d\n" -			     "Number of ITR                  : %d\n" -			     "TLB insertable page sizes      : ", -			     vm_info_1.pal_vm_info_1_s.vw ? "" : "not ", -			     vm_info_1.pal_vm_info_1_s.max_dtr_entry+1, -			     vm_info_1.pal_vm_info_1_s.max_itr_entry+1); - +		seq_printf(m, +			   "\nTLB walker                     : %simplemented\n" +			   "Number of DTR                  : %d\n" +			   "Number of ITR                  : %d\n" +			   "TLB insertable page sizes      : ", +			   vm_info_1.pal_vm_info_1_s.vw ? 
"" : "not ", +			   vm_info_1.pal_vm_info_1_s.max_dtr_entry+1, +			   vm_info_1.pal_vm_info_1_s.max_itr_entry+1); -		p = bitvector_process(p, tr_pages); +		bitvector_process(m, tr_pages); -		p += sprintf(p, "\nTLB purgeable page sizes       : "); +		seq_puts(m, "\nTLB purgeable page sizes       : "); -		p = bitvector_process(p, vw_pages); +		bitvector_process(m, vw_pages);  	} -	if ((status=ia64_get_ptce(&ptce)) != 0) { + +	if ((status = ia64_get_ptce(&ptce)) != 0) {  		printk(KERN_ERR "ia64_get_ptce=%ld\n", status);  	} else { -		p += sprintf(p, +		seq_printf(m,  		     "\nPurge base address             : 0x%016lx\n"  		     "Purge outer loop count         : %d\n"  		     "Purge inner loop count         : %d\n" @@ -375,7 +368,7 @@ vm_info(char *page)  		     ptce.base, ptce.count[0], ptce.count[1],  		     ptce.stride[0], ptce.stride[1]); -		p += sprintf(p, +		seq_printf(m,  		     "TC Levels                      : %d\n"  		     "Unique TC(s)                   : %d\n",  		     vm_info_1.pal_vm_info_1_s.num_tc_levels, @@ -385,13 +378,11 @@ vm_info(char *page)  			for (j=2; j>0 ; j--) {  				tc_pages = 0; /* just in case */ -  				/* even without unification, some levels may not be present */ -				if ((status=ia64_pal_vm_info(i,j, &tc_info, &tc_pages)) != 0) { +				if ((status=ia64_pal_vm_info(i,j, &tc_info, &tc_pages)) != 0)  					continue; -				} -				p += sprintf(p, +				seq_printf(m,  				     "\n%s Translation Cache Level %d:\n"  				     "\tHash sets           : %d\n"  				     "\tAssociativity       : %d\n" @@ -403,15 +394,15 @@ vm_info(char *page)  				     tc_info.tc_num_entries);  				if (tc_info.tc_pf) -					p += sprintf(p, "PreferredPageSizeOptimized "); +					seq_puts(m, "PreferredPageSizeOptimized ");  				if (tc_info.tc_unified) -					p += sprintf(p, "Unified "); +					seq_puts(m, "Unified ");  				if (tc_info.tc_reduce_tr) -					p += sprintf(p, "TCReduction"); +					seq_puts(m, "TCReduction"); -				p += sprintf(p, "\n\tSupported page sizes: "); +				seq_puts(m, "\n\tSupported page sizes: "); -				p = bitvector_process(p, tc_pages); +				bitvector_process(m, tc_pages);  				/* when unified date (j=2) is enough */  				if (tc_info.tc_unified) @@ -419,16 +410,14 @@ vm_info(char *page)  			}  		}  	} -	p += sprintf(p, "\n"); -	return p - page; +	seq_putc(m, '\n'); +	return 0;  } -static int -register_info(char *page) +static int register_info(struct seq_file *m)  { -	char *p = page;  	u64 reg_info[2];  	u64 info;  	unsigned long phys_stacked; @@ -442,35 +431,31 @@ register_info(char *page)  	};  	for(info=0; info < 4; info++) { - -		if (ia64_pal_register_info(info, ®_info[0], ®_info[1]) != 0) return 0; - -		p += sprintf(p, "%-32s : ", info_type[info]); - -		p = bitregister_process(p, reg_info, 128); - -		p += sprintf(p, "\n"); +		if (ia64_pal_register_info(info, ®_info[0], ®_info[1]) != 0) +			return 0; +		seq_printf(m, "%-32s : ", info_type[info]); +		bitregister_process(m, reg_info, 128); +		seq_putc(m, '\n');  	} -	if (ia64_pal_rse_info(&phys_stacked, &hints) == 0) { +	if (ia64_pal_rse_info(&phys_stacked, &hints) == 0) +		seq_printf(m, +			   "RSE stacked physical registers   : %ld\n" +			   "RSE load/store hints             : %ld (%s)\n", +			   phys_stacked, hints.ph_data, +			   hints.ph_data < RSE_HINTS_COUNT ? rse_hints[hints.ph_data]: "(??)"); -	p += sprintf(p, -		     "RSE stacked physical registers   : %ld\n" -		     "RSE load/store hints             : %ld (%s)\n", -		     phys_stacked, hints.ph_data, -		     hints.ph_data < RSE_HINTS_COUNT ? 
rse_hints[hints.ph_data]: "(??)"); -	}  	if (ia64_pal_debug_info(&iregs, &dregs))  		return 0; -	p += sprintf(p, -		     "Instruction debug register pairs : %ld\n" -		     "Data debug register pairs        : %ld\n", iregs, dregs); +	seq_printf(m, +		   "Instruction debug register pairs : %ld\n" +		   "Data debug register pairs        : %ld\n", iregs, dregs); -	return p - page; +	return 0;  } -static char *proc_features_0[]={		/* Feature set 0 */ +static const char *const proc_features_0[]={		/* Feature set 0 */  	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,  	NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,  	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL, @@ -502,7 +487,7 @@ static char *proc_features_0[]={		/* Feature set 0 */  	"Enable BERR promotion"  }; -static char *proc_features_16[]={		/* Feature set 16 */ +static const char *const proc_features_16[]={		/* Feature set 16 */  	"Disable ETM",  	"Enable ETM",  	"Enable MCA on half-way timer", @@ -522,7 +507,7 @@ static char *proc_features_16[]={		/* Feature set 16 */  	NULL, NULL, NULL, NULL, NULL  }; -static char **proc_features[]={ +static const char *const *const proc_features[]={  	proc_features_0,  	NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,  	NULL, NULL, NULL, NULL, @@ -530,11 +515,10 @@ static char **proc_features[]={  	NULL, NULL, NULL, NULL,  }; -static char * feature_set_info(char *page, u64 avail, u64 status, u64 control, -							unsigned long set) +static void feature_set_info(struct seq_file *m, u64 avail, u64 status, u64 control, +			     unsigned long set)  { -	char *p = page; -	char **vf, **v; +	const char *const *vf, *const *v;  	int i;  	vf = v = proc_features[set]; @@ -547,13 +531,13 @@ static char * feature_set_info(char *page, u64 avail, u64 status, u64 control,  		if (vf)  			v = vf + i;  		if ( v && *v ) { -			p += sprintf(p, "%-40s : %s %s\n", *v, +			seq_printf(m, "%-40s : %s %s\n", *v,  				avail & 0x1 ? (status & 0x1 ? -						"On " : "Off"): "", +					      "On " : "Off"): "",  				avail & 0x1 ? (control & 0x1 ?  						"Ctrl" : "NoCtrl"): "");  		} else { -			p += sprintf(p, "Feature set %2ld bit %2d\t\t\t" +			seq_printf(m, "Feature set %2ld bit %2d\t\t\t"  					" : %s %s\n",  				set, i,  				avail & 0x1 ? (status & 0x1 ? 
@@ -562,36 +546,32 @@ static char * feature_set_info(char *page, u64 avail, u64 status, u64 control,  						"Ctrl" : "NoCtrl"): "");  		}  	} -	return p;  } -static int -processor_info(char *page) +static int processor_info(struct seq_file *m)  { -	char *p = page;  	u64 avail=1, status=1, control=1, feature_set=0;  	s64 ret;  	do {  		ret = ia64_pal_proc_get_features(&avail, &status, &control,  						feature_set); -		if (ret < 0) { -			return p - page; -		} +		if (ret < 0) +			return 0; +  		if (ret == 1) {  			feature_set++;  			continue;  		} -		p = feature_set_info(p, avail, status, control, feature_set); - +		feature_set_info(m, avail, status, control, feature_set);  		feature_set++;  	} while(1); -	return p - page; +	return 0;  } -static const char *bus_features[]={ +static const char *const bus_features[]={  	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,  	NULL,NULL,NULL,NULL,NULL,NULL,NULL, NULL,NULL,  	NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL, @@ -617,125 +597,118 @@ static const char *bus_features[]={  }; -static int -bus_info(char *page) +static int bus_info(struct seq_file *m)  { -	char *p = page; -	const char **v = bus_features; +	const char *const *v = bus_features;  	pal_bus_features_u_t av, st, ct;  	u64 avail, status, control;  	int i;  	s64 ret; -	if ((ret=ia64_pal_bus_get_features(&av, &st, &ct)) != 0) return 0; +	if ((ret=ia64_pal_bus_get_features(&av, &st, &ct)) != 0) +		return 0;  	avail   = av.pal_bus_features_val;  	status  = st.pal_bus_features_val;  	control = ct.pal_bus_features_val;  	for(i=0; i < 64; i++, v++, avail >>=1, status >>=1, control >>=1) { -		if ( ! *v ) continue; -		p += sprintf(p, "%-48s : %s%s %s\n", *v, -				avail & 0x1 ? "" : "NotImpl", -				avail & 0x1 ? (status  & 0x1 ? "On" : "Off"): "", -				avail & 0x1 ? (control & 0x1 ? "Ctrl" : "NoCtrl"): ""); +		if ( ! *v ) +			continue; +		seq_printf(m, "%-48s : %s%s %s\n", *v, +			   avail & 0x1 ? "" : "NotImpl", +			   avail & 0x1 ? (status  & 0x1 ? "On" : "Off"): "", +			   avail & 0x1 ? (control & 0x1 ? 
"Ctrl" : "NoCtrl"): "");  	} -	return p - page; +	return 0;  } -static int -version_info(char *page) +static int version_info(struct seq_file *m)  {  	pal_version_u_t min_ver, cur_ver; -	char *p = page;  	if (ia64_pal_version(&min_ver, &cur_ver) != 0)  		return 0; -	p += sprintf(p, -		     "PAL_vendor : 0x%02x (min=0x%02x)\n" -		     "PAL_A      : %02x.%02x (min=%02x.%02x)\n" -		     "PAL_B      : %02x.%02x (min=%02x.%02x)\n", -		     cur_ver.pal_version_s.pv_pal_vendor, -		     min_ver.pal_version_s.pv_pal_vendor, -		     cur_ver.pal_version_s.pv_pal_a_model, -		     cur_ver.pal_version_s.pv_pal_a_rev, -		     min_ver.pal_version_s.pv_pal_a_model, -		     min_ver.pal_version_s.pv_pal_a_rev, -		     cur_ver.pal_version_s.pv_pal_b_model, -		     cur_ver.pal_version_s.pv_pal_b_rev, -		     min_ver.pal_version_s.pv_pal_b_model, -		     min_ver.pal_version_s.pv_pal_b_rev); -	return p - page; +	seq_printf(m, +		   "PAL_vendor : 0x%02x (min=0x%02x)\n" +		   "PAL_A      : %02x.%02x (min=%02x.%02x)\n" +		   "PAL_B      : %02x.%02x (min=%02x.%02x)\n", +		   cur_ver.pal_version_s.pv_pal_vendor, +		   min_ver.pal_version_s.pv_pal_vendor, +		   cur_ver.pal_version_s.pv_pal_a_model, +		   cur_ver.pal_version_s.pv_pal_a_rev, +		   min_ver.pal_version_s.pv_pal_a_model, +		   min_ver.pal_version_s.pv_pal_a_rev, +		   cur_ver.pal_version_s.pv_pal_b_model, +		   cur_ver.pal_version_s.pv_pal_b_rev, +		   min_ver.pal_version_s.pv_pal_b_model, +		   min_ver.pal_version_s.pv_pal_b_rev); +	return 0;  } -static int -perfmon_info(char *page) +static int perfmon_info(struct seq_file *m)  { -	char *p = page;  	u64 pm_buffer[16];  	pal_perf_mon_info_u_t pm_info; -	if (ia64_pal_perf_mon_info(pm_buffer, &pm_info) != 0) return 0; +	if (ia64_pal_perf_mon_info(pm_buffer, &pm_info) != 0) +		return 0; -	p += sprintf(p, -		     "PMC/PMD pairs                 : %d\n" -		     "Counter width                 : %d bits\n" -		     "Cycle event number            : %d\n" -		     "Retired event number          : %d\n" -		     "Implemented PMC               : ", -		     pm_info.pal_perf_mon_info_s.generic, pm_info.pal_perf_mon_info_s.width, -		     pm_info.pal_perf_mon_info_s.cycles, pm_info.pal_perf_mon_info_s.retired); +	seq_printf(m, +		   "PMC/PMD pairs                 : %d\n" +		   "Counter width                 : %d bits\n" +		   "Cycle event number            : %d\n" +		   "Retired event number          : %d\n" +		   "Implemented PMC               : ", +		   pm_info.pal_perf_mon_info_s.generic, +		   pm_info.pal_perf_mon_info_s.width, +		   pm_info.pal_perf_mon_info_s.cycles, +		   pm_info.pal_perf_mon_info_s.retired); -	p = bitregister_process(p, pm_buffer, 256); -	p += sprintf(p, "\nImplemented PMD               : "); -	p = bitregister_process(p, pm_buffer+4, 256); -	p += sprintf(p, "\nCycles count capable          : "); -	p = bitregister_process(p, pm_buffer+8, 256); -	p += sprintf(p, "\nRetired bundles count capable : "); +	bitregister_process(m, pm_buffer, 256); +	seq_puts(m, "\nImplemented PMD               : "); +	bitregister_process(m, pm_buffer+4, 256); +	seq_puts(m, "\nCycles count capable          : "); +	bitregister_process(m, pm_buffer+8, 256); +	seq_puts(m, "\nRetired bundles count capable : ");  #ifdef CONFIG_ITANIUM  	/*  	 * PAL_PERF_MON_INFO reports that only PMC4 can be used to count CPU_CYCLES  	 * which is wrong, both PMC4 and PMD5 support it.  	 
*/ -	if (pm_buffer[12] == 0x10) pm_buffer[12]=0x30; +	if (pm_buffer[12] == 0x10) +		pm_buffer[12]=0x30;  #endif -	p = bitregister_process(p, pm_buffer+12, 256); - -	p += sprintf(p, "\n"); - -	return p - page; +	bitregister_process(m, pm_buffer+12, 256); +	seq_putc(m, '\n'); +	return 0;  } -static int -frequency_info(char *page) +static int frequency_info(struct seq_file *m)  { -	char *p = page;  	struct pal_freq_ratio proc, itc, bus;  	unsigned long base;  	if (ia64_pal_freq_base(&base) == -1) -		p += sprintf(p, "Output clock            : not implemented\n"); +		seq_puts(m, "Output clock            : not implemented\n");  	else -		p += sprintf(p, "Output clock            : %ld ticks/s\n", base); +		seq_printf(m, "Output clock            : %ld ticks/s\n", base);  	if (ia64_pal_freq_ratios(&proc, &bus, &itc) != 0) return 0; -	p += sprintf(p, +	seq_printf(m,  		     "Processor/Clock ratio   : %d/%d\n"  		     "Bus/Clock ratio         : %d/%d\n"  		     "ITC/Clock ratio         : %d/%d\n",  		     proc.num, proc.den, bus.num, bus.den, itc.num, itc.den); - -	return p - page; +	return 0;  } -static int -tr_info(char *page) +static int tr_info(struct seq_file *m)  { -	char *p = page;  	long status;  	pal_tr_valid_u_t tr_valid;  	u64 tr_buffer[4]; @@ -794,39 +767,40 @@ tr_info(char *page)  		ifa_reg  = (struct ifa_reg *)&tr_buffer[2]; -		if (ifa_reg->valid == 0) continue; +		if (ifa_reg->valid == 0) +			continue;  		gr_reg   = (struct gr_reg *)tr_buffer;  		itir_reg = (struct itir_reg *)&tr_buffer[1];  		rid_reg  = (struct rid_reg *)&tr_buffer[3];  		pgm	 = -1 << (itir_reg->ps - 12); -		p += sprintf(p, -			     "%cTR%lu: av=%d pv=%d dv=%d mv=%d\n" -			     "\tppn  : 0x%lx\n" -			     "\tvpn  : 0x%lx\n" -			     "\tps   : ", -			     "ID"[i], j, -			     tr_valid.pal_tr_valid_s.access_rights_valid, -			     tr_valid.pal_tr_valid_s.priv_level_valid, -			     tr_valid.pal_tr_valid_s.dirty_bit_valid, -			     tr_valid.pal_tr_valid_s.mem_attr_valid, -			     (gr_reg->ppn & pgm)<< 12, (ifa_reg->vpn & pgm)<< 12); +		seq_printf(m, +			   "%cTR%lu: av=%d pv=%d dv=%d mv=%d\n" +			   "\tppn  : 0x%lx\n" +			   "\tvpn  : 0x%lx\n" +			   "\tps   : ", +			   "ID"[i], j, +			   tr_valid.pal_tr_valid_s.access_rights_valid, +			   tr_valid.pal_tr_valid_s.priv_level_valid, +			   tr_valid.pal_tr_valid_s.dirty_bit_valid, +			   tr_valid.pal_tr_valid_s.mem_attr_valid, +			   (gr_reg->ppn & pgm)<< 12, (ifa_reg->vpn & pgm)<< 12); -		p = bitvector_process(p, 1<< itir_reg->ps); +		bitvector_process(m, 1<< itir_reg->ps); -		p += sprintf(p, -			     "\n\tpl   : %d\n" -			     "\tar   : %d\n" -			     "\trid  : %x\n" -			     "\tp    : %d\n" -			     "\tma   : %d\n" -			     "\td    : %d\n", -			     gr_reg->pl, gr_reg->ar, rid_reg->rid, gr_reg->p, gr_reg->ma, -			     gr_reg->d); +		seq_printf(m, +			   "\n\tpl   : %d\n" +			   "\tar   : %d\n" +			   "\trid  : %x\n" +			   "\tp    : %d\n" +			   "\tma   : %d\n" +			   "\td    : %d\n", +			   gr_reg->pl, gr_reg->ar, rid_reg->rid, gr_reg->p, gr_reg->ma, +			   gr_reg->d);  		}  	} -	return p - page; +	return 0;  } @@ -834,7 +808,7 @@ tr_info(char *page)  /*   * List {name,function} pairs for every entry in /proc/palinfo/cpu*   */ -static palinfo_entry_t palinfo_entries[]={ +static const palinfo_entry_t palinfo_entries[]={  	{ "version_info",	version_info, },  	{ "vm_info",		vm_info, },  	{ "cache_info",		cache_info, }, @@ -849,17 +823,6 @@ static palinfo_entry_t palinfo_entries[]={  #define NR_PALINFO_ENTRIES	(int) ARRAY_SIZE(palinfo_entries) -/* - * this array is used to 
keep track of the proc entries we create. This is - * required in the module mode when we need to remove all entries. The procfs code - * does not do recursion of deletion - * - * Notes: - *	- +1 accounts for the cpuN directory entry in /proc/pal - */ -#define NR_PALINFO_PROC_ENTRIES	(NR_CPUS*(NR_PALINFO_ENTRIES+1)) - -static struct proc_dir_entry *palinfo_proc_entries[NR_PALINFO_PROC_ENTRIES];  static struct proc_dir_entry *palinfo_dir;  /* @@ -887,7 +850,7 @@ typedef union {   */  typedef struct {  	palinfo_func_t	func;	/* pointer to function to call */ -	char		*page;	/* buffer to store results */ +	struct seq_file *m;	/* buffer to store results */  	int		ret;	/* return value from call */  } palinfo_smp_data_t; @@ -900,7 +863,7 @@ static void  palinfo_smp_call(void *info)  {  	palinfo_smp_data_t *data = (palinfo_smp_data_t *)info; -	data->ret = (*data->func)(data->page); +	data->ret = (*data->func)(data->m);  }  /* @@ -910,13 +873,13 @@ palinfo_smp_call(void *info)   *	otherwise how many bytes in the "page" buffer were written   */  static -int palinfo_handle_smp(pal_func_cpu_u_t *f, char *page) +int palinfo_handle_smp(struct seq_file *m, pal_func_cpu_u_t *f)  {  	palinfo_smp_data_t ptr;  	int ret;  	ptr.func = palinfo_entries[f->func_id].proc_read; -	ptr.page = page; +	ptr.m = m;  	ptr.ret  = 0; /* just in case */ @@ -930,7 +893,7 @@ int palinfo_handle_smp(pal_func_cpu_u_t *f, char *page)  }  #else /* ! CONFIG_SMP */  static -int palinfo_handle_smp(pal_func_cpu_u_t *f, char *page) +int palinfo_handle_smp(struct seq_file *m, pal_func_cpu_u_t *f)  {  	printk(KERN_ERR "palinfo: should not be called with non SMP kernel\n");  	return 0; @@ -940,91 +903,63 @@ int palinfo_handle_smp(pal_func_cpu_u_t *f, char *page)  /*   * Entry point routine: all calls go through this function   */ -static int -palinfo_read_entry(char *page, char **start, off_t off, int count, int *eof, void *data) +static int proc_palinfo_show(struct seq_file *m, void *v)  { -	int len=0; -	pal_func_cpu_u_t *f = (pal_func_cpu_u_t *)&data; +	pal_func_cpu_u_t *f = (pal_func_cpu_u_t *)&m->private;  	/*  	 * in SMP mode, we may need to call another CPU to get correct  	 * information. PAL, by definition, is processor specific  	 */  	if (f->req_cpu == get_cpu()) -		len = (*palinfo_entries[f->func_id].proc_read)(page); +		(*palinfo_entries[f->func_id].proc_read)(m);  	else -		len = palinfo_handle_smp(f, page); +		palinfo_handle_smp(m, f);  	put_cpu(); +	return 0; +} -	if (len <= off+count) *eof = 1; - -	*start = page + off; -	len   -= off; - -	if (len>count) len = count; -	if (len<0) len = 0; - -	return len; +static int proc_palinfo_open(struct inode *inode, struct file *file) +{ +	return single_open(file, proc_palinfo_show, PDE_DATA(inode));  } +static const struct file_operations proc_palinfo_fops = { +	.open		= proc_palinfo_open, +	.read		= seq_read, +	.llseek		= seq_lseek, +	.release	= single_release, +}; +  static void __cpuinit  create_palinfo_proc_entries(unsigned int cpu)  { -#	define CPUSTR	"cpu%d" -  	pal_func_cpu_u_t f; -	struct proc_dir_entry **pdir;  	struct proc_dir_entry *cpu_dir;  	int j; -	char cpustr[sizeof(CPUSTR)]; - - -	/* -	 * we keep track of created entries in a depth-first order for -	 * cleanup purposes. 
Each entry is stored into palinfo_proc_entries -	 */ -	sprintf(cpustr,CPUSTR, cpu); +	char cpustr[3+4+1];	/* cpu numbers are up to 4095 on itanic */ +	sprintf(cpustr, "cpu%d", cpu);  	cpu_dir = proc_mkdir(cpustr, palinfo_dir); +	if (!cpu_dir) +		return;  	f.req_cpu = cpu; -	/* -	 * Compute the location to store per cpu entries -	 * We dont store the top level entry in this list, but -	 * remove it finally after removing all cpu entries. -	 */ -	pdir = &palinfo_proc_entries[cpu*(NR_PALINFO_ENTRIES+1)]; -	*pdir++ = cpu_dir;  	for (j=0; j < NR_PALINFO_ENTRIES; j++) {  		f.func_id = j; -		*pdir = create_proc_read_entry( -				palinfo_entries[j].name, 0, cpu_dir, -				palinfo_read_entry, (void *)f.value); -		pdir++; +		proc_create_data(palinfo_entries[j].name, 0, cpu_dir, +				 &proc_palinfo_fops, (void *)f.value);  	}  }  static void  remove_palinfo_proc_entries(unsigned int hcpu)  { -	int j; -	struct proc_dir_entry *cpu_dir, **pdir; - -	pdir = &palinfo_proc_entries[hcpu*(NR_PALINFO_ENTRIES+1)]; -	cpu_dir = *pdir; -	*pdir++=NULL; -	for (j=0; j < (NR_PALINFO_ENTRIES); j++) { -		if ((*pdir)) { -			remove_proc_entry ((*pdir)->name, cpu_dir); -			*pdir ++= NULL; -		} -	} - -	if (cpu_dir) { -		remove_proc_entry(cpu_dir->name, palinfo_dir); -	} +	char cpustr[3+4+1];	/* cpu numbers are up to 4095 on itanic */ +	sprintf(cpustr, "cpu%d", hcpu); +	remove_proc_subtree(cpustr, palinfo_dir);  }  static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb, @@ -1058,6 +993,8 @@ palinfo_init(void)  	printk(KERN_INFO "PAL Information Facility v%s\n", PALINFO_VERSION);  	palinfo_dir = proc_mkdir("pal", NULL); +	if (!palinfo_dir) +		return -ENOMEM;  	/* Create palinfo dirs in /proc for all online cpus */  	for_each_online_cpu(i) { @@ -1073,22 +1010,8 @@ palinfo_init(void)  static void __exit  palinfo_exit(void)  { -	int i = 0; - -	/* remove all nodes: depth first pass. 
Could optimize this  */ -	for_each_online_cpu(i) { -		remove_palinfo_proc_entries(i); -	} - -	/* -	 * Remove the top level entry finally -	 */ -	remove_proc_entry(palinfo_dir->name, NULL); - -	/* -	 * Unregister from cpu notifier callbacks -	 */  	unregister_hotcpu_notifier(&palinfo_cpu_notifier); +	remove_proc_subtree("pal", NULL);  }  module_init(palinfo_init); diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c index ea39eba61ef..9ea25fce06d 100644 --- a/arch/ia64/kernel/perfmon.c +++ b/arch/ia64/kernel/perfmon.c @@ -42,6 +42,7 @@  #include <linux/completion.h>  #include <linux/tracehook.h>  #include <linux/slab.h> +#include <linux/cpu.h>  #include <asm/errno.h>  #include <asm/intrinsics.h> @@ -619,6 +620,7 @@ static struct file_system_type pfm_fs_type = {  	.mount    = pfmfs_mount,  	.kill_sb  = kill_anon_super,  }; +MODULE_ALIAS_FS("pfmfs");  DEFINE_PER_CPU(unsigned long, pfm_syst_info);  DEFINE_PER_CPU(struct task_struct *, pmu_owner); @@ -1321,8 +1323,6 @@ out:  }  EXPORT_SYMBOL(pfm_unregister_buffer_fmt); -extern void update_pal_halt_status(int); -  static int  pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)  { @@ -1370,9 +1370,9 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)  		cpu));  	/* -	 * disable default_idle() to go to PAL_HALT +	 * Force idle() into poll mode  	 */ -	update_pal_halt_status(0); +	cpu_idle_poll_ctrl(true);  	UNLOCK_PFS(flags); @@ -1429,11 +1429,8 @@ pfm_unreserve_session(pfm_context_t *ctx, int is_syswide, unsigned int cpu)  		is_syswide,  		cpu)); -	/* -	 * if possible, enable default_idle() to go into PAL_HALT -	 */ -	if (pfm_sessions.pfs_task_sessions == 0 && pfm_sessions.pfs_sys_sessions == 0) -		update_pal_halt_status(1); +	/* Undo forced polling. 
Last session reenables pal_halt */ +	cpu_idle_poll_ctrl(false);  	UNLOCK_PFS(flags); @@ -2221,9 +2218,9 @@ pfm_alloc_file(pfm_context_t *ctx)  	d_add(path.dentry, inode);  	file = alloc_file(&path, FMODE_READ, &pfm_file_ops); -	if (!file) { +	if (IS_ERR(file)) {  		path_put(&path); -		return ERR_PTR(-ENFILE); +		return file;  	}  	file->f_flags = O_RDONLY; diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c index 31360cbbd5f..55d4ba47a90 100644 --- a/arch/ia64/kernel/process.c +++ b/arch/ia64/kernel/process.c @@ -57,8 +57,6 @@ void (*ia64_mark_idle)(int);  unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;  EXPORT_SYMBOL(boot_option_idle_override); -void (*pm_idle) (void); -EXPORT_SYMBOL(pm_idle);  void (*pm_power_off) (void);  EXPORT_SYMBOL(pm_power_off); @@ -98,21 +96,13 @@ show_stack (struct task_struct *task, unsigned long *sp)  }  void -dump_stack (void) -{ -	show_stack(NULL, NULL); -} - -EXPORT_SYMBOL(dump_stack); - -void  show_regs (struct pt_regs *regs)  {  	unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;  	print_modules(); -	printk("\nPid: %d, CPU %d, comm: %20s\n", task_pid_nr(current), -			smp_processor_id(), current->comm); +	printk("\n"); +	show_regs_print_info(KERN_DEFAULT);  	printk("psr : %016lx ifs : %016lx ip  : [<%016lx>]    %s (%s)\n",  	       regs->cr_ipsr, regs->cr_ifs, ip, print_tainted(),  	       init_utsname()->release); @@ -211,41 +201,13 @@ do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)  	local_irq_disable();	/* force interrupt disable */  } -static int pal_halt        = 1; -static int can_do_pal_halt = 1; -  static int __init nohalt_setup(char * str)  { -	pal_halt = can_do_pal_halt = 0; +	cpu_idle_poll_ctrl(true);  	return 1;  }  __setup("nohalt", nohalt_setup); -void -update_pal_halt_status(int status) -{ -	can_do_pal_halt = pal_halt && status; -} - -/* - * We use this if we don't have any better idle routine.. - */ -void -default_idle (void) -{ -	local_irq_enable(); -	while (!need_resched()) { -		if (can_do_pal_halt) { -			local_irq_disable(); -			if (!need_resched()) { -				safe_halt(); -			} -			local_irq_enable(); -		} else -			cpu_relax(); -	} -} -  #ifdef CONFIG_HOTPLUG_CPU  /* We don't actually take CPU down, just spin without interrupts. 
*/  static inline void play_dead(void) @@ -272,51 +234,29 @@ static inline void play_dead(void)  }  #endif /* CONFIG_HOTPLUG_CPU */ -void __attribute__((noreturn)) -cpu_idle (void) +void arch_cpu_idle_dead(void)  { -	void (*mark_idle)(int) = ia64_mark_idle; -  	int cpu = smp_processor_id(); +	play_dead(); +} -	/* endless idle loop with no priority at all */ -	while (1) { -		rcu_idle_enter(); -		if (can_do_pal_halt) { -			current_thread_info()->status &= ~TS_POLLING; -			/* -			 * TS_POLLING-cleared state must be visible before we -			 * test NEED_RESCHED: -			 */ -			smp_mb(); -		} else { -			current_thread_info()->status |= TS_POLLING; -		} +void arch_cpu_idle(void) +{ +	void (*mark_idle)(int) = ia64_mark_idle; -		if (!need_resched()) { -			void (*idle)(void);  #ifdef CONFIG_SMP -			min_xtp(); +	min_xtp();  #endif -			rmb(); -			if (mark_idle) -				(*mark_idle)(1); +	rmb(); +	if (mark_idle) +		(*mark_idle)(1); -			idle = pm_idle; -			if (!idle) -				idle = default_idle; -			(*idle)(); -			if (mark_idle) -				(*mark_idle)(0); +	safe_halt(); + +	if (mark_idle) +		(*mark_idle)(0);  #ifdef CONFIG_SMP -			normal_xtp(); +	normal_xtp();  #endif -		} -		rcu_idle_exit(); -		schedule_preempt_disabled(); -		check_pgt_cache(); -		if (cpu_is_offline(cpu)) -			play_dead(); -	}  }  void diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c index 79802e540e5..4bc580af67b 100644 --- a/arch/ia64/kernel/salinfo.c +++ b/arch/ia64/kernel/salinfo.c @@ -40,6 +40,7 @@  #include <linux/cpu.h>  #include <linux/types.h>  #include <linux/proc_fs.h> +#include <linux/seq_file.h>  #include <linux/module.h>  #include <linux/smp.h>  #include <linux/timer.h> @@ -53,7 +54,7 @@ MODULE_AUTHOR("Jesse Barnes <jbarnes@sgi.com>");  MODULE_DESCRIPTION("/proc interface to IA-64 SAL features");  MODULE_LICENSE("GPL"); -static int salinfo_read(char *page, char **start, off_t off, int count, int *eof, void *data); +static const struct file_operations proc_salinfo_fops;  typedef struct {  	const char		*name;		/* name of the proc entry */ @@ -65,7 +66,7 @@ typedef struct {   * List {name,feature} pairs for every entry in /proc/sal/<feature>   * that this module exports   */ -static salinfo_entry_t salinfo_entries[]={ +static const salinfo_entry_t salinfo_entries[]={  	{ "bus_lock",           IA64_SAL_PLATFORM_FEATURE_BUS_LOCK, },  	{ "irq_redirection",	IA64_SAL_PLATFORM_FEATURE_IRQ_REDIR_HINT, },  	{ "ipi_redirection",	IA64_SAL_PLATFORM_FEATURE_IPI_REDIR_HINT, }, @@ -301,9 +302,7 @@ salinfo_event_open(struct inode *inode, struct file *file)  static ssize_t  salinfo_event_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)  { -	struct inode *inode = file->f_path.dentry->d_inode; -	struct proc_dir_entry *entry = PDE(inode); -	struct salinfo_data *data = entry->data; +	struct salinfo_data *data = PDE_DATA(file_inode(file));  	char cmd[32];  	size_t size;  	int i, n, cpu = -1; @@ -360,8 +359,7 @@ static const struct file_operations salinfo_event_fops = {  static int  salinfo_log_open(struct inode *inode, struct file *file)  { -	struct proc_dir_entry *entry = PDE(inode); -	struct salinfo_data *data = entry->data; +	struct salinfo_data *data = PDE_DATA(inode);  	if (!capable(CAP_SYS_ADMIN))  		return -EPERM; @@ -386,8 +384,7 @@ salinfo_log_open(struct inode *inode, struct file *file)  static int  salinfo_log_release(struct inode *inode, struct file *file)  { -	struct proc_dir_entry *entry = PDE(inode); -	struct salinfo_data *data = entry->data; +	struct salinfo_data *data = PDE_DATA(inode);  	if 
(data->state == STATE_NO_DATA) {  		vfree(data->log_buffer); @@ -463,9 +460,7 @@ retry:  static ssize_t  salinfo_log_read(struct file *file, char __user *buffer, size_t count, loff_t *ppos)  { -	struct inode *inode = file->f_path.dentry->d_inode; -	struct proc_dir_entry *entry = PDE(inode); -	struct salinfo_data *data = entry->data; +	struct salinfo_data *data = PDE_DATA(file_inode(file));  	u8 *buf;  	u64 bufsize; @@ -524,9 +519,7 @@ salinfo_log_clear(struct salinfo_data *data, int cpu)  static ssize_t  salinfo_log_write(struct file *file, const char __user *buffer, size_t count, loff_t *ppos)  { -	struct inode *inode = file->f_path.dentry->d_inode; -	struct proc_dir_entry *entry = PDE(inode); -	struct salinfo_data *data = entry->data; +	struct salinfo_data *data = PDE_DATA(file_inode(file));  	char cmd[32];  	size_t size;  	u32 offset; @@ -637,8 +630,9 @@ salinfo_init(void)  	for (i=0; i < NR_SALINFO_ENTRIES; i++) {  		/* pass the feature bit in question as misc data */ -		*sdir++ = create_proc_read_entry (salinfo_entries[i].name, 0, salinfo_dir, -						  salinfo_read, (void *)salinfo_entries[i].feature); +		*sdir++ = proc_create_data(salinfo_entries[i].name, 0, salinfo_dir, +					   &proc_salinfo_fops, +					   (void *)salinfo_entries[i].feature);  	}  	for (i = 0; i < ARRAY_SIZE(salinfo_log_name); i++) { @@ -684,22 +678,23 @@ salinfo_init(void)   * 'data' contains an integer that corresponds to the feature we're   * testing   */ -static int -salinfo_read(char *page, char **start, off_t off, int count, int *eof, void *data) +static int proc_salinfo_show(struct seq_file *m, void *v)  { -	int len = 0; - -	len = sprintf(page, (sal_platform_features & (unsigned long)data) ? "1\n" : "0\n"); - -	if (len <= off+count) *eof = 1; - -	*start = page + off; -	len   -= off; - -	if (len>count) len = count; -	if (len<0) len = 0; +	unsigned long data = (unsigned long)v; +	seq_puts(m, (sal_platform_features & data) ? 
"1\n" : "0\n"); +	return 0; +} -	return len; +static int proc_salinfo_open(struct inode *inode, struct file *file) +{ +	return single_open(file, proc_salinfo_show, PDE_DATA(inode));  } +static const struct file_operations proc_salinfo_fops = { +	.open		= proc_salinfo_open, +	.read		= seq_read, +	.llseek		= seq_lseek, +	.release	= single_release, +}; +  module_init(salinfo_init); diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c index aaefd9b94f2..13bfdd22afc 100644 --- a/arch/ia64/kernel/setup.c +++ b/arch/ia64/kernel/setup.c @@ -1051,7 +1051,6 @@ cpu_init (void)  		max_num_phys_stacked = num_phys_stacked;  	}  	platform_cpu_init(); -	pm_idle = default_idle;  }  void __init @@ -1064,6 +1063,7 @@ check_bugs (void)  static int __init run_dmi_scan(void)  {  	dmi_scan_machine(); +	dmi_set_dump_stack_arch_desc();  	return 0;  }  core_initcall(run_dmi_scan); diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c index 680b73786be..3637e03d228 100644 --- a/arch/ia64/kernel/signal.c +++ b/arch/ia64/kernel/signal.c @@ -39,14 +39,6 @@  # define GET_SIGSET(k,u)	__get_user((k)->sig[0], &(u)->sig[0])  #endif -asmlinkage long -sys_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, long arg2, -		 long arg3, long arg4, long arg5, long arg6, long arg7, -		 struct pt_regs regs) -{ -	return do_sigaltstack(uss, uoss, regs.r12); -} -  static long  restore_sigcontext (struct sigcontext __user *sc, struct sigscratch *scr)  { @@ -208,11 +200,8 @@ ia64_rt_sigreturn (struct sigscratch *scr)  	printk("SIG return (%s:%d): sp=%lx ip=%lx\n",  	       current->comm, current->pid, scr->pt.r12, scr->pt.cr_iip);  #endif -	/* -	 * It is more difficult to avoid calling this function than to -	 * call it and ignore errors. -	 */ -	do_sigaltstack(&sc->sc_stack, NULL, scr->pt.r12); +	if (restore_altstack(&sc->sc_stack)) +		goto give_sigsegv;  	return retval;    give_sigsegv: @@ -376,9 +365,7 @@ setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set,  	err |= copy_siginfo_to_user(&frame->info, info); -	err |= __put_user(current->sas_ss_sp, &frame->sc.sc_stack.ss_sp); -	err |= __put_user(current->sas_ss_size, &frame->sc.sc_stack.ss_size); -	err |= __put_user(sas_ss_flags(scr->pt.r12), &frame->sc.sc_stack.ss_flags); +	err |= __save_altstack(&frame->sc.sc_stack, scr->pt.r12);  	err |= setup_sigcontext(&frame->sc, set, scr);  	if (unlikely(err)) diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c index 500f1e4d9f9..8d87168d218 100644 --- a/arch/ia64/kernel/smpboot.c +++ b/arch/ia64/kernel/smpboot.c @@ -455,7 +455,7 @@ start_secondary (void *unused)  	preempt_disable();  	smp_callin(); -	cpu_idle(); +	cpu_startup_entry(CPUHP_ONLINE);  	return 0;  } diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c index d9439ef2f66..41e33f84c18 100644 --- a/arch/ia64/kernel/sys_ia64.c +++ b/arch/ia64/kernel/sys_ia64.c @@ -25,9 +25,9 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len  			unsigned long pgoff, unsigned long flags)  {  	long map_shared = (flags & MAP_SHARED); -	unsigned long start_addr, align_mask = PAGE_SIZE - 1; +	unsigned long align_mask = 0;  	struct mm_struct *mm = current->mm; -	struct vm_area_struct *vma; +	struct vm_unmapped_area_info info;  	if (len > RGN_MAP_LIMIT)  		return -ENOMEM; @@ -44,7 +44,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len  		addr = 0;  #endif  	if (!addr) -		addr = mm->free_area_cache; +		addr = TASK_UNMAPPED_BASE;  	if (map_shared && 
(TASK_SIZE > 0xfffffffful))  		/* @@ -53,28 +53,15 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len  		 * tasks, we prefer to avoid exhausting the address space too quickly by  		 * limiting alignment to a single page.  		 */ -		align_mask = SHMLBA - 1; +		align_mask = PAGE_MASK & (SHMLBA - 1); -  full_search: -	start_addr = addr = (addr + align_mask) & ~align_mask; - -	for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { -		/* At this point:  (!vma || addr < vma->vm_end). */ -		if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) { -			if (start_addr != TASK_UNMAPPED_BASE) { -				/* Start a new search --- just in case we missed some holes.  */ -				addr = TASK_UNMAPPED_BASE; -				goto full_search; -			} -			return -ENOMEM; -		} -		if (!vma || addr + len <= vma->vm_start) { -			/* Remember the address where we stopped this search:  */ -			mm->free_area_cache = addr + len; -			return addr; -		} -		addr = (vma->vm_end + align_mask) & ~align_mask; -	} +	info.flags = 0; +	info.length = len; +	info.low_limit = addr; +	info.high_limit = TASK_SIZE; +	info.align_mask = align_mask; +	info.align_offset = 0; +	return vm_unmapped_area(&info);  }  asmlinkage long diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 88a794536bc..fbaac1afb84 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c @@ -77,7 +77,7 @@ static struct clocksource clocksource_itc = {  };  static struct clocksource *itc_clocksource; -#ifdef CONFIG_VIRT_CPU_ACCOUNTING +#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE  #include <linux/kernel_stat.h> @@ -136,13 +136,14 @@ void vtime_account_system(struct task_struct *tsk)  	account_system_time(tsk, 0, delta, delta);  } +EXPORT_SYMBOL_GPL(vtime_account_system);  void vtime_account_idle(struct task_struct *tsk)  {  	account_idle_time(vtime_delta(tsk));  } -#endif /* CONFIG_VIRT_CPU_ACCOUNTING */ +#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */  static irqreturn_t  timer_interrupt (int irq, void *dev_id) diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c index bd42b76000d..f7f9f9c6caf 100644 --- a/arch/ia64/kernel/traps.c +++ b/arch/ia64/kernel/traps.c @@ -72,7 +72,7 @@ die (const char *str, struct pt_regs *regs, long err)  	bust_spinlocks(0);  	die.lock_owner = -1; -	add_taint(TAINT_DIE); +	add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);  	spin_unlock_irq(&die.lock);  	if (!regs)
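
The recurring pattern in the palinfo.c and salinfo.c hunks above is the removal of the old create_proc_read_entry()/read_proc interface in favour of proc_create_data() plus the seq_file single_open() helpers, with PDE_DATA() replacing the old PDE(inode)->data lookup. Below is a minimal sketch of that pattern for the proc/seq_file API of the kernel generation this series targets; the entry name "example", the cookie value, and the function names are made up for illustration and are not part of the patch.

/*
 * Illustrative only: a hypothetical /proc/example entry converted to
 * proc_create_data() + single_open(), mirroring the palinfo/salinfo hunks.
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
	/* the cookie passed to proc_create_data() comes back via m->private */
	unsigned long cookie = (unsigned long)m->private;

	seq_printf(m, "%lu\n", cookie);
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	/* PDE_DATA() replaces the old PDE(inode)->data dereference */
	return single_open(file, example_show, PDE_DATA(inode));
}

static const struct file_operations example_fops = {
	.open		= example_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init example_init(void)
{
	/* old code: create_proc_read_entry("example", 0, NULL, read_fn, data) */
	if (!proc_create_data("example", 0, NULL, &example_fops, (void *)42UL))
		return -ENOMEM;
	return 0;
}

static void __exit example_exit(void)
{
	remove_proc_entry("example", NULL);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

On the teardown side, remove_proc_subtree() tears down a whole directory in one call, which is why palinfo no longer needs the hand-maintained palinfo_proc_entries[] bookkeeping array: remove_palinfo_proc_entries() just removes the cpuN subtree, and palinfo_exit() removes /proc/pal itself.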