diff options
Diffstat (limited to 'arch/powerpc/kernel/rtas.c')
-rw-r--r--  arch/powerpc/kernel/rtas.c | 113 +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 113 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 1fd6e7b2f39..52add6f3e20 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -19,6 +19,7 @@  #include <linux/init.h>  #include <linux/capability.h>  #include <linux/delay.h> +#include <linux/cpu.h>  #include <linux/smp.h>  #include <linux/completion.h>  #include <linux/cpumask.h> @@ -807,6 +808,95 @@ static void rtas_percpu_suspend_me(void *info)  	__rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1);  } +enum rtas_cpu_state { +	DOWN, +	UP, +}; + +#ifndef CONFIG_SMP +static int rtas_cpu_state_change_mask(enum rtas_cpu_state state, +				cpumask_var_t cpus) +{ +	if (!cpumask_empty(cpus)) { +		cpumask_clear(cpus); +		return -EINVAL; +	} else +		return 0; +} +#else +/* On return cpumask will be altered to indicate CPUs changed. + * CPUs with states changed will be set in the mask, + * CPUs with status unchanged will be unset in the mask. */ +static int rtas_cpu_state_change_mask(enum rtas_cpu_state state, +				cpumask_var_t cpus) +{ +	int cpu; +	int cpuret = 0; +	int ret = 0; + +	if (cpumask_empty(cpus)) +		return 0; + +	for_each_cpu(cpu, cpus) { +		switch (state) { +		case DOWN: +			cpuret = cpu_down(cpu); +			break; +		case UP: +			cpuret = cpu_up(cpu); +			break; +		} +		if (cpuret) { +			pr_debug("%s: cpu_%s for cpu#%d returned %d.\n", +					__func__, +					((state == UP) ? 
"up" : "down"), +					cpu, cpuret); +			if (!ret) +				ret = cpuret; +			if (state == UP) { +				/* clear bits for unchanged cpus, return */ +				cpumask_shift_right(cpus, cpus, cpu); +				cpumask_shift_left(cpus, cpus, cpu); +				break; +			} else { +				/* clear bit for unchanged cpu, continue */ +				cpumask_clear_cpu(cpu, cpus); +			} +		} +	} + +	return ret; +} +#endif + +int rtas_online_cpus_mask(cpumask_var_t cpus) +{ +	int ret; + +	ret = rtas_cpu_state_change_mask(UP, cpus); + +	if (ret) { +		cpumask_var_t tmp_mask; + +		if (!alloc_cpumask_var(&tmp_mask, GFP_TEMPORARY)) +			return ret; + +		/* Use tmp_mask to preserve cpus mask from first failure */ +		cpumask_copy(tmp_mask, cpus); +		rtas_offline_cpus_mask(tmp_mask); +		free_cpumask_var(tmp_mask); +	} + +	return ret; +} +EXPORT_SYMBOL(rtas_online_cpus_mask); + +int rtas_offline_cpus_mask(cpumask_var_t cpus) +{ +	return rtas_cpu_state_change_mask(DOWN, cpus); +} +EXPORT_SYMBOL(rtas_offline_cpus_mask); +  int rtas_ibm_suspend_me(struct rtas_args *args)  {  	long state; @@ -814,6 +904,8 @@ int rtas_ibm_suspend_me(struct rtas_args *args)  	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];  	struct rtas_suspend_me_data data;  	DECLARE_COMPLETION_ONSTACK(done); +	cpumask_var_t offline_mask; +	int cpuret;  	if (!rtas_service_present("ibm,suspend-me"))  		return -ENOSYS; @@ -837,11 +929,24 @@ int rtas_ibm_suspend_me(struct rtas_args *args)  		return 0;  	} +	if (!alloc_cpumask_var(&offline_mask, GFP_TEMPORARY)) +		return -ENOMEM; +  	atomic_set(&data.working, 0);  	atomic_set(&data.done, 0);  	atomic_set(&data.error, 0);  	data.token = rtas_token("ibm,suspend-me");  	data.complete = &done; + +	/* All present CPUs must be online */ +	cpumask_andnot(offline_mask, cpu_present_mask, cpu_online_mask); +	cpuret = rtas_online_cpus_mask(offline_mask); +	if (cpuret) { +		pr_err("%s: Could not bring present CPUs online.\n", __func__); +		atomic_set(&data.error, cpuret); +		goto out; +	} +  	stop_topology_update();  	/* Call 
function on all CPUs.  One of us will make the @@ -857,6 +962,14 @@ int rtas_ibm_suspend_me(struct rtas_args *args)  	start_topology_update(); +	/* Take down CPUs not online prior to suspend */ +	cpuret = rtas_offline_cpus_mask(offline_mask); +	if (cpuret) +		pr_warn("%s: Could not restore CPUs to offline state.\n", +				__func__); + +out: +	free_cpumask_var(offline_mask);  	return atomic_read(&data.error);  }  #else /* CONFIG_PPC_PSERIES */  |