commit d84ff0512f1bfc0d8c864efadb4523fce68919cc
Author:    Tejun Heo <tj@kernel.org>	2013-03-12 11:29:59 -0700
Committer: Tejun Heo <tj@kernel.org>	2013-03-12 11:29:59 -0700
Tree:      b91fe48e9bd59e0709b00869cd200c79f882afff /kernel
Parent:    493a1724fef9a3e931d9199f1a19e358e526a6e7
workqueue: consistently use int for @cpu variables
Workqueue is mixing unsigned int and int for @cpu variables.  There's
no point in using unsigned int for cpus - many cpu-related APIs take
int anyway.  Consistently use int for @cpu variables so that we can
use negative values to mark special ones.
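
[Editor's note: a stand-alone user-space C sketch of the pitfall the patch heads off, not part of the patch itself.  MY_CPU_NONE is a hypothetical sentinel invented for this example: with unsigned int, a negative sentinel wraps to a huge positive value, so a "cpu < 0" test can never fire.]

/*
 * Stand-alone sketch, not kernel code: why a signed @cpu matters
 * once negative values mark special CPUs.
 */
#include <stdio.h>

#define MY_CPU_NONE	-1	/* hypothetical "no cpu" sentinel */

static void classify(int cpu)
{
	if (cpu < 0)			/* works: int keeps the sign */
		printf("cpu %d is a sentinel\n", cpu);
	else
		printf("cpu %d is a real CPU\n", cpu);
}

int main(void)
{
	unsigned int ucpu = MY_CPU_NONE;

	/*
	 * Through unsigned int the sentinel wraps to UINT_MAX
	 * (4294967295 for a 32-bit unsigned int), so "ucpu < 0" is
	 * always false and the sentinel would be mistaken for an
	 * absurdly large CPU number.
	 */
	printf("sentinel seen through unsigned int: %u\n", ucpu);

	classify(MY_CPU_NONE);
	classify(0);
	return 0;
}
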
This patch doesn't introduce any visible behavior changes.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Lai Jiangshan <laijs@cn.fujitsu.com>
Diffstat (limited to 'kernel')
 kernel/workqueue.c          | 24 +++++++++++-------------
 kernel/workqueue_internal.h |  5 ++---
 2 files changed, 13 insertions(+), 16 deletions(-)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 26c67c76b6c..73c5f68065b 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -124,7 +124,7 @@ enum {
 
 struct worker_pool {
 	spinlock_t		lock;		/* the pool lock */
-	unsigned int		cpu;		/* I: the associated cpu */
+	int			cpu;		/* I: the associated cpu */
 	int			id;		/* I: pool ID */
 	unsigned int		flags;		/* X: flags */
@@ -467,8 +467,7 @@ static struct worker_pool *get_std_worker_pool(int cpu, bool highpri)
 	return &pools[highpri];
 }
 
-static struct pool_workqueue *get_pwq(unsigned int cpu,
-				      struct workqueue_struct *wq)
+static struct pool_workqueue *get_pwq(int cpu, struct workqueue_struct *wq)
 {
 	if (!(wq->flags & WQ_UNBOUND)) {
 		if (likely(cpu < nr_cpu_ids))
@@ -730,7 +729,7 @@ static void wake_up_worker(struct worker_pool *pool)
  * CONTEXT:
  * spin_lock_irq(rq->lock)
  */
-void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
+void wq_worker_waking_up(struct task_struct *task, int cpu)
 {
 	struct worker *worker = kthread_data(task);
@@ -755,8 +754,7 @@ void wq_worker_waking_up(struct task_struct *task, unsigned int cpu)
  * RETURNS:
  * Worker task on @cpu to wake up, %NULL if none.
  */
-struct task_struct *wq_worker_sleeping(struct task_struct *task,
-				       unsigned int cpu)
+struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu)
 {
 	struct worker *worker = kthread_data(task), *to_wakeup = NULL;
 	struct worker_pool *pool;
@@ -1159,7 +1157,7 @@ static bool is_chained_work(struct workqueue_struct *wq)
 	return worker && worker->current_pwq->wq == wq;
 }
 
-static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
+static void __queue_work(int cpu, struct workqueue_struct *wq,
 			 struct work_struct *work)
 {
 	struct pool_workqueue *pwq;
@@ -1714,7 +1712,7 @@ static struct worker *create_worker(struct worker_pool *pool)
 	if (pool->cpu != WORK_CPU_UNBOUND)
 		worker->task = kthread_create_on_node(worker_thread,
 					worker, cpu_to_node(pool->cpu),
-					"kworker/%u:%d%s", pool->cpu, id, pri);
+					"kworker/%d:%d%s", pool->cpu, id, pri);
 	else
 		worker->task = kthread_create(worker_thread, worker,
 					      "kworker/u:%d%s", id, pri);
@@ -3345,7 +3343,7 @@ EXPORT_SYMBOL_GPL(workqueue_set_max_active);
  * RETURNS:
  * %true if congested, %false otherwise.
  */
-bool workqueue_congested(unsigned int cpu, struct workqueue_struct *wq)
+bool workqueue_congested(int cpu, struct workqueue_struct *wq)
 {
 	struct pool_workqueue *pwq = get_pwq(cpu, wq);
@@ -3461,7 +3459,7 @@ static int __cpuinit workqueue_cpu_up_callback(struct notifier_block *nfb,
 					       unsigned long action,
 					       void *hcpu)
 {
-	unsigned int cpu = (unsigned long)hcpu;
+	int cpu = (unsigned long)hcpu;
 	struct worker_pool *pool;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
@@ -3507,7 +3505,7 @@ static int __cpuinit workqueue_cpu_down_callback(struct notifier_block *nfb,
 						 unsigned long action,
 						 void *hcpu)
 {
-	unsigned int cpu = (unsigned long)hcpu;
+	int cpu = (unsigned long)hcpu;
 	struct work_struct unbind_work;
 
 	switch (action & ~CPU_TASKS_FROZEN) {
@@ -3547,7 +3545,7 @@ static void work_for_cpu_fn(struct work_struct *work)
  * It is up to the caller to ensure that the cpu doesn't go offline.
  * The caller must not hold any locks which would prevent @fn from completing.
  */
-long work_on_cpu(unsigned int cpu, long (*fn)(void *), void *arg)
+long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
 {
 	struct work_for_cpu wfc = { .fn = fn, .arg = arg };
@@ -3705,7 +3703,7 @@ out_unlock:
 static int __init init_workqueues(void)
 {
-	unsigned int cpu;
+	int cpu;
 
 	/* make sure we have enough bits for OFFQ pool ID */
 	BUILD_BUG_ON((1LU << (BITS_PER_LONG - WORK_OFFQ_POOL_SHIFT)) <
diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
index f9c887731e2..f116f071d91 100644
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
@@ -59,8 +59,7 @@ static inline struct worker *current_wq_worker(void)
  * Scheduler hooks for concurrency managed workqueue.  Only to be used from
  * sched.c and workqueue.c.
  */
-void wq_worker_waking_up(struct task_struct *task, unsigned int cpu);
-struct task_struct *wq_worker_sleeping(struct task_struct *task,
-				       unsigned int cpu);
+void wq_worker_waking_up(struct task_struct *task, int cpu);
+struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu);
 
 #endif /* _KERNEL_WORKQUEUE_INTERNAL_H */
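
[Editor's note: one hunk above is easy to miss - create_worker()'s name format flips from "kworker/%u:%d%s" to "kworker/%d:%d%s" to match the new signedness of pool->cpu.  A small stand-alone C sketch, with hypothetical values, of what the stale specifier would print once a negative cpu shows up:]

#include <stdio.h>

int main(void)
{
	int cpu = -1;	/* e.g. a hypothetical negative sentinel */

	/* Stale %u reinterprets the sign bit: prints kworker/4294967295:0 */
	printf("kworker/%u:0\n", (unsigned int)cpu);

	/* Matching %d prints the intended kworker/-1:0 */
	printf("kworker/%d:0\n", cpu);
	return 0;
}
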