| author | Daniel Vetter <daniel.vetter@ffwll.ch> | 2012-05-08 13:39:59 +0200 | 
|---|---|---|
| committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2012-05-08 13:39:59 +0200 | 
| commit | 5e13a0c5ec05d382b488a691dfb8af015b1dea1e (patch) | |
| tree | 7a06dfa1f7661f8908193f2437b32452520221d3 /kernel | |
| parent | b615b57a124a4af7b68196bc2fb8acc236041fa2 (diff) | |
| parent | 4f256e8aa3eda15c11c3cec3ec5336e1fc579cbd (diff) | |
Merge remote-tracking branch 'airlied/drm-core-next' into drm-intel-next-queued
Backmerge of drm-next to resolve a few ugly conflicts and to get a few
fixes from 3.4-rc6 (which drm-next has already merged). Note that this
merge also restricts the stencil cache lra evict policy workaround to
snb (as it should) - I had to frob the code anyway because the
CM0_MASK_SHIFT define died in the masked bit cleanups.
We need the backmerge to get Paulo Zanoni's infoframe regression fix
for gm45 - further bugfixes from him touch the same area and would
needlessly conflict.
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Diffstat (limited to 'kernel')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | kernel/events/core.c | 2 |
| -rw-r--r-- | kernel/irq/debug.h | 38 |
| -rw-r--r-- | kernel/power/swap.c | 28 |
| -rw-r--r-- | kernel/rcutree.c | 1 |
| -rw-r--r-- | kernel/sched/core.c | 22 |
| -rw-r--r-- | kernel/sched/fair.c | 18 |
| -rw-r--r-- | kernel/sched/features.h | 1 |
| -rw-r--r-- | kernel/time/tick-broadcast.c | 13 |
| -rw-r--r-- | kernel/trace/trace.c | 8 |
| -rw-r--r-- | kernel/trace/trace.h | 4 |
| -rw-r--r-- | kernel/trace/trace_output.c | 5 |

11 files changed, 87 insertions(+), 53 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index a6a9ec4cd8f..fd126f82b57 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3183,7 +3183,7 @@ static void perf_event_for_each(struct perf_event *event,
 	perf_event_for_each_child(event, func);
 	func(event);
 	list_for_each_entry(sibling, &event->sibling_list, group_entry)
-		perf_event_for_each_child(event, func);
+		perf_event_for_each_child(sibling, func);
 	mutex_unlock(&ctx->mutex);
 }
diff --git a/kernel/irq/debug.h b/kernel/irq/debug.h
index 97a8bfadc88..e75e29e4434 100644
--- a/kernel/irq/debug.h
+++ b/kernel/irq/debug.h
@@ -4,10 +4,10 @@
 
 #include <linux/kallsyms.h>
 
-#define P(f) if (desc->status_use_accessors & f) printk("%14s set\n", #f)
-#define PS(f) if (desc->istate & f) printk("%14s set\n", #f)
+#define ___P(f) if (desc->status_use_accessors & f) printk("%14s set\n", #f)
+#define ___PS(f) if (desc->istate & f) printk("%14s set\n", #f)
 /* FIXME */
-#define PD(f) do { } while (0)
+#define ___PD(f) do { } while (0)
 
 static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
 {
@@ -23,23 +23,23 @@ static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc)
 		print_symbol("%s\n", (unsigned long)desc->action->handler);
 	}
 
-	P(IRQ_LEVEL);
-	P(IRQ_PER_CPU);
-	P(IRQ_NOPROBE);
-	P(IRQ_NOREQUEST);
-	P(IRQ_NOTHREAD);
-	P(IRQ_NOAUTOEN);
+	___P(IRQ_LEVEL);
+	___P(IRQ_PER_CPU);
+	___P(IRQ_NOPROBE);
+	___P(IRQ_NOREQUEST);
+	___P(IRQ_NOTHREAD);
+	___P(IRQ_NOAUTOEN);
 
-	PS(IRQS_AUTODETECT);
-	PS(IRQS_REPLAY);
-	PS(IRQS_WAITING);
-	PS(IRQS_PENDING);
+	___PS(IRQS_AUTODETECT);
+	___PS(IRQS_REPLAY);
+	___PS(IRQS_WAITING);
+	___PS(IRQS_PENDING);
 
-	PD(IRQS_INPROGRESS);
-	PD(IRQS_DISABLED);
-	PD(IRQS_MASKED);
+	___PD(IRQS_INPROGRESS);
+	___PD(IRQS_DISABLED);
+	___PD(IRQS_MASKED);
 }
 
-#undef P
-#undef PS
-#undef PD
+#undef ___P
+#undef ___PS
+#undef ___PD
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 8742fd013a9..eef311a58a6 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -51,6 +51,23 @@
 
 #define MAP_PAGE_ENTRIES	(PAGE_SIZE / sizeof(sector_t) - 1)
 
+/*
+ * Number of free pages that are not high.
+ */
+static inline unsigned long low_free_pages(void)
+{
+	return nr_free_pages() - nr_free_highpages();
+}
+
+/*
+ * Number of pages required to be kept free while writing the image. Always
+ * half of all available low pages before the writing starts.
+ */
+static inline unsigned long reqd_free_pages(void)
+{
+	return low_free_pages() / 2;
+}
+
 struct swap_map_page {
 	sector_t entries[MAP_PAGE_ENTRIES];
 	sector_t next_swap;
@@ -72,7 +89,7 @@ struct swap_map_handle {
 	sector_t cur_swap;
 	sector_t first_sector;
 	unsigned int k;
-	unsigned long nr_free_pages, written;
+	unsigned long reqd_free_pages;
 	u32 crc32;
 };
@@ -316,8 +333,7 @@ static int get_swap_writer(struct swap_map_handle *handle)
 		goto err_rel;
 	}
 	handle->k = 0;
-	handle->nr_free_pages = nr_free_pages() >> 1;
-	handle->written = 0;
+	handle->reqd_free_pages = reqd_free_pages();
 	handle->first_sector = handle->cur_swap;
 	return 0;
 err_rel:
@@ -352,11 +368,11 @@ static int swap_write_page(struct swap_map_handle *handle, void *buf,
 		handle->cur_swap = offset;
 		handle->k = 0;
 	}
-	if (bio_chain && ++handle->written > handle->nr_free_pages) {
+	if (bio_chain && low_free_pages() <= handle->reqd_free_pages) {
 		error = hib_wait_on_bio_chain(bio_chain);
 		if (error)
 			goto out;
-		handle->written = 0;
+		handle->reqd_free_pages = reqd_free_pages();
 	}
 out:
 	return error;
@@ -618,7 +634,7 @@ static int save_image_lzo(struct swap_map_handle *handle,
 	 * Adjust number of free pages after all allocations have been done.
 	 * We don't want to run out of pages when writing.
 	 */
-	handle->nr_free_pages = nr_free_pages() >> 1;
+	handle->reqd_free_pages = reqd_free_pages();
 
 	/*
 	 * Start the CRC32 thread.
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 1050d6d3922..d0c5baf1ab1 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1820,7 +1820,6 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 	 * a quiescent state betweentimes.
 	 */
 	local_irq_save(flags);
-	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
 	rdp = this_cpu_ptr(rsp->rda);
 
 	/* Add the callback to our list. */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4603b9d8f30..0533a688ce2 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6405,16 +6405,26 @@ static void __sdt_free(const struct cpumask *cpu_map)
 		struct sd_data *sdd = &tl->data;
 
 		for_each_cpu(j, cpu_map) {
-			struct sched_domain *sd = *per_cpu_ptr(sdd->sd, j);
-			if (sd && (sd->flags & SD_OVERLAP))
-				free_sched_groups(sd->groups, 0);
-			kfree(*per_cpu_ptr(sdd->sd, j));
-			kfree(*per_cpu_ptr(sdd->sg, j));
-			kfree(*per_cpu_ptr(sdd->sgp, j));
+			struct sched_domain *sd;
+
+			if (sdd->sd) {
+				sd = *per_cpu_ptr(sdd->sd, j);
+				if (sd && (sd->flags & SD_OVERLAP))
+					free_sched_groups(sd->groups, 0);
+				kfree(*per_cpu_ptr(sdd->sd, j));
+			}
+
+			if (sdd->sg)
+				kfree(*per_cpu_ptr(sdd->sg, j));
+			if (sdd->sgp)
+				kfree(*per_cpu_ptr(sdd->sgp, j));
 		}
 		free_percpu(sdd->sd);
+		sdd->sd = NULL;
 		free_percpu(sdd->sg);
+		sdd->sg = NULL;
 		free_percpu(sdd->sgp);
+		sdd->sgp = NULL;
 	}
 }
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0d97ebdc58f..e9553640c1c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -784,7 +784,7 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		update_load_add(&rq_of(cfs_rq)->load, se->load.weight);
 #ifdef CONFIG_SMP
 	if (entity_is_task(se))
-		list_add_tail(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
+		list_add(&se->group_node, &rq_of(cfs_rq)->cfs_tasks);
 #endif
 	cfs_rq->nr_running++;
 }
@@ -3215,6 +3215,8 @@ static int move_one_task(struct lb_env *env)
 
 static unsigned long task_h_load(struct task_struct *p);
 
+static const unsigned int sched_nr_migrate_break = 32;
+
 /*
  * move_tasks tries to move up to load_move weighted load from busiest to
  * this_rq, as part of a balancing operation within domain "sd".
@@ -3242,7 +3244,7 @@ static int move_tasks(struct lb_env *env)
 
 		/* take a breather every nr_migrate tasks */
 		if (env->loop > env->loop_break) {
-			env->loop_break += sysctl_sched_nr_migrate;
+			env->loop_break += sched_nr_migrate_break;
 			env->flags |= LBF_NEED_BREAK;
 			break;
 		}
@@ -3252,7 +3254,7 @@ static int move_tasks(struct lb_env *env)
 
 		load = task_h_load(p);
 
-		if (load < 16 && !env->sd->nr_balance_failed)
+		if (sched_feat(LB_MIN) && load < 16 && !env->sd->nr_balance_failed)
 			goto next;
 
 		if ((load / 2) > env->load_move)
@@ -4407,7 +4409,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
 		.dst_cpu	= this_cpu,
 		.dst_rq		= this_rq,
 		.idle		= idle,
-		.loop_break	= sysctl_sched_nr_migrate,
+		.loop_break	= sched_nr_migrate_break,
 	};
 
 	cpumask_copy(cpus, cpu_active_mask);
@@ -4445,10 +4447,10 @@ redo:
 		 * correctly treated as an imbalance.
 		 */
 		env.flags |= LBF_ALL_PINNED;
-		env.load_move = imbalance;
-		env.src_cpu = busiest->cpu;
-		env.src_rq = busiest;
-		env.loop_max = busiest->nr_running;
+		env.load_move	= imbalance;
+		env.src_cpu	= busiest->cpu;
+		env.src_rq	= busiest;
+		env.loop_max	= min_t(unsigned long, sysctl_sched_nr_migrate, busiest->nr_running);
 
 more_balance:
 		local_irq_save(flags);
diff --git a/kernel/sched/features.h b/kernel/sched/features.h
index e61fd73913d..de00a486c5c 100644
--- a/kernel/sched/features.h
+++ b/kernel/sched/features.h
@@ -68,3 +68,4 @@ SCHED_FEAT(TTWU_QUEUE, true)
 SCHED_FEAT(FORCE_SD_OVERLAP, false)
 SCHED_FEAT(RT_RUNTIME_SHARE, true)
+SCHED_FEAT(LB_MIN, false)
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index bf57abdc7bd..f113755695e 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -346,7 +346,8 @@ int tick_resume_broadcast(void)
 						     tick_get_broadcast_mask());
 			break;
 		case TICKDEV_MODE_ONESHOT:
-			broadcast = tick_resume_broadcast_oneshot(bc);
+			if (!cpumask_empty(tick_get_broadcast_mask()))
+				broadcast = tick_resume_broadcast_oneshot(bc);
 			break;
 		}
 	}
@@ -373,6 +374,9 @@ static int tick_broadcast_set_event(ktime_t expires, int force)
 {
 	struct clock_event_device *bc = tick_broadcast_device.evtdev;
 
+	if (bc->mode != CLOCK_EVT_MODE_ONESHOT)
+		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
+
 	return clockevents_program_event(bc, expires, force);
 }
@@ -531,7 +535,6 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 		int was_periodic = bc->mode == CLOCK_EVT_MODE_PERIODIC;
 
 		bc->event_handler = tick_handle_oneshot_broadcast;
-		clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
 
 		/* Take the do_timer update */
 		tick_do_timer_cpu = cpu;
@@ -549,6 +552,7 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 			   to_cpumask(tmpmask));
 
 		if (was_periodic && !cpumask_empty(to_cpumask(tmpmask))) {
+			clockevents_set_mode(bc, CLOCK_EVT_MODE_ONESHOT);
 			tick_broadcast_init_next_event(to_cpumask(tmpmask),
 						       tick_next_period);
 			tick_broadcast_set_event(tick_next_period, 1);
@@ -577,15 +581,10 @@ void tick_broadcast_switch_to_oneshot(void)
 
 	raw_spin_lock_irqsave(&tick_broadcast_lock, flags);
 
 	tick_broadcast_device.mode = TICKDEV_MODE_ONESHOT;
-
-	if (cpumask_empty(tick_get_broadcast_mask()))
-		goto end;
-
 	bc = tick_broadcast_device.evtdev;
 	if (bc)
 		tick_broadcast_setup_oneshot(bc);
-end:
 	raw_spin_unlock_irqrestore(&tick_broadcast_lock, flags);
 }
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ed7b5d1e12f..2a22255c101 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -4629,7 +4629,8 @@ static ssize_t
 rb_simple_read(struct file *filp, char __user *ubuf,
 	       size_t cnt, loff_t *ppos)
 {
-	struct ring_buffer *buffer = filp->private_data;
+	struct trace_array *tr = filp->private_data;
+	struct ring_buffer *buffer = tr->buffer;
 	char buf[64];
 	int r;
 
@@ -4647,7 +4648,8 @@ static ssize_t
 rb_simple_write(struct file *filp, const char __user *ubuf,
 		size_t cnt, loff_t *ppos)
 {
-	struct ring_buffer *buffer = filp->private_data;
+	struct trace_array *tr = filp->private_data;
+	struct ring_buffer *buffer = tr->buffer;
 	unsigned long val;
 	int ret;
 
@@ -4734,7 +4736,7 @@ static __init int tracer_init_debugfs(void)
 			  &trace_clock_fops);
 
 	trace_create_file("tracing_on", 0644, d_tracer,
-			    global_trace.buffer, &rb_simple_fops);
+			    &global_trace, &rb_simple_fops);
 
 #ifdef CONFIG_DYNAMIC_FTRACE
 	trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 95059f091a2..f95d65da6db 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -836,11 +836,11 @@ extern const char *__stop___trace_bprintk_fmt[];
 		     filter)
 #include "trace_entries.h"
 
-#ifdef CONFIG_FUNCTION_TRACER
+#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_FUNCTION_TRACER)
 int perf_ftrace_event_register(struct ftrace_event_call *call,
 			       enum trace_reg type, void *data);
 #else
 #define perf_ftrace_event_register NULL
-#endif /* CONFIG_FUNCTION_TRACER */
+#endif
 
 #endif /* _LINUX_KERNEL_TRACE_H */
diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
index 859fae6b182..df611a0e76c 100644
--- a/kernel/trace/trace_output.c
+++ b/kernel/trace/trace_output.c
@@ -652,6 +652,8 @@ int trace_print_lat_context(struct trace_iterator *iter)
 {
 	u64 next_ts;
 	int ret;
+	/* trace_find_next_entry will reset ent_size */
+	int ent_size = iter->ent_size;
 	struct trace_seq *s = &iter->seq;
 	struct trace_entry *entry = iter->ent,
 			   *next_entry = trace_find_next_entry(iter, NULL,
@@ -660,6 +662,9 @@ int trace_print_lat_context(struct trace_iterator *iter)
 	unsigned long abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
 	unsigned long rel_usecs;
 
+	/* Restore the original ent_size */
+	iter->ent_size = ent_size;
+
 	if (!next_entry)
 		next_ts = iter->ts;
 	rel_usecs = ns2usecs(next_ts - iter->ts);
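
For readers skimming the diff, the kernel/power/swap.c hunks above replace a per-handle "pages written since the last wait" counter with a check against how much low memory is actually free: writing throttles whenever free low pages drop to half of what was free when image writing started. Below is a toy user-space model of that heuristic, a minimal sketch only; free_low_pages, in_flight and wait_for_in_flight_io() are hypothetical stand-ins, not the kernel's real nr_free_pages()/nr_free_highpages() or hib_wait_on_bio_chain() interfaces.

```c
#include <stdio.h>

/* Hypothetical stand-ins for the kernel's MM counters and bio-chain wait. */
static unsigned long free_low_pages = 10000;	/* pretend low-memory free pages */
static unsigned long in_flight;			/* pages queued but not yet written */

static unsigned long low_free_pages(void)
{
	return free_low_pages;
}

/* Mirrors the patch: keep half of the currently free low pages in reserve. */
static unsigned long reqd_free_pages(void)
{
	return low_free_pages() / 2;
}

/* Models hib_wait_on_bio_chain(): completed I/O returns its pages to the pool. */
static void wait_for_in_flight_io(void)
{
	free_low_pages += in_flight;
	in_flight = 0;
}

int main(void)
{
	unsigned long threshold = reqd_free_pages();
	unsigned long page;

	for (page = 0; page < 20000; page++) {
		/* Queue one image page for asynchronous writing. */
		free_low_pages--;
		in_flight++;

		/* The patch's condition: throttle once free low memory falls
		 * to the reserve, rather than counting pages written. */
		if (low_free_pages() <= threshold) {
			wait_for_in_flight_io();
			threshold = reqd_free_pages();
			printf("throttled at page %lu, new threshold %lu\n",
			       page, threshold);
		}
	}
	return 0;
}
```

The point of the change, as modelled here, is that the trigger tracks real memory pressure (free low pages) instead of an indirect proxy (pages written since the last wait), so the threshold is also recomputed after every wait.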