diff options
Diffstat (limited to 'kernel/events/core.c')
| -rw-r--r-- | kernel/events/core.c | 94 | 
1 file changed, 62 insertions(+), 32 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c index f1cf0edeb39..7fee567153f 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -1253,7 +1253,7 @@ retry:  /*   * Cross CPU call to disable a performance event   */ -static int __perf_event_disable(void *info) +int __perf_event_disable(void *info)  {  	struct perf_event *event = info;  	struct perf_event_context *ctx = event->ctx; @@ -2935,12 +2935,12 @@ EXPORT_SYMBOL_GPL(perf_event_release_kernel);  /*   * Called when the last reference to the file is gone.   */ -static int perf_release(struct inode *inode, struct file *file) +static void put_event(struct perf_event *event)  { -	struct perf_event *event = file->private_data;  	struct task_struct *owner; -	file->private_data = NULL; +	if (!atomic_long_dec_and_test(&event->refcount)) +		return;  	rcu_read_lock();  	owner = ACCESS_ONCE(event->owner); @@ -2975,7 +2975,13 @@ static int perf_release(struct inode *inode, struct file *file)  		put_task_struct(owner);  	} -	return perf_event_release_kernel(event); +	perf_event_release_kernel(event); +} + +static int perf_release(struct inode *inode, struct file *file) +{ +	put_event(file->private_data); +	return 0;  }  u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running) @@ -3227,7 +3233,7 @@ unlock:  static const struct file_operations perf_fops; -static struct perf_event *perf_fget_light(int fd, int *fput_needed) +static struct file *perf_fget_light(int fd, int *fput_needed)  {  	struct file *file; @@ -3241,7 +3247,7 @@ static struct perf_event *perf_fget_light(int fd, int *fput_needed)  		return ERR_PTR(-EBADF);  	} -	return file->private_data; +	return file;  }  static int perf_event_set_output(struct perf_event *event, @@ -3273,19 +3279,21 @@ static long perf_ioctl(struct file *file, unsigned int cmd, unsigned long arg)  	case PERF_EVENT_IOC_SET_OUTPUT:  	{ +		struct file *output_file = NULL;  		struct perf_event *output_event = NULL;  		int fput_needed = 0;  		int ret;  
		if (arg != -1) { -			output_event = perf_fget_light(arg, &fput_needed); -			if (IS_ERR(output_event)) -				return PTR_ERR(output_event); +			output_file = perf_fget_light(arg, &fput_needed); +			if (IS_ERR(output_file)) +				return PTR_ERR(output_file); +			output_event = output_file->private_data;  		}  		ret = perf_event_set_output(event, output_event);  		if (output_event) -			fput_light(output_event->filp, fput_needed); +			fput_light(output_file, fput_needed);  		return ret;  	} @@ -4039,7 +4047,7 @@ void perf_prepare_sample(struct perf_event_header *header,  	if (sample_type & PERF_SAMPLE_CALLCHAIN) {  		int size = 1; -		data->callchain = perf_callchain(regs); +		data->callchain = perf_callchain(event, regs);  		if (data->callchain)  			size += data->callchain->nr; @@ -5209,7 +5217,8 @@ static int perf_tp_event_match(struct perf_event *event,  }  void perf_tp_event(u64 addr, u64 count, void *record, int entry_size, -		   struct pt_regs *regs, struct hlist_head *head, int rctx) +		   struct pt_regs *regs, struct hlist_head *head, int rctx, +		   struct task_struct *task)  {  	struct perf_sample_data data;  	struct perf_event *event; @@ -5228,6 +5237,31 @@ void perf_tp_event(u64 addr, u64 count, void *record, int entry_size,  			perf_swevent_event(event, count, &data, regs);  	} +	/* +	 * If we got specified a target task, also iterate its context and +	 * deliver this event there too. 
+	 */ +	if (task && task != current) { +		struct perf_event_context *ctx; +		struct trace_entry *entry = record; + +		rcu_read_lock(); +		ctx = rcu_dereference(task->perf_event_ctxp[perf_sw_context]); +		if (!ctx) +			goto unlock; + +		list_for_each_entry_rcu(event, &ctx->event_list, event_entry) { +			if (event->attr.type != PERF_TYPE_TRACEPOINT) +				continue; +			if (event->attr.config != entry->type) +				continue; +			if (perf_tp_event_match(event, &data, regs)) +				perf_swevent_event(event, count, &data, regs); +		} +unlock: +		rcu_read_unlock(); +	} +  	perf_swevent_put_recursion_context(rctx);  }  EXPORT_SYMBOL_GPL(perf_tp_event); @@ -5924,6 +5958,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,  	mutex_init(&event->mmap_mutex); +	atomic_long_set(&event->refcount, 1);  	event->cpu		= cpu;  	event->attr		= *attr;  	event->group_leader	= group_leader; @@ -6234,12 +6269,12 @@ SYSCALL_DEFINE5(perf_event_open,  		return event_fd;  	if (group_fd != -1) { -		group_leader = perf_fget_light(group_fd, &fput_needed); -		if (IS_ERR(group_leader)) { -			err = PTR_ERR(group_leader); +		group_file = perf_fget_light(group_fd, &fput_needed); +		if (IS_ERR(group_file)) { +			err = PTR_ERR(group_file);  			goto err_fd;  		} -		group_file = group_leader->filp; +		group_leader = group_file->private_data;  		if (flags & PERF_FLAG_FD_OUTPUT)  			output_event = group_leader;  		if (flags & PERF_FLAG_FD_NO_GROUP) @@ -6376,7 +6411,6 @@ SYSCALL_DEFINE5(perf_event_open,  		put_ctx(gctx);  	} -	event->filp = event_file;  	WARN_ON_ONCE(ctx->parent_ctx);  	mutex_lock(&ctx->mutex); @@ -6470,7 +6504,6 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,  		goto err_free;  	} -	event->filp = NULL;  	WARN_ON_ONCE(ctx->parent_ctx);  	mutex_lock(&ctx->mutex);  	perf_install_in_context(ctx, event, cpu); @@ -6552,7 +6585,7 @@ static void sync_child_event(struct perf_event *child_event,  	 * Release the parent event, if this was the last  	 * reference to it.  	 
*/ -	fput(parent_event->filp); +	put_event(parent_event);  }  static void @@ -6628,9 +6661,8 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)  	 *  	 *   __perf_event_exit_task()  	 *     sync_child_event() -	 *       fput(parent_event->filp) -	 *         perf_release() -	 *           mutex_lock(&ctx->mutex) +	 *       put_event() +	 *         mutex_lock(&ctx->mutex)  	 *  	 * But since its the parent context it won't be the same instance.  	 */ @@ -6698,7 +6730,7 @@ static void perf_free_event(struct perf_event *event,  	list_del_init(&event->child_list);  	mutex_unlock(&parent->child_mutex); -	fput(parent->filp); +	put_event(parent);  	perf_group_detach(event);  	list_del_event(event, ctx); @@ -6778,6 +6810,12 @@ inherit_event(struct perf_event *parent_event,  				           NULL, NULL);  	if (IS_ERR(child_event))  		return child_event; + +	if (!atomic_long_inc_not_zero(&parent_event->refcount)) { +		free_event(child_event); +		return NULL; +	} +  	get_ctx(child_ctx);  	/* @@ -6819,14 +6857,6 @@ inherit_event(struct perf_event *parent_event,  	raw_spin_unlock_irqrestore(&child_ctx->lock, flags);  	/* -	 * Get a reference to the parent filp - we will fput it -	 * when the child event exits. This is safe to do because -	 * we are in the parent and we know that the filp still -	 * exists and has a nonzero count: -	 */ -	atomic_long_inc(&parent_event->filp->f_count); - -	/*  	 * Link this into the parent event's child list  	 */  	WARN_ON_ONCE(parent_event->ctx->parent_ctx);  |