-rw-r--r--  include/linux/ftrace.h |  1
-rw-r--r--  include/linux/kernel.h |  1
-rw-r--r--  kernel/extable.c       |  8
-rw-r--r--  kernel/trace/ftrace.c  | 37
4 files changed, 40 insertions, 7 deletions
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 4609c0ece79..caba694a62b 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -34,6 +34,7 @@ struct ftrace_hash;
 enum {
 	FTRACE_OPS_FL_ENABLED		= 1 << 0,
 	FTRACE_OPS_FL_GLOBAL		= 1 << 1,
+	FTRACE_OPS_FL_DYNAMIC		= 1 << 2,
 };
 
 struct ftrace_ops {
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 00cec4dc0ae..f37ba716ef8 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -283,6 +283,7 @@ extern char *get_options(const char *str, int nints, int *ints);
 extern unsigned long long memparse(const char *ptr, char **retptr);
 
 extern int core_kernel_text(unsigned long addr);
+extern int core_kernel_data(unsigned long addr);
 extern int __kernel_text_address(unsigned long addr);
 extern int kernel_text_address(unsigned long addr);
 extern int func_ptr_is_kernel_text(void *ptr);
diff --git a/kernel/extable.c b/kernel/extable.c
index 7f8f263f852..c2d625fcda7 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -72,6 +72,14 @@ int core_kernel_text(unsigned long addr)
 	return 0;
 }
 
+int core_kernel_data(unsigned long addr)
+{
+	if (addr >= (unsigned long)_sdata &&
+	    addr < (unsigned long)_edata)
+		return 1;
+	return 0;
+}
+
 int __kernel_text_address(unsigned long addr)
 {
 	if (core_kernel_text(addr))
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 6c7e1df39b5..5b3ee04e39d 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -189,8 +189,14 @@ static void update_ftrace_function(void)
 
 	update_global_ops();
 
+	/*
+	 * If we are at the end of the list and this ops is
+	 * not dynamic, then have the mcount trampoline call
+	 * the function directly
+	 */
 	if (ftrace_ops_list == &ftrace_list_end ||
-	    ftrace_ops_list->next == &ftrace_list_end)
+	    (ftrace_ops_list->next == &ftrace_list_end &&
+	     !(ftrace_ops_list->flags & FTRACE_OPS_FL_DYNAMIC)))
 		func = ftrace_ops_list->func;
 	else
 		func = ftrace_ops_list_func;
@@ -250,6 +256,9 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
 	if (WARN_ON(ops->flags & FTRACE_OPS_FL_ENABLED))
 		return -EBUSY;
 
+	if (!core_kernel_data((unsigned long)ops))
+		ops->flags |= FTRACE_OPS_FL_DYNAMIC;
+
 	if (ops->flags & FTRACE_OPS_FL_GLOBAL) {
 		int first = ftrace_global_list == &ftrace_list_end;
 		add_ftrace_ops(&ftrace_global_list, ops);
@@ -293,6 +302,13 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
 	if (ftrace_enabled)
 		update_ftrace_function();
 
+	/*
+	 * Dynamic ops may be freed, we must make sure that all
+	 * callers are done before leaving this function.
+	 */
+	if (ops->flags & FTRACE_OPS_FL_DYNAMIC)
+		synchronize_sched();
+
 	return 0;
 }
 
@@ -1225,6 +1241,9 @@ ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
  * the filter_hash does not exist or is empty,
  *  AND
  * the ip is not in the ops->notrace_hash.
+ *
+ * This needs to be called with preemption disabled as
+ * the hashes are freed with call_rcu_sched().
  */
 static int
 ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
@@ -1233,9 +1252,6 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
 	struct ftrace_hash *notrace_hash;
 	int ret;
 
-	/* The hashes are freed with call_rcu_sched() */
-	preempt_disable_notrace();
-
 	filter_hash = rcu_dereference_raw(ops->filter_hash);
 	notrace_hash = rcu_dereference_raw(ops->notrace_hash);
 
@@ -1246,7 +1262,6 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
 		ret = 1;
 	else
 		ret = 0;
-	preempt_enable_notrace();
 
 	return ret;
 }
@@ -3425,14 +3440,20 @@ ftrace_ops_test(struct ftrace_ops *ops, unsigned long ip)
 static void
 ftrace_ops_list_func(unsigned long ip, unsigned long parent_ip)
 {
-	/* see comment above ftrace_global_list_func */
-	struct ftrace_ops *op = rcu_dereference_raw(ftrace_ops_list);
+	struct ftrace_ops *op;
 
+	/*
+	 * Some of the ops may be dynamically allocated,
+	 * they must be freed after a synchronize_sched().
+	 */
+	preempt_disable_notrace();
+	op = rcu_dereference_raw(ftrace_ops_list);
 	while (op != &ftrace_list_end) {
 		if (ftrace_ops_test(op, ip))
 			op->func(ip, parent_ip);
 		op = rcu_dereference_raw(op->next);
 	};
+	preempt_enable_notrace();
 }
 
 static void clear_ftrace_swapper(void)
@@ -3743,6 +3764,7 @@ int register_ftrace_function(struct ftrace_ops *ops)
 	mutex_unlock(&ftrace_lock);
 	return ret;
 }
+EXPORT_SYMBOL_GPL(register_ftrace_function);
 
 /**
  * unregister_ftrace_function - unregister a function for profiling.
@@ -3762,6 +3784,7 @@ int unregister_ftrace_function(struct ftrace_ops *ops)
 
 	return ret;
 }
+EXPORT_SYMBOL_GPL(unregister_ftrace_function);
 
 int
 ftrace_enable_sysctl(struct ctl_table *table, int write,
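For context, the following is a minimal sketch of the use case this patch enables; it is not part of the patch, and every name in it (my_trace.c, my_ops, my_trace_func) is illustrative. A module kmalloc()s its struct ftrace_ops; because that address falls outside the core kernel's _sdata.._edata range, core_kernel_data() returns 0 and __register_ftrace_function() marks the ops FTRACE_OPS_FL_DYNAMIC. The sketch assumes the callback signature used at this point in the tree: void (*func)(unsigned long ip, unsigned long parent_ip).

/*
 * my_trace.c -- hypothetical module, illustrative only.
 * Registers a dynamically allocated ftrace_ops, which the newly
 * exported register_ftrace_function()/unregister_ftrace_function()
 * now permit from module code.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/ftrace.h>

static struct ftrace_ops *my_ops;

/* Called from the mcount trampoline for every traced function. */
static void my_trace_func(unsigned long ip, unsigned long parent_ip)
{
}

static int __init my_trace_init(void)
{
	/*
	 * Heap allocation: core_kernel_data() returns 0 for this
	 * address, so __register_ftrace_function() will set
	 * FTRACE_OPS_FL_DYNAMIC on the ops automatically.
	 */
	my_ops = kzalloc(sizeof(*my_ops), GFP_KERNEL);
	if (!my_ops)
		return -ENOMEM;
	my_ops->func = my_trace_func;
	return register_ftrace_function(my_ops);
}

static void __exit my_trace_exit(void)
{
	/*
	 * Because the ops is dynamic, unregistering implies a
	 * synchronize_sched(), so no CPU can still be executing
	 * my_trace_func() through this ops once unregister returns;
	 * only then is kfree() safe.
	 */
	unregister_ftrace_function(my_ops);
	kfree(my_ops);
}

module_init(my_trace_init);
module_exit(my_trace_exit);
MODULE_LICENSE("GPL"); /* the symbols are EXPORT_SYMBOL_GPL */

The preempt_disable_notrace()/preempt_enable_notrace() pair added to ftrace_ops_list_func() is what makes this teardown safe: with preemption disabled, each walk of the ops list is an RCU-sched read-side critical section, so the synchronize_sched() in __unregister_ftrace_function() cannot return until every in-flight caller has left the list.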