diff options
Diffstat (limited to 'kernel/module.c')
| -rw-r--r-- | kernel/module.c | 612 | 
1 file changed, 395 insertions, 217 deletions
diff --git a/kernel/module.c b/kernel/module.c index 808bd62e172..eab08274ec9 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -21,6 +21,7 @@  #include <linux/ftrace_event.h>  #include <linux/init.h>  #include <linux/kallsyms.h> +#include <linux/file.h>  #include <linux/fs.h>  #include <linux/sysfs.h>  #include <linux/kernel.h> @@ -28,6 +29,7 @@  #include <linux/vmalloc.h>  #include <linux/elf.h>  #include <linux/proc_fs.h> +#include <linux/security.h>  #include <linux/seq_file.h>  #include <linux/syscalls.h>  #include <linux/fcntl.h> @@ -59,6 +61,7 @@  #include <linux/pfn.h>  #include <linux/bsearch.h>  #include <linux/fips.h> +#include <uapi/linux/module.h>  #include "module-internal.h"  #define CREATE_TRACE_POINTS @@ -185,6 +188,7 @@ struct load_info {     ongoing or failed initialization etc. */  static inline int strong_try_module_get(struct module *mod)  { +	BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);  	if (mod && mod->state == MODULE_STATE_COMING)  		return -EBUSY;  	if (try_module_get(mod)) @@ -340,6 +344,9 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,  #endif  		}; +		if (mod->state == MODULE_STATE_UNFORMED) +			continue; +  		if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))  			return true;  	} @@ -447,16 +454,24 @@ const struct kernel_symbol *find_symbol(const char *name,  EXPORT_SYMBOL_GPL(find_symbol);  /* Search for module by name: must hold module_mutex. 
*/ -struct module *find_module(const char *name) +static struct module *find_module_all(const char *name, +				      bool even_unformed)  {  	struct module *mod;  	list_for_each_entry(mod, &modules, list) { +		if (!even_unformed && mod->state == MODULE_STATE_UNFORMED) +			continue;  		if (strcmp(mod->name, name) == 0)  			return mod;  	}  	return NULL;  } + +struct module *find_module(const char *name) +{ +	return find_module_all(name, false); +}  EXPORT_SYMBOL_GPL(find_module);  #ifdef CONFIG_SMP @@ -522,6 +537,8 @@ bool is_module_percpu_address(unsigned long addr)  	preempt_disable();  	list_for_each_entry_rcu(mod, &modules, list) { +		if (mod->state == MODULE_STATE_UNFORMED) +			continue;  		if (!mod->percpu_size)  			continue;  		for_each_possible_cpu(cpu) { @@ -1045,6 +1062,8 @@ static ssize_t show_initstate(struct module_attribute *mattr,  	case MODULE_STATE_GOING:  		state = "going";  		break; +	default: +		BUG();  	}  	return sprintf(buffer, "%s\n", state);  } @@ -1783,6 +1802,8 @@ void set_all_modules_text_rw(void)  	mutex_lock(&module_mutex);  	list_for_each_entry_rcu(mod, &modules, list) { +		if (mod->state == MODULE_STATE_UNFORMED) +			continue;  		if ((mod->module_core) && (mod->core_text_size)) {  			set_page_attributes(mod->module_core,  						mod->module_core + mod->core_text_size, @@ -1804,6 +1825,8 @@ void set_all_modules_text_ro(void)  	mutex_lock(&module_mutex);  	list_for_each_entry_rcu(mod, &modules, list) { +		if (mod->state == MODULE_STATE_UNFORMED) +			continue;  		if ((mod->module_core) && (mod->core_text_size)) {  			set_page_attributes(mod->module_core,  						mod->module_core + mod->core_text_size, @@ -2279,7 +2302,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)  	Elf_Shdr *symsect = info->sechdrs + info->index.sym;  	Elf_Shdr *strsect = info->sechdrs + info->index.str;  	const Elf_Sym *src; -	unsigned int i, nsrc, ndst, strtab_size; +	unsigned int i, nsrc, ndst, strtab_size = 0;  	/* Put symbol section at end 
of init part of module. */  	symsect->sh_flags |= SHF_ALLOC; @@ -2290,9 +2313,6 @@ static void layout_symtab(struct module *mod, struct load_info *info)  	src = (void *)info->hdr + symsect->sh_offset;  	nsrc = symsect->sh_size / sizeof(*src); -	/* strtab always starts with a nul, so offset 0 is the empty string. */ -	strtab_size = 1; -  	/* Compute total space required for the core symbols' strtab. */  	for (ndst = i = 0; i < nsrc; i++) {  		if (i == 0 || @@ -2334,7 +2354,6 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)  	mod->core_symtab = dst = mod->module_core + info->symoffs;  	mod->core_strtab = s = mod->module_core + info->stroffs;  	src = mod->symtab; -	*s++ = 0;  	for (ndst = i = 0; i < mod->num_symtab; i++) {  		if (i == 0 ||  		    is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) { @@ -2375,7 +2394,7 @@ static void dynamic_debug_remove(struct _ddebug *debug)  void * __weak module_alloc(unsigned long size)  { -	return size == 0 ? NULL : vmalloc_exec(size); +	return vmalloc_exec(size);  }  static void *module_alloc_update_bounds(unsigned long size) @@ -2422,18 +2441,17 @@ static inline void kmemleak_load_module(const struct module *mod,  #endif  #ifdef CONFIG_MODULE_SIG -static int module_sig_check(struct load_info *info, -			    const void *mod, unsigned long *_len) +static int module_sig_check(struct load_info *info)  {  	int err = -ENOKEY; -	unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1; -	unsigned long len = *_len; +	const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1; +	const void *mod = info->hdr; -	if (len > markerlen && -	    memcmp(mod + len - markerlen, MODULE_SIG_STRING, markerlen) == 0) { +	if (info->len > markerlen && +	    memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {  		/* We truncate the module to discard the signature */ -		*_len -= markerlen; -		err = mod_verify_sig(mod, _len); +		info->len -= markerlen; +		err = mod_verify_sig(mod, &info->len);  	}  	
if (!err) { @@ -2451,59 +2469,114 @@ static int module_sig_check(struct load_info *info,  	return err;  }  #else /* !CONFIG_MODULE_SIG */ -static int module_sig_check(struct load_info *info, -			    void *mod, unsigned long *len) +static int module_sig_check(struct load_info *info)  {  	return 0;  }  #endif /* !CONFIG_MODULE_SIG */ -/* Sets info->hdr, info->len and info->sig_ok. */ -static int copy_and_check(struct load_info *info, -			  const void __user *umod, unsigned long len, -			  const char __user *uargs) +/* Sanity checks against invalid binaries, wrong arch, weird elf version. */ +static int elf_header_check(struct load_info *info) +{ +	if (info->len < sizeof(*(info->hdr))) +		return -ENOEXEC; + +	if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0 +	    || info->hdr->e_type != ET_REL +	    || !elf_check_arch(info->hdr) +	    || info->hdr->e_shentsize != sizeof(Elf_Shdr)) +		return -ENOEXEC; + +	if (info->hdr->e_shoff >= info->len +	    || (info->hdr->e_shnum * sizeof(Elf_Shdr) > +		info->len - info->hdr->e_shoff)) +		return -ENOEXEC; + +	return 0; +} + +/* Sets info->hdr and info->len. */ +static int copy_module_from_user(const void __user *umod, unsigned long len, +				  struct load_info *info)  {  	int err; -	Elf_Ehdr *hdr; -	if (len < sizeof(*hdr)) +	info->len = len; +	if (info->len < sizeof(*(info->hdr)))  		return -ENOEXEC; +	err = security_kernel_module_from_file(NULL); +	if (err) +		return err; +  	/* Suck in entire file: we'll want most of it. */ -	if ((hdr = vmalloc(len)) == NULL) +	info->hdr = vmalloc(info->len); +	if (!info->hdr)  		return -ENOMEM; -	if (copy_from_user(hdr, umod, len) != 0) { -		err = -EFAULT; -		goto free_hdr; +	if (copy_from_user(info->hdr, umod, info->len) != 0) { +		vfree(info->hdr); +		return -EFAULT;  	} -	err = module_sig_check(info, hdr, &len); +	return 0; +} + +/* Sets info->hdr and info->len. 
*/ +static int copy_module_from_fd(int fd, struct load_info *info) +{ +	struct file *file; +	int err; +	struct kstat stat; +	loff_t pos; +	ssize_t bytes = 0; + +	file = fget(fd); +	if (!file) +		return -ENOEXEC; + +	err = security_kernel_module_from_file(file); +	if (err) +		goto out; + +	err = vfs_getattr(file->f_vfsmnt, file->f_dentry, &stat);  	if (err) -		goto free_hdr; +		goto out; -	/* Sanity checks against insmoding binaries or wrong arch, -	   weird elf version */ -	if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0 -	    || hdr->e_type != ET_REL -	    || !elf_check_arch(hdr) -	    || hdr->e_shentsize != sizeof(Elf_Shdr)) { -		err = -ENOEXEC; -		goto free_hdr; +	if (stat.size > INT_MAX) { +		err = -EFBIG; +		goto out;  	} -	if (hdr->e_shoff >= len || -	    hdr->e_shnum * sizeof(Elf_Shdr) > len - hdr->e_shoff) { -		err = -ENOEXEC; -		goto free_hdr; +	/* Don't hand 0 to vmalloc, it whines. */ +	if (stat.size == 0) { +		err = -EINVAL; +		goto out;  	} -	info->hdr = hdr; -	info->len = len; -	return 0; +	info->hdr = vmalloc(stat.size); +	if (!info->hdr) { +		err = -ENOMEM; +		goto out; +	} + +	pos = 0; +	while (pos < stat.size) { +		bytes = kernel_read(file, pos, (char *)(info->hdr) + pos, +				    stat.size - pos); +		if (bytes < 0) { +			vfree(info->hdr); +			err = bytes; +			goto out; +		} +		if (bytes == 0) +			break; +		pos += bytes; +	} +	info->len = pos; -free_hdr: -	vfree(hdr); +out: +	fput(file);  	return err;  } @@ -2512,7 +2585,7 @@ static void free_copy(struct load_info *info)  	vfree(info->hdr);  } -static int rewrite_section_headers(struct load_info *info) +static int rewrite_section_headers(struct load_info *info, int flags)  {  	unsigned int i; @@ -2540,7 +2613,10 @@ static int rewrite_section_headers(struct load_info *info)  	}  	/* Track but don't keep modinfo and version sections. */ -	info->index.vers = find_sec(info, "__versions"); +	if (flags & MODULE_INIT_IGNORE_MODVERSIONS) +		info->index.vers = 0; /* Pretend no __versions section! 
*/ +	else +		info->index.vers = find_sec(info, "__versions");  	info->index.info = find_sec(info, ".modinfo");  	info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;  	info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC; @@ -2555,7 +2631,7 @@ static int rewrite_section_headers(struct load_info *info)   * Return the temporary module pointer (we'll replace it with the final   * one when we move the module sections around).   */ -static struct module *setup_load_info(struct load_info *info) +static struct module *setup_load_info(struct load_info *info, int flags)  {  	unsigned int i;  	int err; @@ -2566,7 +2642,7 @@ static struct module *setup_load_info(struct load_info *info)  	info->secstrings = (void *)info->hdr  		+ info->sechdrs[info->hdr->e_shstrndx].sh_offset; -	err = rewrite_section_headers(info); +	err = rewrite_section_headers(info, flags);  	if (err)  		return ERR_PTR(err); @@ -2604,11 +2680,14 @@ static struct module *setup_load_info(struct load_info *info)  	return mod;  } -static int check_modinfo(struct module *mod, struct load_info *info) +static int check_modinfo(struct module *mod, struct load_info *info, int flags)  {  	const char *modmagic = get_modinfo(info, "vermagic");  	int err; +	if (flags & MODULE_INIT_IGNORE_VERMAGIC) +		modmagic = NULL; +  	/* This is allowed: modprobe --force will invalidate it. */  	if (!modmagic) {  		err = try_to_force_load(mod, "bad vermagic"); @@ -2738,20 +2817,23 @@ static int move_module(struct module *mod, struct load_info *info)  	memset(ptr, 0, mod->core_size);  	mod->module_core = ptr; -	ptr = module_alloc_update_bounds(mod->init_size); -	/* -	 * The pointer to this block is stored in the module structure -	 * which is inside the block. This block doesn't need to be -	 * scanned as it contains data and code that will be freed -	 * after the module is initialized. 
-	 */ -	kmemleak_ignore(ptr); -	if (!ptr && mod->init_size) { -		module_free(mod, mod->module_core); -		return -ENOMEM; -	} -	memset(ptr, 0, mod->init_size); -	mod->module_init = ptr; +	if (mod->init_size) { +		ptr = module_alloc_update_bounds(mod->init_size); +		/* +		 * The pointer to this block is stored in the module structure +		 * which is inside the block. This block doesn't need to be +		 * scanned as it contains data and code that will be freed +		 * after the module is initialized. +		 */ +		kmemleak_ignore(ptr); +		if (!ptr) { +			module_free(mod, mod->module_core); +			return -ENOMEM; +		} +		memset(ptr, 0, mod->init_size); +		mod->module_init = ptr; +	} else +		mod->module_init = NULL;  	/* Transfer each section which specifies SHF_ALLOC */  	pr_debug("final section addresses:\n"); @@ -2844,18 +2926,18 @@ int __weak module_frob_arch_sections(Elf_Ehdr *hdr,  	return 0;  } -static struct module *layout_and_allocate(struct load_info *info) +static struct module *layout_and_allocate(struct load_info *info, int flags)  {  	/* Module within temporary copy. */  	struct module *mod;  	Elf_Shdr *pcpusec;  	int err; -	mod = setup_load_info(info); +	mod = setup_load_info(info, flags);  	if (IS_ERR(mod))  		return mod; -	err = check_modinfo(mod, info); +	err = check_modinfo(mod, info, flags);  	if (err)  		return ERR_PTR(err); @@ -2935,40 +3017,181 @@ static bool finished_loading(const char *name)  	bool ret;  	mutex_lock(&module_mutex); -	mod = find_module(name); -	ret = !mod || mod->state != MODULE_STATE_COMING; +	mod = find_module_all(name, true); +	ret = !mod || mod->state == MODULE_STATE_LIVE +		|| mod->state == MODULE_STATE_GOING;  	mutex_unlock(&module_mutex);  	return ret;  } +/* Call module constructors. 
*/ +static void do_mod_ctors(struct module *mod) +{ +#ifdef CONFIG_CONSTRUCTORS +	unsigned long i; + +	for (i = 0; i < mod->num_ctors; i++) +		mod->ctors[i](); +#endif +} + +/* This is where the real work happens */ +static int do_init_module(struct module *mod) +{ +	int ret = 0; + +	/* +	 * We want to find out whether @mod uses async during init.  Clear +	 * PF_USED_ASYNC.  async_schedule*() will set it. +	 */ +	current->flags &= ~PF_USED_ASYNC; + +	blocking_notifier_call_chain(&module_notify_list, +			MODULE_STATE_COMING, mod); + +	/* Set RO and NX regions for core */ +	set_section_ro_nx(mod->module_core, +				mod->core_text_size, +				mod->core_ro_size, +				mod->core_size); + +	/* Set RO and NX regions for init */ +	set_section_ro_nx(mod->module_init, +				mod->init_text_size, +				mod->init_ro_size, +				mod->init_size); + +	do_mod_ctors(mod); +	/* Start the module */ +	if (mod->init != NULL) +		ret = do_one_initcall(mod->init); +	if (ret < 0) { +		/* Init routine failed: abort.  Try to protect us from +                   buggy refcounters. */ +		mod->state = MODULE_STATE_GOING; +		synchronize_sched(); +		module_put(mod); +		blocking_notifier_call_chain(&module_notify_list, +					     MODULE_STATE_GOING, mod); +		free_module(mod); +		wake_up_all(&module_wq); +		return ret; +	} +	if (ret > 0) { +		printk(KERN_WARNING +"%s: '%s'->init suspiciously returned %d, it should follow 0/-E convention\n" +"%s: loading module anyway...\n", +		       __func__, mod->name, ret, +		       __func__); +		dump_stack(); +	} + +	/* Now it's a first class citizen! */ +	mod->state = MODULE_STATE_LIVE; +	blocking_notifier_call_chain(&module_notify_list, +				     MODULE_STATE_LIVE, mod); + +	/* +	 * We need to finish all async code before the module init sequence +	 * is done.  This has potential to deadlock.  For example, a newly +	 * detected block device can trigger request_module() of the +	 * default iosched from async probing task.  
Once userland helper +	 * reaches here, async_synchronize_full() will wait on the async +	 * task waiting on request_module() and deadlock. +	 * +	 * This deadlock is avoided by perfomring async_synchronize_full() +	 * iff module init queued any async jobs.  This isn't a full +	 * solution as it will deadlock the same if module loading from +	 * async jobs nests more than once; however, due to the various +	 * constraints, this hack seems to be the best option for now. +	 * Please refer to the following thread for details. +	 * +	 * http://thread.gmane.org/gmane.linux.kernel/1420814 +	 */ +	if (current->flags & PF_USED_ASYNC) +		async_synchronize_full(); + +	mutex_lock(&module_mutex); +	/* Drop initial reference. */ +	module_put(mod); +	trim_init_extable(mod); +#ifdef CONFIG_KALLSYMS +	mod->num_symtab = mod->core_num_syms; +	mod->symtab = mod->core_symtab; +	mod->strtab = mod->core_strtab; +#endif +	unset_module_init_ro_nx(mod); +	module_free(mod, mod->module_init); +	mod->module_init = NULL; +	mod->init_size = 0; +	mod->init_ro_size = 0; +	mod->init_text_size = 0; +	mutex_unlock(&module_mutex); +	wake_up_all(&module_wq); + +	return 0; +} + +static int may_init_module(void) +{ +	if (!capable(CAP_SYS_MODULE) || modules_disabled) +		return -EPERM; + +	return 0; +} +  /* Allocate and load the module: note that size of section 0 is always     zero, and we rely on this for optional sections. */ -static struct module *load_module(void __user *umod, -				  unsigned long len, -				  const char __user *uargs) +static int load_module(struct load_info *info, const char __user *uargs, +		       int flags)  { -	struct load_info info = { NULL, };  	struct module *mod, *old;  	long err; -	pr_debug("load_module: umod=%p, len=%lu, uargs=%p\n", -	       umod, len, uargs); +	err = module_sig_check(info); +	if (err) +		goto free_copy; -	/* Copy in the blobs from userspace, check they are vaguely sane. 
*/ -	err = copy_and_check(&info, umod, len, uargs); +	err = elf_header_check(info);  	if (err) -		return ERR_PTR(err); +		goto free_copy;  	/* Figure out module layout, and allocate all the memory. */ -	mod = layout_and_allocate(&info); +	mod = layout_and_allocate(info, flags);  	if (IS_ERR(mod)) {  		err = PTR_ERR(mod);  		goto free_copy;  	} +	/* +	 * We try to place it in the list now to make sure it's unique +	 * before we dedicate too many resources.  In particular, +	 * temporary percpu memory exhaustion. +	 */ +	mod->state = MODULE_STATE_UNFORMED; +again: +	mutex_lock(&module_mutex); +	if ((old = find_module_all(mod->name, true)) != NULL) { +		if (old->state == MODULE_STATE_COMING +		    || old->state == MODULE_STATE_UNFORMED) { +			/* Wait in case it fails to load. */ +			mutex_unlock(&module_mutex); +			err = wait_event_interruptible(module_wq, +					       finished_loading(mod->name)); +			if (err) +				goto free_module; +			goto again; +		} +		err = -EEXIST; +		mutex_unlock(&module_mutex); +		goto free_module; +	} +	list_add_rcu(&mod->list, &modules); +	mutex_unlock(&module_mutex); +  #ifdef CONFIG_MODULE_SIG -	mod->sig_ok = info.sig_ok; +	mod->sig_ok = info->sig_ok;  	if (!mod->sig_ok)  		add_taint_module(mod, TAINT_FORCED_MODULE);  #endif @@ -2976,29 +3199,29 @@ static struct module *load_module(void __user *umod,  	/* Now module is in final location, initialize linked lists, etc. */  	err = module_unload_init(mod);  	if (err) -		goto free_module; +		goto unlink_mod;  	/* Now we've got everything in the final locations, we can  	 * find optional sections. */ -	find_module_sections(mod, &info); +	find_module_sections(mod, info);  	err = check_module_license_and_versions(mod);  	if (err)  		goto free_unload;  	/* Set up MODINFO_ATTR fields */ -	setup_modinfo(mod, &info); +	setup_modinfo(mod, info);  	/* Fix up syms, so that st_value is a pointer to location. 
*/ -	err = simplify_symbols(mod, &info); +	err = simplify_symbols(mod, info);  	if (err < 0)  		goto free_modinfo; -	err = apply_relocations(mod, &info); +	err = apply_relocations(mod, info);  	if (err < 0)  		goto free_modinfo; -	err = post_relocation(mod, &info); +	err = post_relocation(mod, info);  	if (err < 0)  		goto free_modinfo; @@ -3011,72 +3234,49 @@ static struct module *load_module(void __user *umod,  		goto free_arch_cleanup;  	} -	/* Mark state as coming so strong_try_module_get() ignores us. */ -	mod->state = MODULE_STATE_COMING; +	dynamic_debug_setup(info->debug, info->num_debug); -	/* Now sew it into the lists so we can get lockdep and oops -	 * info during argument parsing.  No one should access us, since -	 * strong_try_module_get() will fail. -	 * lockdep/oops can run asynchronous, so use the RCU list insertion -	 * function to insert in a way safe to concurrent readers. -	 * The mutex protects against concurrent writers. -	 */ -again:  	mutex_lock(&module_mutex); -	if ((old = find_module(mod->name)) != NULL) { -		if (old->state == MODULE_STATE_COMING) { -			/* Wait in case it fails to load. */ -			mutex_unlock(&module_mutex); -			err = wait_event_interruptible(module_wq, -					       finished_loading(mod->name)); -			if (err) -				goto free_arch_cleanup; -			goto again; -		} -		err = -EEXIST; -		goto unlock; -	} - -	/* This has to be done once we're sure module name is unique. */ -	dynamic_debug_setup(info.debug, info.num_debug); - -	/* Find duplicate symbols */ +	/* Find duplicate symbols (must be called under lock). */  	err = verify_export_symbols(mod);  	if (err < 0) -		goto ddebug; +		goto ddebug_cleanup; + +	/* This relies on module_mutex for list integrity. */ +	module_bug_finalize(info->hdr, info->sechdrs, mod); + +	/* Mark state as coming so strong_try_module_get() ignores us, +	 * but kallsyms etc. can see us. 
*/ +	mod->state = MODULE_STATE_COMING; -	module_bug_finalize(info.hdr, info.sechdrs, mod); -	list_add_rcu(&mod->list, &modules);  	mutex_unlock(&module_mutex);  	/* Module is ready to execute: parsing args may do that. */  	err = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,  			 -32768, 32767, &ddebug_dyndbg_module_param_cb);  	if (err < 0) -		goto unlink; +		goto bug_cleanup;  	/* Link in to syfs. */ -	err = mod_sysfs_setup(mod, &info, mod->kp, mod->num_kp); +	err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);  	if (err < 0) -		goto unlink; +		goto bug_cleanup;  	/* Get rid of temporary copy. */ -	free_copy(&info); +	free_copy(info);  	/* Done! */  	trace_module_load(mod); -	return mod; - unlink: +	return do_init_module(mod); + + bug_cleanup: +	/* module_bug_cleanup needs module_mutex protection */  	mutex_lock(&module_mutex); -	/* Unlink carefully: kallsyms could be walking list. */ -	list_del_rcu(&mod->list);  	module_bug_cleanup(mod); -	wake_up_all(&module_wq); - ddebug: -	dynamic_debug_remove(info.debug); - unlock: + ddebug_cleanup:  	mutex_unlock(&module_mutex); +	dynamic_debug_remove(info->debug);  	synchronize_sched();  	kfree(mod->args);   free_arch_cleanup: @@ -3085,107 +3285,59 @@ again:  	free_modinfo(mod);   free_unload:  	module_unload_free(mod); + unlink_mod: +	mutex_lock(&module_mutex); +	/* Unlink carefully: kallsyms could be walking list. */ +	list_del_rcu(&mod->list); +	wake_up_all(&module_wq); +	mutex_unlock(&module_mutex);   free_module: -	module_deallocate(mod, &info); +	module_deallocate(mod, info);   free_copy: -	free_copy(&info); -	return ERR_PTR(err); -} - -/* Call module constructors. 
*/ -static void do_mod_ctors(struct module *mod) -{ -#ifdef CONFIG_CONSTRUCTORS -	unsigned long i; - -	for (i = 0; i < mod->num_ctors; i++) -		mod->ctors[i](); -#endif +	free_copy(info); +	return err;  } -/* This is where the real work happens */  SYSCALL_DEFINE3(init_module, void __user *, umod,  		unsigned long, len, const char __user *, uargs)  { -	struct module *mod; -	int ret = 0; +	int err; +	struct load_info info = { }; -	/* Must have permission */ -	if (!capable(CAP_SYS_MODULE) || modules_disabled) -		return -EPERM; +	err = may_init_module(); +	if (err) +		return err; -	/* Do all the hard work */ -	mod = load_module(umod, len, uargs); -	if (IS_ERR(mod)) -		return PTR_ERR(mod); +	pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n", +	       umod, len, uargs); -	blocking_notifier_call_chain(&module_notify_list, -			MODULE_STATE_COMING, mod); +	err = copy_module_from_user(umod, len, &info); +	if (err) +		return err; -	/* Set RO and NX regions for core */ -	set_section_ro_nx(mod->module_core, -				mod->core_text_size, -				mod->core_ro_size, -				mod->core_size); +	return load_module(&info, uargs, 0); +} -	/* Set RO and NX regions for init */ -	set_section_ro_nx(mod->module_init, -				mod->init_text_size, -				mod->init_ro_size, -				mod->init_size); +SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags) +{ +	int err; +	struct load_info info = { }; -	do_mod_ctors(mod); -	/* Start the module */ -	if (mod->init != NULL) -		ret = do_one_initcall(mod->init); -	if (ret < 0) { -		/* Init routine failed: abort.  Try to protect us from -                   buggy refcounters. 
*/ -		mod->state = MODULE_STATE_GOING; -		synchronize_sched(); -		module_put(mod); -		blocking_notifier_call_chain(&module_notify_list, -					     MODULE_STATE_GOING, mod); -		free_module(mod); -		wake_up_all(&module_wq); -		return ret; -	} -	if (ret > 0) { -		printk(KERN_WARNING -"%s: '%s'->init suspiciously returned %d, it should follow 0/-E convention\n" -"%s: loading module anyway...\n", -		       __func__, mod->name, ret, -		       __func__); -		dump_stack(); -	} +	err = may_init_module(); +	if (err) +		return err; -	/* Now it's a first class citizen! */ -	mod->state = MODULE_STATE_LIVE; -	blocking_notifier_call_chain(&module_notify_list, -				     MODULE_STATE_LIVE, mod); +	pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags); -	/* We need to finish all async code before the module init sequence is done */ -	async_synchronize_full(); +	if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS +		      |MODULE_INIT_IGNORE_VERMAGIC)) +		return -EINVAL; -	mutex_lock(&module_mutex); -	/* Drop initial reference. 
*/ -	module_put(mod); -	trim_init_extable(mod); -#ifdef CONFIG_KALLSYMS -	mod->num_symtab = mod->core_num_syms; -	mod->symtab = mod->core_symtab; -	mod->strtab = mod->core_strtab; -#endif -	unset_module_init_ro_nx(mod); -	module_free(mod, mod->module_init); -	mod->module_init = NULL; -	mod->init_size = 0; -	mod->init_ro_size = 0; -	mod->init_text_size = 0; -	mutex_unlock(&module_mutex); -	wake_up_all(&module_wq); +	err = copy_module_from_fd(fd, &info); +	if (err) +		return err; -	return 0; +	return load_module(&info, uargs, flags);  }  static inline int within(unsigned long addr, void *start, unsigned long size) @@ -3261,6 +3413,8 @@ const char *module_address_lookup(unsigned long addr,  	preempt_disable();  	list_for_each_entry_rcu(mod, &modules, list) { +		if (mod->state == MODULE_STATE_UNFORMED) +			continue;  		if (within_module_init(addr, mod) ||  		    within_module_core(addr, mod)) {  			if (modname) @@ -3284,6 +3438,8 @@ int lookup_module_symbol_name(unsigned long addr, char *symname)  	preempt_disable();  	list_for_each_entry_rcu(mod, &modules, list) { +		if (mod->state == MODULE_STATE_UNFORMED) +			continue;  		if (within_module_init(addr, mod) ||  		    within_module_core(addr, mod)) {  			const char *sym; @@ -3308,6 +3464,8 @@ int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,  	preempt_disable();  	list_for_each_entry_rcu(mod, &modules, list) { +		if (mod->state == MODULE_STATE_UNFORMED) +			continue;  		if (within_module_init(addr, mod) ||  		    within_module_core(addr, mod)) {  			const char *sym; @@ -3335,6 +3493,8 @@ int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,  	preempt_disable();  	list_for_each_entry_rcu(mod, &modules, list) { +		if (mod->state == MODULE_STATE_UNFORMED) +			continue;  		if (symnum < mod->num_symtab) {  			*value = mod->symtab[symnum].st_value;  			*type = mod->symtab[symnum].st_info; @@ -3377,9 +3537,12 @@ unsigned long module_kallsyms_lookup_name(const char *name)  			ret = 
mod_find_symname(mod, colon+1);  		*colon = ':';  	} else { -		list_for_each_entry_rcu(mod, &modules, list) +		list_for_each_entry_rcu(mod, &modules, list) { +			if (mod->state == MODULE_STATE_UNFORMED) +				continue;  			if ((ret = mod_find_symname(mod, name)) != 0)  				break; +		}  	}  	preempt_enable();  	return ret; @@ -3394,6 +3557,8 @@ int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,  	int ret;  	list_for_each_entry(mod, &modules, list) { +		if (mod->state == MODULE_STATE_UNFORMED) +			continue;  		for (i = 0; i < mod->num_symtab; i++) {  			ret = fn(data, mod->strtab + mod->symtab[i].st_name,  				 mod, mod->symtab[i].st_value); @@ -3409,6 +3574,7 @@ static char *module_flags(struct module *mod, char *buf)  {  	int bx = 0; +	BUG_ON(mod->state == MODULE_STATE_UNFORMED);  	if (mod->taints ||  	    mod->state == MODULE_STATE_GOING ||  	    mod->state == MODULE_STATE_COMING) { @@ -3450,6 +3616,10 @@ static int m_show(struct seq_file *m, void *p)  	struct module *mod = list_entry(p, struct module, list);  	char buf[8]; +	/* We always ignore unformed modules. 
*/ +	if (mod->state == MODULE_STATE_UNFORMED) +		return 0; +  	seq_printf(m, "%s %u",  		   mod->name, mod->init_size + mod->core_size);  	print_unload_info(m, mod); @@ -3510,6 +3680,8 @@ const struct exception_table_entry *search_module_extables(unsigned long addr)  	preempt_disable();  	list_for_each_entry_rcu(mod, &modules, list) { +		if (mod->state == MODULE_STATE_UNFORMED) +			continue;  		if (mod->num_exentries == 0)  			continue; @@ -3558,10 +3730,13 @@ struct module *__module_address(unsigned long addr)  	if (addr < module_addr_min || addr > module_addr_max)  		return NULL; -	list_for_each_entry_rcu(mod, &modules, list) +	list_for_each_entry_rcu(mod, &modules, list) { +		if (mod->state == MODULE_STATE_UNFORMED) +			continue;  		if (within_module_core(addr, mod)  		    || within_module_init(addr, mod))  			return mod; +	}  	return NULL;  }  EXPORT_SYMBOL_GPL(__module_address); @@ -3614,8 +3789,11 @@ void print_modules(void)  	printk(KERN_DEFAULT "Modules linked in:");  	/* Most callers should already have preempt disabled, but make sure */  	preempt_disable(); -	list_for_each_entry_rcu(mod, &modules, list) +	list_for_each_entry_rcu(mod, &modules, list) { +		if (mod->state == MODULE_STATE_UNFORMED) +			continue;  		printk(" %s%s", mod->name, module_flags(mod, buf)); +	}  	preempt_enable();  	if (last_unloaded_module[0])  		printk(" [last unloaded: %s]", last_unloaded_module);  |