Diffstat (limited to 'lib')
-rw-r--r--   lib/bust_spinlocks.c          |  3
-rw-r--r--   lib/dma-debug.c               | 45
-rw-r--r--   lib/idr.c                     | 96
-rw-r--r--   lib/notifier-error-inject.c   |  4
-rw-r--r--   lib/xz/Kconfig                |  2
5 files changed, 68 insertions, 82 deletions
diff --git a/lib/bust_spinlocks.c b/lib/bust_spinlocks.c
index 9681d54b95d..f8e0e536739 100644
--- a/lib/bust_spinlocks.c
+++ b/lib/bust_spinlocks.c
@@ -8,6 +8,7 @@
  * and panic() information from reaching the user.
  */
 #include <linux/kernel.h>
+#include <linux/printk.h>
 #include <linux/spinlock.h>
 #include <linux/tty.h>
 #include <linux/wait.h>
@@ -28,5 +29,3 @@ void __attribute__((weak)) bust_spinlocks(int yes)
 			wake_up_klogd();
 	}
 }
-
-
diff --git a/lib/dma-debug.c b/lib/dma-debug.c
index 5e396accd3d..d87a17a819d 100644
--- a/lib/dma-debug.c
+++ b/lib/dma-debug.c
@@ -862,17 +862,21 @@ static void check_unmap(struct dma_debug_entry *ref)
 	entry = bucket_find_exact(bucket, ref);
 
 	if (!entry) {
+		/* must drop lock before calling dma_mapping_error */
+		put_hash_bucket(bucket, &flags);
+
 		if (dma_mapping_error(ref->dev, ref->dev_addr)) {
 			err_printk(ref->dev, NULL,
-				   "DMA-API: device driver tries "
-				   "to free an invalid DMA memory address\n");
-			return;
+				   "DMA-API: device driver tries to free an "
+				   "invalid DMA memory address\n");
+		} else {
+			err_printk(ref->dev, NULL,
+				   "DMA-API: device driver tries to free DMA "
+				   "memory it has not allocated [device "
+				   "address=0x%016llx] [size=%llu bytes]\n",
+				   ref->dev_addr, ref->size);
 		}
-		err_printk(ref->dev, NULL, "DMA-API: device driver tries "
-			   "to free DMA memory it has not allocated "
-			   "[device address=0x%016llx] [size=%llu bytes]\n",
-			   ref->dev_addr, ref->size);
-		goto out;
+		return;
 	}
 
 	if (ref->size != entry->size) {
@@ -936,7 +940,6 @@ static void check_unmap(struct dma_debug_entry *ref)
 	hash_bucket_del(entry);
 	dma_entry_free(entry);
 
-out:
 	put_hash_bucket(bucket, &flags);
 }
 
@@ -1082,13 +1085,27 @@ void debug_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
 	ref.dev = dev;
 	ref.dev_addr = dma_addr;
 	bucket = get_hash_bucket(&ref, &flags);
-	entry = bucket_find_exact(bucket, &ref);
 
-	if (!entry)
-		goto out;
+	list_for_each_entry(entry, &bucket->list, list) {
+		if (!exact_match(&ref, entry))
+			continue;
+
+		/*
+		 * The same physical address can be mapped multiple
+		 * times. Without a hardware IOMMU this results in the
+		 * same device addresses being put into the dma-debug
+		 * hash multiple times too. This can result in false
+		 * positives being reported. Therefore we implement a
+		 * best-fit algorithm here which updates the first entry
+		 * from the hash which fits the reference value and is
+		 * not currently listed as being checked.
+		 */
+		if (entry->map_err_type == MAP_ERR_NOT_CHECKED) {
+			entry->map_err_type = MAP_ERR_CHECKED;
+			break;
+		}
+	}
 
-	entry->map_err_type = MAP_ERR_CHECKED;
-out:
 	put_hash_bucket(bucket, &flags);
 }
 EXPORT_SYMBOL(debug_dma_mapping_error);
diff --git a/lib/idr.c b/lib/idr.c
index 73f4d53c02f..322e2816f2f 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -106,8 +106,14 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
 	if (layer_idr)
 		return get_from_free_list(layer_idr);
 
-	/* try to allocate directly from kmem_cache */
-	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
+	/*
+	 * Try to allocate directly from kmem_cache.  We want to try this
+	 * before preload buffer; otherwise, non-preloading idr_alloc()
+	 * users will end up taking advantage of preloading ones.  As the
+	 * following is allowed to fail for preloaded cases, suppress
+	 * warning this time.
+	 */
+	new = kmem_cache_zalloc(idr_layer_cache, gfp_mask | __GFP_NOWARN);
 	if (new)
 		return new;
 
@@ -115,18 +121,24 @@ static struct idr_layer *idr_layer_alloc(gfp_t gfp_mask, struct idr *layer_idr)
 	 * Try to fetch one from the per-cpu preload buffer if in process
 	 * context.  See idr_preload() for details.
 	 */
-	if (in_interrupt())
-		return NULL;
-
-	preempt_disable();
-	new = __this_cpu_read(idr_preload_head);
-	if (new) {
-		__this_cpu_write(idr_preload_head, new->ary[0]);
-		__this_cpu_dec(idr_preload_cnt);
-		new->ary[0] = NULL;
+	if (!in_interrupt()) {
+		preempt_disable();
+		new = __this_cpu_read(idr_preload_head);
+		if (new) {
+			__this_cpu_write(idr_preload_head, new->ary[0]);
+			__this_cpu_dec(idr_preload_cnt);
+			new->ary[0] = NULL;
+		}
+		preempt_enable();
+		if (new)
+			return new;
 	}
-	preempt_enable();
-	return new;
+
+	/*
+	 * Both failed.  Try kmem_cache again w/o adding __GFP_NOWARN so
+	 * that memory allocation failure warning is printed as intended.
+	 */
+	return kmem_cache_zalloc(idr_layer_cache, gfp_mask);
 }
 
 static void idr_layer_rcu_free(struct rcu_head *head)
@@ -184,20 +196,7 @@ static void idr_mark_full(struct idr_layer **pa, int id)
 	}
 }
 
-/**
- * idr_pre_get - reserve resources for idr allocation
- * @idp:	idr handle
- * @gfp_mask:	memory allocation flags
- *
- * This function should be called prior to calling the idr_get_new* functions.
- * It preallocates enough memory to satisfy the worst possible allocation. The
- * caller should pass in GFP_KERNEL if possible.  This of course requires that
- * no spinning locks be held.
- *
- * If the system is REALLY out of memory this function returns %0,
- * otherwise %1.
- */
-int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
+int __idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 {
 	while (idp->id_free_cnt < MAX_IDR_FREE) {
 		struct idr_layer *new;
@@ -208,13 +207,12 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 	}
 	return 1;
 }
-EXPORT_SYMBOL(idr_pre_get);
+EXPORT_SYMBOL(__idr_pre_get);
 
 /**
  * sub_alloc - try to allocate an id without growing the tree depth
  * @idp: idr handle
  * @starting_id: id to start search at
- * @id: pointer to the allocated handle
 * @pa: idr_layer[MAX_IDR_LEVEL] used as backtrack buffer
 * @gfp_mask: allocation mask for idr_layer_alloc()
 * @layer_idr: optional idr passed to idr_layer_alloc()
@@ -376,25 +374,7 @@ static void idr_fill_slot(struct idr *idr, void *ptr, int id,
 	idr_mark_full(pa, id);
 }
 
-/**
- * idr_get_new_above - allocate new idr entry above or equal to a start id
- * @idp: idr handle
- * @ptr: pointer you want associated with the id
- * @starting_id: id to start search at
- * @id: pointer to the allocated handle
- *
- * This is the allocate id function.  It should be called with any
- * required locks.
- *
- * If allocation from IDR's private freelist fails, idr_get_new_above() will
- * return %-EAGAIN.  The caller should retry the idr_pre_get() call to refill
- * IDR's preallocation and then retry the idr_get_new_above() call.
- *
- * If the idr is full idr_get_new_above() will return %-ENOSPC.
- *
- * @id returns a value in the range @starting_id ... %0x7fffffff
- */
-int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
+int __idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 {
 	struct idr_layer *pa[MAX_IDR_LEVEL + 1];
 	int rv;
@@ -407,7 +387,7 @@ int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
 	*id = rv;
 	return 0;
 }
-EXPORT_SYMBOL(idr_get_new_above);
+EXPORT_SYMBOL(__idr_get_new_above);
 
 /**
  * idr_preload - preload for idr_alloc()
@@ -569,8 +549,7 @@ void idr_remove(struct idr *idp, int id)
 	struct idr_layer *p;
 	struct idr_layer *to_free;
 
-	/* see comment in idr_find_slowpath() */
-	if (WARN_ON_ONCE(id < 0))
+	if (id < 0)
 		return;
 
 	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
@@ -667,15 +646,7 @@ void *idr_find_slowpath(struct idr *idp, int id)
 	int n;
 	struct idr_layer *p;
 
-	/*
-	 * If @id is negative, idr_find() used to ignore the sign bit and
-	 * performed lookup with the rest of bits, which is weird and can
-	 * lead to very obscure bugs.  We're now returning NULL for all
-	 * negative IDs but just in case somebody was depending on the sign
-	 * bit being ignored, let's trigger WARN_ON_ONCE() so that they can
-	 * be detected and fixed.  WARN_ON_ONCE() can later be removed.
-	 */
-	if (WARN_ON_ONCE(id < 0))
+	if (id < 0)
 		return NULL;
 
 	p = rcu_dereference_raw(idp->top);
@@ -824,8 +795,7 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
 	int n;
 	struct idr_layer *p, *old_p;
 
-	/* see comment in idr_find_slowpath() */
-	if (WARN_ON_ONCE(id < 0))
+	if (id < 0)
 		return ERR_PTR(-EINVAL);
 
 	p = idp->top;
@@ -918,7 +888,7 @@ static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
 int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
 {
 	/* allocate idr_layers */
-	if (!idr_pre_get(&ida->idr, gfp_mask))
+	if (!__idr_pre_get(&ida->idr, gfp_mask))
 		return 0;
 
 	/* allocate free_bitmap */
diff --git a/lib/notifier-error-inject.c b/lib/notifier-error-inject.c
index 44b92cb6224..eb4a04afea8 100644
--- a/lib/notifier-error-inject.c
+++ b/lib/notifier-error-inject.c
@@ -17,7 +17,7 @@ static int debugfs_errno_get(void *data, u64 *val)
 DEFINE_SIMPLE_ATTRIBUTE(fops_errno, debugfs_errno_get, debugfs_errno_set,
 			"%lld\n");
 
-static struct dentry *debugfs_create_errno(const char *name, mode_t mode,
+static struct dentry *debugfs_create_errno(const char *name, umode_t mode,
 				struct dentry *parent, int *value)
 {
 	return debugfs_create_file(name, mode, parent, value, &fops_errno);
@@ -50,7 +50,7 @@ struct dentry *notifier_err_inject_init(const char *name, struct dentry *parent,
 			struct notifier_err_inject *err_inject, int priority)
 {
 	struct notifier_err_inject_action *action;
-	mode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
+	umode_t mode = S_IFREG | S_IRUSR | S_IWUSR;
 	struct dentry *dir;
 	struct dentry *actions_dir;
 
diff --git a/lib/xz/Kconfig b/lib/xz/Kconfig
index 82a04d7ba99..08837db52d9 100644
--- a/lib/xz/Kconfig
+++ b/lib/xz/Kconfig
@@ -15,7 +15,7 @@ config XZ_DEC_X86
 
 config XZ_DEC_POWERPC
 	bool "PowerPC BCJ filter decoder"
-	default y if POWERPC
+	default y if PPC
 	select XZ_DEC_BCJ
 
 config XZ_DEC_IA64
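The idr_layer_alloc() comment in the idr.c hunks above distinguishes preloading from non-preloading idr_alloc() callers, which is why the first kmem_cache attempt carries __GFP_NOWARN and only the final fallback is allowed to warn. As a rough sketch of the preloading calling convention that reasoning assumes (not part of the diff: the helper name, the external spinlock, and the exact idr_alloc() argument order here are illustrative assumptions):

/*
 * Illustrative sketch only.  Assumes idr_alloc(idr, ptr, start, end, gfp)
 * with end <= 0 meaning "no upper limit"; example_assign_id() and the
 * caller-supplied lock are made up for the example.
 */
#include <linux/idr.h>
#include <linux/spinlock.h>
#include <linux/gfp.h>

static int example_assign_id(struct idr *idr, spinlock_t *lock, void *obj)
{
	int id;

	idr_preload(GFP_KERNEL);	/* may sleep; stocks the per-cpu layer buffer */
	spin_lock(lock);
	/* must not sleep under the lock, hence GFP_NOWAIT */
	id = idr_alloc(idr, obj, 0, 0, GFP_NOWAIT);
	spin_unlock(lock);
	idr_preload_end();		/* pairs with idr_preload(); re-enables preemption */

	return id;			/* new id (>= 0) or a negative errno */
}

Because idr_preload() has already filled the per-cpu buffer, the GFP_NOWAIT attempt inside the lock can fall back to those preloaded layers, so a failed atomic kmem_cache allocation on that path is expected and not worth a lowmem warning.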