diff options
Diffstat (limited to 'lib')
| -rw-r--r-- | lib/Kconfig.debug | 58 | ||||
| -rw-r--r-- | lib/dma-debug.c | 1 | ||||
| -rw-r--r-- | lib/dynamic_debug.c | 42 | ||||
| -rw-r--r-- | lib/radix-tree.c | 2 | ||||
| -rw-r--r-- | lib/swiotlb.c | 18 | 
5 files changed, 68 insertions, 53 deletions
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 1b4afd2e6ca..7b2a8ca97ad 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -461,6 +461,15 @@ config DEBUG_MUTEXES  	 This feature allows mutex semantics violations to be detected and  	 reported. +config BKL +	bool "Big Kernel Lock" if (SMP || PREEMPT) +	default y +	help +	  This is the traditional lock that is used in old code instead +	  of proper locking. All drivers that use the BKL should depend +	  on this symbol. +	  Say Y here unless you are working on removing the BKL. +  config DEBUG_LOCK_ALLOC  	bool "Lock debugging: detect incorrect freeing of live locks"  	depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT @@ -482,6 +491,7 @@ config PROVE_LOCKING  	select DEBUG_SPINLOCK  	select DEBUG_MUTEXES  	select DEBUG_LOCK_ALLOC +	select TRACE_IRQFLAGS  	default n  	help  	 This feature enables the kernel to prove that all locking @@ -539,6 +549,23 @@ config PROVE_RCU_REPEATEDLY  	 disabling, allowing multiple RCU-lockdep warnings to be printed  	 on a single reboot. +	 Say Y to allow multiple RCU-lockdep warnings per boot. + +	 Say N if you are unsure. + +config SPARSE_RCU_POINTER +	bool "RCU debugging: sparse-based checks for pointer usage" +	default n +	help +	 This feature enables the __rcu sparse annotation for +	 RCU-protected pointers.  This annotation will cause sparse +	 to flag any non-RCU use of annotated pointers.  This can be +	 helpful when debugging RCU usage.  Please note that this feature +	 is not intended to enforce code cleanliness; it is instead merely +	 a debugging aid. + +	 Say Y to make sparse flag questionable use of RCU-protected pointers. +  	 Say N if you are unsure.  config LOCKDEP @@ -579,11 +606,10 @@ config DEBUG_LOCKDEP  	  of more runtime overhead.  
config TRACE_IRQFLAGS -	depends on DEBUG_KERNEL  	bool -	default y -	depends on TRACE_IRQFLAGS_SUPPORT -	depends on PROVE_LOCKING +	help +	  Enables hooks to interrupt enabling and disabling for +	  either tracing or lock debugging.  config DEBUG_SPINLOCK_SLEEP  	bool "Spinlock debugging: sleep-inside-spinlock checking" @@ -832,6 +858,30 @@ config RCU_CPU_STALL_DETECTOR  	  Say Y if you are unsure. +config RCU_CPU_STALL_TIMEOUT +	int "RCU CPU stall timeout in seconds" +	depends on RCU_CPU_STALL_DETECTOR +	range 3 300 +	default 60 +	help +	  If a given RCU grace period extends more than the specified +	  number of seconds, a CPU stall warning is printed.  If the +	  RCU grace period persists, additional CPU stall warnings are +	  printed at more widely spaced intervals. + +config RCU_CPU_STALL_DETECTOR_RUNNABLE +	bool "RCU CPU stall checking starts automatically at boot" +	depends on RCU_CPU_STALL_DETECTOR +	default y +	help +	  If set, start checking for RCU CPU stalls immediately on +	  boot.  Otherwise, RCU CPU stall checking must be manually +	  enabled. + +	  Say Y if you are unsure. + +	  Say N if you wish to suppress RCU CPU stall checking during boot. 
+  config RCU_CPU_STALL_VERBOSE  	bool "Print additional per-task information for RCU_CPU_STALL_DETECTOR"  	depends on RCU_CPU_STALL_DETECTOR && TREE_PREEMPT_RCU diff --git a/lib/dma-debug.c b/lib/dma-debug.c index 01e64270e24..4bfb0471f10 100644 --- a/lib/dma-debug.c +++ b/lib/dma-debug.c @@ -590,6 +590,7 @@ out_unlock:  static const struct file_operations filter_fops = {  	.read  = filter_read,  	.write = filter_write, +	.llseek = default_llseek,  };  static int dma_debug_fs_init(void) diff --git a/lib/dynamic_debug.c b/lib/dynamic_debug.c index a687d902daa..3094318bfea 100644 --- a/lib/dynamic_debug.c +++ b/lib/dynamic_debug.c @@ -26,19 +26,11 @@  #include <linux/dynamic_debug.h>  #include <linux/debugfs.h>  #include <linux/slab.h> +#include <linux/jump_label.h>  extern struct _ddebug __start___verbose[];  extern struct _ddebug __stop___verbose[]; -/* dynamic_debug_enabled, and dynamic_debug_enabled2 are bitmasks in which - * bit n is set to 1 if any modname hashes into the bucket n, 0 otherwise. They - * use independent hash functions, to reduce the chance of false positives. - */ -long long dynamic_debug_enabled; -EXPORT_SYMBOL_GPL(dynamic_debug_enabled); -long long dynamic_debug_enabled2; -EXPORT_SYMBOL_GPL(dynamic_debug_enabled2); -  struct ddebug_table {  	struct list_head link;  	char *mod_name; @@ -88,26 +80,6 @@ static char *ddebug_describe_flags(struct _ddebug *dp, char *buf,  }  /* - * must be called with ddebug_lock held - */ - -static int disabled_hash(char hash, bool first_table) -{ -	struct ddebug_table *dt; -	char table_hash_value; - -	list_for_each_entry(dt, &ddebug_tables, link) { -		if (first_table) -			table_hash_value = dt->ddebugs->primary_hash; -		else -			table_hash_value = dt->ddebugs->secondary_hash; -		if (dt->num_enabled && (hash == table_hash_value)) -			return 0; -	} -	return 1; -} - -/*   * Search the tables for _ddebug's which match the given   * `query' and apply the `flags' and `mask' to them.  
Tells   * the user which ddebug's were changed, or whether none @@ -170,17 +142,9 @@ static void ddebug_change(const struct ddebug_query *query,  				dt->num_enabled++;  			dp->flags = newflags;  			if (newflags) { -				dynamic_debug_enabled |= -						(1LL << dp->primary_hash); -				dynamic_debug_enabled2 |= -						(1LL << dp->secondary_hash); +				jump_label_enable(&dp->enabled);  			} else { -				if (disabled_hash(dp->primary_hash, true)) -					dynamic_debug_enabled &= -						~(1LL << dp->primary_hash); -				if (disabled_hash(dp->secondary_hash, false)) -					dynamic_debug_enabled2 &= -						~(1LL << dp->secondary_hash); +				jump_label_disable(&dp->enabled);  			}  			if (verbose)  				printk(KERN_INFO diff --git a/lib/radix-tree.c b/lib/radix-tree.c index efd16fa80b1..6f412ab4c24 100644 --- a/lib/radix-tree.c +++ b/lib/radix-tree.c @@ -49,7 +49,7 @@ struct radix_tree_node {  	unsigned int	height;		/* Height from the bottom */  	unsigned int	count;  	struct rcu_head	rcu_head; -	void		*slots[RADIX_TREE_MAP_SIZE]; +	void __rcu	*slots[RADIX_TREE_MAP_SIZE];  	unsigned long	tags[RADIX_TREE_MAX_TAGS][RADIX_TREE_TAG_LONGS];  }; diff --git a/lib/swiotlb.c b/lib/swiotlb.c index 34e3082632d..7c06ee51a29 100644 --- a/lib/swiotlb.c +++ b/lib/swiotlb.c @@ -70,7 +70,7 @@ static unsigned long io_tlb_nslabs;   */  static unsigned long io_tlb_overflow = 32*1024; -void *io_tlb_overflow_buffer; +static void *io_tlb_overflow_buffer;  /*   * This is a free list describing the number of free entries available from @@ -147,16 +147,16 @@ void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)  	 * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE  	 * between io_tlb_start and io_tlb_end.  	 
*/ -	io_tlb_list = alloc_bootmem(io_tlb_nslabs * sizeof(int)); +	io_tlb_list = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));  	for (i = 0; i < io_tlb_nslabs; i++)   		io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);  	io_tlb_index = 0; -	io_tlb_orig_addr = alloc_bootmem(io_tlb_nslabs * sizeof(phys_addr_t)); +	io_tlb_orig_addr = alloc_bootmem_pages(PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));  	/*  	 * Get the overflow emergency buffer  	 */ -	io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow); +	io_tlb_overflow_buffer = alloc_bootmem_low_pages(PAGE_ALIGN(io_tlb_overflow));  	if (!io_tlb_overflow_buffer)  		panic("Cannot allocate SWIOTLB overflow buffer!\n");  	if (verbose) @@ -182,7 +182,7 @@ swiotlb_init_with_default_size(size_t default_size, int verbose)  	/*  	 * Get IO TLB memory from the low pages  	 */ -	io_tlb_start = alloc_bootmem_low_pages(bytes); +	io_tlb_start = alloc_bootmem_low_pages(PAGE_ALIGN(bytes));  	if (!io_tlb_start)  		panic("Cannot allocate SWIOTLB buffer"); @@ -308,13 +308,13 @@ void __init swiotlb_free(void)  			   get_order(io_tlb_nslabs << IO_TLB_SHIFT));  	} else {  		free_bootmem_late(__pa(io_tlb_overflow_buffer), -				  io_tlb_overflow); +				  PAGE_ALIGN(io_tlb_overflow));  		free_bootmem_late(__pa(io_tlb_orig_addr), -				  io_tlb_nslabs * sizeof(phys_addr_t)); +				  PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)));  		free_bootmem_late(__pa(io_tlb_list), -				  io_tlb_nslabs * sizeof(int)); +				  PAGE_ALIGN(io_tlb_nslabs * sizeof(int)));  		free_bootmem_late(__pa(io_tlb_start), -				  io_tlb_nslabs << IO_TLB_SHIFT); +				  PAGE_ALIGN(io_tlb_nslabs << IO_TLB_SHIFT));  	}  }  |