diff options
Diffstat (limited to 'net/ipv6')
-rw-r--r--  net/ipv6/ipv6_sockglue.c        |   4
-rw-r--r--  net/ipv6/netfilter/ip6_tables.c | 123
-rw-r--r--  net/ipv6/udp.c                  |   6
3 files changed, 43 insertions, 90 deletions
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c index d31df0f4bc9..a7fdf9a27f1 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c @@ -380,10 +380,6 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,  			default:  				goto sticky_done;  			} - -			if ((rthdr->hdrlen & 1) || -			    (rthdr->hdrlen >> 1) != rthdr->segments_left) -				goto sticky_done;  		}  		retv = 0; diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c index dfed176aed3..219e165aea1 100644 --- a/net/ipv6/netfilter/ip6_tables.c +++ b/net/ipv6/netfilter/ip6_tables.c @@ -365,9 +365,9 @@ ip6t_do_table(struct sk_buff *skb,  	IP_NF_ASSERT(table->valid_hooks & (1 << hook)); -	rcu_read_lock_bh(); -	private = rcu_dereference(table->private); -	table_base = rcu_dereference(private->entries[smp_processor_id()]); +	xt_info_rdlock_bh(); +	private = table->private; +	table_base = private->entries[smp_processor_id()];  	e = get_entry(table_base, private->hook_entry[hook]); @@ -466,7 +466,7 @@ ip6t_do_table(struct sk_buff *skb,  #ifdef CONFIG_NETFILTER_DEBUG  	((struct ip6t_entry *)table_base)->comefrom = NETFILTER_LINK_POISON;  #endif -	rcu_read_unlock_bh(); +	xt_info_rdunlock_bh();  #ifdef DEBUG_ALLOW_ALL  	return NF_ACCEPT; @@ -926,9 +926,12 @@ get_counters(const struct xt_table_info *t,  	/* Instead of clearing (by a previous call to memset())  	 * the counters and using adds, we set the counters  	 * with data used by 'current' CPU -	 * We dont care about preemption here. 
+	 * +	 * Bottom half has to be disabled to prevent deadlock +	 * if new softirq were to run and call ipt_do_table  	 */ -	curcpu = raw_smp_processor_id(); +	local_bh_disable(); +	curcpu = smp_processor_id();  	i = 0;  	IP6T_ENTRY_ITERATE(t->entries[curcpu], @@ -941,72 +944,22 @@ get_counters(const struct xt_table_info *t,  		if (cpu == curcpu)  			continue;  		i = 0; +		xt_info_wrlock(cpu);  		IP6T_ENTRY_ITERATE(t->entries[cpu],  				  t->size,  				  add_entry_to_counter,  				  counters,  				  &i); +		xt_info_wrunlock(cpu);  	} -} - -/* We're lazy, and add to the first CPU; overflow works its fey magic - * and everything is OK. */ -static int -add_counter_to_entry(struct ip6t_entry *e, -		     const struct xt_counters addme[], -		     unsigned int *i) -{ -	ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt); - -	(*i)++; -	return 0; -} - -/* Take values from counters and add them back onto the current cpu */ -static void put_counters(struct xt_table_info *t, -			 const struct xt_counters counters[]) -{ -	unsigned int i, cpu; - -	local_bh_disable(); -	cpu = smp_processor_id(); -	i = 0; -	IP6T_ENTRY_ITERATE(t->entries[cpu], -			   t->size, -			   add_counter_to_entry, -			   counters, -			   &i);  	local_bh_enable();  } -static inline int -zero_entry_counter(struct ip6t_entry *e, void *arg) -{ -	e->counters.bcnt = 0; -	e->counters.pcnt = 0; -	return 0; -} - -static void -clone_counters(struct xt_table_info *newinfo, const struct xt_table_info *info) -{ -	unsigned int cpu; -	const void *loc_cpu_entry = info->entries[raw_smp_processor_id()]; - -	memcpy(newinfo, info, offsetof(struct xt_table_info, entries)); -	for_each_possible_cpu(cpu) { -		memcpy(newinfo->entries[cpu], loc_cpu_entry, info->size); -		IP6T_ENTRY_ITERATE(newinfo->entries[cpu], newinfo->size, -				   zero_entry_counter, NULL); -	} -} -  static struct xt_counters *alloc_counters(struct xt_table *table)  {  	unsigned int countersize;  	struct xt_counters *counters;  	struct xt_table_info *private = 
table->private; -	struct xt_table_info *info;  	/* We need atomic snapshot of counters: rest doesn't change  	   (other than comefrom, which userspace doesn't care @@ -1015,28 +968,11 @@ static struct xt_counters *alloc_counters(struct xt_table *table)  	counters = vmalloc_node(countersize, numa_node_id());  	if (counters == NULL) -		goto nomem; - -	info = xt_alloc_table_info(private->size); -	if (!info) -		goto free_counters; - -	clone_counters(info, private); - -	mutex_lock(&table->lock); -	xt_table_entry_swap_rcu(private, info); -	synchronize_net();	/* Wait until smoke has cleared */ +		return ERR_PTR(-ENOMEM); -	get_counters(info, counters); -	put_counters(private, counters); -	mutex_unlock(&table->lock); +	get_counters(private, counters); -	xt_free_table_info(info); - - free_counters: -	vfree(counters); - nomem: -	return ERR_PTR(-ENOMEM); +	return counters;  }  static int @@ -1332,8 +1268,9 @@ __do_replace(struct net *net, const char *name, unsigned int valid_hooks,  	    (newinfo->number <= oldinfo->initial_entries))  		module_put(t->me); -	/* Get the old counters. */ +	/* Get the old counters, and synchronize with replace */  	get_counters(oldinfo, counters); +  	/* Decrease module usage counts and free resource */  	loc_cpu_old_entry = oldinfo->entries[raw_smp_processor_id()];  	IP6T_ENTRY_ITERATE(loc_cpu_old_entry, oldinfo->size, cleanup_entry, @@ -1403,11 +1340,24 @@ do_replace(struct net *net, void __user *user, unsigned int len)  	return ret;  } +/* We're lazy, and add to the first CPU; overflow works its fey magic + * and everything is OK. 
*/ +static int +add_counter_to_entry(struct ip6t_entry *e, +		     const struct xt_counters addme[], +		     unsigned int *i) +{ +	ADD_COUNTER(e->counters, addme[*i].bcnt, addme[*i].pcnt); + +	(*i)++; +	return 0; +} +  static int  do_add_counters(struct net *net, void __user *user, unsigned int len,  		int compat)  { -	unsigned int i; +	unsigned int i, curcpu;  	struct xt_counters_info tmp;  	struct xt_counters *paddc;  	unsigned int num_counters; @@ -1463,25 +1413,28 @@ do_add_counters(struct net *net, void __user *user, unsigned int len,  		goto free;  	} -	mutex_lock(&t->lock); + +	local_bh_disable();  	private = t->private;  	if (private->number != num_counters) {  		ret = -EINVAL;  		goto unlock_up_free;  	} -	preempt_disable();  	i = 0;  	/* Choose the copy that is on our node */ -	loc_cpu_entry = private->entries[raw_smp_processor_id()]; +	curcpu = smp_processor_id(); +	xt_info_wrlock(curcpu); +	loc_cpu_entry = private->entries[curcpu];  	IP6T_ENTRY_ITERATE(loc_cpu_entry,  			  private->size,  			  add_counter_to_entry,  			  paddc,  			  &i); -	preempt_enable(); +	xt_info_wrunlock(curcpu); +   unlock_up_free: -	mutex_unlock(&t->lock); +	local_bh_enable();  	xt_table_unlock(t);  	module_put(t->me);   free: diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 6842dd2edd5..8905712cfbb 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -53,6 +53,8 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)  {  	const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;  	const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2); +	__be32 sk_rcv_saddr = inet_sk(sk)->rcv_saddr; +	__be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);  	int sk_ipv6only = ipv6_only_sock(sk);  	int sk2_ipv6only = inet_v6_ipv6only(sk2);  	int addr_type = ipv6_addr_type(sk_rcv_saddr6); @@ -60,7 +62,9 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)  	/* if both are mapped, treat as IPv4 */  	if (addr_type == IPV6_ADDR_MAPPED && addr_type2 == 
IPV6_ADDR_MAPPED) -		return ipv4_rcv_saddr_equal(sk, sk2); +		return (!sk2_ipv6only && +			(!sk_rcv_saddr || !sk2_rcv_saddr || +			  sk_rcv_saddr == sk2_rcv_saddr));  	if (addr_type2 == IPV6_ADDR_ANY &&  	    !(sk2_ipv6only && addr_type == IPV6_ADDR_MAPPED))  |