diff options
| author | Eric Dumazet <eric.dumazet@gmail.com> | 2010-06-08 23:39:10 +0000 | 
|---|---|---|
| committer | David S. Miller <davem@davemloft.net> | 2010-06-10 22:53:52 -0700 | 
| commit | ae638c47dc040b8def16d05dc6acdd527628f231 (patch) | |
| tree | f3fa7015b8f4ed85d44d675d4a9f5f6c82063764 /net | |
| parent | 597a264b1a9c7e36d1728f677c66c5c1f7e3b837 (diff) | |
| download | olio-linux-3.10-ae638c47dc040b8def16d05dc6acdd527628f231.tar.xz olio-linux-3.10-ae638c47dc040b8def16d05dc6acdd527628f231.zip  | |
pkt_sched: gen_estimator: add a new lock
gen_kill_estimator() / gen_new_estimator() are not always called with
RTNL held.
net/netfilter/xt_RATEEST.c is one user of these APIs that does not hold
RTNL, so random corruptions can occur between "tc" and "iptables".
Add a new fine grained lock instead of trying to use RTNL in netfilter.
Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net')
| -rw-r--r-- | net/core/gen_estimator.c | 15 | 
1 files changed, 12 insertions, 3 deletions
diff --git a/net/core/gen_estimator.c b/net/core/gen_estimator.c index cf8e70392fe..785e5276a30 100644 --- a/net/core/gen_estimator.c +++ b/net/core/gen_estimator.c @@ -107,6 +107,7 @@ static DEFINE_RWLOCK(est_lock);  /* Protects against soft lockup during large deletion */  static struct rb_root est_root = RB_ROOT; +static DEFINE_SPINLOCK(est_tree_lock);  static void est_timer(unsigned long arg)  { @@ -201,7 +202,6 @@ struct gen_estimator *gen_find_node(const struct gnet_stats_basic_packed *bstats   *   * Returns 0 on success or a negative error code.   * - * NOTE: Called under rtnl_mutex   */  int gen_new_estimator(struct gnet_stats_basic_packed *bstats,  		      struct gnet_stats_rate_est *rate_est, @@ -232,6 +232,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,  	est->last_packets = bstats->packets;  	est->avpps = rate_est->pps<<10; +	spin_lock(&est_tree_lock);  	if (!elist[idx].timer.function) {  		INIT_LIST_HEAD(&elist[idx].list);  		setup_timer(&elist[idx].timer, est_timer, idx); @@ -242,6 +243,7 @@ int gen_new_estimator(struct gnet_stats_basic_packed *bstats,  	list_add_rcu(&est->list, &elist[idx].list);  	gen_add_node(est); +	spin_unlock(&est_tree_lock);  	return 0;  } @@ -261,13 +263,13 @@ static void __gen_kill_estimator(struct rcu_head *head)   *   * Removes the rate estimator specified by &bstats and &rate_est.   
* - * NOTE: Called under rtnl_mutex   */  void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,  			struct gnet_stats_rate_est *rate_est)  {  	struct gen_estimator *e; +	spin_lock(&est_tree_lock);  	while ((e = gen_find_node(bstats, rate_est))) {  		rb_erase(&e->node, &est_root); @@ -278,6 +280,7 @@ void gen_kill_estimator(struct gnet_stats_basic_packed *bstats,  		list_del_rcu(&e->list);  		call_rcu(&e->e_rcu, __gen_kill_estimator);  	} +	spin_unlock(&est_tree_lock);  }  EXPORT_SYMBOL(gen_kill_estimator); @@ -312,8 +315,14 @@ EXPORT_SYMBOL(gen_replace_estimator);  bool gen_estimator_active(const struct gnet_stats_basic_packed *bstats,  			  const struct gnet_stats_rate_est *rate_est)  { +	bool res; +  	ASSERT_RTNL(); -	return gen_find_node(bstats, rate_est) != NULL; +	spin_lock(&est_tree_lock); +	res = gen_find_node(bstats, rate_est) != NULL; +	spin_unlock(&est_tree_lock); + +	return res;  }  EXPORT_SYMBOL(gen_estimator_active);  |