Diffstat (limited to 'net')
142 files changed, 1444 insertions, 888 deletions
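Several of the patches below (br_mdb, dcbnl, rtnetlink, and the various recvmsg() msg_namelen fixes) share one theme: stack-allocated structures are zeroed with memset() before being filled in and copied to userspace, so that padding bytes and any field the fill path skips cannot leak kernel stack contents. The following is a minimal userspace sketch of that pattern only; the struct and function names are invented for illustration and are not part of the kernel code changed here.

#include <stdio.h>
#include <string.h>

/* Hypothetical wire-format record, loosely modelled on struct br_mdb_entry;
 * the compiler inserts padding after 'state', and any field the fill
 * routine forgets would otherwise carry stale stack bytes to the reader. */
struct mdb_entry_example {
	unsigned int  ifindex;
	unsigned char state;
	unsigned int  addr;
};

/* The pattern applied throughout this changeset: zero the whole structure
 * first, then assign only the fields that are actually known. */
static void fill_entry(struct mdb_entry_example *e)
{
	memset(e, 0, sizeof(*e));
	e->ifindex = 2;
	e->state   = 1;
	e->addr    = 0x0a000001;	/* 10.0.0.1 in host byte order */
}

int main(void)
{
	struct mdb_entry_example e;
	const unsigned char *p = (const unsigned char *)&e;
	size_t i;

	fill_entry(&e);

	/* Every byte of the record, including padding, is now deterministic. */
	for (i = 0; i < sizeof(e); i++)
		printf("%02x ", p[i]);
	printf("\n");
	return 0;
}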
diff --git a/net/802/mrp.c b/net/802/mrp.c index a4cc3229952..e085bcc754f 100644 --- a/net/802/mrp.c +++ b/net/802/mrp.c @@ -870,8 +870,12 @@ void mrp_uninit_applicant(struct net_device *dev, struct mrp_application *appl)  	 * all pending messages before the applicant is gone.  	 */  	del_timer_sync(&app->join_timer); + +	spin_lock(&app->lock);  	mrp_mad_event(app, MRP_EVENT_TX);  	mrp_pdu_queue(app); +	spin_unlock(&app->lock); +  	mrp_queue_xmit(app);  	dev_mc_del(dev, appl->group_address); diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index a18714469bf..85addcd9372 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -86,13 +86,6 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)  	grp = &vlan_info->grp; -	/* Take it out of our own structures, but be sure to interlock with -	 * HW accelerating devices or SW vlan input packet processing if -	 * VLAN is not 0 (leave it there for 802.1p). -	 */ -	if (vlan_id) -		vlan_vid_del(real_dev, vlan_id); -  	grp->nr_vlan_devs--;  	if (vlan->flags & VLAN_FLAG_MVRP) @@ -114,6 +107,13 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head)  		vlan_gvrp_uninit_applicant(real_dev);  	} +	/* Take it out of our own structures, but be sure to interlock with +	 * HW accelerating devices or SW vlan input packet processing if +	 * VLAN is not 0 (leave it there for 802.1p). +	 */ +	if (vlan_id) +		vlan_vid_del(real_dev, vlan_id); +  	/* Get rid of the vlan's reference to real_dev */  	dev_put(real_dev);  } diff --git a/net/atm/common.c b/net/atm/common.c index 7b491006eaf..737bef59ce8 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -531,6 +531,8 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,  	struct sk_buff *skb;  	int copied, error = -EINVAL; +	msg->msg_namelen = 0; +  	if (sock->state != SS_CONNECTED)  		return -ENOTCONN; diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 7b11f8bc507..e277e38f736 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -1642,6 +1642,7 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock,  		ax25_address src;  		const unsigned char *mac = skb_mac_header(skb); +		memset(sax, 0, sizeof(struct full_sockaddr_ax25));  		ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL,  				&digi, NULL, NULL);  		sax->sax25_family = AF_AX25; diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c index a0b253ecada..a5bb0a769eb 100644 --- a/net/batman-adv/bat_iv_ogm.c +++ b/net/batman-adv/bat_iv_ogm.c @@ -1288,7 +1288,8 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,  	batadv_ogm_packet = (struct batadv_ogm_packet *)packet_buff;  	/* unpack the aggregated packets and process them one by one */ -	do { +	while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len, +					 batadv_ogm_packet->tt_num_changes)) {  		tt_buff = packet_buff + buff_pos + BATADV_OGM_HLEN;  		batadv_iv_ogm_process(ethhdr, batadv_ogm_packet, tt_buff, @@ -1299,8 +1300,7 @@ static int batadv_iv_ogm_receive(struct sk_buff *skb,  		packet_pos = packet_buff + buff_pos;  		batadv_ogm_packet = (struct batadv_ogm_packet *)packet_pos; -	} while (batadv_iv_ogm_aggr_packet(buff_pos, packet_len, -					   batadv_ogm_packet->tt_num_changes)); +	}  	kfree_skb(skb);  	return NET_RX_SUCCESS; diff --git a/net/batman-adv/main.c b/net/batman-adv/main.c index 0488d70c8c3..fa563e497c4 100644 --- a/net/batman-adv/main.c +++ b/net/batman-adv/main.c @@ -169,7 +169,7 @@ void batadv_mesh_free(struct net_device *soft_iface)  	atomic_set(&bat_priv->mesh_state, 
BATADV_MESH_INACTIVE);  } -int batadv_is_my_mac(const uint8_t *addr) +int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr)  {  	const struct batadv_hard_iface *hard_iface; @@ -178,6 +178,9 @@ int batadv_is_my_mac(const uint8_t *addr)  		if (hard_iface->if_status != BATADV_IF_ACTIVE)  			continue; +		if (hard_iface->soft_iface != bat_priv->soft_iface) +			continue; +  		if (batadv_compare_eth(hard_iface->net_dev->dev_addr, addr)) {  			rcu_read_unlock();  			return 1; diff --git a/net/batman-adv/main.h b/net/batman-adv/main.h index ced08b936a9..d40910dfc8e 100644 --- a/net/batman-adv/main.h +++ b/net/batman-adv/main.h @@ -162,7 +162,7 @@ extern struct workqueue_struct *batadv_event_workqueue;  int batadv_mesh_init(struct net_device *soft_iface);  void batadv_mesh_free(struct net_device *soft_iface); -int batadv_is_my_mac(const uint8_t *addr); +int batadv_is_my_mac(struct batadv_priv *bat_priv, const uint8_t *addr);  struct batadv_hard_iface *  batadv_seq_print_text_primary_if_get(struct seq_file *seq);  int batadv_batman_skb_recv(struct sk_buff *skb, struct net_device *dev, diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 5ee21cebbbb..319f2906c71 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -402,7 +402,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,  		goto out;  	/* not for me */ -	if (!batadv_is_my_mac(ethhdr->h_dest)) +	if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))  		goto out;  	icmp_packet = (struct batadv_icmp_packet_rr *)skb->data; @@ -416,7 +416,7 @@ int batadv_recv_icmp_packet(struct sk_buff *skb,  	}  	/* packet for me */ -	if (batadv_is_my_mac(icmp_packet->dst)) +	if (batadv_is_my_mac(bat_priv, icmp_packet->dst))  		return batadv_recv_my_icmp_packet(bat_priv, skb, hdr_size);  	/* TTL exceeded */ @@ -548,7 +548,8 @@ batadv_find_ifalter_router(struct batadv_orig_node *primary_orig,  	return router;  } -static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size) +static int batadv_check_unicast_packet(struct batadv_priv *bat_priv, +				       struct sk_buff *skb, int hdr_size)  {  	struct ethhdr *ethhdr; @@ -567,7 +568,7 @@ static int batadv_check_unicast_packet(struct sk_buff *skb, int hdr_size)  		return -1;  	/* not for me */ -	if (!batadv_is_my_mac(ethhdr->h_dest)) +	if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))  		return -1;  	return 0; @@ -582,7 +583,7 @@ int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if)  	char tt_flag;  	size_t packet_size; -	if (batadv_check_unicast_packet(skb, hdr_size) < 0) +	if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)  		return NET_RX_DROP;  	/* I could need to modify it */ @@ -614,7 +615,7 @@ int batadv_recv_tt_query(struct sk_buff *skb, struct batadv_hard_iface *recv_if)  	case BATADV_TT_RESPONSE:  		batadv_inc_counter(bat_priv, BATADV_CNT_TT_RESPONSE_RX); -		if (batadv_is_my_mac(tt_query->dst)) { +		if (batadv_is_my_mac(bat_priv, tt_query->dst)) {  			/* packet needs to be linearized to access the TT  			 * changes  			 */ @@ -657,14 +658,15 @@ int batadv_recv_roam_adv(struct sk_buff *skb, struct batadv_hard_iface *recv_if)  	struct batadv_roam_adv_packet *roam_adv_packet;  	struct batadv_orig_node *orig_node; -	if (batadv_check_unicast_packet(skb, sizeof(*roam_adv_packet)) < 0) +	if (batadv_check_unicast_packet(bat_priv, skb, +					sizeof(*roam_adv_packet)) < 0)  		goto out;  	batadv_inc_counter(bat_priv, BATADV_CNT_TT_ROAM_ADV_RX);  	roam_adv_packet = (struct batadv_roam_adv_packet *)skb->data; -	if 
(!batadv_is_my_mac(roam_adv_packet->dst)) +	if (!batadv_is_my_mac(bat_priv, roam_adv_packet->dst))  		return batadv_route_unicast_packet(skb, recv_if);  	/* check if it is a backbone gateway. we don't accept @@ -967,7 +969,7 @@ static int batadv_check_unicast_ttvn(struct batadv_priv *bat_priv,  	 * last time) the packet had an updated information or not  	 */  	curr_ttvn = (uint8_t)atomic_read(&bat_priv->tt.vn); -	if (!batadv_is_my_mac(unicast_packet->dest)) { +	if (!batadv_is_my_mac(bat_priv, unicast_packet->dest)) {  		orig_node = batadv_orig_hash_find(bat_priv,  						  unicast_packet->dest);  		/* if it is not possible to find the orig_node representing the @@ -1044,14 +1046,14 @@ int batadv_recv_unicast_packet(struct sk_buff *skb,  	if (is4addr)  		hdr_size = sizeof(*unicast_4addr_packet); -	if (batadv_check_unicast_packet(skb, hdr_size) < 0) +	if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)  		return NET_RX_DROP;  	if (!batadv_check_unicast_ttvn(bat_priv, skb))  		return NET_RX_DROP;  	/* packet for me */ -	if (batadv_is_my_mac(unicast_packet->dest)) { +	if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {  		if (is4addr) {  			batadv_dat_inc_counter(bat_priv,  					       unicast_4addr_packet->subtype); @@ -1088,7 +1090,7 @@ int batadv_recv_ucast_frag_packet(struct sk_buff *skb,  	struct sk_buff *new_skb = NULL;  	int ret; -	if (batadv_check_unicast_packet(skb, hdr_size) < 0) +	if (batadv_check_unicast_packet(bat_priv, skb, hdr_size) < 0)  		return NET_RX_DROP;  	if (!batadv_check_unicast_ttvn(bat_priv, skb)) @@ -1097,7 +1099,7 @@ int batadv_recv_ucast_frag_packet(struct sk_buff *skb,  	unicast_packet = (struct batadv_unicast_frag_packet *)skb->data;  	/* packet for me */ -	if (batadv_is_my_mac(unicast_packet->dest)) { +	if (batadv_is_my_mac(bat_priv, unicast_packet->dest)) {  		ret = batadv_frag_reassemble_skb(skb, bat_priv, &new_skb);  		if (ret == NET_RX_DROP) @@ -1151,13 +1153,13 @@ int batadv_recv_bcast_packet(struct sk_buff *skb,  		goto out;  	/* ignore broadcasts sent by myself */ -	if (batadv_is_my_mac(ethhdr->h_source)) +	if (batadv_is_my_mac(bat_priv, ethhdr->h_source))  		goto out;  	bcast_packet = (struct batadv_bcast_packet *)skb->data;  	/* ignore broadcasts originated by myself */ -	if (batadv_is_my_mac(bcast_packet->orig)) +	if (batadv_is_my_mac(bat_priv, bcast_packet->orig))  		goto out;  	if (bcast_packet->header.ttl < 2) @@ -1243,14 +1245,14 @@ int batadv_recv_vis_packet(struct sk_buff *skb,  	ethhdr = (struct ethhdr *)skb_mac_header(skb);  	/* not for me */ -	if (!batadv_is_my_mac(ethhdr->h_dest)) +	if (!batadv_is_my_mac(bat_priv, ethhdr->h_dest))  		return NET_RX_DROP;  	/* ignore own packets */ -	if (batadv_is_my_mac(vis_packet->vis_orig)) +	if (batadv_is_my_mac(bat_priv, vis_packet->vis_orig))  		return NET_RX_DROP; -	if (batadv_is_my_mac(vis_packet->sender_orig)) +	if (batadv_is_my_mac(bat_priv, vis_packet->sender_orig))  		return NET_RX_DROP;  	switch (vis_packet->vis_type) { diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c index 98a66a021a6..7abee19567e 100644 --- a/net/batman-adv/translation-table.c +++ b/net/batman-adv/translation-table.c @@ -1953,7 +1953,7 @@ out:  bool batadv_send_tt_response(struct batadv_priv *bat_priv,  			     struct batadv_tt_query_packet *tt_request)  { -	if (batadv_is_my_mac(tt_request->dst)) { +	if (batadv_is_my_mac(bat_priv, tt_request->dst)) {  		/* don't answer backbone gws! 
*/  		if (batadv_bla_is_backbone_gw_orig(bat_priv, tt_request->src))  			return true; diff --git a/net/batman-adv/vis.c b/net/batman-adv/vis.c index c053244b97b..6a1e646be96 100644 --- a/net/batman-adv/vis.c +++ b/net/batman-adv/vis.c @@ -477,7 +477,7 @@ void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,  	/* Are we the target for this VIS packet? */  	if (vis_server == BATADV_VIS_TYPE_SERVER_SYNC	&& -	    batadv_is_my_mac(vis_packet->target_orig)) +	    batadv_is_my_mac(bat_priv, vis_packet->target_orig))  		are_target = 1;  	spin_lock_bh(&bat_priv->vis.hash_lock); @@ -496,7 +496,7 @@ void batadv_receive_client_update_packet(struct batadv_priv *bat_priv,  		batadv_send_list_add(bat_priv, info);  		/* ... we're not the recipient (and thus need to forward). */ -	} else if (!batadv_is_my_mac(packet->target_orig)) { +	} else if (!batadv_is_my_mac(bat_priv, packet->target_orig)) {  		batadv_send_list_add(bat_priv, info);  	} diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index d3ee69b35a7..0d1b08cc76e 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -230,6 +230,8 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,  	if (flags & (MSG_OOB))  		return -EOPNOTSUPP; +	msg->msg_namelen = 0; +  	skb = skb_recv_datagram(sk, flags, noblock, &err);  	if (!skb) {  		if (sk->sk_shutdown & RCV_SHUTDOWN) @@ -237,8 +239,6 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock,  		return err;  	} -	msg->msg_namelen = 0; -  	copied = skb->len;  	if (len < copied) {  		msg->msg_flags |= MSG_TRUNC; diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index c23bae86263..7c9224bcce1 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -608,6 +608,7 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock,  	if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) {  		rfcomm_dlc_accept(d); +		msg->msg_namelen = 0;  		return 0;  	} diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c index 79d87d8d4f5..fb6192c9812 100644 --- a/net/bluetooth/sco.c +++ b/net/bluetooth/sco.c @@ -359,6 +359,7 @@ static void __sco_sock_close(struct sock *sk)  			sco_chan_del(sk, ECONNRESET);  		break; +	case BT_CONNECT2:  	case BT_CONNECT:  	case BT_DISCONN:  		sco_chan_del(sk, ECONNRESET); @@ -664,6 +665,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,  	    test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {  		hci_conn_accept(pi->conn->hcon, 0);  		sk->sk_state = BT_CONFIG; +		msg->msg_namelen = 0;  		release_sock(sk);  		return 0; diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c index d5f1d3fd4b2..314c73ed418 100644 --- a/net/bridge/br_device.c +++ b/net/bridge/br_device.c @@ -66,7 +66,7 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)  			goto out;  		} -		mdst = br_mdb_get(br, skb); +		mdst = br_mdb_get(br, skb, vid);  		if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb))  			br_multicast_deliver(mdst, skb);  		else diff --git a/net/bridge/br_fdb.c b/net/bridge/br_fdb.c index b0812c91c0f..bab338e6270 100644 --- a/net/bridge/br_fdb.c +++ b/net/bridge/br_fdb.c @@ -423,7 +423,7 @@ static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,  			return 0;  		br_warn(br, "adding interface %s with same address "  		       "as a received packet\n", -		       source->dev->name); +		       source ? 
source->dev->name : br->dev->name);  		fdb_delete(br, fdb);  	} diff --git a/net/bridge/br_if.c b/net/bridge/br_if.c index ef1b91431c6..459dab22b3f 100644 --- a/net/bridge/br_if.c +++ b/net/bridge/br_if.c @@ -67,7 +67,8 @@ void br_port_carrier_check(struct net_bridge_port *p)  	struct net_device *dev = p->dev;  	struct net_bridge *br = p->br; -	if (netif_running(dev) && netif_oper_up(dev)) +	if (!(p->flags & BR_ADMIN_COST) && +	    netif_running(dev) && netif_oper_up(dev))  		p->path_cost = port_cost(dev);  	if (!netif_running(br->dev)) diff --git a/net/bridge/br_input.c b/net/bridge/br_input.c index 48033015189..828e2bcc1f5 100644 --- a/net/bridge/br_input.c +++ b/net/bridge/br_input.c @@ -97,7 +97,7 @@ int br_handle_frame_finish(struct sk_buff *skb)  	if (is_broadcast_ether_addr(dest))  		skb2 = skb;  	else if (is_multicast_ether_addr(dest)) { -		mdst = br_mdb_get(br, skb); +		mdst = br_mdb_get(br, skb, vid);  		if (mdst || BR_INPUT_SKB_CB_MROUTERS_ONLY(skb)) {  			if ((mdst && mdst->mglist) ||  			    br_multicast_is_router(br)) diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c index 9f97b850fc6..ee79f3f2038 100644 --- a/net/bridge/br_mdb.c +++ b/net/bridge/br_mdb.c @@ -80,6 +80,7 @@ static int br_mdb_fill_info(struct sk_buff *skb, struct netlink_callback *cb,  				port = p->port;  				if (port) {  					struct br_mdb_entry e; +					memset(&e, 0, sizeof(e));  					e.ifindex = port->dev->ifindex;  					e.state = p->state;  					if (p->addr.proto == htons(ETH_P_IP)) @@ -136,6 +137,7 @@ static int br_mdb_dump(struct sk_buff *skb, struct netlink_callback *cb)  				break;  			bpm = nlmsg_data(nlh); +			memset(bpm, 0, sizeof(*bpm));  			bpm->ifindex = dev->ifindex;  			if (br_mdb_fill_info(skb, cb, dev) < 0)  				goto out; @@ -171,6 +173,7 @@ static int nlmsg_populate_mdb_fill(struct sk_buff *skb,  		return -EMSGSIZE;  	bpm = nlmsg_data(nlh); +	memset(bpm, 0, sizeof(*bpm));  	bpm->family  = AF_BRIDGE;  	bpm->ifindex = dev->ifindex;  	nest = nla_nest_start(skb, MDBA_MDB); @@ -228,6 +231,7 @@ void br_mdb_notify(struct net_device *dev, struct net_bridge_port *port,  {  	struct br_mdb_entry entry; +	memset(&entry, 0, sizeof(entry));  	entry.ifindex = port->dev->ifindex;  	entry.addr.proto = group->proto;  	entry.addr.u.ip4 = group->u.ip4; diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index 10e6fce1bb6..923fbeaf7af 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -132,7 +132,7 @@ static struct net_bridge_mdb_entry *br_mdb_ip6_get(  #endif  struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, -					struct sk_buff *skb) +					struct sk_buff *skb, u16 vid)  {  	struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);  	struct br_ip ip; @@ -144,6 +144,7 @@ struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,  		return NULL;  	ip.proto = skb->protocol; +	ip.vid = vid;  	switch (skb->protocol) {  	case htons(ETH_P_IP): diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c index 27aa3ee517c..299fc5f40a2 100644 --- a/net/bridge/br_netlink.c +++ b/net/bridge/br_netlink.c @@ -29,6 +29,7 @@ static inline size_t br_port_info_size(void)  		+ nla_total_size(1)	/* IFLA_BRPORT_MODE */  		+ nla_total_size(1)	/* IFLA_BRPORT_GUARD */  		+ nla_total_size(1)	/* IFLA_BRPORT_PROTECT */ +		+ nla_total_size(1)	/* IFLA_BRPORT_FAST_LEAVE */  		+ 0;  } @@ -329,6 +330,7 @@ static int br_setport(struct net_bridge_port *p, struct nlattr *tb[])  	br_set_port_flag(p, tb, IFLA_BRPORT_MODE, BR_HAIRPIN_MODE);  	br_set_port_flag(p, tb, 
IFLA_BRPORT_GUARD, BR_BPDU_GUARD);  	br_set_port_flag(p, tb, IFLA_BRPORT_FAST_LEAVE, BR_MULTICAST_FAST_LEAVE); +	br_set_port_flag(p, tb, IFLA_BRPORT_PROTECT, BR_ROOT_BLOCK);  	if (tb[IFLA_BRPORT_COST]) {  		err = br_stp_set_path_cost(p, nla_get_u32(tb[IFLA_BRPORT_COST])); diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 6d314c4e6bc..d2c043a857b 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -156,6 +156,7 @@ struct net_bridge_port  #define BR_BPDU_GUARD           0x00000002  #define BR_ROOT_BLOCK		0x00000004  #define BR_MULTICAST_FAST_LEAVE	0x00000008 +#define BR_ADMIN_COST		0x00000010  #ifdef CONFIG_BRIDGE_IGMP_SNOOPING  	u32				multicast_startup_queries_sent; @@ -442,7 +443,7 @@ extern int br_multicast_rcv(struct net_bridge *br,  			    struct net_bridge_port *port,  			    struct sk_buff *skb);  extern struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, -					       struct sk_buff *skb); +					       struct sk_buff *skb, u16 vid);  extern void br_multicast_add_port(struct net_bridge_port *port);  extern void br_multicast_del_port(struct net_bridge_port *port);  extern void br_multicast_enable_port(struct net_bridge_port *port); @@ -504,7 +505,7 @@ static inline int br_multicast_rcv(struct net_bridge *br,  }  static inline struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br, -						      struct sk_buff *skb) +						      struct sk_buff *skb, u16 vid)  {  	return NULL;  } diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c index 0bdb4ebd362..d45e760141b 100644 --- a/net/bridge/br_stp_if.c +++ b/net/bridge/br_stp_if.c @@ -288,6 +288,7 @@ int br_stp_set_path_cost(struct net_bridge_port *p, unsigned long path_cost)  	    path_cost > BR_MAX_PATH_COST)  		return -ERANGE; +	p->flags |= BR_ADMIN_COST;  	p->path_cost = path_cost;  	br_configuration_update(p->br);  	br_port_state_selection(p->br); diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index 095259f8390..ff2ff3ce696 100644 --- a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,  	if (m->msg_flags&MSG_OOB)  		goto read_error; +	m->msg_namelen = 0; +  	skb = skb_recv_datagram(sk, flags, 0 , &ret);  	if (!skb)  		goto read_error; diff --git a/net/can/gw.c b/net/can/gw.c index 2d117dc5ebe..117814a7e73 100644 --- a/net/can/gw.c +++ b/net/can/gw.c @@ -466,7 +466,7 @@ static int cgw_notifier(struct notifier_block *nb,  			if (gwj->src.dev == dev || gwj->dst.dev == dev) {  				hlist_del(&gwj->list);  				cgw_unregister_filter(gwj); -				kfree(gwj); +				kmem_cache_free(cgw_cache, gwj);  			}  		}  	} @@ -864,7 +864,7 @@ static void cgw_remove_all_jobs(void)  	hlist_for_each_entry_safe(gwj, nx, &cgw_list, list) {  		hlist_del(&gwj->list);  		cgw_unregister_filter(gwj); -		kfree(gwj); +		kmem_cache_free(cgw_cache, gwj);  	}  } @@ -920,7 +920,7 @@ static int cgw_remove_job(struct sk_buff *skb,  struct nlmsghdr *nlh, void *arg)  		hlist_del(&gwj->list);  		cgw_unregister_filter(gwj); -		kfree(gwj); +		kmem_cache_free(cgw_cache, gwj);  		err = 0;  		break;  	} diff --git a/net/ceph/osdmap.c b/net/ceph/osdmap.c index 69bc4bf89e3..4543b9aba40 100644 --- a/net/ceph/osdmap.c +++ b/net/ceph/osdmap.c @@ -654,6 +654,24 @@ static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)  	return 0;  } +static int __decode_pgid(void **p, void *end, struct ceph_pg *pg) +{ +	u8 v; + +	ceph_decode_need(p, end, 1+8+4+4, bad); +	v = ceph_decode_8(p); +	if (v != 1) +		goto bad; +	pg->pool = 
ceph_decode_64(p); +	pg->seed = ceph_decode_32(p); +	*p += 4; /* skip preferred */ +	return 0; + +bad: +	dout("error decoding pgid\n"); +	return -EINVAL; +} +  /*   * decode a full map.   */ @@ -745,13 +763,12 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)  	for (i = 0; i < len; i++) {  		int n, j;  		struct ceph_pg pgid; -		struct ceph_pg_v1 pgid_v1;  		struct ceph_pg_mapping *pg; -		ceph_decode_need(p, end, sizeof(u32) + sizeof(u64), bad); -		ceph_decode_copy(p, &pgid_v1, sizeof(pgid_v1)); -		pgid.pool = le32_to_cpu(pgid_v1.pool); -		pgid.seed = le16_to_cpu(pgid_v1.ps); +		err = __decode_pgid(p, end, &pgid); +		if (err) +			goto bad; +		ceph_decode_need(p, end, sizeof(u32), bad);  		n = ceph_decode_32(p);  		err = -EINVAL;  		if (n > (UINT_MAX - sizeof(*pg)) / sizeof(u32)) @@ -818,8 +835,8 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,  	u16 version;  	ceph_decode_16_safe(p, end, version, bad); -	if (version > 6) { -		pr_warning("got unknown v %d > %d of inc osdmap\n", version, 6); +	if (version != 6) { +		pr_warning("got unknown v %d != 6 of inc osdmap\n", version);  		goto bad;  	} @@ -963,15 +980,14 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,  	while (len--) {  		struct ceph_pg_mapping *pg;  		int j; -		struct ceph_pg_v1 pgid_v1;  		struct ceph_pg pgid;  		u32 pglen; -		ceph_decode_need(p, end, sizeof(u64) + sizeof(u32), bad); -		ceph_decode_copy(p, &pgid_v1, sizeof(pgid_v1)); -		pgid.pool = le32_to_cpu(pgid_v1.pool); -		pgid.seed = le16_to_cpu(pgid_v1.ps); -		pglen = ceph_decode_32(p); +		err = __decode_pgid(p, end, &pgid); +		if (err) +			goto bad; +		ceph_decode_need(p, end, sizeof(u32), bad); +		pglen = ceph_decode_32(p);  		if (pglen) {  			ceph_decode_need(p, end, pglen*sizeof(u32), bad); diff --git a/net/core/dev.c b/net/core/dev.c index a06a7a58dd1..b24ab0e98eb 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1545,7 +1545,6 @@ void net_enable_timestamp(void)  		return;  	}  #endif -	WARN_ON(in_interrupt());  	static_key_slow_inc(&netstamp_needed);  }  EXPORT_SYMBOL(net_enable_timestamp); @@ -1625,7 +1624,6 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)  	}  	skb_orphan(skb); -	nf_reset(skb);  	if (unlikely(!is_skb_forwardable(dev, skb))) {  		atomic_long_inc(&dev->rx_dropped); @@ -1641,6 +1639,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)  	skb->mark = 0;  	secpath_reset(skb);  	nf_reset(skb); +	nf_reset_trace(skb);  	return netif_rx(skb);  }  EXPORT_SYMBOL_GPL(dev_forward_skb); @@ -2149,6 +2148,9 @@ static void skb_warn_bad_offload(const struct sk_buff *skb)  	struct net_device *dev = skb->dev;  	const char *driver = ""; +	if (!net_ratelimit()) +		return; +  	if (dev && dev->dev.parent)  		driver = dev_driver_string(dev->dev.parent); @@ -2219,9 +2221,9 @@ struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,  	struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);  	struct packet_offload *ptype;  	__be16 type = skb->protocol; +	int vlan_depth = ETH_HLEN;  	while (type == htons(ETH_P_8021Q)) { -		int vlan_depth = ETH_HLEN;  		struct vlan_hdr *vh;  		if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN))) @@ -3315,6 +3317,7 @@ int netdev_rx_handler_register(struct net_device *dev,  	if (dev->rx_handler)  		return -EBUSY; +	/* Note: rx_handler_data must be set before rx_handler */  	rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);  	rcu_assign_pointer(dev->rx_handler, rx_handler); @@ -3335,6 +3338,11 @@ void netdev_rx_handler_unregister(struct net_device *dev)  	
ASSERT_RTNL();  	RCU_INIT_POINTER(dev->rx_handler, NULL); +	/* a reader seeing a non NULL rx_handler in a rcu_read_lock() +	 * section has a guarantee to see a non NULL rx_handler_data +	 * as well. +	 */ +	synchronize_net();  	RCU_INIT_POINTER(dev->rx_handler_data, NULL);  }  EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister); @@ -3444,6 +3452,7 @@ ncls:  		}  		switch (rx_handler(&skb)) {  		case RX_HANDLER_CONSUMED: +			ret = NET_RX_SUCCESS;  			goto unlock;  		case RX_HANDLER_ANOTHER:  			goto another_round; @@ -4103,7 +4112,7 @@ static void net_rx_action(struct softirq_action *h)  		 * Allow this to run for 2 jiffies since which will allow  		 * an average latency of 1.5/HZ.  		 */ -		if (unlikely(budget <= 0 || time_after(jiffies, time_limit))) +		if (unlikely(budget <= 0 || time_after_eq(jiffies, time_limit)))  			goto softnet_break;  		local_irq_enable(); @@ -4780,7 +4789,7 @@ EXPORT_SYMBOL(dev_set_mac_address);  /**   *	dev_change_carrier - Change device carrier   *	@dev: device - *	@new_carries: new value + *	@new_carrier: new value   *   *	Change device carrier   */ diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index bd2eb9d3e36..abdc9e6ef33 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c @@ -37,7 +37,7 @@ static int __hw_addr_create_ex(struct netdev_hw_addr_list *list,  	ha->type = addr_type;  	ha->refcount = 1;  	ha->global_use = global; -	ha->synced = false; +	ha->synced = 0;  	list_add_tail_rcu(&ha->list, &list->list);  	list->count++; @@ -165,7 +165,7 @@ int __hw_addr_sync(struct netdev_hw_addr_list *to_list,  					    addr_len, ha->type);  			if (err)  				break; -			ha->synced = true; +			ha->synced++;  			ha->refcount++;  		} else if (ha->refcount == 1) {  			__hw_addr_del(to_list, ha->addr, addr_len, ha->type); @@ -186,7 +186,7 @@ void __hw_addr_unsync(struct netdev_hw_addr_list *to_list,  		if (ha->synced) {  			__hw_addr_del(to_list, ha->addr,  				      addr_len, ha->type); -			ha->synced = false; +			ha->synced--;  			__hw_addr_del(from_list, ha->addr,  				      addr_len, ha->type);  		} diff --git a/net/core/flow.c b/net/core/flow.c index c56ea6f7f6c..2bfd081c59f 100644 --- a/net/core/flow.c +++ b/net/core/flow.c @@ -328,7 +328,7 @@ static void flow_cache_flush_per_cpu(void *data)  	struct flow_flush_info *info = data;  	struct tasklet_struct *tasklet; -	tasklet = this_cpu_ptr(&info->cache->percpu->flush_tasklet); +	tasklet = &this_cpu_ptr(info->cache->percpu)->flush_tasklet;  	tasklet->data = (unsigned long)info;  	tasklet_schedule(tasklet);  } diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c index 9d4c7201400..e187bf06d67 100644 --- a/net/core/flow_dissector.c +++ b/net/core/flow_dissector.c @@ -140,6 +140,8 @@ ipv6:  			flow->ports = *ports;  	} +	flow->thoff = (u16) nhoff; +  	return true;  }  EXPORT_SYMBOL(skb_flow_dissect); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index b376410ff25..23854b51a25 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -496,8 +496,10 @@ static int rtnl_link_fill(struct sk_buff *skb, const struct net_device *dev)  	}  	if (ops->fill_info) {  		data = nla_nest_start(skb, IFLA_INFO_DATA); -		if (data == NULL) +		if (data == NULL) { +			err = -EMSGSIZE;  			goto err_cancel_link; +		}  		err = ops->fill_info(skb, dev);  		if (err < 0)  			goto err_cancel_data; @@ -979,6 +981,7 @@ static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,  			 * report anything.  			 
*/  			ivi.spoofchk = -1; +			memset(ivi.mac, 0, sizeof(ivi.mac));  			if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))  				break;  			vf_mac.vf = @@ -1069,7 +1072,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb)  	rcu_read_lock();  	cb->seq = net->dev_base_seq; -	if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, +	if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,  			ifla_policy) >= 0) {  		if (tb[IFLA_EXT_MASK]) @@ -1919,7 +1922,7 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh)  	u32 ext_filter_mask = 0;  	u16 min_ifinfo_dump_size = 0; -	if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, +	if (nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,  			ifla_policy) >= 0) {  		if (tb[IFLA_EXT_MASK])  			ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); @@ -2620,7 +2623,7 @@ static int rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)  		struct rtattr *attr = (void *)nlh + NLMSG_ALIGN(min_len);  		while (RTA_OK(attr, attrlen)) { -			unsigned int flavor = attr->rta_type; +			unsigned int flavor = attr->rta_type & NLA_TYPE_MASK;  			if (flavor) {  				if (flavor > rta_max[sz_idx])  					return -EINVAL; diff --git a/net/core/scm.c b/net/core/scm.c index 905dcc6ad1e..2dc6cdaaae8 100644 --- a/net/core/scm.c +++ b/net/core/scm.c @@ -24,6 +24,7 @@  #include <linux/interrupt.h>  #include <linux/netdevice.h>  #include <linux/security.h> +#include <linux/pid_namespace.h>  #include <linux/pid.h>  #include <linux/nsproxy.h>  #include <linux/slab.h> @@ -52,7 +53,8 @@ static __inline__ int scm_check_creds(struct ucred *creds)  	if (!uid_valid(uid) || !gid_valid(gid))  		return -EINVAL; -	if ((creds->pid == task_tgid_vnr(current) || nsown_capable(CAP_SYS_ADMIN)) && +	if ((creds->pid == task_tgid_vnr(current) || +	     ns_capable(current->nsproxy->pid_ns->user_ns, CAP_SYS_ADMIN)) &&  	    ((uid_eq(uid, cred->uid)   || uid_eq(uid, cred->euid) ||  	      uid_eq(uid, cred->suid)) || nsown_capable(CAP_SETUID)) &&  	    ((gid_eq(gid, cred->gid)   || gid_eq(gid, cred->egid) || diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index 1b588e23cf8..21291f1abcd 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -284,6 +284,7 @@ static int dcbnl_getperm_hwaddr(struct net_device *netdev, struct nlmsghdr *nlh,  	if (!netdev->dcbnl_ops->getpermhwaddr)  		return -EOPNOTSUPP; +	memset(perm_addr, 0, sizeof(perm_addr));  	netdev->dcbnl_ops->getpermhwaddr(netdev, perm_addr);  	return nla_put(skb, DCB_ATTR_PERM_HWADDR, sizeof(perm_addr), perm_addr); @@ -1042,6 +1043,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)  	if (ops->ieee_getets) {  		struct ieee_ets ets; +		memset(&ets, 0, sizeof(ets));  		err = ops->ieee_getets(netdev, &ets);  		if (!err &&  		    nla_put(skb, DCB_ATTR_IEEE_ETS, sizeof(ets), &ets)) @@ -1050,6 +1052,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)  	if (ops->ieee_getmaxrate) {  		struct ieee_maxrate maxrate; +		memset(&maxrate, 0, sizeof(maxrate));  		err = ops->ieee_getmaxrate(netdev, &maxrate);  		if (!err) {  			err = nla_put(skb, DCB_ATTR_IEEE_MAXRATE, @@ -1061,6 +1064,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)  	if (ops->ieee_getpfc) {  		struct ieee_pfc pfc; +		memset(&pfc, 0, sizeof(pfc));  		err = ops->ieee_getpfc(netdev, &pfc);  		if (!err &&  		    nla_put(skb, DCB_ATTR_IEEE_PFC, sizeof(pfc), &pfc)) @@ -1094,6 +1098,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct 
net_device *netdev)  	/* get peer info if available */  	if (ops->ieee_peer_getets) {  		struct ieee_ets ets; +		memset(&ets, 0, sizeof(ets));  		err = ops->ieee_peer_getets(netdev, &ets);  		if (!err &&  		    nla_put(skb, DCB_ATTR_IEEE_PEER_ETS, sizeof(ets), &ets)) @@ -1102,6 +1107,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)  	if (ops->ieee_peer_getpfc) {  		struct ieee_pfc pfc; +		memset(&pfc, 0, sizeof(pfc));  		err = ops->ieee_peer_getpfc(netdev, &pfc);  		if (!err &&  		    nla_put(skb, DCB_ATTR_IEEE_PEER_PFC, sizeof(pfc), &pfc)) @@ -1280,6 +1286,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)  	/* peer info if available */  	if (ops->cee_peer_getpg) {  		struct cee_pg pg; +		memset(&pg, 0, sizeof(pg));  		err = ops->cee_peer_getpg(netdev, &pg);  		if (!err &&  		    nla_put(skb, DCB_ATTR_CEE_PEER_PG, sizeof(pg), &pg)) @@ -1288,6 +1295,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)  	if (ops->cee_peer_getpfc) {  		struct cee_pfc pfc; +		memset(&pfc, 0, sizeof(pfc));  		err = ops->cee_peer_getpfc(netdev, &pfc);  		if (!err &&  		    nla_put(skb, DCB_ATTR_CEE_PEER_PFC, sizeof(pfc), &pfc)) diff --git a/net/ieee802154/6lowpan.h b/net/ieee802154/6lowpan.h index 8c2251fb0a3..bba5f833631 100644 --- a/net/ieee802154/6lowpan.h +++ b/net/ieee802154/6lowpan.h @@ -84,7 +84,7 @@  	(memcmp(addr1, addr2, length >> 3) == 0)  /* local link, i.e. FE80::/10 */ -#define is_addr_link_local(a) (((a)->s6_addr16[0]) == 0x80FE) +#define is_addr_link_local(a) (((a)->s6_addr16[0]) == htons(0xFE80))  /*   * check whether we can compress the IID to 16 bits, diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c index 68f6a94f766..c929d9c1c4b 100644 --- a/net/ipv4/af_inet.c +++ b/net/ipv4/af_inet.c @@ -1333,8 +1333,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,  				iph->frag_off |= htons(IP_MF);  			offset += (skb->len - skb->mac_len - iph->ihl * 4);  		} else  { -			if (!(iph->frag_off & htons(IP_DF))) -				iph->id = htons(id++); +			iph->id = htons(id++);  		}  		iph->tot_len = htons(skb->len - skb->mac_len);  		iph->check = 0; diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c index f678507bc82..c6287cd978c 100644 --- a/net/ipv4/devinet.c +++ b/net/ipv4/devinet.c @@ -587,13 +587,16 @@ static void check_lifetime(struct work_struct *work)  {  	unsigned long now, next, next_sec, next_sched;  	struct in_ifaddr *ifa; +	struct hlist_node *n;  	int i;  	now = jiffies;  	next = round_jiffies_up(now + ADDR_CHECK_FREQUENCY); -	rcu_read_lock();  	for (i = 0; i < IN4_ADDR_HSIZE; i++) { +		bool change_needed = false; + +		rcu_read_lock();  		hlist_for_each_entry_rcu(ifa, &inet_addr_lst[i], hash) {  			unsigned long age; @@ -606,16 +609,7 @@ static void check_lifetime(struct work_struct *work)  			if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME &&  			    age >= ifa->ifa_valid_lft) { -				struct in_ifaddr **ifap ; - -				rtnl_lock(); -				for (ifap = &ifa->ifa_dev->ifa_list; -				     *ifap != NULL; ifap = &ifa->ifa_next) { -					if (*ifap == ifa) -						inet_del_ifa(ifa->ifa_dev, -							     ifap, 1); -				} -				rtnl_unlock(); +				change_needed = true;  			} else if (ifa->ifa_preferred_lft ==  				   INFINITY_LIFE_TIME) {  				continue; @@ -625,10 +619,8 @@ static void check_lifetime(struct work_struct *work)  					next = ifa->ifa_tstamp +  					       ifa->ifa_valid_lft * HZ; -				if (!(ifa->ifa_flags & IFA_F_DEPRECATED)) { -					ifa->ifa_flags |= IFA_F_DEPRECATED; -					rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 
0); -				} +				if (!(ifa->ifa_flags & IFA_F_DEPRECATED)) +					change_needed = true;  			} else if (time_before(ifa->ifa_tstamp +  					       ifa->ifa_preferred_lft * HZ,  					       next)) { @@ -636,8 +628,42 @@ static void check_lifetime(struct work_struct *work)  				       ifa->ifa_preferred_lft * HZ;  			}  		} +		rcu_read_unlock(); +		if (!change_needed) +			continue; +		rtnl_lock(); +		hlist_for_each_entry_safe(ifa, n, &inet_addr_lst[i], hash) { +			unsigned long age; + +			if (ifa->ifa_flags & IFA_F_PERMANENT) +				continue; + +			/* We try to batch several events at once. */ +			age = (now - ifa->ifa_tstamp + +			       ADDRCONF_TIMER_FUZZ_MINUS) / HZ; + +			if (ifa->ifa_valid_lft != INFINITY_LIFE_TIME && +			    age >= ifa->ifa_valid_lft) { +				struct in_ifaddr **ifap; + +				for (ifap = &ifa->ifa_dev->ifa_list; +				     *ifap != NULL; ifap = &(*ifap)->ifa_next) { +					if (*ifap == ifa) { +						inet_del_ifa(ifa->ifa_dev, +							     ifap, 1); +						break; +					} +				} +			} else if (ifa->ifa_preferred_lft != +				   INFINITY_LIFE_TIME && +				   age >= ifa->ifa_preferred_lft && +				   !(ifa->ifa_flags & IFA_F_DEPRECATED)) { +				ifa->ifa_flags |= IFA_F_DEPRECATED; +				rtmsg_ifa(RTM_NEWADDR, ifa, NULL, 0); +			} +		} +		rtnl_unlock();  	} -	rcu_read_unlock();  	next_sec = round_jiffies_up(next);  	next_sched = next; @@ -802,8 +828,12 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg  		if (nlh->nlmsg_flags & NLM_F_EXCL ||  		    !(nlh->nlmsg_flags & NLM_F_REPLACE))  			return -EEXIST; - -		set_ifa_lifetime(ifa_existing, valid_lft, prefered_lft); +		ifa = ifa_existing; +		set_ifa_lifetime(ifa, valid_lft, prefered_lft); +		cancel_delayed_work(&check_lifetime_work); +		schedule_delayed_work(&check_lifetime_work, 0); +		rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid); +		blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);  	}  	return 0;  } diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 3b4f0cd2e63..4cfe34d4cc9 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -139,8 +139,6 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)  	/* skb is pure payload to encrypt */ -	err = -ENOMEM; -  	esp = x->data;  	aead = esp->aead;  	alen = crypto_aead_authsize(aead); @@ -176,8 +174,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)  	}  	tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen); -	if (!tmp) +	if (!tmp) { +		err = -ENOMEM;  		goto error; +	}  	seqhi = esp_tmp_seqhi(tmp);  	iv = esp_tmp_iv(aead, tmp, seqhilen); diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 7d1874be1df..786d97aee75 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c @@ -735,6 +735,7 @@ EXPORT_SYMBOL(inet_csk_destroy_sock);   * tcp/dccp_create_openreq_child().   
*/  void inet_csk_prepare_forced_close(struct sock *sk) +	__releases(&sk->sk_lock.slock)  {  	/* sk_clone_lock locked the socket and set refcnt to 2 */  	bh_unlock_sock(sk); diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c index 245ae078a07..f4fd23de9b1 100644 --- a/net/ipv4/inet_fragment.c +++ b/net/ipv4/inet_fragment.c @@ -21,6 +21,7 @@  #include <linux/rtnetlink.h>  #include <linux/slab.h> +#include <net/sock.h>  #include <net/inet_frag.h>  static void inet_frag_secret_rebuild(unsigned long dummy) @@ -277,6 +278,7 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,  	__releases(&f->lock)  {  	struct inet_frag_queue *q; +	int depth = 0;  	hlist_for_each_entry(q, &f->hash[hash], list) {  		if (q->net == nf && f->match(q, key)) { @@ -284,9 +286,25 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,  			read_unlock(&f->lock);  			return q;  		} +		depth++;  	}  	read_unlock(&f->lock); -	return inet_frag_create(nf, f, key); +	if (depth <= INETFRAGS_MAXDEPTH) +		return inet_frag_create(nf, f, key); +	else +		return ERR_PTR(-ENOBUFS);  }  EXPORT_SYMBOL(inet_frag_find); + +void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q, +				   const char *prefix) +{ +	static const char msg[] = "inet_frag_find: Fragment hash bucket" +		" list length grew over limit " __stringify(INETFRAGS_MAXDEPTH) +		". Dropping fragment.\n"; + +	if (PTR_ERR(q) == -ENOBUFS) +		LIMIT_NETDEBUG(KERN_WARNING "%s%s", prefix, msg); +} +EXPORT_SYMBOL(inet_frag_maybe_warn_overflow); diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index b6d30acb600..52c273ea05c 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -248,8 +248,7 @@ static void ip_expire(unsigned long arg)  		if (!head->dev)  			goto out_rcu_unlock; -		/* skb dst is stale, drop it, and perform route lookup again */ -		skb_dst_drop(head); +		/* skb has no dst, perform route lookup again */  		iph = ip_hdr(head);  		err = ip_route_input_noref(head, iph->daddr, iph->saddr,  					   iph->tos, head->dev); @@ -292,14 +291,11 @@ static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)  	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);  	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash); -	if (q == NULL) -		goto out_nomem; - +	if (IS_ERR_OR_NULL(q)) { +		inet_frag_maybe_warn_overflow(q, pr_fmt()); +		return NULL; +	}  	return container_of(q, struct ipq, q); - -out_nomem: -	LIMIT_NETDEBUG(KERN_ERR pr_fmt("ip_frag_create: no memory left !\n")); -	return NULL;  }  /* Is the fragment too far ahead to be part of ipq? 
*/ @@ -526,9 +522,16 @@ found:  		qp->q.max_size = skb->len + ihl;  	if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && -	    qp->q.meat == qp->q.len) -		return ip_frag_reasm(qp, prev, dev); +	    qp->q.meat == qp->q.len) { +		unsigned long orefdst = skb->_skb_refdst; + +		skb->_skb_refdst = 0UL; +		err = ip_frag_reasm(qp, prev, dev); +		skb->_skb_refdst = orefdst; +		return err; +	} +	skb_dst_drop(skb);  	inet_frag_lru_move(&qp->q);  	return -EINPROGRESS; diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index d0ef0e674ec..91d66dbde9c 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -798,10 +798,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev  	if (dev->header_ops && dev->type == ARPHRD_IPGRE) {  		gre_hlen = 0; -		if (skb->protocol == htons(ETH_P_IP)) -			tiph = (const struct iphdr *)skb->data; -		else -			tiph = &tunnel->parms.iph; +		tiph = (const struct iphdr *)skb->data;  	} else {  		gre_hlen = tunnel->hlen;  		tiph = &tunnel->parms.iph; diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c index f6289bf6f33..ec7264514a8 100644 --- a/net/ipv4/ip_options.c +++ b/net/ipv4/ip_options.c @@ -370,7 +370,6 @@ int ip_options_compile(struct net *net,  				}  				switch (optptr[3]&0xF) {  				      case IPOPT_TS_TSONLY: -					opt->ts = optptr - iph;  					if (skb)  						timeptr = &optptr[optptr[2]-1];  					opt->ts_needtime = 1; @@ -381,7 +380,6 @@ int ip_options_compile(struct net *net,  						pp_ptr = optptr + 2;  						goto error;  					} -					opt->ts = optptr - iph;  					if (rt)  {  						spec_dst_fill(&spec_dst, skb);  						memcpy(&optptr[optptr[2]-1], &spec_dst, 4); @@ -396,7 +394,6 @@ int ip_options_compile(struct net *net,  						pp_ptr = optptr + 2;  						goto error;  					} -					opt->ts = optptr - iph;  					{  						__be32 addr;  						memcpy(&addr, &optptr[optptr[2]-1], 4); @@ -423,18 +420,18 @@ int ip_options_compile(struct net *net,  					put_unaligned_be32(midtime, timeptr);  					opt->is_changed = 1;  				} -			} else { +			} else if ((optptr[3]&0xF) != IPOPT_TS_PRESPEC) {  				unsigned int overflow = optptr[3]>>4;  				if (overflow == 15) {  					pp_ptr = optptr + 3;  					goto error;  				} -				opt->ts = optptr - iph;  				if (skb) {  					optptr[3] = (optptr[3]&0xF)|((overflow+1)<<4);  					opt->is_changed = 1;  				}  			} +			opt->ts = optptr - iph;  			break;  		      case IPOPT_RA:  			if (optlen < 4) { diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c index 98cbc687701..bf6c5cf31ae 100644 --- a/net/ipv4/ipconfig.c +++ b/net/ipv4/ipconfig.c @@ -1522,7 +1522,8 @@ static int __init ip_auto_config(void)  		}  	for (i++; i < CONF_NAMESERVERS_MAX; i++)  		if (ic_nameservers[i] != NONE) -			pr_cont(", nameserver%u=%pI4\n", i, &ic_nameservers[i]); +			pr_cont(", nameserver%u=%pI4", i, &ic_nameservers[i]); +	pr_cont("\n");  #endif /* !SILENT */  	return 0; diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig index ce2d43e1f09..0d755c50994 100644 --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig @@ -36,19 +36,6 @@ config NF_CONNTRACK_PROC_COMPAT  	  If unsure, say Y. -config IP_NF_QUEUE -	tristate "IP Userspace queueing via NETLINK (OBSOLETE)" -	depends on NETFILTER_ADVANCED -	help -	  Netfilter has the ability to queue packets to user space: the -	  netlink device can be used to access them using this driver. 
- -	  This option enables the old IPv4-only "ip_queue" implementation -	  which has been obsoleted by the new "nfnetlink_queue" code (see -	  CONFIG_NETFILTER_NETLINK_QUEUE). - -	  To compile it as a module, choose M here.  If unsure, say N. -  config IP_NF_IPTABLES  	tristate "IP tables support (required for filtering/masq/NAT)"  	default m if NETFILTER_ADVANCED=n diff --git a/net/ipv4/netfilter/ipt_rpfilter.c b/net/ipv4/netfilter/ipt_rpfilter.c index c30130062cd..c49dcd0284a 100644 --- a/net/ipv4/netfilter/ipt_rpfilter.c +++ b/net/ipv4/netfilter/ipt_rpfilter.c @@ -66,6 +66,12 @@ static bool rpfilter_lookup_reverse(struct flowi4 *fl4,  	return dev_match;  } +static bool rpfilter_is_local(const struct sk_buff *skb) +{ +	const struct rtable *rt = skb_rtable(skb); +	return rt && (rt->rt_flags & RTCF_LOCAL); +} +  static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)  {  	const struct xt_rpfilter_info *info; @@ -76,7 +82,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)  	info = par->matchinfo;  	invert = info->flags & XT_RPFILTER_INVERT; -	if (par->in->flags & IFF_LOOPBACK) +	if (rpfilter_is_local(skb))  		return true ^ invert;  	iph = ip_hdr(skb); diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index ef54377fb11..397e0f69435 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -349,8 +349,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,  	 * hasn't changed since we received the original syn, but I see  	 * no easy way to do this.  	 */ -	flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk), -			   RT_SCOPE_UNIVERSE, IPPROTO_TCP, +	flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark, +			   RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP,  			   inet_sk_flowi_flags(sk),  			   (opt && opt->srr) ? opt->faddr : ireq->rmt_addr,  			   ireq->loc_addr, th->source, th->dest); diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 47e854fcae2..e2202079070 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -775,7 +775,7 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)  			 * Make sure that we have exactly size bytes  			 * available to the caller, no more, no less.  			 */ -			skb->avail_size = size; +			skb->reserved_tailroom = skb->end - skb->tail - size;  			return skb;  		}  		__kfree_skb(skb); diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 0d9bdacce99..13b9c08fc15 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -113,6 +113,7 @@ int sysctl_tcp_early_retrans __read_mostly = 2;  #define FLAG_DSACKING_ACK	0x800 /* SACK blocks contained D-SACK info */  #define FLAG_NONHEAD_RETRANS_ACKED	0x1000 /* Non-head rexmitted data was ACKed */  #define FLAG_SACK_RENEGING	0x2000 /* snd_una advanced to a sacked seq */ +#define FLAG_UPDATE_TS_RECENT	0x4000 /* tcp_replace_ts_recent() */  #define FLAG_ACKED		(FLAG_DATA_ACKED|FLAG_SYN_ACKED)  #define FLAG_NOT_DUP		(FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED) @@ -2059,11 +2060,8 @@ void tcp_enter_loss(struct sock *sk, int how)  	if (tcp_is_reno(tp))  		tcp_reset_reno_sack(tp); -	if (!how) { -		/* Push undo marker, if it was plain RTO and nothing -		 * was retransmitted. 
*/ -		tp->undo_marker = tp->snd_una; -	} else { +	tp->undo_marker = tp->snd_una; +	if (how) {  		tp->sacked_out = 0;  		tp->fackets_out = 0;  	} @@ -3567,6 +3565,27 @@ static void tcp_send_challenge_ack(struct sock *sk)  	}  } +static void tcp_store_ts_recent(struct tcp_sock *tp) +{ +	tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; +	tp->rx_opt.ts_recent_stamp = get_seconds(); +} + +static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) +{ +	if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { +		/* PAWS bug workaround wrt. ACK frames, the PAWS discard +		 * extra check below makes sure this can only happen +		 * for pure ACK frames.  -DaveM +		 * +		 * Not only, also it occurs for expired timestamps. +		 */ + +		if (tcp_paws_check(&tp->rx_opt, 0)) +			tcp_store_ts_recent(tp); +	} +} +  /* This routine deals with incoming acks, but not outgoing ones. */  static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)  { @@ -3610,6 +3629,12 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)  	prior_fackets = tp->fackets_out;  	prior_in_flight = tcp_packets_in_flight(tp); +	/* ts_recent update must be made after we are sure that the packet +	 * is in window. +	 */ +	if (flag & FLAG_UPDATE_TS_RECENT) +		tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); +  	if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) {  		/* Window is constant, pure forward advance.  		 * No more checks are required. @@ -3930,27 +3955,6 @@ const u8 *tcp_parse_md5sig_option(const struct tcphdr *th)  EXPORT_SYMBOL(tcp_parse_md5sig_option);  #endif -static inline void tcp_store_ts_recent(struct tcp_sock *tp) -{ -	tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; -	tp->rx_opt.ts_recent_stamp = get_seconds(); -} - -static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) -{ -	if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { -		/* PAWS bug workaround wrt. ACK frames, the PAWS discard -		 * extra check below makes sure this can only happen -		 * for pure ACK frames.  -DaveM -		 * -		 * Not only, also it occurs for expired timestamps. -		 */ - -		if (tcp_paws_check(&tp->rx_opt, 0)) -			tcp_store_ts_recent(tp); -	} -} -  /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM   *   * It is not fatal. If this ACK does _not_ change critical state (seqs, window) @@ -5546,14 +5550,9 @@ slow_path:  		return 0;  step5: -	if (tcp_ack(sk, skb, FLAG_SLOWPATH) < 0) +	if (tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0)  		goto discard; -	/* ts_recent update must be made after we are sure that the packet -	 * is in window. -	 */ -	tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); -  	tcp_rcv_rtt_measure_ts(sk, skb);  	/* Process urgent data. */ @@ -5989,7 +5988,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,  	/* step 5: check the ACK field */  	if (true) { -		int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0; +		int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH | +						  FLAG_UPDATE_TS_RECENT) > 0;  		switch (sk->sk_state) {  		case TCP_SYN_RECV: @@ -6140,11 +6140,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,  		}  	} -	/* ts_recent update must be made after we are sure that the packet -	 * is in window. 
-	 */ -	tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); -  	/* step 6: check the URG bit */  	tcp_urg(sk, skb, th); diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 4a8ec457310..d09203c6326 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -274,13 +274,6 @@ static void tcp_v4_mtu_reduced(struct sock *sk)  	struct inet_sock *inet = inet_sk(sk);  	u32 mtu = tcp_sk(sk)->mtu_info; -	/* We are not interested in TCP_LISTEN and open_requests (SYN-ACKs -	 * send out by Linux are always <576bytes so they should go through -	 * unfragmented). -	 */ -	if (sk->sk_state == TCP_LISTEN) -		return; -  	dst = inet_csk_update_pmtu(sk, mtu);  	if (!dst)  		return; @@ -408,6 +401,13 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)  			goto out;  		if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */ +			/* We are not interested in TCP_LISTEN and open_requests +			 * (SYN-ACKs send out by Linux are always <576bytes so +			 * they should go through unfragmented). +			 */ +			if (sk->sk_state == TCP_LISTEN) +				goto out; +  			tp->mtu_info = info;  			if (!sock_owned_by_user(sk)) {  				tcp_v4_mtu_reduced(sk); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index e2b4461074d..509912a5ff9 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -1298,7 +1298,6 @@ static void __pskb_trim_head(struct sk_buff *skb, int len)  	eat = min_t(int, len, skb_headlen(skb));  	if (eat) {  		__skb_pull(skb, eat); -		skb->avail_size -= eat;  		len -= eat;  		if (!len)  			return; @@ -1810,8 +1809,11 @@ static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb)  			goto send_now;  	} -	/* Ok, it looks like it is advisable to defer.  */ -	tp->tso_deferred = 1 | (jiffies << 1); +	/* Ok, it looks like it is advisable to defer. +	 * Do not rearm the timer if already set to not break TCP ACK clocking. +	 */ +	if (!tp->tso_deferred) +		tp->tso_deferred = 1 | (jiffies << 1);  	return true; @@ -2386,8 +2388,12 @@ int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)  	 */  	TCP_SKB_CB(skb)->when = tcp_time_stamp; -	/* make sure skb->data is aligned on arches that require it */ -	if (unlikely(NET_IP_ALIGN && ((unsigned long)skb->data & 3))) { +	/* make sure skb->data is aligned on arches that require it +	 * and check if ack-trimming & collapsing extended the headroom +	 * beyond what csum_start can cover. +	 */ +	if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) || +		     skb_headroom(skb) >= 0xFFFF)) {  		struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER,  						   GFP_ATOMIC);  		return nskb ? 
tcp_transmit_skb(sk, nskb, 0, GFP_ATOMIC) : @@ -2707,6 +2713,7 @@ struct sk_buff *tcp_make_synack(struct sock *sk, struct dst_entry *dst,  	skb_reserve(skb, MAX_TCP_HEADER);  	skb_dst_set(skb, dst); +	security_skb_owned_by(skb, sk);  	mss = dst_metric_advmss(dst);  	if (tp->rx_opt.user_mss && tp->rx_opt.user_mss < mss) diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 265c42cf963..0a073a26372 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -1762,9 +1762,16 @@ int udp_rcv(struct sk_buff *skb)  void udp_destroy_sock(struct sock *sk)  { +	struct udp_sock *up = udp_sk(sk);  	bool slow = lock_sock_fast(sk);  	udp_flush_pending_frames(sk);  	unlock_sock_fast(sk, slow); +	if (static_key_false(&udp_encap_needed) && up->encap_type) { +		void (*encap_destroy)(struct sock *sk); +		encap_destroy = ACCESS_ONCE(up->encap_destroy); +		if (encap_destroy) +			encap_destroy(sk); +	}  }  /* diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index f2c7e615f90..dae802c0af7 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -168,8 +168,6 @@ static void inet6_prefix_notify(int event, struct inet6_dev *idev,  static bool ipv6_chk_same_addr(struct net *net, const struct in6_addr *addr,  			       struct net_device *dev); -static ATOMIC_NOTIFIER_HEAD(inet6addr_chain); -  static struct ipv6_devconf ipv6_devconf __read_mostly = {  	.forwarding		= 0,  	.hop_limit		= IPV6_DEFAULT_HOPLIMIT, @@ -837,7 +835,7 @@ out2:  	rcu_read_unlock_bh();  	if (likely(err == 0)) -		atomic_notifier_call_chain(&inet6addr_chain, NETDEV_UP, ifa); +		inet6addr_notifier_call_chain(NETDEV_UP, ifa);  	else {  		kfree(ifa);  		ifa = ERR_PTR(err); @@ -927,7 +925,7 @@ static void ipv6_del_addr(struct inet6_ifaddr *ifp)  	ipv6_ifa_notify(RTM_DELADDR, ifp); -	atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifp); +	inet6addr_notifier_call_chain(NETDEV_DOWN, ifp);  	/*  	 * Purge or update corresponding prefix @@ -2529,6 +2527,9 @@ static void sit_add_v4_addrs(struct inet6_dev *idev)  static void init_loopback(struct net_device *dev)  {  	struct inet6_dev  *idev; +	struct net_device *sp_dev; +	struct inet6_ifaddr *sp_ifa; +	struct rt6_info *sp_rt;  	/* ::1 */ @@ -2540,6 +2541,30 @@ static void init_loopback(struct net_device *dev)  	}  	add_addr(idev, &in6addr_loopback, 128, IFA_HOST); + +	/* Add routes to other interface's IPv6 addresses */ +	for_each_netdev(dev_net(dev), sp_dev) { +		if (!strcmp(sp_dev->name, dev->name)) +			continue; + +		idev = __in6_dev_get(sp_dev); +		if (!idev) +			continue; + +		read_lock_bh(&idev->lock); +		list_for_each_entry(sp_ifa, &idev->addr_list, if_list) { + +			if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE)) +				continue; + +			sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0); + +			/* Failure cases are ignored */ +			if (!IS_ERR(sp_rt)) +				ip6_ins_rt(sp_rt); +		} +		read_unlock_bh(&idev->lock); +	}  }  static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr) @@ -2961,7 +2986,7 @@ static int addrconf_ifdown(struct net_device *dev, int how)  		if (state != INET6_IFADDR_STATE_DEAD) {  			__ipv6_ifa_notify(RTM_DELADDR, ifa); -			atomic_notifier_call_chain(&inet6addr_chain, NETDEV_DOWN, ifa); +			inet6addr_notifier_call_chain(NETDEV_DOWN, ifa);  		}  		in6_ifa_put(ifa); @@ -4784,26 +4809,20 @@ static void addrconf_sysctl_unregister(struct inet6_dev *idev)  static int __net_init addrconf_init_net(struct net *net)  { -	int err; +	int err = -ENOMEM;  	struct ipv6_devconf *all, *dflt; -	err = -ENOMEM; -	all = &ipv6_devconf; -	dflt = 
&ipv6_devconf_dflt; +	all = kmemdup(&ipv6_devconf, sizeof(ipv6_devconf), GFP_KERNEL); +	if (all == NULL) +		goto err_alloc_all; -	if (!net_eq(net, &init_net)) { -		all = kmemdup(all, sizeof(ipv6_devconf), GFP_KERNEL); -		if (all == NULL) -			goto err_alloc_all; +	dflt = kmemdup(&ipv6_devconf_dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL); +	if (dflt == NULL) +		goto err_alloc_dflt; -		dflt = kmemdup(dflt, sizeof(ipv6_devconf_dflt), GFP_KERNEL); -		if (dflt == NULL) -			goto err_alloc_dflt; -	} else { -		/* these will be inherited by all namespaces */ -		dflt->autoconf = ipv6_defaults.autoconf; -		dflt->disable_ipv6 = ipv6_defaults.disable_ipv6; -	} +	/* these will be inherited by all namespaces */ +	dflt->autoconf = ipv6_defaults.autoconf; +	dflt->disable_ipv6 = ipv6_defaults.disable_ipv6;  	net->ipv6.devconf_all = all;  	net->ipv6.devconf_dflt = dflt; @@ -4848,22 +4867,6 @@ static struct pernet_operations addrconf_ops = {  	.exit = addrconf_exit_net,  }; -/* - *      Device notifier - */ - -int register_inet6addr_notifier(struct notifier_block *nb) -{ -	return atomic_notifier_chain_register(&inet6addr_chain, nb); -} -EXPORT_SYMBOL(register_inet6addr_notifier); - -int unregister_inet6addr_notifier(struct notifier_block *nb) -{ -	return atomic_notifier_chain_unregister(&inet6addr_chain, nb); -} -EXPORT_SYMBOL(unregister_inet6addr_notifier); -  static struct rtnl_af_ops inet6_ops = {  	.family		  = AF_INET6,  	.fill_link_af	  = inet6_fill_link_af, diff --git a/net/ipv6/addrconf_core.c b/net/ipv6/addrconf_core.c index d051e5f4bf3..72104562c86 100644 --- a/net/ipv6/addrconf_core.c +++ b/net/ipv6/addrconf_core.c @@ -78,3 +78,22 @@ int __ipv6_addr_type(const struct in6_addr *addr)  }  EXPORT_SYMBOL(__ipv6_addr_type); +static ATOMIC_NOTIFIER_HEAD(inet6addr_chain); + +int register_inet6addr_notifier(struct notifier_block *nb) +{ +	return atomic_notifier_chain_register(&inet6addr_chain, nb); +} +EXPORT_SYMBOL(register_inet6addr_notifier); + +int unregister_inet6addr_notifier(struct notifier_block *nb) +{ +	return atomic_notifier_chain_unregister(&inet6addr_chain, nb); +} +EXPORT_SYMBOL(unregister_inet6addr_notifier); + +int inet6addr_notifier_call_chain(unsigned long val, void *v) +{ +	return atomic_notifier_call_chain(&inet6addr_chain, val, v); +} +EXPORT_SYMBOL(inet6addr_notifier_call_chain); diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c index b1876e52091..2bab2aa5974 100644 --- a/net/ipv6/ip6_input.c +++ b/net/ipv6/ip6_input.c @@ -118,6 +118,18 @@ int ipv6_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt  	    ipv6_addr_loopback(&hdr->daddr))  		goto err; +	/* RFC4291 Errata ID: 3480 +	 * Interface-Local scope spans only a single interface on a +	 * node and is useful only for loopback transmission of +	 * multicast.  Packets with interface-local scope received +	 * from another node must be discarded. 
+	 */ +	if (!(skb->pkt_type == PACKET_LOOPBACK || +	      dev->flags & IFF_LOOPBACK) && +	    ipv6_addr_is_multicast(&hdr->daddr) && +	    IPV6_ADDR_MC_SCOPE(&hdr->daddr) == 1) +		goto err; +  	/* RFC4291 2.7  	 * Nodes must not originate a packet to a multicast address whose scope  	 * field contains the reserved value 0; if such a packet is received, it @@ -281,7 +293,8 @@ int ip6_mc_input(struct sk_buff *skb)  	 *      IPv6 multicast router mode is now supported ;)  	 */  	if (dev_net(skb->dev)->ipv6.devconf_all->mc_forwarding && -	    !(ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) && +	    !(ipv6_addr_type(&hdr->daddr) & +	      (IPV6_ADDR_LOOPBACK|IPV6_ADDR_LINKLOCAL)) &&  	    likely(!(IP6CB(skb)->flags & IP6SKB_FORWARDED))) {  		/*  		 * Okay, we try to forward - split and duplicate diff --git a/net/ipv6/netfilter/ip6t_NPT.c b/net/ipv6/netfilter/ip6t_NPT.c index 83acc1405a1..cb631143721 100644 --- a/net/ipv6/netfilter/ip6t_NPT.c +++ b/net/ipv6/netfilter/ip6t_NPT.c @@ -57,7 +57,7 @@ static bool ip6t_npt_map_pfx(const struct ip6t_npt_tginfo *npt,  		if (pfx_len - i >= 32)  			mask = 0;  		else -			mask = htonl(~((1 << (pfx_len - i)) - 1)); +			mask = htonl((1 << (i - pfx_len + 32)) - 1);  		idx = i / 32;  		addr->s6_addr32[idx] &= mask; @@ -114,6 +114,7 @@ ip6t_dnpt_tg(struct sk_buff *skb, const struct xt_action_param *par)  static struct xt_target ip6t_npt_target_reg[] __read_mostly = {  	{  		.name		= "SNPT", +		.table		= "mangle",  		.target		= ip6t_snpt_tg,  		.targetsize	= sizeof(struct ip6t_npt_tginfo),  		.checkentry	= ip6t_npt_checkentry, @@ -124,6 +125,7 @@ static struct xt_target ip6t_npt_target_reg[] __read_mostly = {  	},  	{  		.name		= "DNPT", +		.table		= "mangle",  		.target		= ip6t_dnpt_tg,  		.targetsize	= sizeof(struct ip6t_npt_tginfo),  		.checkentry	= ip6t_npt_checkentry, diff --git a/net/ipv6/netfilter/ip6t_rpfilter.c b/net/ipv6/netfilter/ip6t_rpfilter.c index 5060d54199a..e0983f3648a 100644 --- a/net/ipv6/netfilter/ip6t_rpfilter.c +++ b/net/ipv6/netfilter/ip6t_rpfilter.c @@ -71,6 +71,12 @@ static bool rpfilter_lookup_reverse6(const struct sk_buff *skb,  	return ret;  } +static bool rpfilter_is_local(const struct sk_buff *skb) +{ +	const struct rt6_info *rt = (const void *) skb_dst(skb); +	return rt && (rt->rt6i_flags & RTF_LOCAL); +} +  static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)  {  	const struct xt_rpfilter_info *info = par->matchinfo; @@ -78,7 +84,7 @@ static bool rpfilter_mt(const struct sk_buff *skb, struct xt_action_param *par)  	struct ipv6hdr *iph;  	bool invert = info->flags & XT_RPFILTER_INVERT; -	if (par->in->flags & IFF_LOOPBACK) +	if (rpfilter_is_local(skb))  		return true ^ invert;  	iph = ipv6_hdr(skb); diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c index 54087e96d7b..6700069949d 100644 --- a/net/ipv6/netfilter/nf_conntrack_reasm.c +++ b/net/ipv6/netfilter/nf_conntrack_reasm.c @@ -14,6 +14,8 @@   * 2 of the License, or (at your option) any later version.   
*/ +#define pr_fmt(fmt) "IPv6-nf: " fmt +  #include <linux/errno.h>  #include <linux/types.h>  #include <linux/string.h> @@ -180,13 +182,11 @@ static inline struct frag_queue *fq_find(struct net *net, __be32 id,  	q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash);  	local_bh_enable(); -	if (q == NULL) -		goto oom; - +	if (IS_ERR_OR_NULL(q)) { +		inet_frag_maybe_warn_overflow(q, pr_fmt()); +		return NULL; +	}  	return container_of(q, struct frag_queue, q); - -oom: -	return NULL;  } diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 3c6a77290c6..0ba10e53a62 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -26,6 +26,9 @@   *	YOSHIFUJI,H. @USAGI	Always remove fragment header to   *				calculate ICV correctly.   */ + +#define pr_fmt(fmt) "IPv6: " fmt +  #include <linux/errno.h>  #include <linux/types.h>  #include <linux/string.h> @@ -185,9 +188,10 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src, const struct in6  	hash = inet6_hash_frag(id, src, dst, ip6_frags.rnd);  	q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash); -	if (q == NULL) +	if (IS_ERR_OR_NULL(q)) { +		inet_frag_maybe_warn_overflow(q, pr_fmt());  		return NULL; - +	}  	return container_of(q, struct frag_queue, q);  } @@ -326,9 +330,17 @@ found:  	}  	if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && -	    fq->q.meat == fq->q.len) -		return ip6_frag_reasm(fq, prev, dev); +	    fq->q.meat == fq->q.len) { +		int res; +		unsigned long orefdst = skb->_skb_refdst; + +		skb->_skb_refdst = 0UL; +		res = ip6_frag_reasm(fq, prev, dev); +		skb->_skb_refdst = orefdst; +		return res; +	} +	skb_dst_drop(skb);  	inet_frag_lru_move(&fq->q);  	return -1; diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 9b6460055df..46a5be85be8 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -386,9 +386,17 @@ static void tcp_v6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,  		if (dst)  			dst->ops->redirect(dst, sk, skb); +		goto out;  	}  	if (type == ICMPV6_PKT_TOOBIG) { +		/* We are not interested in TCP_LISTEN and open_requests +		 * (SYN-ACKs send out by Linux are always <576bytes so +		 * they should go through unfragmented). 
+		 */ +		if (sk->sk_state == TCP_LISTEN) +			goto out; +  		tp->mtu_info = ntohl(info);  		if (!sock_owned_by_user(sk))  			tcp_v6_mtu_reduced(sk); diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index 599e1ba6d1c..d8e5e852fc7 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -1285,10 +1285,18 @@ do_confirm:  void udpv6_destroy_sock(struct sock *sk)  { +	struct udp_sock *up = udp_sk(sk);  	lock_sock(sk);  	udp_v6_flush_pending_frames(sk);  	release_sock(sk); +	if (static_key_false(&udpv6_encap_needed) && up->encap_type) { +		void (*encap_destroy)(struct sock *sk); +		encap_destroy = ACCESS_ONCE(up->encap_destroy); +		if (encap_destroy) +			encap_destroy(sk); +	} +  	inet6_destroy_sock(sk);  } diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index d07e3a62644..e493b3397ae 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c @@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock,  	IRDA_DEBUG(4, "%s()\n", __func__); +	msg->msg_namelen = 0; +  	skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,  				flags & MSG_DONTWAIT, &err);  	if (!skb) @@ -2583,8 +2585,10 @@ bed:  				    NULL, NULL, NULL);  		/* Check if the we got some results */ -		if (!self->cachedaddr) -			return -EAGAIN;		/* Didn't find any devices */ +		if (!self->cachedaddr) { +			err = -EAGAIN;		/* Didn't find any devices */ +			goto out; +		}  		daddr = self->cachedaddr;  		/* Cleanup */  		self->cachedaddr = 0; diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c index 9a5fd3c3e53..362ba47968e 100644 --- a/net/irda/ircomm/ircomm_tty.c +++ b/net/irda/ircomm/ircomm_tty.c @@ -280,7 +280,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,  	struct tty_port *port = &self->port;  	DECLARE_WAITQUEUE(wait, current);  	int		retval; -	int		do_clocal = 0, extra_count = 0; +	int		do_clocal = 0;  	unsigned long	flags;  	IRDA_DEBUG(2, "%s()\n", __func__ ); @@ -289,8 +289,15 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,  	 * If non-blocking mode is set, or the port is not enabled,  	 * then make the check up front and then exit.  	 
*/ -	if (filp->f_flags & O_NONBLOCK || tty->flags & (1 << TTY_IO_ERROR)){ -		/* nonblock mode is set or port is not enabled */ +	if (test_bit(TTY_IO_ERROR, &tty->flags)) { +		port->flags |= ASYNC_NORMAL_ACTIVE; +		return 0; +	} + +	if (filp->f_flags & O_NONBLOCK) { +		/* nonblock mode is set */ +		if (tty->termios.c_cflag & CBAUD) +			tty_port_raise_dtr_rts(port);  		port->flags |= ASYNC_NORMAL_ACTIVE;  		IRDA_DEBUG(1, "%s(), O_NONBLOCK requested!\n", __func__ );  		return 0; @@ -315,18 +322,16 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,  	      __FILE__, __LINE__, tty->driver->name, port->count);  	spin_lock_irqsave(&port->lock, flags); -	if (!tty_hung_up_p(filp)) { -		extra_count = 1; +	if (!tty_hung_up_p(filp))  		port->count--; -	} -	spin_unlock_irqrestore(&port->lock, flags);  	port->blocked_open++; +	spin_unlock_irqrestore(&port->lock, flags);  	while (1) {  		if (tty->termios.c_cflag & CBAUD)  			tty_port_raise_dtr_rts(port); -		current->state = TASK_INTERRUPTIBLE; +		set_current_state(TASK_INTERRUPTIBLE);  		if (tty_hung_up_p(filp) ||  		    !test_bit(ASYNCB_INITIALIZED, &port->flags)) { @@ -361,13 +366,11 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,  	__set_current_state(TASK_RUNNING);  	remove_wait_queue(&port->open_wait, &wait); -	if (extra_count) { -		/* ++ is not atomic, so this should be protected - Jean II */ -		spin_lock_irqsave(&port->lock, flags); +	spin_lock_irqsave(&port->lock, flags); +	if (!tty_hung_up_p(filp))  		port->count++; -		spin_unlock_irqrestore(&port->lock, flags); -	}  	port->blocked_open--; +	spin_unlock_irqrestore(&port->lock, flags);  	IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",  	      __FILE__, __LINE__, tty->driver->name, port->count); diff --git a/net/irda/iriap.c b/net/irda/iriap.c index 29340a9a6fb..e1b37f5a269 100644 --- a/net/irda/iriap.c +++ b/net/irda/iriap.c @@ -303,7 +303,8 @@ static void iriap_disconnect_indication(void *instance, void *sap,  {  	struct iriap_cb *self; -	IRDA_DEBUG(4, "%s(), reason=%s\n", __func__, irlmp_reasons[reason]); +	IRDA_DEBUG(4, "%s(), reason=%s [%d]\n", __func__, +		   irlmp_reason_str(reason), reason);  	self = instance; diff --git a/net/irda/irlmp.c b/net/irda/irlmp.c index 6115a44c0a2..1064621da6f 100644 --- a/net/irda/irlmp.c +++ b/net/irda/irlmp.c @@ -66,8 +66,15 @@ const char *irlmp_reasons[] = {  	"LM_LAP_RESET",  	"LM_INIT_DISCONNECT",  	"ERROR, NOT USED", +	"UNKNOWN",  }; +const char *irlmp_reason_str(LM_REASON reason) +{ +	reason = min_t(size_t, reason, ARRAY_SIZE(irlmp_reasons) - 1); +	return irlmp_reasons[reason]; +} +  /*   * Function irlmp_init (void)   * @@ -747,7 +754,8 @@ void irlmp_disconnect_indication(struct lsap_cb *self, LM_REASON reason,  {  	struct lsap_cb *lsap; -	IRDA_DEBUG(1, "%s(), reason=%s\n", __func__, irlmp_reasons[reason]); +	IRDA_DEBUG(1, "%s(), reason=%s [%d]\n", __func__, +		   irlmp_reason_str(reason), reason);  	IRDA_ASSERT(self != NULL, return;);  	IRDA_ASSERT(self->magic == LMP_LSAP_MAGIC, return;); diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index a7d11ffe428..206ce6db2c3 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -49,12 +49,6 @@ static const u8 iprm_shutdown[8] =  #define TRGCLS_SIZE	(sizeof(((struct iucv_message *)0)->class)) -/* macros to set/get socket control buffer at correct offset */ -#define CB_TAG(skb)	((skb)->cb)		/* iucv message tag */ -#define CB_TAG_LEN	(sizeof(((struct iucv_message *) 0)->tag)) -#define CB_TRGCLS(skb)	((skb)->cb + CB_TAG_LEN) /* iucv msg 
target class */ -#define CB_TRGCLS_LEN	(TRGCLS_SIZE) -  #define __iucv_sock_wait(sk, condition, timeo, ret)			\  do {									\  	DEFINE_WAIT(__wait);						\ @@ -1141,7 +1135,7 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,  	/* increment and save iucv message tag for msg_completion cbk */  	txmsg.tag = iucv->send_tag++; -	memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN); +	IUCV_SKB_CB(skb)->tag = txmsg.tag;  	if (iucv->transport == AF_IUCV_TRANS_HIPER) {  		atomic_inc(&iucv->msg_sent); @@ -1224,7 +1218,7 @@ static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len)  			return -ENOMEM;  		/* copy target class to control buffer of new skb */ -		memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN); +		IUCV_SKB_CB(nskb)->class = IUCV_SKB_CB(skb)->class;  		/* copy data fragment */  		memcpy(nskb->data, skb->data + copied, size); @@ -1256,7 +1250,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,  	/* store msg target class in the second 4 bytes of skb ctrl buffer */  	/* Note: the first 4 bytes are reserved for msg tag */ -	memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN); +	IUCV_SKB_CB(skb)->class = msg->class;  	/* check for special IPRM messages (e.g. iucv_sock_shutdown) */  	if ((msg->flags & IUCV_IPRMDATA) && len > 7) { @@ -1292,6 +1286,7 @@ static void iucv_process_message(struct sock *sk, struct sk_buff *skb,  		}  	} +	IUCV_SKB_CB(skb)->offset = 0;  	if (sock_queue_rcv_skb(sk, skb))  		skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb);  } @@ -1327,6 +1322,9 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,  	unsigned int copied, rlen;  	struct sk_buff *skb, *rskb, *cskb;  	int err = 0; +	u32 offset; + +	msg->msg_namelen = 0;  	if ((sk->sk_state == IUCV_DISCONN) &&  	    skb_queue_empty(&iucv->backlog_skb_q) && @@ -1346,13 +1344,14 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,  		return err;  	} -	rlen   = skb->len;		/* real length of skb */ +	offset = IUCV_SKB_CB(skb)->offset; +	rlen   = skb->len - offset;		/* real length of skb */  	copied = min_t(unsigned int, rlen, len);  	if (!rlen)  		sk->sk_shutdown = sk->sk_shutdown | RCV_SHUTDOWN;  	cskb = skb; -	if (skb_copy_datagram_iovec(cskb, 0, msg->msg_iov, copied)) { +	if (skb_copy_datagram_iovec(cskb, offset, msg->msg_iov, copied)) {  		if (!(flags & MSG_PEEK))  			skb_queue_head(&sk->sk_receive_queue, skb);  		return -EFAULT; @@ -1370,7 +1369,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,  	 * get the trgcls from the control buffer of the skb due to  	 * fragmentation of original iucv message. 
*/  	err = put_cmsg(msg, SOL_IUCV, SCM_IUCV_TRGCLS, -			CB_TRGCLS_LEN, CB_TRGCLS(skb)); +		       sizeof(IUCV_SKB_CB(skb)->class), +		       (void *)&IUCV_SKB_CB(skb)->class);  	if (err) {  		if (!(flags & MSG_PEEK))  			skb_queue_head(&sk->sk_receive_queue, skb); @@ -1382,9 +1382,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,  		/* SOCK_STREAM: re-queue skb if it contains unreceived data */  		if (sk->sk_type == SOCK_STREAM) { -			skb_pull(skb, copied); -			if (skb->len) { -				skb_queue_head(&sk->sk_receive_queue, skb); +			if (copied < rlen) { +				IUCV_SKB_CB(skb)->offset = offset + copied;  				goto done;  			}  		} @@ -1403,6 +1402,7 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock,  		spin_lock_bh(&iucv->message_q.lock);  		rskb = skb_dequeue(&iucv->backlog_skb_q);  		while (rskb) { +			IUCV_SKB_CB(rskb)->offset = 0;  			if (sock_queue_rcv_skb(sk, rskb)) {  				skb_queue_head(&iucv->backlog_skb_q,  						rskb); @@ -1830,7 +1830,7 @@ static void iucv_callback_txdone(struct iucv_path *path,  		spin_lock_irqsave(&list->lock, flags);  		while (list_skb != (struct sk_buff *)list) { -			if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) { +			if (msg->tag != IUCV_SKB_CB(list_skb)->tag) {  				this = list_skb;  				break;  			} @@ -2091,6 +2091,7 @@ static int afiucv_hs_callback_rx(struct sock *sk, struct sk_buff *skb)  	skb_pull(skb, sizeof(struct af_iucv_trans_hdr));  	skb_reset_transport_header(skb);  	skb_reset_network_header(skb); +	IUCV_SKB_CB(skb)->offset = 0;  	spin_lock(&iucv->message_q.lock);  	if (skb_queue_empty(&iucv->backlog_skb_q)) {  		if (sock_queue_rcv_skb(sk, skb)) { @@ -2195,8 +2196,7 @@ static int afiucv_hs_rcv(struct sk_buff *skb, struct net_device *dev,  		/* fall through and receive zero length data */  	case 0:  		/* plain data frame */ -		memcpy(CB_TRGCLS(skb), &trans_hdr->iucv_hdr.class, -		       CB_TRGCLS_LEN); +		IUCV_SKB_CB(skb)->class = trans_hdr->iucv_hdr.class;  		err = afiucv_hs_callback_rx(sk, skb);  		break;  	default: diff --git a/net/key/af_key.c b/net/key/af_key.c index 556fdafdd1e..5b1e5af2571 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -2201,7 +2201,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_  		      XFRM_POLICY_BLOCK : XFRM_POLICY_ALLOW);  	xp->priority = pol->sadb_x_policy_priority; -	sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1], +	sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1];  	xp->family = pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.saddr);  	if (!xp->family) {  		err = -EINVAL; @@ -2214,7 +2214,7 @@ static int pfkey_spdadd(struct sock *sk, struct sk_buff *skb, const struct sadb_  	if (xp->selector.sport)  		xp->selector.sport_mask = htons(0xffff); -	sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1], +	sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1];  	pfkey_sadb_addr2xfrm_addr(sa, &xp->selector.daddr);  	xp->selector.prefixlen_d = sa->sadb_address_prefixlen; @@ -2315,7 +2315,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa  	memset(&sel, 0, sizeof(sel)); -	sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1], +	sa = ext_hdrs[SADB_EXT_ADDRESS_SRC-1];  	sel.family = pfkey_sadb_addr2xfrm_addr(sa, &sel.saddr);  	sel.prefixlen_s = sa->sadb_address_prefixlen;  	sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto); @@ -2323,7 +2323,7 @@ static int pfkey_spddelete(struct sock *sk, struct sk_buff *skb, const struct sa  	if (sel.sport)  		sel.sport_mask = htons(0xffff); -	sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1], +	sa = ext_hdrs[SADB_EXT_ADDRESS_DST-1];  	
pfkey_sadb_addr2xfrm_addr(sa, &sel.daddr);  	sel.prefixlen_d = sa->sadb_address_prefixlen;  	sel.proto = pfkey_proto_to_xfrm(sa->sadb_address_proto); @@ -2693,6 +2693,7 @@ static int key_notify_policy_flush(const struct km_event *c)  	hdr->sadb_msg_pid = c->portid;  	hdr->sadb_msg_version = PF_KEY_V2;  	hdr->sadb_msg_errno = (uint8_t) 0; +	hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC;  	hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t));  	pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net);  	return 0; diff --git a/net/l2tp/l2tp_core.c b/net/l2tp/l2tp_core.c index d36875f3427..8aecf5df665 100644 --- a/net/l2tp/l2tp_core.c +++ b/net/l2tp/l2tp_core.c @@ -114,7 +114,6 @@ struct l2tp_net {  static void l2tp_session_set_header_len(struct l2tp_session *session, int version);  static void l2tp_tunnel_free(struct l2tp_tunnel *tunnel); -static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);  static inline struct l2tp_net *l2tp_pernet(struct net *net)  { @@ -192,6 +191,7 @@ struct sock *l2tp_tunnel_sock_lookup(struct l2tp_tunnel *tunnel)  	} else {  		/* Socket is owned by kernelspace */  		sk = tunnel->sock; +		sock_hold(sk);  	}  out: @@ -210,6 +210,7 @@ void l2tp_tunnel_sock_put(struct sock *sk)  		}  		sock_put(sk);  	} +	sock_put(sk);  }  EXPORT_SYMBOL_GPL(l2tp_tunnel_sock_put); @@ -373,10 +374,8 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk  	struct sk_buff *skbp;  	struct sk_buff *tmp;  	u32 ns = L2TP_SKB_CB(skb)->ns; -	struct l2tp_stats *sstats;  	spin_lock_bh(&session->reorder_q.lock); -	sstats = &session->stats;  	skb_queue_walk_safe(&session->reorder_q, skbp, tmp) {  		if (L2TP_SKB_CB(skbp)->ns > ns) {  			__skb_queue_before(&session->reorder_q, skbp, skb); @@ -384,9 +383,7 @@ static void l2tp_recv_queue_skb(struct l2tp_session *session, struct sk_buff *sk  				 "%s: pkt %hu, inserted before %hu, reorder_q len=%d\n",  				 session->name, ns, L2TP_SKB_CB(skbp)->ns,  				 skb_queue_len(&session->reorder_q)); -			u64_stats_update_begin(&sstats->syncp); -			sstats->rx_oos_packets++; -			u64_stats_update_end(&sstats->syncp); +			atomic_long_inc(&session->stats.rx_oos_packets);  			goto out;  		}  	} @@ -403,23 +400,16 @@ static void l2tp_recv_dequeue_skb(struct l2tp_session *session, struct sk_buff *  {  	struct l2tp_tunnel *tunnel = session->tunnel;  	int length = L2TP_SKB_CB(skb)->length; -	struct l2tp_stats *tstats, *sstats;  	/* We're about to requeue the skb, so return resources  	 * to its current owner (a socket receive buffer).  	 
*/  	skb_orphan(skb); -	tstats = &tunnel->stats; -	u64_stats_update_begin(&tstats->syncp); -	sstats = &session->stats; -	u64_stats_update_begin(&sstats->syncp); -	tstats->rx_packets++; -	tstats->rx_bytes += length; -	sstats->rx_packets++; -	sstats->rx_bytes += length; -	u64_stats_update_end(&tstats->syncp); -	u64_stats_update_end(&sstats->syncp); +	atomic_long_inc(&tunnel->stats.rx_packets); +	atomic_long_add(length, &tunnel->stats.rx_bytes); +	atomic_long_inc(&session->stats.rx_packets); +	atomic_long_add(length, &session->stats.rx_bytes);  	if (L2TP_SKB_CB(skb)->has_seq) {  		/* Bump our Nr */ @@ -450,7 +440,6 @@ static void l2tp_recv_dequeue(struct l2tp_session *session)  {  	struct sk_buff *skb;  	struct sk_buff *tmp; -	struct l2tp_stats *sstats;  	/* If the pkt at the head of the queue has the nr that we  	 * expect to send up next, dequeue it and any other @@ -458,13 +447,10 @@ static void l2tp_recv_dequeue(struct l2tp_session *session)  	 */  start:  	spin_lock_bh(&session->reorder_q.lock); -	sstats = &session->stats;  	skb_queue_walk_safe(&session->reorder_q, skb, tmp) {  		if (time_after(jiffies, L2TP_SKB_CB(skb)->expires)) { -			u64_stats_update_begin(&sstats->syncp); -			sstats->rx_seq_discards++; -			sstats->rx_errors++; -			u64_stats_update_end(&sstats->syncp); +			atomic_long_inc(&session->stats.rx_seq_discards); +			atomic_long_inc(&session->stats.rx_errors);  			l2tp_dbg(session, L2TP_MSG_SEQ,  				 "%s: oos pkt %u len %d discarded (too old), waiting for %u, reorder_q_len=%d\n",  				 session->name, L2TP_SKB_CB(skb)->ns, @@ -623,7 +609,6 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,  	struct l2tp_tunnel *tunnel = session->tunnel;  	int offset;  	u32 ns, nr; -	struct l2tp_stats *sstats = &session->stats;  	/* The ref count is increased since we now hold a pointer to  	 * the session. Take care to decrement the refcnt when exiting @@ -640,9 +625,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,  				  "%s: cookie mismatch (%u/%u). Discarding.\n",  				  tunnel->name, tunnel->tunnel_id,  				  session->session_id); -			u64_stats_update_begin(&sstats->syncp); -			sstats->rx_cookie_discards++; -			u64_stats_update_end(&sstats->syncp); +			atomic_long_inc(&session->stats.rx_cookie_discards);  			goto discard;  		}  		ptr += session->peer_cookie_len; @@ -711,9 +694,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,  			l2tp_warn(session, L2TP_MSG_SEQ,  				  "%s: recv data has no seq numbers when required. Discarding.\n",  				  session->name); -			u64_stats_update_begin(&sstats->syncp); -			sstats->rx_seq_discards++; -			u64_stats_update_end(&sstats->syncp); +			atomic_long_inc(&session->stats.rx_seq_discards);  			goto discard;  		} @@ -732,9 +713,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,  			l2tp_warn(session, L2TP_MSG_SEQ,  				  "%s: recv data has no seq numbers when required. 
Discarding.\n",  				  session->name); -			u64_stats_update_begin(&sstats->syncp); -			sstats->rx_seq_discards++; -			u64_stats_update_end(&sstats->syncp); +			atomic_long_inc(&session->stats.rx_seq_discards);  			goto discard;  		}  	} @@ -788,9 +767,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,  			 * packets  			 */  			if (L2TP_SKB_CB(skb)->ns != session->nr) { -				u64_stats_update_begin(&sstats->syncp); -				sstats->rx_seq_discards++; -				u64_stats_update_end(&sstats->syncp); +				atomic_long_inc(&session->stats.rx_seq_discards);  				l2tp_dbg(session, L2TP_MSG_SEQ,  					 "%s: oos pkt %u len %d discarded, waiting for %u, reorder_q_len=%d\n",  					 session->name, L2TP_SKB_CB(skb)->ns, @@ -816,9 +793,7 @@ void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb,  	return;  discard: -	u64_stats_update_begin(&sstats->syncp); -	sstats->rx_errors++; -	u64_stats_update_end(&sstats->syncp); +	atomic_long_inc(&session->stats.rx_errors);  	kfree_skb(skb);  	if (session->deref) @@ -828,6 +803,23 @@ discard:  }  EXPORT_SYMBOL(l2tp_recv_common); +/* Drop skbs from the session's reorder_q + */ +int l2tp_session_queue_purge(struct l2tp_session *session) +{ +	struct sk_buff *skb = NULL; +	BUG_ON(!session); +	BUG_ON(session->magic != L2TP_SESSION_MAGIC); +	while ((skb = skb_dequeue(&session->reorder_q))) { +		atomic_long_inc(&session->stats.rx_errors); +		kfree_skb(skb); +		if (session->deref) +			(*session->deref)(session); +	} +	return 0; +} +EXPORT_SYMBOL_GPL(l2tp_session_queue_purge); +  /* Internal UDP receive frame. Do the real work of receiving an L2TP data frame   * here. The skb is not on a list when we get here.   * Returns 0 if the packet was a data packet and was successfully passed on. @@ -843,7 +835,6 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,  	u32 tunnel_id, session_id;  	u16 version;  	int length; -	struct l2tp_stats *tstats;  	if (tunnel->sock && l2tp_verify_udp_checksum(tunnel->sock, skb))  		goto discard_bad_csum; @@ -932,10 +923,7 @@ static int l2tp_udp_recv_core(struct l2tp_tunnel *tunnel, struct sk_buff *skb,  discard_bad_csum:  	LIMIT_NETDEBUG("%s: UDP: bad checksum\n", tunnel->name);  	UDP_INC_STATS_USER(tunnel->l2tp_net, UDP_MIB_INERRORS, 0); -	tstats = &tunnel->stats; -	u64_stats_update_begin(&tstats->syncp); -	tstats->rx_errors++; -	u64_stats_update_end(&tstats->syncp); +	atomic_long_inc(&tunnel->stats.rx_errors);  	kfree_skb(skb);  	return 0; @@ -1062,7 +1050,6 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,  	struct l2tp_tunnel *tunnel = session->tunnel;  	unsigned int len = skb->len;  	int error; -	struct l2tp_stats *tstats, *sstats;  	/* Debug */  	if (session->send_seq) @@ -1091,21 +1078,15 @@ static int l2tp_xmit_core(struct l2tp_session *session, struct sk_buff *skb,  		error = ip_queue_xmit(skb, fl);  	/* Update stats */ -	tstats = &tunnel->stats; -	u64_stats_update_begin(&tstats->syncp); -	sstats = &session->stats; -	u64_stats_update_begin(&sstats->syncp);  	if (error >= 0) { -		tstats->tx_packets++; -		tstats->tx_bytes += len; -		sstats->tx_packets++; -		sstats->tx_bytes += len; +		atomic_long_inc(&tunnel->stats.tx_packets); +		atomic_long_add(len, &tunnel->stats.tx_bytes); +		atomic_long_inc(&session->stats.tx_packets); +		atomic_long_add(len, &session->stats.tx_bytes);  	} else { -		tstats->tx_errors++; -		sstats->tx_errors++; +		atomic_long_inc(&tunnel->stats.tx_errors); +		atomic_long_inc(&session->stats.tx_errors);  	} -	
u64_stats_update_end(&tstats->syncp); -	u64_stats_update_end(&sstats->syncp);  	return 0;  } @@ -1282,6 +1263,7 @@ static void l2tp_tunnel_destruct(struct sock *sk)  		/* No longer an encapsulation socket. See net/ipv4/udp.c */  		(udp_sk(sk))->encap_type = 0;  		(udp_sk(sk))->encap_rcv = NULL; +		(udp_sk(sk))->encap_destroy = NULL;  		break;  	case L2TP_ENCAPTYPE_IP:  		break; @@ -1311,7 +1293,7 @@ end:  /* When the tunnel is closed, all the attached sessions need to go too.   */ -static void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel) +void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel)  {  	int hash;  	struct hlist_node *walk; @@ -1334,25 +1316,13 @@ again:  			hlist_del_init(&session->hlist); -			/* Since we should hold the sock lock while -			 * doing any unbinding, we need to release the -			 * lock we're holding before taking that lock. -			 * Hold a reference to the sock so it doesn't -			 * disappear as we're jumping between locks. -			 */  			if (session->ref != NULL)  				(*session->ref)(session);  			write_unlock_bh(&tunnel->hlist_lock); -			if (tunnel->version != L2TP_HDR_VER_2) { -				struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); - -				spin_lock_bh(&pn->l2tp_session_hlist_lock); -				hlist_del_init_rcu(&session->global_hlist); -				spin_unlock_bh(&pn->l2tp_session_hlist_lock); -				synchronize_rcu(); -			} +			__l2tp_session_unhash(session); +			l2tp_session_queue_purge(session);  			if (session->session_close != NULL)  				(*session->session_close)(session); @@ -1360,6 +1330,8 @@ again:  			if (session->deref != NULL)  				(*session->deref)(session); +			l2tp_session_dec_refcount(session); +  			write_lock_bh(&tunnel->hlist_lock);  			/* Now restart from the beginning of this hash @@ -1372,6 +1344,17 @@ again:  	}  	write_unlock_bh(&tunnel->hlist_lock);  } +EXPORT_SYMBOL_GPL(l2tp_tunnel_closeall); + +/* Tunnel socket destroy hook for UDP encapsulation */ +static void l2tp_udp_encap_destroy(struct sock *sk) +{ +	struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); +	if (tunnel) { +		l2tp_tunnel_closeall(tunnel); +		sock_put(sk); +	} +}  /* Really kill the tunnel.   * Come here only when all sessions have been cleared from the tunnel. @@ -1397,19 +1380,21 @@ static void l2tp_tunnel_del_work(struct work_struct *work)  		return;  	sock = sk->sk_socket; -	BUG_ON(!sock); -	/* If the tunnel socket was created directly by the kernel, use the -	 * sk_* API to release the socket now.  Otherwise go through the -	 * inet_* layer to shut the socket down, and let userspace close it. +	/* If the tunnel socket was created by userspace, then go through the +	 * inet layer to shut the socket down, and let userspace close it. +	 * Otherwise, if we created the socket directly within the kernel, use +	 * the sk API to release it here.  	 * In either case the tunnel resources are freed in the socket  	 * destructor when the tunnel socket goes away.  	 */ -	if (sock->file == NULL) { -		kernel_sock_shutdown(sock, SHUT_RDWR); -		sk_release_kernel(sk); +	if (tunnel->fd >= 0) { +		if (sock) +			inet_shutdown(sock, 2);  	} else { -		inet_shutdown(sock, 2); +		if (sock) +			kernel_sock_shutdown(sock, SHUT_RDWR); +		sk_release_kernel(sk);  	}  	l2tp_tunnel_sock_put(sk); @@ -1668,6 +1653,7 @@ int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32  		/* Mark socket as an encapsulation socket. 
See net/ipv4/udp.c */  		udp_sk(sk)->encap_type = UDP_ENCAP_L2TPINUDP;  		udp_sk(sk)->encap_rcv = l2tp_udp_encap_recv; +		udp_sk(sk)->encap_destroy = l2tp_udp_encap_destroy;  #if IS_ENABLED(CONFIG_IPV6)  		if (sk->sk_family == PF_INET6)  			udpv6_encap_enable(); @@ -1723,6 +1709,7 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_create);   */  int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel)  { +	l2tp_tunnel_closeall(tunnel);  	return (false == queue_work(l2tp_wq, &tunnel->del_work));  }  EXPORT_SYMBOL_GPL(l2tp_tunnel_delete); @@ -1731,62 +1718,71 @@ EXPORT_SYMBOL_GPL(l2tp_tunnel_delete);   */  void l2tp_session_free(struct l2tp_session *session)  { -	struct l2tp_tunnel *tunnel; +	struct l2tp_tunnel *tunnel = session->tunnel;  	BUG_ON(atomic_read(&session->ref_count) != 0); -	tunnel = session->tunnel; -	if (tunnel != NULL) { +	if (tunnel) {  		BUG_ON(tunnel->magic != L2TP_TUNNEL_MAGIC); +		if (session->session_id != 0) +			atomic_dec(&l2tp_session_count); +		sock_put(tunnel->sock); +		session->tunnel = NULL; +		l2tp_tunnel_dec_refcount(tunnel); +	} + +	kfree(session); -		/* Delete the session from the hash */ +	return; +} +EXPORT_SYMBOL_GPL(l2tp_session_free); + +/* Remove an l2tp session from l2tp_core's hash lists. + * Provides a tidyup interface for pseudowire code which can't just route all + * shutdown via. l2tp_session_delete and a pseudowire-specific session_close + * callback. + */ +void __l2tp_session_unhash(struct l2tp_session *session) +{ +	struct l2tp_tunnel *tunnel = session->tunnel; + +	/* Remove the session from core hashes */ +	if (tunnel) { +		/* Remove from the per-tunnel hash */  		write_lock_bh(&tunnel->hlist_lock);  		hlist_del_init(&session->hlist);  		write_unlock_bh(&tunnel->hlist_lock); -		/* Unlink from the global hash if not L2TPv2 */ +		/* For L2TPv3 we have a per-net hash: remove from there, too */  		if (tunnel->version != L2TP_HDR_VER_2) {  			struct l2tp_net *pn = l2tp_pernet(tunnel->l2tp_net); -  			spin_lock_bh(&pn->l2tp_session_hlist_lock);  			hlist_del_init_rcu(&session->global_hlist);  			spin_unlock_bh(&pn->l2tp_session_hlist_lock);  			synchronize_rcu();  		} - -		if (session->session_id != 0) -			atomic_dec(&l2tp_session_count); - -		sock_put(tunnel->sock); - -		/* This will delete the tunnel context if this -		 * is the last session on the tunnel. -		 */ -		session->tunnel = NULL; -		l2tp_tunnel_dec_refcount(tunnel);  	} - -	kfree(session); - -	return;  } -EXPORT_SYMBOL_GPL(l2tp_session_free); +EXPORT_SYMBOL_GPL(__l2tp_session_unhash);  /* This function is used by the netlink SESSION_DELETE command and by     pseudowire modules.   */  int l2tp_session_delete(struct l2tp_session *session)  { +	if (session->ref) +		(*session->ref)(session); +	__l2tp_session_unhash(session); +	l2tp_session_queue_purge(session);  	if (session->session_close != NULL)  		(*session->session_close)(session); - +	if (session->deref) +		(*session->ref)(session);  	l2tp_session_dec_refcount(session); -  	return 0;  }  EXPORT_SYMBOL_GPL(l2tp_session_delete); -  /* We come here whenever a session's send_seq, cookie_len or   * l2specific_len parameters are set.   
*/ diff --git a/net/l2tp/l2tp_core.h b/net/l2tp/l2tp_core.h index 8eb8f1d47f3..485a490fd99 100644 --- a/net/l2tp/l2tp_core.h +++ b/net/l2tp/l2tp_core.h @@ -36,16 +36,15 @@ enum {  struct sk_buff;  struct l2tp_stats { -	u64			tx_packets; -	u64			tx_bytes; -	u64			tx_errors; -	u64			rx_packets; -	u64			rx_bytes; -	u64			rx_seq_discards; -	u64			rx_oos_packets; -	u64			rx_errors; -	u64			rx_cookie_discards; -	struct u64_stats_sync	syncp; +	atomic_long_t		tx_packets; +	atomic_long_t		tx_bytes; +	atomic_long_t		tx_errors; +	atomic_long_t		rx_packets; +	atomic_long_t		rx_bytes; +	atomic_long_t		rx_seq_discards; +	atomic_long_t		rx_oos_packets; +	atomic_long_t		rx_errors; +	atomic_long_t		rx_cookie_discards;  };  struct l2tp_tunnel; @@ -240,11 +239,14 @@ extern struct l2tp_tunnel *l2tp_tunnel_find(struct net *net, u32 tunnel_id);  extern struct l2tp_tunnel *l2tp_tunnel_find_nth(struct net *net, int nth);  extern int l2tp_tunnel_create(struct net *net, int fd, int version, u32 tunnel_id, u32 peer_tunnel_id, struct l2tp_tunnel_cfg *cfg, struct l2tp_tunnel **tunnelp); +extern void l2tp_tunnel_closeall(struct l2tp_tunnel *tunnel);  extern int l2tp_tunnel_delete(struct l2tp_tunnel *tunnel);  extern struct l2tp_session *l2tp_session_create(int priv_size, struct l2tp_tunnel *tunnel, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg); +extern void __l2tp_session_unhash(struct l2tp_session *session);  extern int l2tp_session_delete(struct l2tp_session *session);  extern void l2tp_session_free(struct l2tp_session *session);  extern void l2tp_recv_common(struct l2tp_session *session, struct sk_buff *skb, unsigned char *ptr, unsigned char *optr, u16 hdrflags, int length, int (*payload_hook)(struct sk_buff *skb)); +extern int l2tp_session_queue_purge(struct l2tp_session *session);  extern int l2tp_udp_encap_recv(struct sock *sk, struct sk_buff *skb);  extern int l2tp_xmit_skb(struct l2tp_session *session, struct sk_buff *skb, int hdr_len); diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c index c3813bc8455..072d7202e18 100644 --- a/net/l2tp/l2tp_debugfs.c +++ b/net/l2tp/l2tp_debugfs.c @@ -146,14 +146,14 @@ static void l2tp_dfs_seq_tunnel_show(struct seq_file *m, void *v)  		   tunnel->sock ? 
atomic_read(&tunnel->sock->sk_refcnt) : 0,  		   atomic_read(&tunnel->ref_count)); -	seq_printf(m, " %08x rx %llu/%llu/%llu rx %llu/%llu/%llu\n", +	seq_printf(m, " %08x rx %ld/%ld/%ld rx %ld/%ld/%ld\n",  		   tunnel->debug, -		   (unsigned long long)tunnel->stats.tx_packets, -		   (unsigned long long)tunnel->stats.tx_bytes, -		   (unsigned long long)tunnel->stats.tx_errors, -		   (unsigned long long)tunnel->stats.rx_packets, -		   (unsigned long long)tunnel->stats.rx_bytes, -		   (unsigned long long)tunnel->stats.rx_errors); +		   atomic_long_read(&tunnel->stats.tx_packets), +		   atomic_long_read(&tunnel->stats.tx_bytes), +		   atomic_long_read(&tunnel->stats.tx_errors), +		   atomic_long_read(&tunnel->stats.rx_packets), +		   atomic_long_read(&tunnel->stats.rx_bytes), +		   atomic_long_read(&tunnel->stats.rx_errors));  	if (tunnel->show != NULL)  		tunnel->show(m, tunnel); @@ -203,14 +203,14 @@ static void l2tp_dfs_seq_session_show(struct seq_file *m, void *v)  		seq_printf(m, "\n");  	} -	seq_printf(m, "   %hu/%hu tx %llu/%llu/%llu rx %llu/%llu/%llu\n", +	seq_printf(m, "   %hu/%hu tx %ld/%ld/%ld rx %ld/%ld/%ld\n",  		   session->nr, session->ns, -		   (unsigned long long)session->stats.tx_packets, -		   (unsigned long long)session->stats.tx_bytes, -		   (unsigned long long)session->stats.tx_errors, -		   (unsigned long long)session->stats.rx_packets, -		   (unsigned long long)session->stats.rx_bytes, -		   (unsigned long long)session->stats.rx_errors); +		   atomic_long_read(&session->stats.tx_packets), +		   atomic_long_read(&session->stats.tx_bytes), +		   atomic_long_read(&session->stats.tx_errors), +		   atomic_long_read(&session->stats.rx_packets), +		   atomic_long_read(&session->stats.rx_bytes), +		   atomic_long_read(&session->stats.rx_errors));  	if (session->show != NULL)  		session->show(m, session); diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c index 7f41b705126..571db8dd229 100644 --- a/net/l2tp/l2tp_ip.c +++ b/net/l2tp/l2tp_ip.c @@ -228,10 +228,16 @@ static void l2tp_ip_close(struct sock *sk, long timeout)  static void l2tp_ip_destroy_sock(struct sock *sk)  {  	struct sk_buff *skb; +	struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk);  	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)  		kfree_skb(skb); +	if (tunnel) { +		l2tp_tunnel_closeall(tunnel); +		sock_put(sk); +	} +  	sk_refcnt_debug_dec(sk);  } diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c index 41f2f8126eb..b8a6039314e 100644 --- a/net/l2tp/l2tp_ip6.c +++ b/net/l2tp/l2tp_ip6.c @@ -241,10 +241,17 @@ static void l2tp_ip6_close(struct sock *sk, long timeout)  static void l2tp_ip6_destroy_sock(struct sock *sk)  { +	struct l2tp_tunnel *tunnel = l2tp_sock_to_tunnel(sk); +  	lock_sock(sk);  	ip6_flush_pending_frames(sk);  	release_sock(sk); +	if (tunnel) { +		l2tp_tunnel_closeall(tunnel); +		sock_put(sk); +	} +  	inet6_destroy_sock(sk);  } @@ -683,6 +690,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,  		lsa->l2tp_addr = ipv6_hdr(skb)->saddr;  		lsa->l2tp_flowinfo = 0;  		lsa->l2tp_scope_id = 0; +		lsa->l2tp_conn_id = 0;  		if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)  			lsa->l2tp_scope_id = IP6CB(skb)->iif;  	} diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c index c1bab22db85..0825ff26e11 100644 --- a/net/l2tp/l2tp_netlink.c +++ b/net/l2tp/l2tp_netlink.c @@ -246,8 +246,6 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla  #if IS_ENABLED(CONFIG_IPV6)  	struct ipv6_pinfo *np = NULL;  #endif -	struct 
l2tp_stats stats; -	unsigned int start;  	hdr = genlmsg_put(skb, portid, seq, &l2tp_nl_family, flags,  			  L2TP_CMD_TUNNEL_GET); @@ -265,28 +263,22 @@ static int l2tp_nl_tunnel_send(struct sk_buff *skb, u32 portid, u32 seq, int fla  	if (nest == NULL)  		goto nla_put_failure; -	do { -		start = u64_stats_fetch_begin(&tunnel->stats.syncp); -		stats.tx_packets = tunnel->stats.tx_packets; -		stats.tx_bytes = tunnel->stats.tx_bytes; -		stats.tx_errors = tunnel->stats.tx_errors; -		stats.rx_packets = tunnel->stats.rx_packets; -		stats.rx_bytes = tunnel->stats.rx_bytes; -		stats.rx_errors = tunnel->stats.rx_errors; -		stats.rx_seq_discards = tunnel->stats.rx_seq_discards; -		stats.rx_oos_packets = tunnel->stats.rx_oos_packets; -	} while (u64_stats_fetch_retry(&tunnel->stats.syncp, start)); - -	if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) || -	    nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) || -	    nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) || -	    nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) || -	    nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) || +	if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, +		    atomic_long_read(&tunnel->stats.tx_packets)) || +	    nla_put_u64(skb, L2TP_ATTR_TX_BYTES, +		    atomic_long_read(&tunnel->stats.tx_bytes)) || +	    nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, +		    atomic_long_read(&tunnel->stats.tx_errors)) || +	    nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, +		    atomic_long_read(&tunnel->stats.rx_packets)) || +	    nla_put_u64(skb, L2TP_ATTR_RX_BYTES, +		    atomic_long_read(&tunnel->stats.rx_bytes)) ||  	    nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, -			stats.rx_seq_discards) || +		    atomic_long_read(&tunnel->stats.rx_seq_discards)) ||  	    nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, -			stats.rx_oos_packets) || -	    nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors)) +		    atomic_long_read(&tunnel->stats.rx_oos_packets)) || +	    nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, +		    atomic_long_read(&tunnel->stats.rx_errors)))  		goto nla_put_failure;  	nla_nest_end(skb, nest); @@ -612,8 +604,6 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl  	struct nlattr *nest;  	struct l2tp_tunnel *tunnel = session->tunnel;  	struct sock *sk = NULL; -	struct l2tp_stats stats; -	unsigned int start;  	sk = tunnel->sock; @@ -656,28 +646,22 @@ static int l2tp_nl_session_send(struct sk_buff *skb, u32 portid, u32 seq, int fl  	if (nest == NULL)  		goto nla_put_failure; -	do { -		start = u64_stats_fetch_begin(&session->stats.syncp); -		stats.tx_packets = session->stats.tx_packets; -		stats.tx_bytes = session->stats.tx_bytes; -		stats.tx_errors = session->stats.tx_errors; -		stats.rx_packets = session->stats.rx_packets; -		stats.rx_bytes = session->stats.rx_bytes; -		stats.rx_errors = session->stats.rx_errors; -		stats.rx_seq_discards = session->stats.rx_seq_discards; -		stats.rx_oos_packets = session->stats.rx_oos_packets; -	} while (u64_stats_fetch_retry(&session->stats.syncp, start)); - -	if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, stats.tx_packets) || -	    nla_put_u64(skb, L2TP_ATTR_TX_BYTES, stats.tx_bytes) || -	    nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, stats.tx_errors) || -	    nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, stats.rx_packets) || -	    nla_put_u64(skb, L2TP_ATTR_RX_BYTES, stats.rx_bytes) || +	if (nla_put_u64(skb, L2TP_ATTR_TX_PACKETS, +		atomic_long_read(&session->stats.tx_packets)) || +	    nla_put_u64(skb, L2TP_ATTR_TX_BYTES, +		atomic_long_read(&session->stats.tx_bytes)) || +	    
nla_put_u64(skb, L2TP_ATTR_TX_ERRORS, +		atomic_long_read(&session->stats.tx_errors)) || +	    nla_put_u64(skb, L2TP_ATTR_RX_PACKETS, +		atomic_long_read(&session->stats.rx_packets)) || +	    nla_put_u64(skb, L2TP_ATTR_RX_BYTES, +		atomic_long_read(&session->stats.rx_bytes)) ||  	    nla_put_u64(skb, L2TP_ATTR_RX_SEQ_DISCARDS, -			stats.rx_seq_discards) || +		atomic_long_read(&session->stats.rx_seq_discards)) ||  	    nla_put_u64(skb, L2TP_ATTR_RX_OOS_PACKETS, -			stats.rx_oos_packets) || -	    nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, stats.rx_errors)) +		atomic_long_read(&session->stats.rx_oos_packets)) || +	    nla_put_u64(skb, L2TP_ATTR_RX_ERRORS, +		atomic_long_read(&session->stats.rx_errors)))  		goto nla_put_failure;  	nla_nest_end(skb, nest); diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 6a53371dba1..637a341c1e2 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -97,6 +97,7 @@  #include <net/ip.h>  #include <net/udp.h>  #include <net/xfrm.h> +#include <net/inet_common.h>  #include <asm/byteorder.h>  #include <linux/atomic.h> @@ -259,7 +260,7 @@ static void pppol2tp_recv(struct l2tp_session *session, struct sk_buff *skb, int  			  session->name);  		/* Not bound. Nothing we can do, so discard. */ -		session->stats.rx_errors++; +		atomic_long_inc(&session->stats.rx_errors);  		kfree_skb(skb);  	} @@ -447,34 +448,16 @@ static void pppol2tp_session_close(struct l2tp_session *session)  {  	struct pppol2tp_session *ps = l2tp_session_priv(session);  	struct sock *sk = ps->sock; -	struct sk_buff *skb; +	struct socket *sock = sk->sk_socket;  	BUG_ON(session->magic != L2TP_SESSION_MAGIC); -	if (session->session_id == 0) -		goto out; - -	if (sk != NULL) { -		lock_sock(sk); - -		if (sk->sk_state & (PPPOX_CONNECTED | PPPOX_BOUND)) { -			pppox_unbind_sock(sk); -			sk->sk_state = PPPOX_DEAD; -			sk->sk_state_change(sk); -		} - -		/* Purge any queued data */ -		skb_queue_purge(&sk->sk_receive_queue); -		skb_queue_purge(&sk->sk_write_queue); -		while ((skb = skb_dequeue(&session->reorder_q))) { -			kfree_skb(skb); -			sock_put(sk); -		} -		release_sock(sk); +	if (sock) { +		inet_shutdown(sock, 2); +		/* Don't let the session go away before our socket does */ +		l2tp_session_inc_refcount(session);  	} - -out:  	return;  } @@ -483,19 +466,12 @@ out:   */  static void pppol2tp_session_destruct(struct sock *sk)  { -	struct l2tp_session *session; - -	if (sk->sk_user_data != NULL) { -		session = sk->sk_user_data; -		if (session == NULL) -			goto out; - +	struct l2tp_session *session = sk->sk_user_data; +	if (session) {  		sk->sk_user_data = NULL;  		BUG_ON(session->magic != L2TP_SESSION_MAGIC);  		l2tp_session_dec_refcount(session);  	} - -out:  	return;  } @@ -525,16 +501,13 @@ static int pppol2tp_release(struct socket *sock)  	session = pppol2tp_sock_to_session(sk);  	/* Purge any queued data */ -	skb_queue_purge(&sk->sk_receive_queue); -	skb_queue_purge(&sk->sk_write_queue);  	if (session != NULL) { -		struct sk_buff *skb; -		while ((skb = skb_dequeue(&session->reorder_q))) { -			kfree_skb(skb); -			sock_put(sk); -		} +		__l2tp_session_unhash(session); +		l2tp_session_queue_purge(session);  		sock_put(sk);  	} +	skb_queue_purge(&sk->sk_receive_queue); +	skb_queue_purge(&sk->sk_write_queue);  	release_sock(sk); @@ -880,18 +853,6 @@ out:  	return error;  } -/* Called when deleting sessions via the netlink interface. 
- */ -static int pppol2tp_session_delete(struct l2tp_session *session) -{ -	struct pppol2tp_session *ps = l2tp_session_priv(session); - -	if (ps->sock == NULL) -		l2tp_session_dec_refcount(session); - -	return 0; -} -  #endif /* CONFIG_L2TP_V3 */  /* getname() support. @@ -1025,14 +986,14 @@ end:  static void pppol2tp_copy_stats(struct pppol2tp_ioc_stats *dest,  				struct l2tp_stats *stats)  { -	dest->tx_packets = stats->tx_packets; -	dest->tx_bytes = stats->tx_bytes; -	dest->tx_errors = stats->tx_errors; -	dest->rx_packets = stats->rx_packets; -	dest->rx_bytes = stats->rx_bytes; -	dest->rx_seq_discards = stats->rx_seq_discards; -	dest->rx_oos_packets = stats->rx_oos_packets; -	dest->rx_errors = stats->rx_errors; +	dest->tx_packets = atomic_long_read(&stats->tx_packets); +	dest->tx_bytes = atomic_long_read(&stats->tx_bytes); +	dest->tx_errors = atomic_long_read(&stats->tx_errors); +	dest->rx_packets = atomic_long_read(&stats->rx_packets); +	dest->rx_bytes = atomic_long_read(&stats->rx_bytes); +	dest->rx_seq_discards = atomic_long_read(&stats->rx_seq_discards); +	dest->rx_oos_packets = atomic_long_read(&stats->rx_oos_packets); +	dest->rx_errors = atomic_long_read(&stats->rx_errors);  }  /* Session ioctl helper. @@ -1666,14 +1627,14 @@ static void pppol2tp_seq_tunnel_show(struct seq_file *m, void *v)  		   tunnel->name,  		   (tunnel == tunnel->sock->sk_user_data) ? 'Y' : 'N',  		   atomic_read(&tunnel->ref_count) - 1); -	seq_printf(m, " %08x %llu/%llu/%llu %llu/%llu/%llu\n", +	seq_printf(m, " %08x %ld/%ld/%ld %ld/%ld/%ld\n",  		   tunnel->debug, -		   (unsigned long long)tunnel->stats.tx_packets, -		   (unsigned long long)tunnel->stats.tx_bytes, -		   (unsigned long long)tunnel->stats.tx_errors, -		   (unsigned long long)tunnel->stats.rx_packets, -		   (unsigned long long)tunnel->stats.rx_bytes, -		   (unsigned long long)tunnel->stats.rx_errors); +		   atomic_long_read(&tunnel->stats.tx_packets), +		   atomic_long_read(&tunnel->stats.tx_bytes), +		   atomic_long_read(&tunnel->stats.tx_errors), +		   atomic_long_read(&tunnel->stats.rx_packets), +		   atomic_long_read(&tunnel->stats.rx_bytes), +		   atomic_long_read(&tunnel->stats.rx_errors));  }  static void pppol2tp_seq_session_show(struct seq_file *m, void *v) @@ -1708,14 +1669,14 @@ static void pppol2tp_seq_session_show(struct seq_file *m, void *v)  		   session->lns_mode ? 
"LNS" : "LAC",  		   session->debug,  		   jiffies_to_msecs(session->reorder_timeout)); -	seq_printf(m, "   %hu/%hu %llu/%llu/%llu %llu/%llu/%llu\n", +	seq_printf(m, "   %hu/%hu %ld/%ld/%ld %ld/%ld/%ld\n",  		   session->nr, session->ns, -		   (unsigned long long)session->stats.tx_packets, -		   (unsigned long long)session->stats.tx_bytes, -		   (unsigned long long)session->stats.tx_errors, -		   (unsigned long long)session->stats.rx_packets, -		   (unsigned long long)session->stats.rx_bytes, -		   (unsigned long long)session->stats.rx_errors); +		   atomic_long_read(&session->stats.tx_packets), +		   atomic_long_read(&session->stats.tx_bytes), +		   atomic_long_read(&session->stats.tx_errors), +		   atomic_long_read(&session->stats.rx_packets), +		   atomic_long_read(&session->stats.rx_bytes), +		   atomic_long_read(&session->stats.rx_errors));  	if (po)  		seq_printf(m, "   interface %s\n", ppp_dev_name(&po->chan)); @@ -1839,7 +1800,7 @@ static const struct pppox_proto pppol2tp_proto = {  static const struct l2tp_nl_cmd_ops pppol2tp_nl_cmd_ops = {  	.session_create	= pppol2tp_session_create, -	.session_delete	= pppol2tp_session_delete, +	.session_delete	= l2tp_session_delete,  };  #endif /* CONFIG_L2TP_V3 */ diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 88709882c46..48aaa89253e 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -720,6 +720,8 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock,  	int target;	/* Read at least this many bytes */  	long timeo; +	msg->msg_namelen = 0; +  	lock_sock(sk);  	copied = -ENOTCONN;  	if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN)) diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c index 808f5fcd1ce..a6893602f87 100644 --- a/net/mac80211/cfg.c +++ b/net/mac80211/cfg.c @@ -2582,7 +2582,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,  			list_del(&dep->list);  			mutex_unlock(&local->mtx); -			ieee80211_roc_notify_destroy(dep); +			ieee80211_roc_notify_destroy(dep, true);  			return 0;  		} @@ -2622,7 +2622,7 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,  			ieee80211_start_next_roc(local);  		mutex_unlock(&local->mtx); -		ieee80211_roc_notify_destroy(found); +		ieee80211_roc_notify_destroy(found, true);  	} else {  		/* work may be pending so use it all the time */  		found->abort = true; @@ -2632,6 +2632,8 @@ static int ieee80211_cancel_roc(struct ieee80211_local *local,  		/* work will clean up etc */  		flush_delayed_work(&found->work); +		WARN_ON(!found->to_be_freed); +		kfree(found);  	}  	return 0; @@ -3290,14 +3292,19 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,  	int ret = -ENODATA;  	rcu_read_lock(); -	if (local->use_chanctx) { -		chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); -		if (chanctx_conf) { -			*chandef = chanctx_conf->def; -			ret = 0; -		} -	} else if (local->open_count == local->monitors) { -		*chandef = local->monitor_chandef; +	chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); +	if (chanctx_conf) { +		*chandef = chanctx_conf->def; +		ret = 0; +	} else if (local->open_count > 0 && +		   local->open_count == local->monitors && +		   sdata->vif.type == NL80211_IFTYPE_MONITOR) { +		if (local->use_chanctx) +			*chandef = local->monitor_chandef; +		else +			cfg80211_chandef_create(chandef, +						local->_oper_channel, +						local->_oper_channel_type);  		ret = 0;  	}  	rcu_read_unlock(); diff --git a/net/mac80211/chan.c b/net/mac80211/chan.c index 78c0d90dd64..931be419ab5 100644 --- a/net/mac80211/chan.c 
+++ b/net/mac80211/chan.c @@ -63,6 +63,7 @@ ieee80211_new_chanctx(struct ieee80211_local *local,  		      enum ieee80211_chanctx_mode mode)  {  	struct ieee80211_chanctx *ctx; +	u32 changed;  	int err;  	lockdep_assert_held(&local->chanctx_mtx); @@ -76,6 +77,13 @@ ieee80211_new_chanctx(struct ieee80211_local *local,  	ctx->conf.rx_chains_dynamic = 1;  	ctx->mode = mode; +	/* acquire mutex to prevent idle from changing */ +	mutex_lock(&local->mtx); +	/* turn idle off *before* setting channel -- some drivers need that */ +	changed = ieee80211_idle_off(local); +	if (changed) +		ieee80211_hw_config(local, changed); +  	if (!local->use_chanctx) {  		local->_oper_channel_type =  			cfg80211_get_chandef_type(chandef); @@ -85,14 +93,17 @@ ieee80211_new_chanctx(struct ieee80211_local *local,  		err = drv_add_chanctx(local, ctx);  		if (err) {  			kfree(ctx); -			return ERR_PTR(err); +			ctx = ERR_PTR(err); + +			ieee80211_recalc_idle(local); +			goto out;  		}  	} +	/* and keep the mutex held until the new chanctx is on the list */  	list_add_rcu(&ctx->list, &local->chanctx_list); -	mutex_lock(&local->mtx); -	ieee80211_recalc_idle(local); + out:  	mutex_unlock(&local->mtx);  	return ctx; diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h index 388580a1bad..5672533a083 100644 --- a/net/mac80211/ieee80211_i.h +++ b/net/mac80211/ieee80211_i.h @@ -309,6 +309,7 @@ struct ieee80211_roc_work {  	struct ieee80211_channel *chan;  	bool started, abort, hw_begun, notified; +	bool to_be_freed;  	unsigned long hw_start_time; @@ -1347,7 +1348,7 @@ void ieee80211_offchannel_return(struct ieee80211_local *local);  void ieee80211_roc_setup(struct ieee80211_local *local);  void ieee80211_start_next_roc(struct ieee80211_local *local);  void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata); -void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc); +void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free);  void ieee80211_sw_roc_work(struct work_struct *work);  void ieee80211_handle_roc_started(struct ieee80211_roc_work *roc); @@ -1361,6 +1362,7 @@ int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata,  			     enum nl80211_iftype type);  void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata);  void ieee80211_remove_interfaces(struct ieee80211_local *local); +u32 ieee80211_idle_off(struct ieee80211_local *local);  void ieee80211_recalc_idle(struct ieee80211_local *local);  void ieee80211_adjust_monitor_flags(struct ieee80211_sub_if_data *sdata,  				    const int offset); diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 640afab304d..9ed49ad0380 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -78,7 +78,7 @@ void ieee80211_recalc_txpower(struct ieee80211_sub_if_data *sdata)  		ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_TXPOWER);  } -static u32 ieee80211_idle_off(struct ieee80211_local *local) +static u32 __ieee80211_idle_off(struct ieee80211_local *local)  {  	if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE))  		return 0; @@ -87,7 +87,7 @@ static u32 ieee80211_idle_off(struct ieee80211_local *local)  	return IEEE80211_CONF_CHANGE_IDLE;  } -static u32 ieee80211_idle_on(struct ieee80211_local *local) +static u32 __ieee80211_idle_on(struct ieee80211_local *local)  {  	if (local->hw.conf.flags & IEEE80211_CONF_IDLE)  		return 0; @@ -98,16 +98,18 @@ static u32 ieee80211_idle_on(struct ieee80211_local *local)  	return IEEE80211_CONF_CHANGE_IDLE;  } -void ieee80211_recalc_idle(struct ieee80211_local *local) +static 
u32 __ieee80211_recalc_idle(struct ieee80211_local *local, +				   bool force_active)  {  	bool working = false, scanning, active;  	unsigned int led_trig_start = 0, led_trig_stop = 0;  	struct ieee80211_roc_work *roc; -	u32 change;  	lockdep_assert_held(&local->mtx); -	active = !list_empty(&local->chanctx_list) || local->monitors; +	active = force_active || +		 !list_empty(&local->chanctx_list) || +		 local->monitors;  	if (!local->ops->remain_on_channel) {  		list_for_each_entry(roc, &local->roc_list, list) { @@ -132,9 +134,18 @@ void ieee80211_recalc_idle(struct ieee80211_local *local)  	ieee80211_mod_tpt_led_trig(local, led_trig_start, led_trig_stop);  	if (working || scanning || active) -		change = ieee80211_idle_off(local); -	else -		change = ieee80211_idle_on(local); +		return __ieee80211_idle_off(local); +	return __ieee80211_idle_on(local); +} + +u32 ieee80211_idle_off(struct ieee80211_local *local) +{ +	return __ieee80211_recalc_idle(local, true); +} + +void ieee80211_recalc_idle(struct ieee80211_local *local) +{ +	u32 change = __ieee80211_recalc_idle(local, false);  	if (change)  		ieee80211_hw_config(local, change);  } @@ -349,21 +360,19 @@ static void ieee80211_set_default_queues(struct ieee80211_sub_if_data *sdata)  static int ieee80211_add_virtual_monitor(struct ieee80211_local *local)  {  	struct ieee80211_sub_if_data *sdata; -	int ret = 0; +	int ret;  	if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))  		return 0; -	mutex_lock(&local->iflist_mtx); +	ASSERT_RTNL();  	if (local->monitor_sdata) -		goto out_unlock; +		return 0;  	sdata = kzalloc(sizeof(*sdata) + local->hw.vif_data_size, GFP_KERNEL); -	if (!sdata) { -		ret = -ENOMEM; -		goto out_unlock; -	} +	if (!sdata) +		return -ENOMEM;  	/* set up data */  	sdata->local = local; @@ -377,13 +386,13 @@ static int ieee80211_add_virtual_monitor(struct ieee80211_local *local)  	if (WARN_ON(ret)) {  		/* ok .. stupid driver, it asked for this! 
*/  		kfree(sdata); -		goto out_unlock; +		return ret;  	}  	ret = ieee80211_check_queues(sdata);  	if (ret) {  		kfree(sdata); -		goto out_unlock; +		return ret;  	}  	ret = ieee80211_vif_use_channel(sdata, &local->monitor_chandef, @@ -391,13 +400,14 @@ static int ieee80211_add_virtual_monitor(struct ieee80211_local *local)  	if (ret) {  		drv_remove_interface(local, sdata);  		kfree(sdata); -		goto out_unlock; +		return ret;  	} +	mutex_lock(&local->iflist_mtx);  	rcu_assign_pointer(local->monitor_sdata, sdata); - out_unlock:  	mutex_unlock(&local->iflist_mtx); -	return ret; + +	return 0;  }  static void ieee80211_del_virtual_monitor(struct ieee80211_local *local) @@ -407,14 +417,20 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)  	if (!(local->hw.flags & IEEE80211_HW_WANT_MONITOR_VIF))  		return; +	ASSERT_RTNL(); +  	mutex_lock(&local->iflist_mtx);  	sdata = rcu_dereference_protected(local->monitor_sdata,  					  lockdep_is_held(&local->iflist_mtx)); -	if (!sdata) -		goto out_unlock; +	if (!sdata) { +		mutex_unlock(&local->iflist_mtx); +		return; +	}  	rcu_assign_pointer(local->monitor_sdata, NULL); +	mutex_unlock(&local->iflist_mtx); +  	synchronize_net();  	ieee80211_vif_release_channel(sdata); @@ -422,8 +438,6 @@ static void ieee80211_del_virtual_monitor(struct ieee80211_local *local)  	drv_remove_interface(local, sdata);  	kfree(sdata); - out_unlock: -	mutex_unlock(&local->iflist_mtx);  }  /* @@ -541,6 +555,9 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)  		ieee80211_adjust_monitor_flags(sdata, 1);  		ieee80211_configure_filter(local); +		mutex_lock(&local->mtx); +		ieee80211_recalc_idle(local); +		mutex_unlock(&local->mtx);  		netif_carrier_on(dev);  		break; @@ -812,6 +829,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,  		ieee80211_adjust_monitor_flags(sdata, -1);  		ieee80211_configure_filter(local); +		mutex_lock(&local->mtx); +		ieee80211_recalc_idle(local); +		mutex_unlock(&local->mtx);  		break;  	case NL80211_IFTYPE_P2P_DEVICE:  		/* relies on synchronize_rcu() below */ diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c index 29ce2aa87e7..4749b385869 100644 --- a/net/mac80211/mesh.c +++ b/net/mac80211/mesh.c @@ -1060,7 +1060,8 @@ void ieee80211_mesh_notify_scan_completed(struct ieee80211_local *local)  	rcu_read_lock();  	list_for_each_entry_rcu(sdata, &local->interfaces, list) -		if (ieee80211_vif_is_mesh(&sdata->vif)) +		if (ieee80211_vif_is_mesh(&sdata->vif) && +		    ieee80211_sdata_running(sdata))  			ieee80211_queue_work(&local->hw, &sdata->work);  	rcu_read_unlock();  } diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index 9f6464f3e05..346ad4cfb01 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -647,6 +647,9 @@ static void ieee80211_add_vht_ie(struct ieee80211_sub_if_data *sdata,  		our_mcs = (le16_to_cpu(vht_cap.vht_mcs.rx_mcs_map) &  								mask) >> shift; +		if (our_mcs == IEEE80211_VHT_MCS_NOT_SUPPORTED) +			continue; +  		switch (ap_mcs) {  		default:  			if (our_mcs <= ap_mcs) @@ -3503,6 +3506,14 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)  	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;  	/* +	 * Stop timers before deleting work items, as timers +	 * could race and re-add the work-items. They will be +	 * re-established on connection. 
+	 */ +	del_timer_sync(&ifmgd->conn_mon_timer); +	del_timer_sync(&ifmgd->bcn_mon_timer); + +	/*  	 * we need to use atomic bitops for the running bits  	 * only because both timers might fire at the same  	 * time -- the code here is properly synchronised. @@ -3516,13 +3527,9 @@ void ieee80211_sta_quiesce(struct ieee80211_sub_if_data *sdata)  	if (del_timer_sync(&ifmgd->timer))  		set_bit(TMR_RUNNING_TIMER, &ifmgd->timers_running); -	cancel_work_sync(&ifmgd->chswitch_work);  	if (del_timer_sync(&ifmgd->chswitch_timer))  		set_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running); - -	/* these will just be re-established on connection */ -	del_timer_sync(&ifmgd->conn_mon_timer); -	del_timer_sync(&ifmgd->bcn_mon_timer); +	cancel_work_sync(&ifmgd->chswitch_work);  }  void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata) @@ -3601,8 +3608,10 @@ void ieee80211_mlme_notify_scan_completed(struct ieee80211_local *local)  	/* Restart STA timers */  	rcu_read_lock(); -	list_for_each_entry_rcu(sdata, &local->interfaces, list) -		ieee80211_restart_sta_timer(sdata); +	list_for_each_entry_rcu(sdata, &local->interfaces, list) { +		if (ieee80211_sdata_running(sdata)) +			ieee80211_restart_sta_timer(sdata); +	}  	rcu_read_unlock();  } @@ -3955,8 +3964,16 @@ int ieee80211_mgd_auth(struct ieee80211_sub_if_data *sdata,  	/* prep auth_data so we don't go into idle on disassoc */  	ifmgd->auth_data = auth_data; -	if (ifmgd->associated) -		ieee80211_set_disassoc(sdata, 0, 0, false, NULL); +	if (ifmgd->associated) { +		u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; + +		ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, +				       WLAN_REASON_UNSPECIFIED, +				       false, frame_buf); + +		__cfg80211_send_deauth(sdata->dev, frame_buf, +				       sizeof(frame_buf)); +	}  	sdata_info(sdata, "authenticate with %pM\n", req->bss->bssid); @@ -4016,8 +4033,16 @@ int ieee80211_mgd_assoc(struct ieee80211_sub_if_data *sdata,  	mutex_lock(&ifmgd->mtx); -	if (ifmgd->associated) -		ieee80211_set_disassoc(sdata, 0, 0, false, NULL); +	if (ifmgd->associated) { +		u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN]; + +		ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, +				       WLAN_REASON_UNSPECIFIED, +				       false, frame_buf); + +		__cfg80211_send_deauth(sdata->dev, frame_buf, +				       sizeof(frame_buf)); +	}  	if (ifmgd->auth_data && !ifmgd->auth_data->done) {  		err = -EBUSY; @@ -4315,6 +4340,17 @@ void ieee80211_mgd_stop(struct ieee80211_sub_if_data *sdata)  {  	struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; +	/* +	 * Make sure some work items will not run after this, +	 * they will not do anything but might not have been +	 * cancelled when disconnecting. 
+	 */ +	cancel_work_sync(&ifmgd->monitor_work); +	cancel_work_sync(&ifmgd->beacon_connection_loss_work); +	cancel_work_sync(&ifmgd->request_smps_work); +	cancel_work_sync(&ifmgd->csa_connection_drop_work); +	cancel_work_sync(&ifmgd->chswitch_work); +  	mutex_lock(&ifmgd->mtx);  	if (ifmgd->assoc_data)  		ieee80211_destroy_assoc_data(sdata, false); diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c index cc79b4a2e82..430bd254e49 100644 --- a/net/mac80211/offchannel.c +++ b/net/mac80211/offchannel.c @@ -297,10 +297,13 @@ void ieee80211_start_next_roc(struct ieee80211_local *local)  	}  } -void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc) +void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc, bool free)  {  	struct ieee80211_roc_work *dep, *tmp; +	if (WARN_ON(roc->to_be_freed)) +		return; +  	/* was never transmitted */  	if (roc->frame) {  		cfg80211_mgmt_tx_status(&roc->sdata->wdev, @@ -316,9 +319,12 @@ void ieee80211_roc_notify_destroy(struct ieee80211_roc_work *roc)  						   GFP_KERNEL);  	list_for_each_entry_safe(dep, tmp, &roc->dependents, list) -		ieee80211_roc_notify_destroy(dep); +		ieee80211_roc_notify_destroy(dep, true); -	kfree(roc); +	if (free) +		kfree(roc); +	else +		roc->to_be_freed = true;  }  void ieee80211_sw_roc_work(struct work_struct *work) @@ -331,6 +337,9 @@ void ieee80211_sw_roc_work(struct work_struct *work)  	mutex_lock(&local->mtx); +	if (roc->to_be_freed) +		goto out_unlock; +  	if (roc->abort)  		goto finish; @@ -370,7 +379,7 @@ void ieee80211_sw_roc_work(struct work_struct *work)   finish:  		list_del(&roc->list);  		started = roc->started; -		ieee80211_roc_notify_destroy(roc); +		ieee80211_roc_notify_destroy(roc, !roc->abort);  		if (started) {  			drv_flush(local, false); @@ -410,7 +419,7 @@ static void ieee80211_hw_roc_done(struct work_struct *work)  	list_del(&roc->list); -	ieee80211_roc_notify_destroy(roc); +	ieee80211_roc_notify_destroy(roc, true);  	/* if there's another roc, start it now */  	ieee80211_start_next_roc(local); @@ -460,12 +469,14 @@ void ieee80211_roc_purge(struct ieee80211_sub_if_data *sdata)  	list_for_each_entry_safe(roc, tmp, &tmp_list, list) {  		if (local->ops->remain_on_channel) {  			list_del(&roc->list); -			ieee80211_roc_notify_destroy(roc); +			ieee80211_roc_notify_destroy(roc, true);  		} else {  			ieee80211_queue_delayed_work(&local->hw, &roc->work, 0);  			/* work will clean up etc */  			flush_delayed_work(&roc->work); +			WARN_ON(!roc->to_be_freed); +			kfree(roc);  		}  	} diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index bb73ed2d20b..c6844ad080b 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -2675,7 +2675,19 @@ ieee80211_rx_h_action_return(struct ieee80211_rx_data *rx)  		memset(nskb->cb, 0, sizeof(nskb->cb)); -		ieee80211_tx_skb(rx->sdata, nskb); +		if (rx->sdata->vif.type == NL80211_IFTYPE_P2P_DEVICE) { +			struct ieee80211_tx_info *info = IEEE80211_SKB_CB(nskb); + +			info->flags = IEEE80211_TX_CTL_TX_OFFCHAN | +				      IEEE80211_TX_INTFL_OFFCHAN_TX_OK | +				      IEEE80211_TX_CTL_NO_CCK_RATE; +			if (local->hw.flags & IEEE80211_HW_QUEUE_CONTROL) +				info->hw_queue = +					local->hw.offchannel_tx_hw_queue; +		} + +		__ieee80211_tx_skb_tid_band(rx->sdata, nskb, 7, +					    status->band);  	}  	dev_kfree_skb(rx->skb);  	return RX_QUEUED; diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c index a79ce820cb5..238a0cca320 100644 --- a/net/mac80211/sta_info.c +++ b/net/mac80211/sta_info.c @@ -766,6 +766,7 @@ int __must_check 
__sta_info_destroy(struct sta_info *sta)  	struct ieee80211_local *local;  	struct ieee80211_sub_if_data *sdata;  	int ret, i; +	bool have_key = false;  	might_sleep(); @@ -793,12 +794,19 @@ int __must_check __sta_info_destroy(struct sta_info *sta)  	list_del_rcu(&sta->list);  	mutex_lock(&local->key_mtx); -	for (i = 0; i < NUM_DEFAULT_KEYS; i++) +	for (i = 0; i < NUM_DEFAULT_KEYS; i++) {  		__ieee80211_key_free(key_mtx_dereference(local, sta->gtk[i])); -	if (sta->ptk) +		have_key = true; +	} +	if (sta->ptk) {  		__ieee80211_key_free(key_mtx_dereference(local, sta->ptk)); +		have_key = true; +	}  	mutex_unlock(&local->key_mtx); +	if (!have_key) +		synchronize_net(); +  	sta->dead = true;  	local->num_sta--; diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c index ce78d1149f1..8914d2d2881 100644 --- a/net/mac80211/tx.c +++ b/net/mac80211/tx.c @@ -2745,7 +2745,8 @@ ieee80211_get_buffered_bc(struct ieee80211_hw *hw,  				cpu_to_le16(IEEE80211_FCTL_MOREDATA);  		} -		sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev); +		if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) +			sdata = IEEE80211_DEV_TO_SUB_IF(skb->dev);  		if (!ieee80211_tx_prepare(sdata, &tx, skb))  			break;  		dev_kfree_skb_any(skb); diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c index 0f92dc24cb8..d7df6ac2c6f 100644 --- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c +++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c @@ -339,7 +339,11 @@ bitmap_ipmac_tlist(const struct ip_set *set,  nla_put_failure:  	nla_nest_cancel(skb, nested);  	ipset_nest_end(skb, atd); -	return -EMSGSIZE; +	if (unlikely(id == first)) { +		cb->args[2] = 0; +		return -EMSGSIZE; +	} +	return 0;  }  static int diff --git a/net/netfilter/ipset/ip_set_hash_ipportnet.c b/net/netfilter/ipset/ip_set_hash_ipportnet.c index f2627226a08..10a30b4fc7d 100644 --- a/net/netfilter/ipset/ip_set_hash_ipportnet.c +++ b/net/netfilter/ipset/ip_set_hash_ipportnet.c @@ -104,6 +104,15 @@ hash_ipportnet4_data_flags(struct hash_ipportnet4_elem *dst, u32 flags)  	dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);  } +static inline void +hash_ipportnet4_data_reset_flags(struct hash_ipportnet4_elem *dst, u32 *flags) +{ +	if (dst->nomatch) { +		*flags = IPSET_FLAG_NOMATCH; +		dst->nomatch = 0; +	} +} +  static inline int  hash_ipportnet4_data_match(const struct hash_ipportnet4_elem *elem)  { @@ -414,6 +423,15 @@ hash_ipportnet6_data_flags(struct hash_ipportnet6_elem *dst, u32 flags)  	dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);  } +static inline void +hash_ipportnet6_data_reset_flags(struct hash_ipportnet6_elem *dst, u32 *flags) +{ +	if (dst->nomatch) { +		*flags = IPSET_FLAG_NOMATCH; +		dst->nomatch = 0; +	} +} +  static inline int  hash_ipportnet6_data_match(const struct hash_ipportnet6_elem *elem)  { diff --git a/net/netfilter/ipset/ip_set_hash_net.c b/net/netfilter/ipset/ip_set_hash_net.c index 4b677cf6bf7..d6a59154d71 100644 --- a/net/netfilter/ipset/ip_set_hash_net.c +++ b/net/netfilter/ipset/ip_set_hash_net.c @@ -87,7 +87,16 @@ hash_net4_data_copy(struct hash_net4_elem *dst,  static inline void  hash_net4_data_flags(struct hash_net4_elem *dst, u32 flags)  { -	dst->nomatch = flags & IPSET_FLAG_NOMATCH; +	dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); +} + +static inline void +hash_net4_data_reset_flags(struct hash_net4_elem *dst, u32 *flags) +{ +	if (dst->nomatch) { +		*flags = IPSET_FLAG_NOMATCH; +		dst->nomatch = 0; +	}  }  static inline int @@ -308,7 +317,16 @@ hash_net6_data_copy(struct hash_net6_elem *dst,  static inline void  
hash_net6_data_flags(struct hash_net6_elem *dst, u32 flags)  { -	dst->nomatch = flags & IPSET_FLAG_NOMATCH; +	dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); +} + +static inline void +hash_net6_data_reset_flags(struct hash_net6_elem *dst, u32 *flags) +{ +	if (dst->nomatch) { +		*flags = IPSET_FLAG_NOMATCH; +		dst->nomatch = 0; +	}  }  static inline int diff --git a/net/netfilter/ipset/ip_set_hash_netiface.c b/net/netfilter/ipset/ip_set_hash_netiface.c index 6ba985f1c96..f2b0a3c3013 100644 --- a/net/netfilter/ipset/ip_set_hash_netiface.c +++ b/net/netfilter/ipset/ip_set_hash_netiface.c @@ -198,7 +198,16 @@ hash_netiface4_data_copy(struct hash_netiface4_elem *dst,  static inline void  hash_netiface4_data_flags(struct hash_netiface4_elem *dst, u32 flags)  { -	dst->nomatch = flags & IPSET_FLAG_NOMATCH; +	dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH); +} + +static inline void +hash_netiface4_data_reset_flags(struct hash_netiface4_elem *dst, u32 *flags) +{ +	if (dst->nomatch) { +		*flags = IPSET_FLAG_NOMATCH; +		dst->nomatch = 0; +	}  }  static inline int @@ -494,7 +503,7 @@ hash_netiface6_data_copy(struct hash_netiface6_elem *dst,  static inline void  hash_netiface6_data_flags(struct hash_netiface6_elem *dst, u32 flags)  { -	dst->nomatch = flags & IPSET_FLAG_NOMATCH; +	dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);  }  static inline int @@ -504,6 +513,15 @@ hash_netiface6_data_match(const struct hash_netiface6_elem *elem)  }  static inline void +hash_netiface6_data_reset_flags(struct hash_netiface6_elem *dst, u32 *flags) +{ +	if (dst->nomatch) { +		*flags = IPSET_FLAG_NOMATCH; +		dst->nomatch = 0; +	} +} + +static inline void  hash_netiface6_data_zero_out(struct hash_netiface6_elem *elem)  {  	elem->elem = 0; diff --git a/net/netfilter/ipset/ip_set_hash_netport.c b/net/netfilter/ipset/ip_set_hash_netport.c index af20c0c5ced..349deb672a2 100644 --- a/net/netfilter/ipset/ip_set_hash_netport.c +++ b/net/netfilter/ipset/ip_set_hash_netport.c @@ -104,6 +104,15 @@ hash_netport4_data_flags(struct hash_netport4_elem *dst, u32 flags)  	dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);  } +static inline void +hash_netport4_data_reset_flags(struct hash_netport4_elem *dst, u32 *flags) +{ +	if (dst->nomatch) { +		*flags = IPSET_FLAG_NOMATCH; +		dst->nomatch = 0; +	} +} +  static inline int  hash_netport4_data_match(const struct hash_netport4_elem *elem)  { @@ -375,6 +384,15 @@ hash_netport6_data_flags(struct hash_netport6_elem *dst, u32 flags)  	dst->nomatch = !!(flags & IPSET_FLAG_NOMATCH);  } +static inline void +hash_netport6_data_reset_flags(struct hash_netport6_elem *dst, u32 *flags) +{ +	if (dst->nomatch) { +		*flags = IPSET_FLAG_NOMATCH; +		dst->nomatch = 0; +	} +} +  static inline int  hash_netport6_data_match(const struct hash_netport6_elem *elem)  { diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c index 8371c2bac2e..09c744aa898 100644 --- a/net/netfilter/ipset/ip_set_list_set.c +++ b/net/netfilter/ipset/ip_set_list_set.c @@ -174,9 +174,13 @@ list_set_add(struct list_set *map, u32 i, ip_set_id_t id,  {  	const struct set_elem *e = list_set_elem(map, i); -	if (i == map->size - 1 && e->id != IPSET_INVALID_ID) -		/* Last element replaced: e.g. 
add new,before,last */ -		ip_set_put_byindex(e->id); +	if (e->id != IPSET_INVALID_ID) { +		const struct set_elem *x = list_set_elem(map, map->size - 1); + +		/* Last element replaced or pushed off */ +		if (x->id != IPSET_INVALID_ID) +			ip_set_put_byindex(x->id); +	}  	if (with_timeout(map->timeout))  		list_elem_tadd(map, i, id, ip_set_timeout_set(timeout));  	else diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 47edf5a40a5..61f49d24171 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -1394,10 +1394,8 @@ ip_vs_in_icmp(struct sk_buff *skb, int *related, unsigned int hooknum)  			skb_reset_network_header(skb);  			IP_VS_DBG(12, "ICMP for IPIP %pI4->%pI4: mtu=%u\n",  				&ip_hdr(skb)->saddr, &ip_hdr(skb)->daddr, mtu); -			rcu_read_lock();  			ipv4_update_pmtu(skb, dev_net(skb->dev),  					 mtu, 0, 0, 0, 0); -			rcu_read_unlock();  			/* Client uses PMTUD? */  			if (!(cih->frag_off & htons(IP_DF)))  				goto ignore_ipip; @@ -1577,7 +1575,8 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)  	}  	/* ipvs enabled in this netns ? */  	net = skb_net(skb); -	if (!net_ipvs(net)->enable) +	ipvs = net_ipvs(net); +	if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))  		return NF_ACCEPT;  	ip_vs_fill_iph_skb(af, skb, &iph); @@ -1654,7 +1653,6 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)  	}  	IP_VS_DBG_PKT(11, af, pp, skb, 0, "Incoming packet"); -	ipvs = net_ipvs(net);  	/* Check the server status */  	if (cp->dest && !(cp->dest->flags & IP_VS_DEST_F_AVAILABLE)) {  		/* the destination server is not available */ @@ -1815,13 +1813,15 @@ ip_vs_forward_icmp(unsigned int hooknum, struct sk_buff *skb,  {  	int r;  	struct net *net; +	struct netns_ipvs *ipvs;  	if (ip_hdr(skb)->protocol != IPPROTO_ICMP)  		return NF_ACCEPT;  	/* ipvs enabled in this netns ? */  	net = skb_net(skb); -	if (!net_ipvs(net)->enable) +	ipvs = net_ipvs(net); +	if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))  		return NF_ACCEPT;  	return ip_vs_in_icmp(skb, &r, hooknum); @@ -1835,6 +1835,7 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb,  {  	int r;  	struct net *net; +	struct netns_ipvs *ipvs;  	struct ip_vs_iphdr iphdr;  	ip_vs_fill_iph_skb(AF_INET6, skb, &iphdr); @@ -1843,7 +1844,8 @@ ip_vs_forward_icmp_v6(unsigned int hooknum, struct sk_buff *skb,  	/* ipvs enabled in this netns ? 
*/  	net = skb_net(skb); -	if (!net_ipvs(net)->enable) +	ipvs = net_ipvs(net); +	if (unlikely(sysctl_backup_only(ipvs) || !ipvs->enable))  		return NF_ACCEPT;  	return ip_vs_in_icmp_v6(skb, &r, hooknum, &iphdr); diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index c68198bf912..9e2d1cccd1e 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -1808,6 +1808,12 @@ static struct ctl_table vs_vars[] = {  		.mode		= 0644,  		.proc_handler	= proc_dointvec,  	}, +	{ +		.procname	= "backup_only", +		.maxlen		= sizeof(int), +		.mode		= 0644, +		.proc_handler	= proc_dointvec, +	},  #ifdef CONFIG_IP_VS_DEBUG  	{  		.procname	= "debug_level", @@ -3741,6 +3747,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)  	tbl[idx++].data = &ipvs->sysctl_nat_icmp_send;  	ipvs->sysctl_pmtu_disc = 1;  	tbl[idx++].data = &ipvs->sysctl_pmtu_disc; +	tbl[idx++].data = &ipvs->sysctl_backup_only;  	ipvs->sysctl_hdr = register_net_sysctl(net, "net/ipv4/vs", tbl); diff --git a/net/netfilter/ipvs/ip_vs_proto_sctp.c b/net/netfilter/ipvs/ip_vs_proto_sctp.c index ae8ec6f2768..cd1d7298f7b 100644 --- a/net/netfilter/ipvs/ip_vs_proto_sctp.c +++ b/net/netfilter/ipvs/ip_vs_proto_sctp.c @@ -906,7 +906,7 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,  	sctp_chunkhdr_t _sctpch, *sch;  	unsigned char chunk_type;  	int event, next_state; -	int ihl; +	int ihl, cofs;  #ifdef CONFIG_IP_VS_IPV6  	ihl = cp->af == AF_INET ? ip_hdrlen(skb) : sizeof(struct ipv6hdr); @@ -914,8 +914,8 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,  	ihl = ip_hdrlen(skb);  #endif -	sch = skb_header_pointer(skb, ihl + sizeof(sctp_sctphdr_t), -				sizeof(_sctpch), &_sctpch); +	cofs = ihl + sizeof(sctp_sctphdr_t); +	sch = skb_header_pointer(skb, cofs, sizeof(_sctpch), &_sctpch);  	if (sch == NULL)  		return; @@ -933,10 +933,12 @@ set_sctp_state(struct ip_vs_proto_data *pd, struct ip_vs_conn *cp,  	 */  	if ((sch->type == SCTP_CID_COOKIE_ECHO) ||  	    (sch->type == SCTP_CID_COOKIE_ACK)) { -		sch = skb_header_pointer(skb, (ihl + sizeof(sctp_sctphdr_t) + -				sch->length), sizeof(_sctpch), &_sctpch); -		if (sch) { -			if (sch->type == SCTP_CID_ABORT) +		int clen = ntohs(sch->length); + +		if (clen >= sizeof(sctp_chunkhdr_t)) { +			sch = skb_header_pointer(skb, cofs + ALIGN(clen, 4), +						 sizeof(_sctpch), &_sctpch); +			if (sch && sch->type == SCTP_CID_ABORT)  				chunk_type = sch->type;  		}  	} diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c index a9740bd6fe5..94b4b9853f6 100644 --- a/net/netfilter/nf_conntrack_helper.c +++ b/net/netfilter/nf_conntrack_helper.c @@ -339,6 +339,13 @@ void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct,  {  	const struct nf_conn_help *help;  	const struct nf_conntrack_helper *helper; +	struct va_format vaf; +	va_list args; + +	va_start(args, fmt); + +	vaf.fmt = fmt; +	vaf.va = &args;  	/* Called from the helper function, this call never fails */  	help = nfct_help(ct); @@ -347,7 +354,9 @@ void nf_ct_helper_log(struct sk_buff *skb, const struct nf_conn *ct,  	helper = rcu_dereference(help->helper);  	nf_log_packet(nf_ct_l3num(ct), 0, skb, NULL, NULL, NULL, -		      "nf_ct_%s: dropping packet: %s ", helper->name, fmt); +		      "nf_ct_%s: dropping packet: %pV ", helper->name, &vaf); + +	va_end(args);  }  EXPORT_SYMBOL_GPL(nf_ct_helper_log); diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c index 
432f9578000..ba65b2041eb 100644 --- a/net/netfilter/nf_conntrack_proto_dccp.c +++ b/net/netfilter/nf_conntrack_proto_dccp.c @@ -969,6 +969,10 @@ static int __init nf_conntrack_proto_dccp_init(void)  {  	int ret; +	ret = register_pernet_subsys(&dccp_net_ops); +	if (ret < 0) +		goto out_pernet; +  	ret = nf_ct_l4proto_register(&dccp_proto4);  	if (ret < 0)  		goto out_dccp4; @@ -977,16 +981,12 @@ static int __init nf_conntrack_proto_dccp_init(void)  	if (ret < 0)  		goto out_dccp6; -	ret = register_pernet_subsys(&dccp_net_ops); -	if (ret < 0) -		goto out_pernet; -  	return 0; -out_pernet: -	nf_ct_l4proto_unregister(&dccp_proto6);  out_dccp6:  	nf_ct_l4proto_unregister(&dccp_proto4);  out_dccp4: +	unregister_pernet_subsys(&dccp_net_ops); +out_pernet:  	return ret;  } diff --git a/net/netfilter/nf_conntrack_proto_gre.c b/net/netfilter/nf_conntrack_proto_gre.c index bd7d01d9c7e..155ce9f8a0d 100644 --- a/net/netfilter/nf_conntrack_proto_gre.c +++ b/net/netfilter/nf_conntrack_proto_gre.c @@ -420,18 +420,18 @@ static int __init nf_ct_proto_gre_init(void)  {  	int ret; -	ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_gre4); -	if (ret < 0) -		goto out_gre4; -  	ret = register_pernet_subsys(&proto_gre_net_ops);  	if (ret < 0)  		goto out_pernet; +	ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_gre4); +	if (ret < 0) +		goto out_gre4; +  	return 0; -out_pernet: -	nf_ct_l4proto_unregister(&nf_conntrack_l4proto_gre4);  out_gre4: +	unregister_pernet_subsys(&proto_gre_net_ops); +out_pernet:  	return ret;  } diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c index 480f616d593..ec83536def9 100644 --- a/net/netfilter/nf_conntrack_proto_sctp.c +++ b/net/netfilter/nf_conntrack_proto_sctp.c @@ -888,6 +888,10 @@ static int __init nf_conntrack_proto_sctp_init(void)  {  	int ret; +	ret = register_pernet_subsys(&sctp_net_ops); +	if (ret < 0) +		goto out_pernet; +  	ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_sctp4);  	if (ret < 0)  		goto out_sctp4; @@ -896,16 +900,12 @@ static int __init nf_conntrack_proto_sctp_init(void)  	if (ret < 0)  		goto out_sctp6; -	ret = register_pernet_subsys(&sctp_net_ops); -	if (ret < 0) -		goto out_pernet; -  	return 0; -out_pernet: -	nf_ct_l4proto_unregister(&nf_conntrack_l4proto_sctp6);  out_sctp6:  	nf_ct_l4proto_unregister(&nf_conntrack_l4proto_sctp4);  out_sctp4: +	unregister_pernet_subsys(&sctp_net_ops); +out_pernet:  	return ret;  } diff --git a/net/netfilter/nf_conntrack_proto_udplite.c b/net/netfilter/nf_conntrack_proto_udplite.c index 157489581c3..ca969f6273f 100644 --- a/net/netfilter/nf_conntrack_proto_udplite.c +++ b/net/netfilter/nf_conntrack_proto_udplite.c @@ -371,6 +371,10 @@ static int __init nf_conntrack_proto_udplite_init(void)  {  	int ret; +	ret = register_pernet_subsys(&udplite_net_ops); +	if (ret < 0) +		goto out_pernet; +  	ret = nf_ct_l4proto_register(&nf_conntrack_l4proto_udplite4);  	if (ret < 0)  		goto out_udplite4; @@ -379,16 +383,12 @@ static int __init nf_conntrack_proto_udplite_init(void)  	if (ret < 0)  		goto out_udplite6; -	ret = register_pernet_subsys(&udplite_net_ops); -	if (ret < 0) -		goto out_pernet; -  	return 0; -out_pernet: -	nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite6);  out_udplite6:  	nf_ct_l4proto_unregister(&nf_conntrack_l4proto_udplite4);  out_udplite4: +	unregister_pernet_subsys(&udplite_net_ops); +out_pernet:  	return ret;  } diff --git a/net/netfilter/nf_conntrack_sip.c b/net/netfilter/nf_conntrack_sip.c index 0e7d423324c..e0c4373b474 100644 --- 
a/net/netfilter/nf_conntrack_sip.c +++ b/net/netfilter/nf_conntrack_sip.c @@ -1593,10 +1593,8 @@ static int sip_help_tcp(struct sk_buff *skb, unsigned int protoff,  		end += strlen("\r\n\r\n") + clen;  		msglen = origlen = end - dptr; -		if (msglen > datalen) { -			nf_ct_helper_log(skb, ct, "incomplete/bad SIP message"); -			return NF_DROP; -		} +		if (msglen > datalen) +			return NF_ACCEPT;  		ret = process_sip_msg(skb, ct, protoff, dataoff,  				      &dptr, &msglen); diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c index 6bcce401fd1..fedee394366 100644 --- a/net/netfilter/nf_conntrack_standalone.c +++ b/net/netfilter/nf_conntrack_standalone.c @@ -568,6 +568,7 @@ static int __init nf_conntrack_standalone_init(void)  		register_net_sysctl(&init_net, "net", nf_ct_netfilter_table);  	if (!nf_ct_netfilter_header) {  		pr_err("nf_conntrack: can't register to sysctl.\n"); +		ret = -ENOMEM;  		goto out_sysctl;  	}  #endif diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c index 8d5769c6d16..ad24be070e5 100644 --- a/net/netfilter/nf_nat_core.c +++ b/net/netfilter/nf_nat_core.c @@ -467,33 +467,22 @@ EXPORT_SYMBOL_GPL(nf_nat_packet);  struct nf_nat_proto_clean {  	u8	l3proto;  	u8	l4proto; -	bool	hash;  }; -/* Clear NAT section of all conntracks, in case we're loaded again. */ -static int nf_nat_proto_clean(struct nf_conn *i, void *data) +/* kill conntracks with affected NAT section */ +static int nf_nat_proto_remove(struct nf_conn *i, void *data)  {  	const struct nf_nat_proto_clean *clean = data;  	struct nf_conn_nat *nat = nfct_nat(i);  	if (!nat)  		return 0; -	if (!(i->status & IPS_SRC_NAT_DONE)) -		return 0; +  	if ((clean->l3proto && nf_ct_l3num(i) != clean->l3proto) ||  	    (clean->l4proto && nf_ct_protonum(i) != clean->l4proto))  		return 0; -	if (clean->hash) { -		spin_lock_bh(&nf_nat_lock); -		hlist_del_rcu(&nat->bysource); -		spin_unlock_bh(&nf_nat_lock); -	} else { -		memset(nat, 0, sizeof(*nat)); -		i->status &= ~(IPS_NAT_MASK | IPS_NAT_DONE_MASK | -			       IPS_SEQ_ADJUST); -	} -	return 0; +	return i->status & IPS_NAT_MASK ? 
1 : 0;  }  static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto) @@ -505,16 +494,8 @@ static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)  	struct net *net;  	rtnl_lock(); -	/* Step 1 - remove from bysource hash */ -	clean.hash = true;  	for_each_net(net) -		nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean); -	synchronize_rcu(); - -	/* Step 2 - clean NAT section */ -	clean.hash = false; -	for_each_net(net) -		nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean); +		nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean);  	rtnl_unlock();  } @@ -526,16 +507,9 @@ static void nf_nat_l3proto_clean(u8 l3proto)  	struct net *net;  	rtnl_lock(); -	/* Step 1 - remove from bysource hash */ -	clean.hash = true; -	for_each_net(net) -		nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean); -	synchronize_rcu(); -	/* Step 2 - clean NAT section */ -	clean.hash = false;  	for_each_net(net) -		nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean); +		nf_ct_iterate_cleanup(net, nf_nat_proto_remove, &clean);  	rtnl_unlock();  } @@ -773,7 +747,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)  {  	struct nf_nat_proto_clean clean = {}; -	nf_ct_iterate_cleanup(net, &nf_nat_proto_clean, &clean); +	nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean);  	synchronize_rcu();  	nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);  } diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index d578ec25171..0b1b32cda30 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -62,11 +62,6 @@ void nfnl_unlock(__u8 subsys_id)  }  EXPORT_SYMBOL_GPL(nfnl_unlock); -static struct mutex *nfnl_get_lock(__u8 subsys_id) -{ -	return &table[subsys_id].mutex; -} -  int nfnetlink_subsys_register(const struct nfnetlink_subsystem *n)  {  	nfnl_lock(n->subsys_id); @@ -199,7 +194,7 @@ replay:  			rcu_read_unlock();  			nfnl_lock(subsys_id);  			if (rcu_dereference_protected(table[subsys_id].subsys, -				lockdep_is_held(nfnl_get_lock(subsys_id))) != ss || +				lockdep_is_held(&table[subsys_id].mutex)) != ss ||  			    nfnetlink_find_client(type, ss) != nc)  				err = -EAGAIN;  			else if (nc->call) diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c index 589d686f0b4..dc3fd5d4446 100644 --- a/net/netfilter/nfnetlink_acct.c +++ b/net/netfilter/nfnetlink_acct.c @@ -49,6 +49,8 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,  		return -EINVAL;  	acct_name = nla_data(tb[NFACCT_NAME]); +	if (strlen(acct_name) == 0) +		return -EINVAL;  	list_for_each_entry(nfacct, &nfnl_acct_list, head) {  		if (strncmp(nfacct->name, acct_name, NFACCT_NAME_MAX) != 0) diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c index 858fd52c104..42680b2baa1 100644 --- a/net/netfilter/nfnetlink_queue_core.c +++ b/net/netfilter/nfnetlink_queue_core.c @@ -112,7 +112,7 @@ instance_create(u_int16_t queue_num, int portid)  	inst->queue_num = queue_num;  	inst->peer_portid = portid;  	inst->queue_maxlen = NFQNL_QMAX_DEFAULT; -	inst->copy_range = 0xfffff; +	inst->copy_range = 0xffff;  	inst->copy_mode = NFQNL_COPY_NONE;  	spin_lock_init(&inst->lock);  	INIT_LIST_HEAD(&inst->queue_list); @@ -1062,8 +1062,10 @@ static int __init nfnetlink_queue_init(void)  #ifdef CONFIG_PROC_FS  	if (!proc_create("nfnetlink_queue", 0440, -			 proc_net_netfilter, &nfqnl_file_ops)) +			 proc_net_netfilter, &nfqnl_file_ops)) { +		status = -ENOMEM;  		goto cleanup_subsys; +	}  #endif  	register_netdevice_notifier(&nfqnl_dev_notifier); diff --git 
a/net/netfilter/xt_AUDIT.c b/net/netfilter/xt_AUDIT.c index ba92824086f..3228d7f24eb 100644 --- a/net/netfilter/xt_AUDIT.c +++ b/net/netfilter/xt_AUDIT.c @@ -124,6 +124,9 @@ audit_tg(struct sk_buff *skb, const struct xt_action_param *par)  	const struct xt_audit_info *info = par->targinfo;  	struct audit_buffer *ab; +	if (audit_enabled == 0) +		goto errout; +  	ab = audit_log_start(NULL, GFP_ATOMIC, AUDIT_NETFILTER_PKT);  	if (ab == NULL)  		goto errout; diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c index 847d495cd4d..8a6c6ea466d 100644 --- a/net/netlabel/netlabel_unlabeled.c +++ b/net/netlabel/netlabel_unlabeled.c @@ -1189,8 +1189,6 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,  	struct netlbl_unlhsh_walk_arg cb_arg;  	u32 skip_bkt = cb->args[0];  	u32 skip_chain = cb->args[1]; -	u32 skip_addr4 = cb->args[2]; -	u32 skip_addr6 = cb->args[3];  	u32 iter_bkt;  	u32 iter_chain = 0, iter_addr4 = 0, iter_addr6 = 0;  	struct netlbl_unlhsh_iface *iface; @@ -1215,7 +1213,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,  				continue;  			netlbl_af4list_foreach_rcu(addr4,  						   &iface->addr4_list) { -				if (iter_addr4++ < skip_addr4) +				if (iter_addr4++ < cb->args[2])  					continue;  				if (netlbl_unlabel_staticlist_gen(  					      NLBL_UNLABEL_C_STATICLIST, @@ -1231,7 +1229,7 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,  #if IS_ENABLED(CONFIG_IPV6)  			netlbl_af6list_foreach_rcu(addr6,  						   &iface->addr6_list) { -				if (iter_addr6++ < skip_addr6) +				if (iter_addr6++ < cb->args[3])  					continue;  				if (netlbl_unlabel_staticlist_gen(  					      NLBL_UNLABEL_C_STATICLIST, @@ -1250,10 +1248,10 @@ static int netlbl_unlabel_staticlist(struct sk_buff *skb,  unlabel_staticlist_return:  	rcu_read_unlock(); -	cb->args[0] = skip_bkt; -	cb->args[1] = skip_chain; -	cb->args[2] = skip_addr4; -	cb->args[3] = skip_addr6; +	cb->args[0] = iter_bkt; +	cb->args[1] = iter_chain; +	cb->args[2] = iter_addr4; +	cb->args[3] = iter_addr6;  	return skb->len;  } @@ -1273,12 +1271,9 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,  {  	struct netlbl_unlhsh_walk_arg cb_arg;  	struct netlbl_unlhsh_iface *iface; -	u32 skip_addr4 = cb->args[0]; -	u32 skip_addr6 = cb->args[1]; -	u32 iter_addr4 = 0; +	u32 iter_addr4 = 0, iter_addr6 = 0;  	struct netlbl_af4list *addr4;  #if IS_ENABLED(CONFIG_IPV6) -	u32 iter_addr6 = 0;  	struct netlbl_af6list *addr6;  #endif @@ -1292,7 +1287,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,  		goto unlabel_staticlistdef_return;  	netlbl_af4list_foreach_rcu(addr4, &iface->addr4_list) { -		if (iter_addr4++ < skip_addr4) +		if (iter_addr4++ < cb->args[0])  			continue;  		if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF,  					      iface, @@ -1305,7 +1300,7 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,  	}  #if IS_ENABLED(CONFIG_IPV6)  	netlbl_af6list_foreach_rcu(addr6, &iface->addr6_list) { -		if (iter_addr6++ < skip_addr6) +		if (iter_addr6++ < cb->args[1])  			continue;  		if (netlbl_unlabel_staticlist_gen(NLBL_UNLABEL_C_STATICLISTDEF,  					      iface, @@ -1320,8 +1315,8 @@ static int netlbl_unlabel_staticlistdef(struct sk_buff *skb,  unlabel_staticlistdef_return:  	rcu_read_unlock(); -	cb->args[0] = skip_addr4; -	cb->args[1] = skip_addr6; +	cb->args[0] = iter_addr4; +	cb->args[1] = iter_addr6;  	return skb->len;  } diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c index f2aabb6f410..5a55be3f17a 100644 --- 
a/net/netlink/genetlink.c +++ b/net/netlink/genetlink.c @@ -142,6 +142,7 @@ int genl_register_mc_group(struct genl_family *family,  	int err = 0;  	BUG_ON(grp->name[0] == '\0'); +	BUG_ON(memchr(grp->name, '\0', GENL_NAMSIZ) == NULL);  	genl_lock(); diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index d1fa1d9ffd2..103bd704b5f 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -1173,6 +1173,7 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock,  	}  	if (sax != NULL) { +		memset(sax, 0, sizeof(*sax));  		sax->sax25_family = AF_NETROM;  		skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call,  			      AX25_ADDR_LEN); diff --git a/net/nfc/llcp/llcp.c b/net/nfc/llcp/llcp.c index 7f8266dd14c..ee25f25f0cd 100644 --- a/net/nfc/llcp/llcp.c +++ b/net/nfc/llcp/llcp.c @@ -68,7 +68,8 @@ static void nfc_llcp_socket_purge(struct nfc_llcp_sock *sock)  	}  } -static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen) +static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen, +				    int err)  {  	struct sock *sk;  	struct hlist_node *tmp; @@ -100,11 +101,12 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)  				nfc_llcp_accept_unlink(accept_sk); +				if (err) +					accept_sk->sk_err = err;  				accept_sk->sk_state = LLCP_CLOSED; +				accept_sk->sk_state_change(sk);  				bh_unlock_sock(accept_sk); - -				sock_orphan(accept_sk);  			}  			if (listen == true) { @@ -123,16 +125,45 @@ static void nfc_llcp_socket_release(struct nfc_llcp_local *local, bool listen)  			continue;  		} +		if (err) +			sk->sk_err = err;  		sk->sk_state = LLCP_CLOSED; +		sk->sk_state_change(sk);  		bh_unlock_sock(sk); -		sock_orphan(sk); -  		sk_del_node_init(sk);  	}  	write_unlock(&local->sockets.lock); + +	/* +	 * If we want to keep the listening sockets alive, +	 * we don't touch the RAW ones. 
+	 */ +	if (listen == true) +		return; + +	write_lock(&local->raw_sockets.lock); + +	sk_for_each_safe(sk, tmp, &local->raw_sockets.head) { +		llcp_sock = nfc_llcp_sock(sk); + +		bh_lock_sock(sk); + +		nfc_llcp_socket_purge(llcp_sock); + +		if (err) +			sk->sk_err = err; +		sk->sk_state = LLCP_CLOSED; +		sk->sk_state_change(sk); + +		bh_unlock_sock(sk); + +		sk_del_node_init(sk); +	} + +	write_unlock(&local->raw_sockets.lock);  }  struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local) @@ -142,20 +173,25 @@ struct nfc_llcp_local *nfc_llcp_local_get(struct nfc_llcp_local *local)  	return local;  } -static void local_release(struct kref *ref) +static void local_cleanup(struct nfc_llcp_local *local, bool listen)  { -	struct nfc_llcp_local *local; - -	local = container_of(ref, struct nfc_llcp_local, ref); - -	list_del(&local->list); -	nfc_llcp_socket_release(local, false); +	nfc_llcp_socket_release(local, listen, ENXIO);  	del_timer_sync(&local->link_timer);  	skb_queue_purge(&local->tx_queue);  	cancel_work_sync(&local->tx_work);  	cancel_work_sync(&local->rx_work);  	cancel_work_sync(&local->timeout_work);  	kfree_skb(local->rx_pending); +} + +static void local_release(struct kref *ref) +{ +	struct nfc_llcp_local *local; + +	local = container_of(ref, struct nfc_llcp_local, ref); + +	list_del(&local->list); +	local_cleanup(local, false);  	kfree(local);  } @@ -785,7 +821,6 @@ static void nfc_llcp_recv_ui(struct nfc_llcp_local *local,  		skb_get(skb);  	} else {  		pr_err("Receive queue is full\n"); -		kfree_skb(skb);  	}  	nfc_llcp_sock_put(llcp_sock); @@ -986,7 +1021,6 @@ static void nfc_llcp_recv_hdlc(struct nfc_llcp_local *local,  			skb_get(skb);  		} else {  			pr_err("Receive queue is full\n"); -			kfree_skb(skb);  		}  	} @@ -1348,7 +1382,7 @@ void nfc_llcp_mac_is_down(struct nfc_dev *dev)  		return;  	/* Close and purge all existing sockets */ -	nfc_llcp_socket_release(local, true); +	nfc_llcp_socket_release(local, true, 0);  }  void nfc_llcp_mac_is_up(struct nfc_dev *dev, u32 target_idx, @@ -1427,6 +1461,8 @@ void nfc_llcp_unregister_device(struct nfc_dev *dev)  		return;  	} +	local_cleanup(local, false); +  	nfc_llcp_local_put(local);  } diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c index 5332751943a..6c94447ec41 100644 --- a/net/nfc/llcp/sock.c +++ b/net/nfc/llcp/sock.c @@ -270,7 +270,9 @@ struct sock *nfc_llcp_accept_dequeue(struct sock *parent,  		}  		if (sk->sk_state == LLCP_CONNECTED || !newsock) { -			nfc_llcp_accept_unlink(sk); +			list_del_init(&lsk->accept_queue); +			sock_put(sk); +  			if (newsock)  				sock_graft(sk, newsock); @@ -278,6 +280,8 @@ struct sock *nfc_llcp_accept_dequeue(struct sock *parent,  			pr_debug("Returning sk state %d\n", sk->sk_state); +			sk_acceptq_removed(parent); +  			return sk;  		} @@ -462,8 +466,6 @@ static int llcp_sock_release(struct socket *sock)  			nfc_llcp_accept_unlink(accept_sk);  			release_sock(accept_sk); - -			sock_orphan(accept_sk);  		}  	} @@ -644,6 +646,8 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,  	pr_debug("%p %zu\n", sk, len); +	msg->msg_namelen = 0; +  	lock_sock(sk);  	if (sk->sk_state == LLCP_CLOSED && @@ -689,6 +693,7 @@ static int llcp_sock_recvmsg(struct kiocb *iocb, struct socket *sock,  		pr_debug("Datagram socket %d %d\n", ui_cb->dsap, ui_cb->ssap); +		memset(sockaddr, 0, sizeof(*sockaddr));  		sockaddr->sa_family = AF_NFC;  		sockaddr->nfc_protocol = NFC_PROTO_NFC_DEP;  		sockaddr->dsap = ui_cb->dsap; diff --git a/net/openvswitch/actions.c 
b/net/openvswitch/actions.c index ac2defeeba8..d4d5363c7ba 100644 --- a/net/openvswitch/actions.c +++ b/net/openvswitch/actions.c @@ -58,7 +58,7 @@ static int __pop_vlan_tci(struct sk_buff *skb, __be16 *current_tci)  	if (skb->ip_summed == CHECKSUM_COMPLETE)  		skb->csum = csum_sub(skb->csum, csum_partial(skb->data -					+ ETH_HLEN, VLAN_HLEN, 0)); +					+ (2 * ETH_ALEN), VLAN_HLEN, 0));  	vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN);  	*current_tci = vhdr->h_vlan_TCI; @@ -115,7 +115,7 @@ static int push_vlan(struct sk_buff *skb, const struct ovs_action_push_vlan *vla  		if (skb->ip_summed == CHECKSUM_COMPLETE)  			skb->csum = csum_add(skb->csum, csum_partial(skb->data -					+ ETH_HLEN, VLAN_HLEN, 0)); +					+ (2 * ETH_ALEN), VLAN_HLEN, 0));  	}  	__vlan_hwaccel_put_tag(skb, ntohs(vlan->vlan_tci) & ~VLAN_TAG_PRESENT); diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c index e87a26506db..6980c3e6f06 100644 --- a/net/openvswitch/datapath.c +++ b/net/openvswitch/datapath.c @@ -394,6 +394,7 @@ static int queue_userspace_packet(struct net *net, int dp_ifindex,  	skb_copy_and_csum_dev(skb, nla_data(nla)); +	genlmsg_end(user_skb, upcall);  	err = genlmsg_unicast(net, user_skb, upcall_info->portid);  out: @@ -1592,10 +1593,8 @@ struct sk_buff *ovs_vport_cmd_build_info(struct vport *vport, u32 portid,  		return ERR_PTR(-ENOMEM);  	retval = ovs_vport_cmd_fill_info(vport, skb, portid, seq, 0, cmd); -	if (retval < 0) { -		kfree_skb(skb); -		return ERR_PTR(retval); -	} +	BUG_ON(retval < 0); +  	return skb;  } @@ -1690,6 +1689,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)  	if (IS_ERR(vport))  		goto exit_unlock; +	err = 0;  	reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq,  					 OVS_VPORT_CMD_NEW);  	if (IS_ERR(reply)) { @@ -1724,24 +1724,32 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)  	    nla_get_u32(a[OVS_VPORT_ATTR_TYPE]) != vport->ops->type)  		err = -EINVAL; +	reply = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); +	if (!reply) { +		err = -ENOMEM; +		goto exit_unlock; +	} +  	if (!err && a[OVS_VPORT_ATTR_OPTIONS])  		err = ovs_vport_set_options(vport, a[OVS_VPORT_ATTR_OPTIONS]);  	if (err) -		goto exit_unlock; +		goto exit_free; +  	if (a[OVS_VPORT_ATTR_UPCALL_PID])  		vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]); -	reply = ovs_vport_cmd_build_info(vport, info->snd_portid, info->snd_seq, -					 OVS_VPORT_CMD_NEW); -	if (IS_ERR(reply)) { -		netlink_set_err(sock_net(skb->sk)->genl_sock, 0, -				ovs_dp_vport_multicast_group.id, PTR_ERR(reply)); -		goto exit_unlock; -	} +	err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid, +				      info->snd_seq, 0, OVS_VPORT_CMD_NEW); +	BUG_ON(err < 0);  	genl_notify(reply, genl_info_net(info), info->snd_portid,  		    ovs_dp_vport_multicast_group.id, info->nlhdr, GFP_KERNEL); +	rtnl_unlock(); +	return 0; + +exit_free: +	kfree_skb(reply);  exit_unlock:  	rtnl_unlock();  	return err; @@ -1771,6 +1779,7 @@ static int ovs_vport_cmd_del(struct sk_buff *skb, struct genl_info *info)  	if (IS_ERR(reply))  		goto exit_unlock; +	err = 0;  	ovs_dp_detach_port(vport);  	genl_notify(reply, genl_info_net(info), info->snd_portid, diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c index 20605ecf100..67a2b783fe7 100644 --- a/net/openvswitch/flow.c +++ b/net/openvswitch/flow.c @@ -482,7 +482,11 @@ static __be16 parse_ethertype(struct sk_buff *skb)  		return htons(ETH_P_802_2);  	__skb_pull(skb, sizeof(struct llc_snap_hdr)); -	return 
llc->ethertype; + +	if (ntohs(llc->ethertype) >= 1536) +		return llc->ethertype; + +	return htons(ETH_P_802_2);  }  static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key, @@ -791,9 +795,9 @@ void ovs_flow_tbl_insert(struct flow_table *table, struct sw_flow *flow)  void ovs_flow_tbl_remove(struct flow_table *table, struct sw_flow *flow)  { +	BUG_ON(table->count == 0);  	hlist_del_rcu(&flow->hash_node[table->node_ver]);  	table->count--; -	BUG_ON(table->count < 0);  }  /* The size of the argument for each %OVS_KEY_ATTR_* Netlink attribute.  */ diff --git a/net/openvswitch/vport-netdev.c b/net/openvswitch/vport-netdev.c index 670cbc3518d..2130d61c384 100644 --- a/net/openvswitch/vport-netdev.c +++ b/net/openvswitch/vport-netdev.c @@ -43,8 +43,7 @@ static void netdev_port_receive(struct vport *vport, struct sk_buff *skb)  	/* Make our own copy of the packet.  Otherwise we will mangle the  	 * packet for anyone who came before us (e.g. tcpdump via AF_PACKET). -	 * (No one comes after us, since we tell handle_bridge() that we took -	 * the packet.) */ +	 */  	skb = skb_share_check(skb, GFP_ATOMIC);  	if (unlikely(!skb))  		return; diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c index ba717cc038b..f6b8132ce4c 100644 --- a/net/openvswitch/vport.c +++ b/net/openvswitch/vport.c @@ -325,8 +325,7 @@ int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)   * @skb: skb that was received   *   * Must be called with rcu_read_lock.  The packet cannot be shared and - * skb->data should point to the Ethernet header.  The caller must have already - * called compute_ip_summed() to initialize the checksumming fields. + * skb->data should point to the Ethernet header.   */  void ovs_vport_receive(struct vport *vport, struct sk_buff *skb)  { diff --git a/net/rds/stats.c b/net/rds/stats.c index 7be790d60b9..73be187d389 100644 --- a/net/rds/stats.c +++ b/net/rds/stats.c @@ -87,6 +87,7 @@ void rds_stats_info_copy(struct rds_info_iterator *iter,  	for (i = 0; i < nr; i++) {  		BUG_ON(strlen(names[i]) >= sizeof(ctr.name));  		strncpy(ctr.name, names[i], sizeof(ctr.name) - 1); +		ctr.name[sizeof(ctr.name) - 1] = '\0';  		ctr.value = values[i];  		rds_info_copy(iter, &ctr, sizeof(ctr)); diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index cf68e6e4054..9c834745159 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -1253,6 +1253,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock,  	skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);  	if (srose != NULL) { +		memset(srose, 0, msg->msg_namelen);  		srose->srose_family = AF_ROSE;  		srose->srose_addr   = rose->dest_addr;  		srose->srose_call   = rose->dest_call; diff --git a/net/sched/cls_fw.c b/net/sched/cls_fw.c index 1135d8227f9..9b97172db84 100644 --- a/net/sched/cls_fw.c +++ b/net/sched/cls_fw.c @@ -204,7 +204,6 @@ fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f,  	if (err < 0)  		return err; -	err = -EINVAL;  	if (tb[TCA_FW_CLASSID]) {  		f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]);  		tcf_bind_filter(tp, &f->res, base); @@ -218,6 +217,7 @@ fw_change_attrs(struct net *net, struct tcf_proto *tp, struct fw_filter *f,  	}  #endif /* CONFIG_NET_CLS_IND */ +	err = -EINVAL;  	if (tb[TCA_FW_MASK]) {  		mask = nla_get_u32(tb[TCA_FW_MASK]);  		if (mask != head->mask) diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 13aa47aa2ff..1bc210ffcba 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -962,8 +962,11 @@ cbq_dequeue(struct Qdisc *sch)  		
cbq_update(q);  		if ((incr -= incr2) < 0)  			incr = 0; +		q->now += incr; +	} else { +		if (now > q->now) +			q->now = now;  	} -	q->now += incr;  	q->now_rt = now;  	for (;;) { diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c index 4e606fcb253..55786283a3d 100644 --- a/net/sched/sch_fq_codel.c +++ b/net/sched/sch_fq_codel.c @@ -195,7 +195,7 @@ static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)  		flow->deficit = q->quantum;  		flow->dropped = 0;  	} -	if (++sch->q.qlen < sch->limit) +	if (++sch->q.qlen <= sch->limit)  		return NET_XMIT_SUCCESS;  	q->drop_overlimit++; diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index ffad48109a2..eac7e0ee23c 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -904,7 +904,7 @@ void psched_ratecfg_precompute(struct psched_ratecfg *r, u32 rate)  	u64 mult;  	int shift; -	r->rate_bps = rate << 3; +	r->rate_bps = (u64)rate << 3;  	r->shift = 0;  	r->mult = 1;  	/* diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c index e9a77f621c3..d51852bba01 100644 --- a/net/sched/sch_qfq.c +++ b/net/sched/sch_qfq.c @@ -298,6 +298,10 @@ static void qfq_update_agg(struct qfq_sched *q, struct qfq_aggregate *agg,  	    new_num_classes == q->max_agg_classes - 1) /* agg no more full */  		hlist_add_head(&agg->nonfull_next, &q->nonfull_aggs); +	/* The next assignment may let +	 * agg->initial_budget > agg->budgetmax +	 * hold, we will take it into account in charge_actual_service(). +	 */  	agg->budgetmax = new_num_classes * agg->lmax;  	new_agg_weight = agg->class_weight * new_num_classes;  	agg->inv_w = ONE_FP/new_agg_weight; @@ -817,7 +821,7 @@ static void qfq_make_eligible(struct qfq_sched *q)  	unsigned long old_vslot = q->oldV >> q->min_slot_shift;  	if (vslot != old_vslot) { -		unsigned long mask = (1UL << fls(vslot ^ old_vslot)) - 1; +		unsigned long mask = (1ULL << fls(vslot ^ old_vslot)) - 1;  		qfq_move_groups(q, mask, IR, ER);  		qfq_move_groups(q, mask, IB, EB);  	} @@ -988,12 +992,23 @@ static inline struct sk_buff *qfq_peek_skb(struct qfq_aggregate *agg,  /* Update F according to the actual service received by the aggregate. */  static inline void charge_actual_service(struct qfq_aggregate *agg)  { -	/* compute the service received by the aggregate */ -	u32 service_received = agg->initial_budget - agg->budget; +	/* Compute the service received by the aggregate, taking into +	 * account that, after decreasing the number of classes in +	 * agg, it may happen that +	 * agg->initial_budget - agg->budget > agg->bugdetmax +	 */ +	u32 service_received = min(agg->budgetmax, +				   agg->initial_budget - agg->budget);  	agg->F = agg->S + (u64)service_received * agg->inv_w;  } +static inline void qfq_update_agg_ts(struct qfq_sched *q, +				     struct qfq_aggregate *agg, +				     enum update_reason reason); + +static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg); +  static struct sk_buff *qfq_dequeue(struct Qdisc *sch)  {  	struct qfq_sched *q = qdisc_priv(sch); @@ -1021,7 +1036,7 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)  		in_serv_agg->initial_budget = in_serv_agg->budget =  			in_serv_agg->budgetmax; -		if (!list_empty(&in_serv_agg->active)) +		if (!list_empty(&in_serv_agg->active)) {  			/*  			 * Still active: reschedule for  			 * service. Possible optimization: if no other @@ -1032,8 +1047,9 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)  			 * handle it, we would need to maintain an  			 * extra num_active_aggs field.  			
*/ -			qfq_activate_agg(q, in_serv_agg, requeue); -		else if (sch->q.qlen == 0) { /* no aggregate to serve */ +			qfq_update_agg_ts(q, in_serv_agg, requeue); +			qfq_schedule_agg(q, in_serv_agg); +		} else if (sch->q.qlen == 0) { /* no aggregate to serve */  			q->in_serv_agg = NULL;  			return NULL;  		} @@ -1052,7 +1068,15 @@ static struct sk_buff *qfq_dequeue(struct Qdisc *sch)  	qdisc_bstats_update(sch, skb);  	agg_dequeue(in_serv_agg, cl, len); -	in_serv_agg->budget -= len; +	/* If lmax is lowered, through qfq_change_class, for a class +	 * owning pending packets with larger size than the new value +	 * of lmax, then the following condition may hold. +	 */ +	if (unlikely(in_serv_agg->budget < len)) +		in_serv_agg->budget = 0; +	else +		in_serv_agg->budget -= len; +  	q->V += (u64)len * IWSUM;  	pr_debug("qfq dequeue: len %u F %lld now %lld\n",  		 len, (unsigned long long) in_serv_agg->F, @@ -1217,17 +1241,11 @@ static int qfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)  	cl->deficit = agg->lmax;  	list_add_tail(&cl->alist, &agg->active); -	if (list_first_entry(&agg->active, struct qfq_class, alist) != cl) -		return err; /* aggregate was not empty, nothing else to do */ +	if (list_first_entry(&agg->active, struct qfq_class, alist) != cl || +	    q->in_serv_agg == agg) +		return err; /* non-empty or in service, nothing else to do */ -	/* recharge budget */ -	agg->initial_budget = agg->budget = agg->budgetmax; - -	qfq_update_agg_ts(q, agg, enqueue); -	if (q->in_serv_agg == NULL) -		q->in_serv_agg = agg; -	else if (agg != q->in_serv_agg) -		qfq_schedule_agg(q, agg); +	qfq_activate_agg(q, agg, enqueue);  	return err;  } @@ -1261,7 +1279,8 @@ static void qfq_schedule_agg(struct qfq_sched *q, struct qfq_aggregate *agg)  		/* group was surely ineligible, remove */  		__clear_bit(grp->index, &q->bitmaps[IR]);  		__clear_bit(grp->index, &q->bitmaps[IB]); -	} else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V)) +	} else if (!q->bitmaps[ER] && qfq_gt(roundedS, q->V) && +		   q->in_serv_agg == NULL)  		q->V = roundedS;  	grp->S = roundedS; @@ -1284,8 +1303,15 @@ skip_update:  static void qfq_activate_agg(struct qfq_sched *q, struct qfq_aggregate *agg,  			     enum update_reason reason)  { +	agg->initial_budget = agg->budget = agg->budgetmax; /* recharge budg. */ +  	qfq_update_agg_ts(q, agg, reason); -	qfq_schedule_agg(q, agg); +	if (q->in_serv_agg == NULL) { /* no aggr. 
in service or scheduled */ +		q->in_serv_agg = agg; /* start serving this aggregate */ +		 /* update V: to be in service, agg must be eligible */ +		q->oldV = q->V = agg->S; +	} else if (agg != q->in_serv_agg) +		qfq_schedule_agg(q, agg);  }  static void qfq_slot_remove(struct qfq_sched *q, struct qfq_group *grp, @@ -1357,8 +1383,6 @@ static void qfq_deactivate_agg(struct qfq_sched *q, struct qfq_aggregate *agg)  			__set_bit(grp->index, &q->bitmaps[s]);  		}  	} - -	qfq_update_eligible(q);  }  static void qfq_qlen_notify(struct Qdisc *sch, unsigned long arg) diff --git a/net/sctp/associola.c b/net/sctp/associola.c index 43cd0dd9149..d2709e2b7be 100644 --- a/net/sctp/associola.c +++ b/net/sctp/associola.c @@ -1079,7 +1079,7 @@ struct sctp_transport *sctp_assoc_lookup_tsn(struct sctp_association *asoc,  			transports) {  		if (transport == active) -			break; +			continue;  		list_for_each_entry(chunk, &transport->transmitted,  				transmitted_list) {  			if (key == chunk->subh.data_hdr->tsn) { diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c index 5131fcfedb0..de1a0138317 100644 --- a/net/sctp/sm_statefuns.c +++ b/net/sctp/sm_statefuns.c @@ -2082,7 +2082,7 @@ sctp_disposition_t sctp_sf_do_5_2_4_dupcook(struct net *net,  	}  	/* Delete the tempory new association. */ -	sctp_add_cmd_sf(commands, SCTP_CMD_NEW_ASOC, SCTP_ASOC(new_asoc)); +	sctp_add_cmd_sf(commands, SCTP_CMD_SET_ASOC, SCTP_ASOC(new_asoc));  	sctp_add_cmd_sf(commands, SCTP_CMD_DELETE_TCB, SCTP_NULL());  	/* Restore association pointer to provide SCTP command interpeter diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c index f7d34e7b6f8..5ead6055089 100644 --- a/net/sunrpc/auth_gss/svcauth_gss.c +++ b/net/sunrpc/auth_gss/svcauth_gss.c @@ -447,17 +447,21 @@ static int rsc_parse(struct cache_detail *cd,  	else {  		int N, i; +		/* +		 * NOTE: we skip uid_valid()/gid_valid() checks here: +		 * instead, * -1 id's are later mapped to the +		 * (export-specific) anonymous id by nfsd_setuser. +		 * +		 * (But supplementary gid's get no such special +		 * treatment so are checked for validity here.) 
+		 */  		/* uid */  		rsci.cred.cr_uid = make_kuid(&init_user_ns, id); -		if (!uid_valid(rsci.cred.cr_uid)) -			goto out;  		/* gid */  		if (get_int(&mesg, &id))  			goto out;  		rsci.cred.cr_gid = make_kgid(&init_user_ns, id); -		if (!gid_valid(rsci.cred.cr_gid)) -			goto out;  		/* number of additional gid's */  		if (get_int(&mesg, &N)) diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c index dcc446e7fbf..d5f35f15af9 100644 --- a/net/sunrpc/clnt.c +++ b/net/sunrpc/clnt.c @@ -304,10 +304,8 @@ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, stru  	err = rpciod_up();  	if (err)  		goto out_no_rpciod; -	err = -EINVAL; -	if (!xprt) -		goto out_no_xprt; +	err = -EINVAL;  	if (args->version >= program->nrvers)  		goto out_err;  	version = program->version[args->version]; @@ -382,10 +380,9 @@ out_no_principal:  out_no_stats:  	kfree(clnt);  out_err: -	xprt_put(xprt); -out_no_xprt:  	rpciod_down();  out_no_rpciod: +	xprt_put(xprt);  	return ERR_PTR(err);  } @@ -512,7 +509,7 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,  	new = rpc_new_client(args, xprt);  	if (IS_ERR(new)) {  		err = PTR_ERR(new); -		goto out_put; +		goto out_err;  	}  	atomic_inc(&clnt->cl_count); @@ -525,8 +522,6 @@ static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,  	new->cl_chatty = clnt->cl_chatty;  	return new; -out_put: -	xprt_put(xprt);  out_err:  	dprintk("RPC:       %s: returned error %d\n", __func__, err);  	return ERR_PTR(err); diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c index a0f48a51e14..a9129f8d707 100644 --- a/net/sunrpc/rpc_pipe.c +++ b/net/sunrpc/rpc_pipe.c @@ -1175,6 +1175,7 @@ static struct file_system_type rpc_pipe_fs_type = {  	.kill_sb	= rpc_kill_sb,  };  MODULE_ALIAS_FS("rpc_pipefs"); +MODULE_ALIAS("rpc_pipefs");  static void  init_once(void *foo) diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c index fb20f25ddec..f8529fc8e54 100644 --- a/net/sunrpc/sched.c +++ b/net/sunrpc/sched.c @@ -180,6 +180,8 @@ static void __rpc_add_wait_queue(struct rpc_wait_queue *queue,  		list_add_tail(&task->u.tk_wait.list, &queue->tasks[0]);  	task->tk_waitqueue = queue;  	queue->qlen++; +	/* barrier matches the read in rpc_wake_up_task_queue_locked() */ +	smp_wmb();  	rpc_set_queued(task);  	dprintk("RPC: %5u added to queue %p \"%s\"\n", @@ -430,8 +432,11 @@ static void __rpc_do_wake_up_task(struct rpc_wait_queue *queue, struct rpc_task   */  static void rpc_wake_up_task_queue_locked(struct rpc_wait_queue *queue, struct rpc_task *task)  { -	if (RPC_IS_QUEUED(task) && task->tk_waitqueue == queue) -		__rpc_do_wake_up_task(queue, task); +	if (RPC_IS_QUEUED(task)) { +		smp_rmb(); +		if (task->tk_waitqueue == queue) +			__rpc_do_wake_up_task(queue, task); +	}  }  /* diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index c1d8476b769..3d02130828d 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -849,6 +849,14 @@ static void xs_tcp_close(struct rpc_xprt *xprt)  		xs_tcp_shutdown(xprt);  } +static void xs_local_destroy(struct rpc_xprt *xprt) +{ +	xs_close(xprt); +	xs_free_peer_addresses(xprt); +	xprt_free(xprt); +	module_put(THIS_MODULE); +} +  /**   * xs_destroy - prepare to shutdown a transport   * @xprt: doomed transport @@ -862,10 +870,7 @@ static void xs_destroy(struct rpc_xprt *xprt)  	cancel_delayed_work_sync(&transport->connect_worker); -	xs_close(xprt); -	xs_free_peer_addresses(xprt); -	xprt_free(xprt); -	module_put(THIS_MODULE); +	xs_local_destroy(xprt);  }  static inline struct rpc_xprt 
*xprt_from_sock(struct sock *sk) @@ -2482,7 +2487,7 @@ static struct rpc_xprt_ops xs_local_ops = {  	.send_request		= xs_local_send_request,  	.set_retrans_timeout	= xprt_set_retrans_timeout_def,  	.close			= xs_close, -	.destroy		= xs_destroy, +	.destroy		= xs_local_destroy,  	.print_stats		= xs_local_print_stats,  }; diff --git a/net/tipc/socket.c b/net/tipc/socket.c index a9622b6cd91..515ce38e4f4 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -790,6 +790,7 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg)  	if (addr) {  		addr->family = AF_TIPC;  		addr->addrtype = TIPC_ADDR_ID; +		memset(&addr->addr, 0, sizeof(addr->addr));  		addr->addr.id.ref = msg_origport(msg);  		addr->addr.id.node = msg_orignode(msg);  		addr->addr.name.domain = 0;	/* could leave uninitialized */ @@ -904,6 +905,9 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock,  		goto exit;  	} +	/* will be updated in set_orig_addr() if needed */ +	m->msg_namelen = 0; +  	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT);  restart: @@ -1013,6 +1017,9 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock,  		goto exit;  	} +	/* will be updated in set_orig_addr() if needed */ +	m->msg_namelen = 0; +  	target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len);  	timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index 51be64f163e..2db702d82e7 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -382,7 +382,7 @@ static void unix_sock_destructor(struct sock *sk)  #endif  } -static int unix_release_sock(struct sock *sk, int embrion) +static void unix_release_sock(struct sock *sk, int embrion)  {  	struct unix_sock *u = unix_sk(sk);  	struct path path; @@ -451,8 +451,6 @@ static int unix_release_sock(struct sock *sk, int embrion)  	if (unix_tot_inflight)  		unix_gc();		/* Garbage collect fds */ - -	return 0;  }  static void init_peercred(struct sock *sk) @@ -699,9 +697,10 @@ static int unix_release(struct socket *sock)  	if (!sk)  		return 0; +	unix_release_sock(sk, 0);  	sock->sk = NULL; -	return unix_release_sock(sk, 0); +	return 0;  }  static int unix_autobind(struct socket *sock) @@ -1994,7 +1993,7 @@ again:  			if ((UNIXCB(skb).pid  != siocb->scm->pid) ||  			    (UNIXCB(skb).cred != siocb->scm->cred))  				break; -		} else { +		} else if (test_bit(SOCK_PASSCRED, &sock->flags)) {  			/* Copy credentials */  			scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred);  			check_creds = 1; diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c index ca511c4f388..7f93e2a42d7 100644 --- a/net/vmw_vsock/af_vsock.c +++ b/net/vmw_vsock/af_vsock.c @@ -207,7 +207,7 @@ static struct sock *__vsock_find_bound_socket(struct sockaddr_vm *addr)  	struct vsock_sock *vsk;  	list_for_each_entry(vsk, vsock_bound_sockets(addr), bound_table) -		if (vsock_addr_equals_addr_any(addr, &vsk->local_addr)) +		if (addr->svm_port == vsk->local_addr.svm_port)  			return sk_vsock(vsk);  	return NULL; @@ -220,8 +220,8 @@ static struct sock *__vsock_find_connected_socket(struct sockaddr_vm *src,  	list_for_each_entry(vsk, vsock_connected_sockets(src, dst),  			    connected_table) { -		if (vsock_addr_equals_addr(src, &vsk->remote_addr) -		    && vsock_addr_equals_addr(dst, &vsk->local_addr)) { +		if (vsock_addr_equals_addr(src, &vsk->remote_addr) && +		    dst->svm_port == vsk->local_addr.svm_port) {  			return sk_vsock(vsk);  		}  	} @@ -1670,6 +1670,8 @@ vsock_stream_recvmsg(struct kiocb *kiocb,  	vsk = vsock_sk(sk);  	err = 0; +	
msg->msg_namelen = 0; +  	lock_sock(sk);  	if (sk->sk_state != SS_CONNECTED) { diff --git a/net/vmw_vsock/vmci_transport.c b/net/vmw_vsock/vmci_transport.c index a70ace83a15..5e04d3d9628 100644 --- a/net/vmw_vsock/vmci_transport.c +++ b/net/vmw_vsock/vmci_transport.c @@ -464,19 +464,16 @@ static struct sock *vmci_transport_get_pending(  	struct vsock_sock *vlistener;  	struct vsock_sock *vpending;  	struct sock *pending; +	struct sockaddr_vm src; + +	vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port);  	vlistener = vsock_sk(listener);  	list_for_each_entry(vpending, &vlistener->pending_links,  			    pending_links) { -		struct sockaddr_vm src; -		struct sockaddr_vm dst; - -		vsock_addr_init(&src, pkt->dg.src.context, pkt->src_port); -		vsock_addr_init(&dst, pkt->dg.dst.context, pkt->dst_port); -  		if (vsock_addr_equals_addr(&src, &vpending->remote_addr) && -		    vsock_addr_equals_addr(&dst, &vpending->local_addr)) { +		    pkt->dst_port == vpending->local_addr.svm_port) {  			pending = sk_vsock(vpending);  			sock_hold(pending);  			goto found; @@ -739,10 +736,15 @@ static int vmci_transport_recv_stream_cb(void *data, struct vmci_datagram *dg)  	 */  	bh_lock_sock(sk); -	if (!sock_owned_by_user(sk) && sk->sk_state == SS_CONNECTED) -		vmci_trans(vsk)->notify_ops->handle_notify_pkt( -				sk, pkt, true, &dst, &src, -				&bh_process_pkt); +	if (!sock_owned_by_user(sk)) { +		/* The local context ID may be out of date, update it. */ +		vsk->local_addr.svm_cid = dst.svm_cid; + +		if (sk->sk_state == SS_CONNECTED) +			vmci_trans(vsk)->notify_ops->handle_notify_pkt( +					sk, pkt, true, &dst, &src, +					&bh_process_pkt); +	}  	bh_unlock_sock(sk); @@ -902,6 +904,9 @@ static void vmci_transport_recv_pkt_work(struct work_struct *work)  	lock_sock(sk); +	/* The local context ID may be out of date. */ +	vsock_sk(sk)->local_addr.svm_cid = pkt->dg.dst.context; +  	switch (sk->sk_state) {  	case SS_LISTEN:  		vmci_transport_recv_listen(sk, pkt); @@ -958,6 +963,10 @@ static int vmci_transport_recv_listen(struct sock *sk,  	pending = vmci_transport_get_pending(sk, pkt);  	if (pending) {  		lock_sock(pending); + +		/* The local context ID may be out of date. */ +		vsock_sk(pending)->local_addr.svm_cid = pkt->dg.dst.context; +  		switch (pending->sk_state) {  		case SS_CONNECTING:  			err = vmci_transport_recv_connecting_server(sk, @@ -1727,6 +1736,8 @@ static int vmci_transport_dgram_dequeue(struct kiocb *kiocb,  	if (flags & MSG_OOB || flags & MSG_ERRQUEUE)  		return -EOPNOTSUPP; +	msg->msg_namelen = 0; +  	/* Retrieve the head sk_buff from the socket's receive queue. 
*/  	err = 0;  	skb = skb_recv_datagram(&vsk->sk, flags, noblock, &err); @@ -1759,7 +1770,6 @@ static int vmci_transport_dgram_dequeue(struct kiocb *kiocb,  	if (err)  		goto out; -	msg->msg_namelen = 0;  	if (msg->msg_name) {  		struct sockaddr_vm *vm_addr; diff --git a/net/vmw_vsock/vsock_addr.c b/net/vmw_vsock/vsock_addr.c index b7df1aea7c5..ec2611b4ea0 100644 --- a/net/vmw_vsock/vsock_addr.c +++ b/net/vmw_vsock/vsock_addr.c @@ -64,16 +64,6 @@ bool vsock_addr_equals_addr(const struct sockaddr_vm *addr,  }  EXPORT_SYMBOL_GPL(vsock_addr_equals_addr); -bool vsock_addr_equals_addr_any(const struct sockaddr_vm *addr, -				const struct sockaddr_vm *other) -{ -	return (addr->svm_cid == VMADDR_CID_ANY || -		other->svm_cid == VMADDR_CID_ANY || -		addr->svm_cid == other->svm_cid) && -	       addr->svm_port == other->svm_port; -} -EXPORT_SYMBOL_GPL(vsock_addr_equals_addr_any); -  int vsock_addr_cast(const struct sockaddr *addr,  		    size_t len, struct sockaddr_vm **out_addr)  { diff --git a/net/vmw_vsock/vsock_addr.h b/net/vmw_vsock/vsock_addr.h index cdfbcefdf84..9ccd5316eac 100644 --- a/net/vmw_vsock/vsock_addr.h +++ b/net/vmw_vsock/vsock_addr.h @@ -24,8 +24,6 @@ bool vsock_addr_bound(const struct sockaddr_vm *addr);  void vsock_addr_unbind(struct sockaddr_vm *addr);  bool vsock_addr_equals_addr(const struct sockaddr_vm *addr,  			    const struct sockaddr_vm *other); -bool vsock_addr_equals_addr_any(const struct sockaddr_vm *addr, -				const struct sockaddr_vm *other);  int vsock_addr_cast(const struct sockaddr *addr, size_t len,  		    struct sockaddr_vm **out_addr); diff --git a/net/wireless/core.c b/net/wireless/core.c index 5ffff039b01..6ddf74f0ae1 100644 --- a/net/wireless/core.c +++ b/net/wireless/core.c @@ -212,6 +212,39 @@ static void cfg80211_rfkill_poll(struct rfkill *rfkill, void *data)  	rdev_rfkill_poll(rdev);  } +void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev, +			      struct wireless_dev *wdev) +{ +	lockdep_assert_held(&rdev->devlist_mtx); +	lockdep_assert_held(&rdev->sched_scan_mtx); + +	if (WARN_ON(wdev->iftype != NL80211_IFTYPE_P2P_DEVICE)) +		return; + +	if (!wdev->p2p_started) +		return; + +	rdev_stop_p2p_device(rdev, wdev); +	wdev->p2p_started = false; + +	rdev->opencount--; + +	if (rdev->scan_req && rdev->scan_req->wdev == wdev) { +		bool busy = work_busy(&rdev->scan_done_wk); + +		/* +		 * If the work isn't pending or running (in which case it would +		 * be waiting for the lock we hold) the driver didn't properly +		 * cancel the scan when the interface was removed. In this case +		 * warn and leak the scan request object to not crash later. 
+		 */ +		WARN_ON(!busy); + +		rdev->scan_req->aborted = true; +		___cfg80211_scan_done(rdev, !busy); +	} +} +  static int cfg80211_rfkill_set_block(void *data, bool blocked)  {  	struct cfg80211_registered_device *rdev = data; @@ -221,7 +254,8 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked)  		return 0;  	rtnl_lock(); -	mutex_lock(&rdev->devlist_mtx); + +	/* read-only iteration need not hold the devlist_mtx */  	list_for_each_entry(wdev, &rdev->wdev_list, list) {  		if (wdev->netdev) { @@ -231,18 +265,18 @@ static int cfg80211_rfkill_set_block(void *data, bool blocked)  		/* otherwise, check iftype */  		switch (wdev->iftype) {  		case NL80211_IFTYPE_P2P_DEVICE: -			if (!wdev->p2p_started) -				break; -			rdev_stop_p2p_device(rdev, wdev); -			wdev->p2p_started = false; -			rdev->opencount--; +			/* but this requires it */ +			mutex_lock(&rdev->devlist_mtx); +			mutex_lock(&rdev->sched_scan_mtx); +			cfg80211_stop_p2p_device(rdev, wdev); +			mutex_unlock(&rdev->sched_scan_mtx); +			mutex_unlock(&rdev->devlist_mtx);  			break;  		default:  			break;  		}  	} -	mutex_unlock(&rdev->devlist_mtx);  	rtnl_unlock();  	return 0; @@ -367,8 +401,7 @@ struct wiphy *wiphy_new(const struct cfg80211_ops *ops, int sizeof_priv)  	rdev->wiphy.rts_threshold = (u32) -1;  	rdev->wiphy.coverage_class = 0; -	rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH | -			       NL80211_FEATURE_ADVERTISE_CHAN_LIMITS; +	rdev->wiphy.features = NL80211_FEATURE_SCAN_FLUSH;  	return &rdev->wiphy;  } @@ -746,17 +779,13 @@ static void wdev_cleanup_work(struct work_struct *work)  	wdev = container_of(work, struct wireless_dev, cleanup_work);  	rdev = wiphy_to_dev(wdev->wiphy); -	cfg80211_lock_rdev(rdev); +	mutex_lock(&rdev->sched_scan_mtx);  	if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) {  		rdev->scan_req->aborted = true;  		___cfg80211_scan_done(rdev, true);  	} -	cfg80211_unlock_rdev(rdev); - -	mutex_lock(&rdev->sched_scan_mtx); -  	if (WARN_ON(rdev->sched_scan_req &&  		    rdev->sched_scan_req->dev == wdev->netdev)) {  		__cfg80211_stop_sched_scan(rdev, false); @@ -782,21 +811,19 @@ void cfg80211_unregister_wdev(struct wireless_dev *wdev)  		return;  	mutex_lock(&rdev->devlist_mtx); +	mutex_lock(&rdev->sched_scan_mtx);  	list_del_rcu(&wdev->list);  	rdev->devlist_generation++;  	switch (wdev->iftype) {  	case NL80211_IFTYPE_P2P_DEVICE: -		if (!wdev->p2p_started) -			break; -		rdev_stop_p2p_device(rdev, wdev); -		wdev->p2p_started = false; -		rdev->opencount--; +		cfg80211_stop_p2p_device(rdev, wdev);  		break;  	default:  		WARN_ON_ONCE(1);  		break;  	} +	mutex_unlock(&rdev->sched_scan_mtx);  	mutex_unlock(&rdev->devlist_mtx);  }  EXPORT_SYMBOL(cfg80211_unregister_wdev); @@ -937,6 +964,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,  		cfg80211_update_iface_num(rdev, wdev->iftype, 1);  		cfg80211_lock_rdev(rdev);  		mutex_lock(&rdev->devlist_mtx); +		mutex_lock(&rdev->sched_scan_mtx);  		wdev_lock(wdev);  		switch (wdev->iftype) {  #ifdef CONFIG_CFG80211_WEXT @@ -968,6 +996,7 @@ static int cfg80211_netdev_notifier_call(struct notifier_block *nb,  			break;  		}  		wdev_unlock(wdev); +		mutex_unlock(&rdev->sched_scan_mtx);  		rdev->opencount++;  		mutex_unlock(&rdev->devlist_mtx);  		cfg80211_unlock_rdev(rdev); diff --git a/net/wireless/core.h b/net/wireless/core.h index 3aec0e429d8..5845c2b37aa 100644 --- a/net/wireless/core.h +++ b/net/wireless/core.h @@ -503,6 +503,9 @@ int cfg80211_validate_beacon_int(struct cfg80211_registered_device *rdev,  void 
cfg80211_update_iface_num(struct cfg80211_registered_device *rdev,  			       enum nl80211_iftype iftype, int num); +void cfg80211_stop_p2p_device(struct cfg80211_registered_device *rdev, +			      struct wireless_dev *wdev); +  #define CFG80211_MAX_NUM_DIFFERENT_CHANNELS 10  #ifdef CONFIG_CFG80211_DEVELOPER_WARNINGS diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index e652d05ff71..58e13a8c95f 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -557,18 +557,6 @@ static int nl80211_msg_put_channel(struct sk_buff *msg,  	if ((chan->flags & IEEE80211_CHAN_RADAR) &&  	    nla_put_flag(msg, NL80211_FREQUENCY_ATTR_RADAR))  		goto nla_put_failure; -	if ((chan->flags & IEEE80211_CHAN_NO_HT40MINUS) && -	    nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_MINUS)) -		goto nla_put_failure; -	if ((chan->flags & IEEE80211_CHAN_NO_HT40PLUS) && -	    nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_HT40_PLUS)) -		goto nla_put_failure; -	if ((chan->flags & IEEE80211_CHAN_NO_80MHZ) && -	    nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_80MHZ)) -		goto nla_put_failure; -	if ((chan->flags & IEEE80211_CHAN_NO_160MHZ) && -	    nla_put_flag(msg, NL80211_FREQUENCY_ATTR_NO_160MHZ)) -		goto nla_put_failure;  	if (nla_put_u32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER,  			DBM_TO_MBM(chan->max_power))) @@ -1310,15 +1298,6 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag  			dev->wiphy.max_acl_mac_addrs))  		goto nla_put_failure; -	if (dev->wiphy.extended_capabilities && -	    (nla_put(msg, NL80211_ATTR_EXT_CAPA, -		     dev->wiphy.extended_capabilities_len, -		     dev->wiphy.extended_capabilities) || -	     nla_put(msg, NL80211_ATTR_EXT_CAPA_MASK, -		     dev->wiphy.extended_capabilities_len, -		     dev->wiphy.extended_capabilities_mask))) -		goto nla_put_failure; -  	return genlmsg_end(msg, hdr);   nla_put_failure: @@ -1328,7 +1307,7 @@ static int nl80211_send_wiphy(struct sk_buff *msg, u32 portid, u32 seq, int flag  static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)  { -	int idx = 0; +	int idx = 0, ret;  	int start = cb->args[0];  	struct cfg80211_registered_device *dev; @@ -1338,9 +1317,29 @@ static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)  			continue;  		if (++idx <= start)  			continue; -		if (nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).portid, -				       cb->nlh->nlmsg_seq, NLM_F_MULTI, -				       dev) < 0) { +		ret = nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).portid, +					 cb->nlh->nlmsg_seq, NLM_F_MULTI, +					 dev); +		if (ret < 0) { +			/* +			 * If sending the wiphy data didn't fit (ENOBUFS or +			 * EMSGSIZE returned), this SKB is still empty (so +			 * it's not too big because another wiphy dataset is +			 * already in the skb) and we've not tried to adjust +			 * the dump allocation yet ... then adjust the alloc +			 * size to be bigger, and return 1 but with the empty +			 * skb. This results in an empty message being RX'ed +			 * in userspace, but that is ignored. +			 * +			 * We can then retry with the larger buffer. 
+			 */ +			if ((ret == -ENOBUFS || ret == -EMSGSIZE) && +			    !skb->len && +			    cb->min_dump_alloc < 4096) { +				cb->min_dump_alloc = 4096; +				mutex_unlock(&cfg80211_mutex); +				return 1; +			}  			idx--;  			break;  		} @@ -1357,7 +1356,7 @@ static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info)  	struct sk_buff *msg;  	struct cfg80211_registered_device *dev = info->user_ptr[0]; -	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); +	msg = nlmsg_new(4096, GFP_KERNEL);  	if (!msg)  		return -ENOMEM; @@ -4703,14 +4702,19 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)  	if (!rdev->ops->scan)  		return -EOPNOTSUPP; -	if (rdev->scan_req) -		return -EBUSY; +	mutex_lock(&rdev->sched_scan_mtx); +	if (rdev->scan_req) { +		err = -EBUSY; +		goto unlock; +	}  	if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {  		n_channels = validate_scan_freqs(  				info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]); -		if (!n_channels) -			return -EINVAL; +		if (!n_channels) { +			err = -EINVAL; +			goto unlock; +		}  	} else {  		enum ieee80211_band band;  		n_channels = 0; @@ -4724,23 +4728,29 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)  		nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp)  			n_ssids++; -	if (n_ssids > wiphy->max_scan_ssids) -		return -EINVAL; +	if (n_ssids > wiphy->max_scan_ssids) { +		err = -EINVAL; +		goto unlock; +	}  	if (info->attrs[NL80211_ATTR_IE])  		ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);  	else  		ie_len = 0; -	if (ie_len > wiphy->max_scan_ie_len) -		return -EINVAL; +	if (ie_len > wiphy->max_scan_ie_len) { +		err = -EINVAL; +		goto unlock; +	}  	request = kzalloc(sizeof(*request)  			+ sizeof(*request->ssids) * n_ssids  			+ sizeof(*request->channels) * n_channels  			+ ie_len, GFP_KERNEL); -	if (!request) -		return -ENOMEM; +	if (!request) { +		err = -ENOMEM; +		goto unlock; +	}  	if (n_ssids)  		request->ssids = (void *)&request->channels[n_channels]; @@ -4877,6 +4887,8 @@ static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info)  		kfree(request);  	} + unlock: +	mutex_unlock(&rdev->sched_scan_mtx);  	return err;  } @@ -7750,20 +7762,9 @@ static int nl80211_stop_p2p_device(struct sk_buff *skb, struct genl_info *info)  	if (!rdev->ops->stop_p2p_device)  		return -EOPNOTSUPP; -	if (!wdev->p2p_started) -		return 0; - -	rdev_stop_p2p_device(rdev, wdev); -	wdev->p2p_started = false; - -	mutex_lock(&rdev->devlist_mtx); -	rdev->opencount--; -	mutex_unlock(&rdev->devlist_mtx); - -	if (WARN_ON(rdev->scan_req && rdev->scan_req->wdev == wdev)) { -		rdev->scan_req->aborted = true; -		___cfg80211_scan_done(rdev, true); -	} +	mutex_lock(&rdev->sched_scan_mtx); +	cfg80211_stop_p2p_device(rdev, wdev); +	mutex_unlock(&rdev->sched_scan_mtx);  	return 0;  } @@ -8487,7 +8488,7 @@ static int nl80211_add_scan_req(struct sk_buff *msg,  	struct nlattr *nest;  	int i; -	ASSERT_RDEV_LOCK(rdev); +	lockdep_assert_held(&rdev->sched_scan_mtx);  	if (WARN_ON(!req))  		return 0; diff --git a/net/wireless/scan.c b/net/wireless/scan.c index 674aadca007..fd99ea495b7 100644 --- a/net/wireless/scan.c +++ b/net/wireless/scan.c @@ -169,7 +169,7 @@ void ___cfg80211_scan_done(struct cfg80211_registered_device *rdev, bool leak)  	union iwreq_data wrqu;  #endif -	ASSERT_RDEV_LOCK(rdev); +	lockdep_assert_held(&rdev->sched_scan_mtx);  	request = rdev->scan_req; @@ -230,9 +230,9 @@ void __cfg80211_scan_done(struct work_struct *wk)  	rdev = container_of(wk, struct cfg80211_registered_device,  			  
  scan_done_wk); -	cfg80211_lock_rdev(rdev); +	mutex_lock(&rdev->sched_scan_mtx);  	___cfg80211_scan_done(rdev, false); -	cfg80211_unlock_rdev(rdev); +	mutex_unlock(&rdev->sched_scan_mtx);  }  void cfg80211_scan_done(struct cfg80211_scan_request *request, bool aborted) @@ -698,11 +698,6 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,  	found = rb_find_bss(dev, tmp, BSS_CMP_REGULAR);  	if (found) { -		found->pub.beacon_interval = tmp->pub.beacon_interval; -		found->pub.signal = tmp->pub.signal; -		found->pub.capability = tmp->pub.capability; -		found->ts = tmp->ts; -  		/* Update IEs */  		if (rcu_access_pointer(tmp->pub.proberesp_ies)) {  			const struct cfg80211_bss_ies *old; @@ -723,6 +718,8 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,  			if (found->pub.hidden_beacon_bss &&  			    !list_empty(&found->hidden_list)) { +				const struct cfg80211_bss_ies *f; +  				/*  				 * The found BSS struct is one of the probe  				 * response members of a group, but we're @@ -732,6 +729,10 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,  				 * SSID to showing it, which is confusing so  				 * drop this information.  				 */ + +				f = rcu_access_pointer(tmp->pub.beacon_ies); +				kfree_rcu((struct cfg80211_bss_ies *)f, +					  rcu_head);  				goto drop;  			} @@ -761,6 +762,11 @@ cfg80211_bss_update(struct cfg80211_registered_device *dev,  				kfree_rcu((struct cfg80211_bss_ies *)old,  					  rcu_head);  		} + +		found->pub.beacon_interval = tmp->pub.beacon_interval; +		found->pub.signal = tmp->pub.signal; +		found->pub.capability = tmp->pub.capability; +		found->ts = tmp->ts;  	} else {  		struct cfg80211_internal_bss *new;  		struct cfg80211_internal_bss *hidden; @@ -1056,6 +1062,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,  	if (IS_ERR(rdev))  		return PTR_ERR(rdev); +	mutex_lock(&rdev->sched_scan_mtx);  	if (rdev->scan_req) {  		err = -EBUSY;  		goto out; @@ -1162,6 +1169,7 @@ int cfg80211_wext_siwscan(struct net_device *dev,  		dev_hold(dev);  	}   out: +	mutex_unlock(&rdev->sched_scan_mtx);  	kfree(creq);  	cfg80211_unlock_rdev(rdev);  	return err; diff --git a/net/wireless/sme.c b/net/wireless/sme.c index f432bd3755b..482c70e7012 100644 --- a/net/wireless/sme.c +++ b/net/wireless/sme.c @@ -85,6 +85,7 @@ static int cfg80211_conn_scan(struct wireless_dev *wdev)  	ASSERT_RTNL();  	ASSERT_RDEV_LOCK(rdev);  	ASSERT_WDEV_LOCK(wdev); +	lockdep_assert_held(&rdev->sched_scan_mtx);  	if (rdev->scan_req)  		return -EBUSY; @@ -223,6 +224,7 @@ void cfg80211_conn_work(struct work_struct *work)  	rtnl_lock();  	cfg80211_lock_rdev(rdev);  	mutex_lock(&rdev->devlist_mtx); +	mutex_lock(&rdev->sched_scan_mtx);  	list_for_each_entry(wdev, &rdev->wdev_list, list) {  		wdev_lock(wdev); @@ -247,6 +249,7 @@ void cfg80211_conn_work(struct work_struct *work)  		wdev_unlock(wdev);  	} +	mutex_unlock(&rdev->sched_scan_mtx);  	mutex_unlock(&rdev->devlist_mtx);  	cfg80211_unlock_rdev(rdev);  	rtnl_unlock(); @@ -320,11 +323,9 @@ void cfg80211_sme_scan_done(struct net_device *dev)  {  	struct wireless_dev *wdev = dev->ieee80211_ptr; -	mutex_lock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx);  	wdev_lock(wdev);  	__cfg80211_sme_scan_done(dev);  	wdev_unlock(wdev); -	mutex_unlock(&wiphy_to_dev(wdev->wiphy)->devlist_mtx);  }  void cfg80211_sme_rx_auth(struct net_device *dev, @@ -924,9 +925,12 @@ int cfg80211_connect(struct cfg80211_registered_device *rdev,  	int err;  	mutex_lock(&rdev->devlist_mtx); +	/* might request scan - scan_mtx -> wdev_mtx dependency */ +	
mutex_lock(&rdev->sched_scan_mtx);  	wdev_lock(dev->ieee80211_ptr);  	err = __cfg80211_connect(rdev, dev, connect, connkeys, NULL);  	wdev_unlock(dev->ieee80211_ptr); +	mutex_unlock(&rdev->sched_scan_mtx);  	mutex_unlock(&rdev->devlist_mtx);  	return err; diff --git a/net/wireless/trace.h b/net/wireless/trace.h index b7a531380e1..7586de77a2f 100644 --- a/net/wireless/trace.h +++ b/net/wireless/trace.h @@ -27,7 +27,8 @@  #define WIPHY_PR_ARG	__entry->wiphy_name  #define WDEV_ENTRY	__field(u32, id) -#define WDEV_ASSIGN	(__entry->id) = (wdev ? wdev->identifier : 0) +#define WDEV_ASSIGN	(__entry->id) = (!IS_ERR_OR_NULL(wdev)	\ +					 ? wdev->identifier : 0)  #define WDEV_PR_FMT	"wdev(%u)"  #define WDEV_PR_ARG	(__entry->id) @@ -1778,7 +1779,7 @@ TRACE_EVENT(rdev_set_mac_acl,  	),  	TP_fast_assign(  		WIPHY_ASSIGN; -		WIPHY_ASSIGN; +		NETDEV_ASSIGN;  		__entry->acl_policy = params->acl_policy;  	),  	TP_printk(WIPHY_PR_FMT ", " NETDEV_PR_FMT ", acl policy: %d", diff --git a/net/wireless/wext-sme.c b/net/wireless/wext-sme.c index fb9622f6d99..e79cb5c0655 100644 --- a/net/wireless/wext-sme.c +++ b/net/wireless/wext-sme.c @@ -89,6 +89,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,  	cfg80211_lock_rdev(rdev);  	mutex_lock(&rdev->devlist_mtx); +	mutex_lock(&rdev->sched_scan_mtx);  	wdev_lock(wdev);  	if (wdev->sme_state != CFG80211_SME_IDLE) { @@ -135,6 +136,7 @@ int cfg80211_mgd_wext_siwfreq(struct net_device *dev,  	err = cfg80211_mgd_wext_connect(rdev, wdev);   out:  	wdev_unlock(wdev); +	mutex_unlock(&rdev->sched_scan_mtx);  	mutex_unlock(&rdev->devlist_mtx);  	cfg80211_unlock_rdev(rdev);  	return err; @@ -190,6 +192,7 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev,  	cfg80211_lock_rdev(rdev);  	mutex_lock(&rdev->devlist_mtx); +	mutex_lock(&rdev->sched_scan_mtx);  	wdev_lock(wdev);  	err = 0; @@ -223,6 +226,7 @@ int cfg80211_mgd_wext_siwessid(struct net_device *dev,  	err = cfg80211_mgd_wext_connect(rdev, wdev);   out:  	wdev_unlock(wdev); +	mutex_unlock(&rdev->sched_scan_mtx);  	mutex_unlock(&rdev->devlist_mtx);  	cfg80211_unlock_rdev(rdev);  	return err; @@ -285,6 +289,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev,  	cfg80211_lock_rdev(rdev);  	mutex_lock(&rdev->devlist_mtx); +	mutex_lock(&rdev->sched_scan_mtx);  	wdev_lock(wdev);  	if (wdev->sme_state != CFG80211_SME_IDLE) { @@ -313,6 +318,7 @@ int cfg80211_mgd_wext_siwap(struct net_device *dev,  	err = cfg80211_mgd_wext_connect(rdev, wdev);   out:  	wdev_unlock(wdev); +	mutex_unlock(&rdev->sched_scan_mtx);  	mutex_unlock(&rdev->devlist_mtx);  	cfg80211_unlock_rdev(rdev);  	return err; diff --git a/net/xfrm/xfrm_replay.c b/net/xfrm/xfrm_replay.c index 35754cc8a9e..8dafe6d3c6e 100644 --- a/net/xfrm/xfrm_replay.c +++ b/net/xfrm/xfrm_replay.c @@ -334,6 +334,70 @@ static void xfrm_replay_notify_bmp(struct xfrm_state *x, int event)  		x->xflags &= ~XFRM_TIME_DEFER;  } +static void xfrm_replay_notify_esn(struct xfrm_state *x, int event) +{ +	u32 seq_diff, oseq_diff; +	struct km_event c; +	struct xfrm_replay_state_esn *replay_esn = x->replay_esn; +	struct xfrm_replay_state_esn *preplay_esn = x->preplay_esn; + +	/* we send notify messages in case +	 *  1. we updated on of the sequence numbers, and the seqno difference +	 *     is at least x->replay_maxdiff, in this case we also update the +	 *     timeout of our timer function +	 *  2. if x->replay_maxage has elapsed since last update, +	 *     and there were changes +	 * +	 *  The state structure must be locked! 
+	 */ + +	switch (event) { +	case XFRM_REPLAY_UPDATE: +		if (!x->replay_maxdiff) +			break; + +		if (replay_esn->seq_hi == preplay_esn->seq_hi) +			seq_diff = replay_esn->seq - preplay_esn->seq; +		else +			seq_diff = ~preplay_esn->seq + replay_esn->seq + 1; + +		if (replay_esn->oseq_hi == preplay_esn->oseq_hi) +			oseq_diff = replay_esn->oseq - preplay_esn->oseq; +		else +			oseq_diff = ~preplay_esn->oseq + replay_esn->oseq + 1; + +		if (seq_diff < x->replay_maxdiff && +		    oseq_diff < x->replay_maxdiff) { + +			if (x->xflags & XFRM_TIME_DEFER) +				event = XFRM_REPLAY_TIMEOUT; +			else +				return; +		} + +		break; + +	case XFRM_REPLAY_TIMEOUT: +		if (memcmp(x->replay_esn, x->preplay_esn, +			   xfrm_replay_state_esn_len(replay_esn)) == 0) { +			x->xflags |= XFRM_TIME_DEFER; +			return; +		} + +		break; +	} + +	memcpy(x->preplay_esn, x->replay_esn, +	       xfrm_replay_state_esn_len(replay_esn)); +	c.event = XFRM_MSG_NEWAE; +	c.data.aevent = event; +	km_state_notify(x, &c); + +	if (x->replay_maxage && +	    !mod_timer(&x->rtimer, jiffies + x->replay_maxage)) +		x->xflags &= ~XFRM_TIME_DEFER; +} +  static int xfrm_replay_overflow_esn(struct xfrm_state *x, struct sk_buff *skb)  {  	int err = 0; @@ -510,7 +574,7 @@ static struct xfrm_replay xfrm_replay_esn = {  	.advance	= xfrm_replay_advance_esn,  	.check		= xfrm_replay_check_esn,  	.recheck	= xfrm_replay_recheck_esn, -	.notify		= xfrm_replay_notify_bmp, +	.notify		= xfrm_replay_notify_esn,  	.overflow	= xfrm_replay_overflow_esn,  };
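A note on the net/sunrpc/sched.c hunk above: the smp_wmb() added in __rpc_add_wait_queue() pairs with the smp_rmb() added in rpc_wake_up_task_queue_locked(), so any CPU that observes the queued bit set also observes the earlier store to task->tk_waitqueue. The fragment below is a minimal user-space sketch of that publish/observe ordering using C11 fences; the names payload and published are invented stand-ins, not kernel symbols, and the C11 fences are only an analogue of the kernel barriers.

/* User-space sketch of the smp_wmb()/smp_rmb() pairing from
 * net/sunrpc/sched.c, expressed with C11 fences.  Only the ordering
 * idea matches the patch: store the data, fence, set the flag; on
 * the reader side test the flag, fence, then read the data.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static int payload;			/* stands in for tk_waitqueue   */
static atomic_int published;		/* stands in for the queued bit */

static void *writer(void *arg)
{
	(void)arg;
	payload = 42;					/* publish the data  */
	atomic_thread_fence(memory_order_release);	/* like smp_wmb()    */
	atomic_store_explicit(&published, 1, memory_order_relaxed);
	return NULL;
}

static void *reader(void *arg)
{
	(void)arg;
	while (!atomic_load_explicit(&published, memory_order_relaxed))
		;					/* wait for the flag */
	atomic_thread_fence(memory_order_acquire);	/* like smp_rmb()    */
	printf("payload = %d\n", payload);		/* guaranteed 42     */
	return NULL;
}

int main(void)
{
	pthread_t w, r;

	pthread_create(&r, NULL, reader, NULL);
	pthread_create(&w, NULL, writer, NULL);
	pthread_join(w, NULL);
	pthread_join(r, NULL);
	return 0;
}

Built with a C11 compiler and -pthread, the reader can never print a stale value of payload once it has seen published set, which is the guarantee the sched.c change needs for tk_waitqueue.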
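A note on the tipc and vsock recvmsg hunks above: they all apply one rule, namely that memory copied back to user space must be fully initialised, so msg->msg_namelen is cleared before any early return and set_orig_addr() now memset()s the address union before filling individual members, which keeps structure padding and unused union members from leaking kernel stack bytes. The helper below sketches the same zero-then-fill discipline; struct demo_addr and fill_demo_addr() are hypothetical examples, not kernel code.

/* Zero-then-fill sketch of the rule applied to the sockaddr buffers
 * above.  struct demo_addr and fill_demo_addr() are made up for the
 * example; only the memset()-before-assignment pattern mirrors the
 * kernel change.
 */
#include <stdio.h>
#include <string.h>

struct demo_addr {
	unsigned short family;		/* 2 bytes, padding may follow       */
	unsigned int   port;
	unsigned char  reserved[8];	/* unused bytes still get copied out */
};

static void fill_demo_addr(struct demo_addr *a,
			   unsigned short family, unsigned int port)
{
	memset(a, 0, sizeof(*a));	/* wipe padding and unused fields  */
	a->family = family;		/* then set only the known members */
	a->port = port;
}

int main(void)
{
	struct demo_addr addr;

	fill_demo_addr(&addr, 30 /* an AF_* style constant */, 4711);
	printf("family=%hu port=%u\n", addr.family, addr.port);
	return 0;
}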
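A note on the cfg80211 hunks above: every path that inspects or completes rdev->scan_req now does so under rdev->sched_scan_mtx, and the mutex is always taken in the same position in the chain (devlist_mtx, then sched_scan_mtx, then the wdev lock), matching the dependency called out in the cfg80211_connect() comment. The sketch below illustrates that fixed-order rule with pthread mutexes; the three mutex names and do_scan_work() are stand-ins chosen for illustration, not cfg80211 symbols.

/* Fixed lock-ordering sketch mirroring the devlist_mtx ->
 * sched_scan_mtx -> wdev lock nesting enforced by the patches above.
 * The mutex names and do_scan_work() are illustrative only.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t devlist_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t sched_scan_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t wdev_lock = PTHREAD_MUTEX_INITIALIZER;

static void do_scan_work(void)
{
	/* every caller nests the locks in this one order, so no two
	 * threads can ever hold them in opposite orders and deadlock
	 */
	pthread_mutex_lock(&devlist_lock);
	pthread_mutex_lock(&sched_scan_lock);
	pthread_mutex_lock(&wdev_lock);

	puts("scan state touched under all three locks");

	pthread_mutex_unlock(&wdev_lock);
	pthread_mutex_unlock(&sched_scan_lock);
	pthread_mutex_unlock(&devlist_lock);
}

int main(void)
{
	do_scan_work();
	return 0;
}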
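A note on the xfrm_replay_notify_esn() hunk directly above: when the high halves of the 64-bit ESN counters differ, the distance between the 32-bit low words is computed as ~preplay->seq + replay->seq + 1, which in two's-complement arithmetic equals new - old modulo 2^32 and therefore stays correct across a low-word wraparound. The stand-alone check below verifies that identity with made-up sequence numbers; seq_distance() is an illustrative helper, not a kernel function.

/* Stand-alone check that ~old + new + 1 equals new - old for u32
 * values, i.e. the expression used above is the wrap-safe distance
 * between two 32-bit sequence numbers.  The sample values are made up.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t seq_distance(uint32_t old_seq, uint32_t new_seq)
{
	return ~old_seq + new_seq + 1;	/* same form as the patch */
}

int main(void)
{
	uint32_t old_seq = 0xfffffff0u;	/* low word about to wrap  */
	uint32_t new_seq = 0x00000010u;	/* low word after the wrap */

	assert(seq_distance(old_seq, new_seq) == new_seq - old_seq);
	assert(seq_distance(old_seq, new_seq) == 0x20u);	/* 32 apart */
	printf("distance = %u\n", (unsigned int)seq_distance(old_seq, new_seq));
	return 0;
}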