Diffstat (limited to 'drivers/infiniband/core/cma.c')
-rw-r--r--	drivers/infiniband/core/cma.c	505
1 file changed, 334 insertions(+), 171 deletions(-)
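The combined diff below renames the private enum cma_state to the exported enum rdma_cm_state, replaces the cma_is_ud_ps() port-space test with a QP type carried on the id (rdma_create_id() gains an enum ib_qp_type argument), records the owning PID per id, adds rdma_set_reuseaddr(), and exports per-id statistics over RDMA netlink. As a minimal sketch of what the signature change means for a caller — the handler and context names here are hypothetical, not part of this patch:

	/* Hypothetical ULP event handler; returning non-zero destroys the id. */
	static int my_event_handler(struct rdma_cm_id *id,
				    struct rdma_cm_event *event)
	{
		return 0;
	}

	static struct rdma_cm_id *my_create_id(void *my_ctx)
	{
		/* Before this patch: rdma_create_id(my_event_handler, my_ctx,
		 * RDMA_PS_TCP).  The QP type is now passed explicitly instead
		 * of being inferred from the port space (IB_QPT_UD for the
		 * former RDMA_PS_UDP/RDMA_PS_IPOIB special cases). */
		return rdma_create_id(my_event_handler, my_ctx, RDMA_PS_TCP,
				      IB_QPT_RC);
	}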
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 5ed9d25d021..b6a33b3c516 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -47,6 +47,7 @@
 
 #include <rdma/rdma_cm.h>
 #include <rdma/rdma_cm_ib.h>
+#include <rdma/rdma_netlink.h>
 #include <rdma/ib_cache.h>
 #include <rdma/ib_cm.h>
 #include <rdma/ib_sa.h>
@@ -89,20 +90,6 @@ struct cma_device {
 	struct list_head	id_list;
 };
 
-enum cma_state {
-	CMA_IDLE,
-	CMA_ADDR_QUERY,
-	CMA_ADDR_RESOLVED,
-	CMA_ROUTE_QUERY,
-	CMA_ROUTE_RESOLVED,
-	CMA_CONNECT,
-	CMA_DISCONNECT,
-	CMA_ADDR_BOUND,
-	CMA_LISTEN,
-	CMA_DEVICE_REMOVAL,
-	CMA_DESTROYING
-};
-
 struct rdma_bind_list {
 	struct idr		*ps;
 	struct hlist_head	owners;
@@ -126,7 +113,7 @@ struct rdma_id_private {
 	struct list_head	mc_list;
 
 	int			internal_id;
-	enum cma_state		state;
+	enum rdma_cm_state	state;
 	spinlock_t		lock;
 	struct mutex		qp_mutex;
 
@@ -146,8 +133,10 @@ struct rdma_id_private {
 	u32			seq_num;
 	u32			qkey;
 	u32			qp_num;
+	pid_t			owner;
 	u8			srq;
 	u8			tos;
+	u8			reuseaddr;
 };
 
 struct cma_multicast {
@@ -164,8 +153,8 @@ struct cma_multicast {
 struct cma_work {
 	struct work_struct	work;
 	struct rdma_id_private	*id;
-	enum cma_state		old_state;
-	enum cma_state		new_state;
+	enum rdma_cm_state	old_state;
+	enum rdma_cm_state	new_state;
 	struct rdma_cm_event	event;
 };
 
@@ -216,7 +205,7 @@ struct sdp_hah {
 #define CMA_VERSION 0x00
 #define SDP_MAJ_VERSION 0x2
 
-static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
+static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp)
 {
 	unsigned long flags;
 	int ret;
@@ -228,7 +217,7 @@ static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp)
 }
 
 static int cma_comp_exch(struct rdma_id_private *id_priv,
-			 enum cma_state comp, enum cma_state exch)
+			 enum rdma_cm_state comp, enum rdma_cm_state exch)
 {
 	unsigned long flags;
 	int ret;
@@ -240,11 +229,11 @@ static int cma_comp_exch(struct rdma_id_private *id_priv,
 	return ret;
 }
 
-static enum cma_state cma_exch(struct rdma_id_private *id_priv,
-			       enum cma_state exch)
+static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv,
+				   enum rdma_cm_state exch)
 {
 	unsigned long flags;
-	enum cma_state old;
+	enum rdma_cm_state old;
 
 	spin_lock_irqsave(&id_priv->lock, flags);
 	old = id_priv->state;
@@ -278,11 +267,6 @@ static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver)
 	hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF);
 }
 
-static inline int cma_is_ud_ps(enum rdma_port_space ps)
-{
-	return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB);
-}
-
 static void cma_attach_to_dev(struct rdma_id_private *id_priv,
 			      struct cma_device *cma_dev)
 {
@@ -412,7 +396,7 @@ static void cma_deref_id(struct rdma_id_private *id_priv)
 }
 
 static int cma_disable_callback(struct rdma_id_private *id_priv,
-			      enum cma_state state)
+				enum rdma_cm_state state)
 {
 	mutex_lock(&id_priv->handler_mutex);
 	if (id_priv->state != state) {
@@ -428,7 +412,8 @@ static int cma_has_cm_dev(struct rdma_id_private *id_priv)
 }
 
 struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
-				  void *context, enum rdma_port_space ps)
+				  void *context, enum rdma_port_space ps,
+				  enum ib_qp_type qp_type)
 {
 	struct rdma_id_private *id_priv;
 
@@ -436,10 +421,12 @@ struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler,
 	if (!id_priv)
 		return ERR_PTR(-ENOMEM);
 
-	id_priv->state = CMA_IDLE;
+	id_priv->owner = task_pid_nr(current);
+	id_priv->state = RDMA_CM_IDLE;
 	id_priv->id.context = context;
 	id_priv->id.event_handler = event_handler;
 	id_priv->id.ps = ps;
+	id_priv->id.qp_type = qp_type;
 	spin_lock_init(&id_priv->lock);
 	mutex_init(&id_priv->qp_mutex);
 	init_completion(&id_priv->comp);
@@ -507,7 +494,7 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd,
 	if (IS_ERR(qp))
 		return PTR_ERR(qp);
 
-	if (cma_is_ud_ps(id_priv->id.ps))
+	if (id->qp_type == IB_QPT_UD)
 		ret = cma_init_ud_qp(id_priv, qp);
 	else
 		ret = cma_init_conn_qp(id_priv, qp);
@@ -635,7 +622,7 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv,
 	qp_attr->port_num = id_priv->id.port_num;
 	*qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT;
 
-	if (cma_is_ud_ps(id_priv->id.ps)) {
+	if (id_priv->id.qp_type == IB_QPT_UD) {
 		ret = cma_set_qkey(id_priv);
 		if (ret)
 			return ret;
@@ -658,7 +645,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr,
 	id_priv = container_of(id, struct rdma_id_private, id);
 	switch (rdma_node_get_transport(id_priv->id.device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps))
+		if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD))
 			ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask);
 		else
 			ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr,
@@ -712,6 +699,21 @@ static inline int cma_any_addr(struct sockaddr *addr)
 	return cma_zero_addr(addr) || cma_loopback_addr(addr);
 }
 
+static int cma_addr_cmp(struct sockaddr *src, struct sockaddr *dst)
+{
+	if (src->sa_family != dst->sa_family)
+		return -1;
+
+	switch (src->sa_family) {
+	case AF_INET:
+		return ((struct sockaddr_in *) src)->sin_addr.s_addr !=
+		       ((struct sockaddr_in *) dst)->sin_addr.s_addr;
+	default:
+		return ipv6_addr_cmp(&((struct sockaddr_in6 *) src)->sin6_addr,
+				     &((struct sockaddr_in6 *) dst)->sin6_addr);
+	}
+}
+
 static inline __be16 cma_port(struct sockaddr *addr)
 {
 	if (addr->sa_family == AF_INET)
@@ -842,16 +844,16 @@ static void cma_cancel_listens(struct rdma_id_private *id_priv)
 }
 
 static void cma_cancel_operation(struct rdma_id_private *id_priv,
-				 enum cma_state state)
+				 enum rdma_cm_state state)
 {
 	switch (state) {
-	case CMA_ADDR_QUERY:
+	case RDMA_CM_ADDR_QUERY:
 		rdma_addr_cancel(&id_priv->id.route.addr.dev_addr);
 		break;
-	case CMA_ROUTE_QUERY:
+	case RDMA_CM_ROUTE_QUERY:
 		cma_cancel_route(id_priv);
 		break;
-	case CMA_LISTEN:
+	case RDMA_CM_LISTEN:
 		if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr)
 				&& !id_priv->cma_dev)
 			cma_cancel_listens(id_priv);
@@ -902,10 +904,10 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv)
 void rdma_destroy_id(struct rdma_cm_id *id)
 {
 	struct rdma_id_private *id_priv;
-	enum cma_state state;
+	enum rdma_cm_state state;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	state = cma_exch(id_priv, CMA_DESTROYING);
+	state = cma_exch(id_priv, RDMA_CM_DESTROYING);
 	cma_cancel_operation(id_priv, state);
 
 	/*
@@ -999,9 +1001,9 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	int ret = 0;
 
 	if ((ib_event->event != IB_CM_TIMEWAIT_EXIT &&
-		cma_disable_callback(id_priv, CMA_CONNECT)) ||
+		cma_disable_callback(id_priv, RDMA_CM_CONNECT)) ||
 	    (ib_event->event == IB_CM_TIMEWAIT_EXIT &&
-		cma_disable_callback(id_priv, CMA_DISCONNECT)))
+		cma_disable_callback(id_priv, RDMA_CM_DISCONNECT)))
 		return 0;
 
 	memset(&event, 0, sizeof event);
@@ -1032,7 +1034,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 		event.status = -ETIMEDOUT; /* fall through */
 	case IB_CM_DREQ_RECEIVED:
 	case IB_CM_DREP_RECEIVED:
-		if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT))
+		if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT,
+				   RDMA_CM_DISCONNECT))
 			goto out;
 		event.event = RDMA_CM_EVENT_DISCONNECTED;
 		break;
@@ -1059,7 +1062,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	if (ret) {
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.ib = NULL;
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		mutex_unlock(&id_priv->handler_mutex);
 		rdma_destroy_id(&id_priv->id);
 		return ret;
@@ -1085,7 +1088,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
 		goto err;
 
 	id = rdma_create_id(listen_id->event_handler, listen_id->context,
-			    listen_id->ps);
+			    listen_id->ps, ib_event->param.req_rcvd.qp_type);
 	if (IS_ERR(id))
 		goto err;
 
@@ -1116,7 +1119,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id,
 	rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid);
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	id_priv->state = CMA_CONNECT;
+	id_priv->state = RDMA_CM_CONNECT;
 	return id_priv;
 
 destroy_id:
@@ -1136,7 +1139,7 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
 	int ret;
 
 	id = rdma_create_id(listen_id->event_handler, listen_id->context,
-			    listen_id->ps);
+			    listen_id->ps, IB_QPT_UD);
 	if (IS_ERR(id))
 		return NULL;
 
@@ -1156,7 +1159,7 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id,
 	}
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	id_priv->state = CMA_CONNECT;
+	id_priv->state = RDMA_CM_CONNECT;
 	return id_priv;
 err:
 	rdma_destroy_id(id);
@@ -1185,13 +1188,13 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	int offset, ret;
 
 	listen_id = cm_id->context;
-	if (cma_disable_callback(listen_id, CMA_LISTEN))
+	if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
 		return -ECONNABORTED;
 
 	memset(&event, 0, sizeof event);
 	offset = cma_user_data_offset(listen_id->id.ps);
 	event.event = RDMA_CM_EVENT_CONNECT_REQUEST;
-	if (cma_is_ud_ps(listen_id->id.ps)) {
+	if (listen_id->id.qp_type == IB_QPT_UD) {
 		conn_id = cma_new_udp_id(&listen_id->id, ib_event);
 		event.param.ud.private_data = ib_event->private_data + offset;
 		event.param.ud.private_data_len =
@@ -1227,8 +1230,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 		 * while we're accessing the cm_id.
 		 */
 		mutex_lock(&lock);
-		if (cma_comp(conn_id, CMA_CONNECT) &&
-		    !cma_is_ud_ps(conn_id->id.ps))
+		if (cma_comp(conn_id, RDMA_CM_CONNECT) && (conn_id->id.qp_type != IB_QPT_UD))
 			ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0);
 		mutex_unlock(&lock);
 		mutex_unlock(&conn_id->handler_mutex);
@@ -1241,7 +1243,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event)
 	conn_id->cm_id.ib = NULL;
 
 release_conn_id:
-	cma_exch(conn_id, CMA_DESTROYING);
+	cma_exch(conn_id, RDMA_CM_DESTROYING);
 	mutex_unlock(&conn_id->handler_mutex);
 	rdma_destroy_id(&conn_id->id);
 
@@ -1312,7 +1314,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 	struct sockaddr_in *sin;
 	int ret = 0;
 
-	if (cma_disable_callback(id_priv, CMA_CONNECT))
+	if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
 		return 0;
 
 	memset(&event, 0, sizeof event);
@@ -1355,7 +1357,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event)
 	if (ret) {
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.iw = NULL;
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		mutex_unlock(&id_priv->handler_mutex);
 		rdma_destroy_id(&id_priv->id);
 		return ret;
@@ -1377,20 +1379,20 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	struct ib_device_attr attr;
 
 	listen_id = cm_id->context;
-	if (cma_disable_callback(listen_id, CMA_LISTEN))
+	if (cma_disable_callback(listen_id, RDMA_CM_LISTEN))
 		return -ECONNABORTED;
 
 	/* Create a new RDMA id for the new IW CM ID */
 	new_cm_id = rdma_create_id(listen_id->id.event_handler,
 				   listen_id->id.context,
-				   RDMA_PS_TCP);
+				   RDMA_PS_TCP, IB_QPT_RC);
 	if (IS_ERR(new_cm_id)) {
 		ret = -ENOMEM;
 		goto out;
 	}
 	conn_id = container_of(new_cm_id, struct rdma_id_private, id);
 	mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING);
-	conn_id->state = CMA_CONNECT;
+	conn_id->state = RDMA_CM_CONNECT;
 
 	dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr);
 	if (!dev) {
@@ -1445,7 +1447,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id,
 	if (ret) {
 		/* User wants to destroy the CM ID */
 		conn_id->cm_id.iw = NULL;
-		cma_exch(conn_id, CMA_DESTROYING);
+		cma_exch(conn_id, RDMA_CM_DESTROYING);
 		mutex_unlock(&conn_id->handler_mutex);
 		cma_deref_id(conn_id);
 		rdma_destroy_id(&conn_id->id);
@@ -1532,13 +1534,14 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv,
 	struct rdma_cm_id *id;
 	int ret;
 
-	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps);
+	id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps,
+			    id_priv->id.qp_type);
 	if (IS_ERR(id))
 		return;
 
 	dev_id_priv = container_of(id, struct rdma_id_private, id);
 
-	dev_id_priv->state = CMA_ADDR_BOUND;
+	dev_id_priv->state = RDMA_CM_ADDR_BOUND;
 	memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr,
 	       ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr));
 
@@ -1564,50 +1567,6 @@ static void cma_listen_on_all(struct rdma_id_private *id_priv)
 	mutex_unlock(&lock);
 }
 
-int rdma_listen(struct rdma_cm_id *id, int backlog)
-{
-	struct rdma_id_private *id_priv;
-	int ret;
-
-	id_priv = container_of(id, struct rdma_id_private, id);
-	if (id_priv->state == CMA_IDLE) {
-		((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET;
-		ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr);
-		if (ret)
-			return ret;
-	}
-
-	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN))
-		return -EINVAL;
-
-	id_priv->backlog = backlog;
-	if (id->device) {
-		switch (rdma_node_get_transport(id->device->node_type)) {
-		case RDMA_TRANSPORT_IB:
-			ret = cma_ib_listen(id_priv);
-			if (ret)
-				goto err;
-			break;
-		case RDMA_TRANSPORT_IWARP:
-			ret = cma_iw_listen(id_priv, backlog);
-			if (ret)
-				goto err;
-			break;
-		default:
-			ret = -ENOSYS;
-			goto err;
-		}
-	} else
-		cma_listen_on_all(id_priv);
-
-	return 0;
-err:
-	id_priv->backlog = 0;
-	cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND);
-	return ret;
-}
-EXPORT_SYMBOL(rdma_listen);
-
 void rdma_set_service_type(struct rdma_cm_id *id, int tos)
 {
 	struct rdma_id_private *id_priv;
@@ -1629,8 +1588,8 @@ static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec,
 		route->num_paths = 1;
 		*route->path_rec = *path_rec;
 	} else {
-		work->old_state = CMA_ROUTE_QUERY;
-		work->new_state = CMA_ADDR_RESOLVED;
+		work->old_state = RDMA_CM_ROUTE_QUERY;
+		work->new_state = RDMA_CM_ADDR_RESOLVED;
 		work->event.event = RDMA_CM_EVENT_ROUTE_ERROR;
 		work->event.status = status;
 	}
@@ -1688,7 +1647,7 @@ static void cma_work_handler(struct work_struct *_work)
 		goto out;
 
 	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		destroy = 1;
 	}
 out:
@@ -1706,12 +1665,12 @@ static void cma_ndev_work_handler(struct work_struct *_work)
 	int destroy = 0;
 
 	mutex_lock(&id_priv->handler_mutex);
-	if (id_priv->state == CMA_DESTROYING ||
-	    id_priv->state == CMA_DEVICE_REMOVAL)
+	if (id_priv->state == RDMA_CM_DESTROYING ||
+	    id_priv->state == RDMA_CM_DEVICE_REMOVAL)
 		goto out;
 
 	if (id_priv->id.event_handler(&id_priv->id, &work->event)) {
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		destroy = 1;
 	}
 
@@ -1735,8 +1694,8 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms)
 
 	work->id = id_priv;
 	INIT_WORK(&work->work, cma_work_handler);
-	work->old_state = CMA_ROUTE_QUERY;
-	work->new_state = CMA_ROUTE_RESOLVED;
+	work->old_state = RDMA_CM_ROUTE_QUERY;
+	work->new_state = RDMA_CM_ROUTE_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
 
 	route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL);
@@ -1765,7 +1724,8 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
+			   RDMA_CM_ROUTE_RESOLVED))
 		return -EINVAL;
 
 	id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths,
@@ -1778,7 +1738,7 @@ int rdma_set_ib_paths(struct rdma_cm_id *id,
 	id->route.num_paths = num_paths;
 	return 0;
 err:
-	cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED);
+	cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED);
 	return ret;
 }
 EXPORT_SYMBOL(rdma_set_ib_paths);
@@ -1793,8 +1753,8 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms)
 
 	work->id = id_priv;
 	INIT_WORK(&work->work, cma_work_handler);
-	work->old_state = CMA_ROUTE_QUERY;
-	work->new_state = CMA_ROUTE_RESOLVED;
+	work->old_state = RDMA_CM_ROUTE_QUERY;
+	work->new_state = RDMA_CM_ROUTE_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
 	queue_work(cma_wq, &work->work);
 	return 0;
@@ -1858,8 +1818,8 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv)
 		goto err2;
 	}
 
-	work->old_state = CMA_ROUTE_QUERY;
-	work->new_state = CMA_ROUTE_RESOLVED;
+	work->old_state = RDMA_CM_ROUTE_QUERY;
+	work->new_state = RDMA_CM_ROUTE_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED;
 	work->event.status = 0;
 
@@ -1881,7 +1841,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY))
 		return -EINVAL;
 
 	atomic_inc(&id_priv->refcount);
@@ -1910,7 +1870,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms)
 
 	return 0;
 err:
-	cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED);
+	cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED);
 	cma_deref_id(id_priv);
 	return ret;
 }
@@ -1969,14 +1929,16 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 
 	memset(&event, 0, sizeof event);
 	mutex_lock(&id_priv->handler_mutex);
-	if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY,
+			   RDMA_CM_ADDR_RESOLVED))
 		goto out;
 
 	if (!status && !id_priv->cma_dev)
 		status = cma_acquire_dev(id_priv);
 
 	if (status) {
-		if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND))
+		if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED,
+				   RDMA_CM_ADDR_BOUND))
 			goto out;
 		event.event = RDMA_CM_EVENT_ADDR_ERROR;
 		event.status = status;
@@ -1987,7 +1949,7 @@ static void addr_handler(int status, struct sockaddr *src_addr,
 	}
 
 	if (id_priv->id.event_handler(&id_priv->id, &event)) {
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		mutex_unlock(&id_priv->handler_mutex);
 		cma_deref_id(id_priv);
 		rdma_destroy_id(&id_priv->id);
@@ -2032,8 +1994,8 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv)
 
 	work->id = id_priv;
 	INIT_WORK(&work->work, cma_work_handler);
-	work->old_state = CMA_ADDR_QUERY;
-	work->new_state = CMA_ADDR_RESOLVED;
+	work->old_state = RDMA_CM_ADDR_QUERY;
+	work->new_state = RDMA_CM_ADDR_RESOLVED;
 	work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED;
 	queue_work(cma_wq, &work->work);
 	return 0;
@@ -2062,13 +2024,13 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (id_priv->state == CMA_IDLE) {
+	if (id_priv->state == RDMA_CM_IDLE) {
 		ret = cma_bind_addr(id, src_addr, dst_addr);
 		if (ret)
 			return ret;
 	}
 
-	if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
 		return -EINVAL;
 
 	atomic_inc(&id_priv->refcount);
@@ -2084,12 +2046,31 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
 
 	return 0;
 err:
-	cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND);
+	cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND);
 	cma_deref_id(id_priv);
 	return ret;
 }
 EXPORT_SYMBOL(rdma_resolve_addr);
 
+int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse)
+{
+	struct rdma_id_private *id_priv;
+	unsigned long flags;
+	int ret;
+
+	id_priv = container_of(id, struct rdma_id_private, id);
+	spin_lock_irqsave(&id_priv->lock, flags);
+	if (id_priv->state == RDMA_CM_IDLE) {
+		id_priv->reuseaddr = reuse;
+		ret = 0;
+	} else {
+		ret = -EINVAL;
+	}
+	spin_unlock_irqrestore(&id_priv->lock, flags);
+	return ret;
+}
+EXPORT_SYMBOL(rdma_set_reuseaddr);
+
 static void cma_bind_port(struct rdma_bind_list *bind_list,
 			  struct rdma_id_private *id_priv)
 {
@@ -2165,41 +2146,71 @@ retry:
 	return -EADDRNOTAVAIL;
 }
 
-static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
+/*
+ * Check that the requested port is available.  This is called when trying to
+ * bind to a specific port, or when trying to listen on a bound port.  In
+ * the latter case, the provided id_priv may already be on the bind_list, but
+ * we still need to check that it's okay to start listening.
+ */
+static int cma_check_port(struct rdma_bind_list *bind_list,
+			  struct rdma_id_private *id_priv, uint8_t reuseaddr)
 {
 	struct rdma_id_private *cur_id;
-	struct sockaddr_in *sin, *cur_sin;
-	struct rdma_bind_list *bind_list;
+	struct sockaddr *addr, *cur_addr;
 	struct hlist_node *node;
+
+	addr = (struct sockaddr *) &id_priv->id.route.addr.src_addr;
+	if (cma_any_addr(addr) && !reuseaddr)
+		return -EADDRNOTAVAIL;
+
+	hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
+		if (id_priv == cur_id)
+			continue;
+
+		if ((cur_id->state == RDMA_CM_LISTEN) ||
+		    !reuseaddr || !cur_id->reuseaddr) {
+			cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr;
+			if (cma_any_addr(cur_addr))
+				return -EADDRNOTAVAIL;
+
+			if (!cma_addr_cmp(addr, cur_addr))
+				return -EADDRINUSE;
+		}
+	}
+	return 0;
+}
+
+static int cma_use_port(struct idr *ps, struct rdma_id_private *id_priv)
+{
+	struct rdma_bind_list *bind_list;
 	unsigned short snum;
+	int ret;
 
-	sin = (struct sockaddr_in *) &id_priv->id.route.addr.src_addr;
-	snum = ntohs(sin->sin_port);
+	snum = ntohs(cma_port((struct sockaddr *) &id_priv->id.route.addr.src_addr));
 	if (snum < PROT_SOCK && !capable(CAP_NET_BIND_SERVICE))
 		return -EACCES;
 
 	bind_list = idr_find(ps, snum);
-	if (!bind_list)
-		return cma_alloc_port(ps, id_priv, snum);
-
-	/*
-	 * We don't support binding to any address if anyone is bound to
-	 * a specific address on the same port.
-	 */
-	if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr))
-		return -EADDRNOTAVAIL;
-
-	hlist_for_each_entry(cur_id, node, &bind_list->owners, node) {
-		if (cma_any_addr((struct sockaddr *) &cur_id->id.route.addr.src_addr))
-			return -EADDRNOTAVAIL;
-
-		cur_sin = (struct sockaddr_in *) &cur_id->id.route.addr.src_addr;
-		if (sin->sin_addr.s_addr == cur_sin->sin_addr.s_addr)
-			return -EADDRINUSE;
+	if (!bind_list) {
+		ret = cma_alloc_port(ps, id_priv, snum);
+	} else {
+		ret = cma_check_port(bind_list, id_priv, id_priv->reuseaddr);
+		if (!ret)
+			cma_bind_port(bind_list, id_priv);
 	}
+	return ret;
+}
-
-	cma_bind_port(bind_list, id_priv);
-	return 0;
+static int cma_bind_listen(struct rdma_id_private *id_priv)
+{
+	struct rdma_bind_list *bind_list = id_priv->bind_list;
+	int ret = 0;
+
+	mutex_lock(&lock);
+	if (bind_list->owners.first->next)
+		ret = cma_check_port(bind_list, id_priv, 0);
+	mutex_unlock(&lock);
+	return ret;
 }
 
 static int cma_get_port(struct rdma_id_private *id_priv)
@@ -2253,6 +2264,56 @@ static int cma_check_linklocal(struct rdma_dev_addr *dev_addr,
 	return 0;
 }
 
+int rdma_listen(struct rdma_cm_id *id, int backlog)
+{
+	struct rdma_id_private *id_priv;
+	int ret;
+
+	id_priv = container_of(id, struct rdma_id_private, id);
+	if (id_priv->state == RDMA_CM_IDLE) {
+		((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET;
+		ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr);
+		if (ret)
+			return ret;
+	}
+
+	if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN))
+		return -EINVAL;
+
+	if (id_priv->reuseaddr) {
+		ret = cma_bind_listen(id_priv);
+		if (ret)
+			goto err;
+	}
+
+	id_priv->backlog = backlog;
+	if (id->device) {
+		switch (rdma_node_get_transport(id->device->node_type)) {
+		case RDMA_TRANSPORT_IB:
+			ret = cma_ib_listen(id_priv);
+			if (ret)
+				goto err;
+			break;
+		case RDMA_TRANSPORT_IWARP:
+			ret = cma_iw_listen(id_priv, backlog);
+			if (ret)
+				goto err;
+			break;
+		default:
+			ret = -ENOSYS;
+			goto err;
+		}
+	} else
+		cma_listen_on_all(id_priv);
+
+	return 0;
+err:
+	id_priv->backlog = 0;
+	cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND);
+	return ret;
+}
+EXPORT_SYMBOL(rdma_listen);
+
 int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 {
 	struct rdma_id_private *id_priv;
@@ -2262,7 +2323,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr)
 		return -EAFNOSUPPORT;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND))
+	if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND))
		return -EINVAL;
 
 	ret = cma_check_linklocal(&id->route.addr.dev_addr, addr);
@@ -2289,7 +2350,7 @@ err2:
 	if (id_priv->cma_dev)
 		cma_release_dev(id_priv);
 err1:
-	cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE);
+	cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE);
 	return ret;
 }
 EXPORT_SYMBOL(rdma_bind_addr);
@@ -2362,7 +2423,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
 	struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd;
 	int ret = 0;
 
-	if (cma_disable_callback(id_priv, CMA_CONNECT))
+	if (cma_disable_callback(id_priv, RDMA_CM_CONNECT))
 		return 0;
 
 	memset(&event, 0, sizeof event);
@@ -2408,7 +2469,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id,
 	if (ret) {
 		/* Destroy the CM ID by returning a non-zero value. */
 		id_priv->cm_id.ib = NULL;
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		mutex_unlock(&id_priv->handler_mutex);
 		rdma_destroy_id(&id_priv->id);
 		return ret;
@@ -2574,7 +2635,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT))
+	if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT))
 		return -EINVAL;
 
 	if (!id->qp) {
@@ -2584,7 +2645,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		if (cma_is_ud_ps(id->ps))
+		if (id->qp_type == IB_QPT_UD)
 			ret = cma_resolve_ib_udp(id_priv, conn_param);
 		else
 			ret = cma_connect_ib(id_priv, conn_param);
@@ -2601,7 +2662,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 
 	return 0;
 err:
-	cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED);
+	cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED);
 	return ret;
 }
 EXPORT_SYMBOL(rdma_connect);
@@ -2687,7 +2748,10 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp(id_priv, CMA_CONNECT))
+
+	id_priv->owner = task_pid_nr(current);
+
+	if (!cma_comp(id_priv, RDMA_CM_CONNECT))
 		return -EINVAL;
 
 	if (!id->qp && conn_param) {
@@ -2697,7 +2761,7 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param)
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		if (cma_is_ud_ps(id->ps))
+		if (id->qp_type == IB_QPT_UD)
 			ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS,
 						conn_param->private_data,
 						conn_param->private_data_len);
@@ -2758,7 +2822,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data,
 
 	switch (rdma_node_get_transport(id->device->node_type)) {
 	case RDMA_TRANSPORT_IB:
-		if (cma_is_ud_ps(id->ps))
+		if (id->qp_type == IB_QPT_UD)
 			ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT,
 						private_data, private_data_len);
 		else
@@ -2816,8 +2880,8 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
 	int ret;
 
 	id_priv = mc->id_priv;
-	if (cma_disable_callback(id_priv, CMA_ADDR_BOUND) &&
-	    cma_disable_callback(id_priv, CMA_ADDR_RESOLVED))
+	if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) &&
+	    cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED))
 		return 0;
 
 	mutex_lock(&id_priv->qp_mutex);
@@ -2841,7 +2905,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast)
 
 	ret = id_priv->id.event_handler(&id_priv->id, &event);
 	if (ret) {
-		cma_exch(id_priv, CMA_DESTROYING);
+		cma_exch(id_priv, RDMA_CM_DESTROYING);
 		mutex_unlock(&id_priv->handler_mutex);
 		rdma_destroy_id(&id_priv->id);
 		return 0;
@@ -3024,8 +3088,8 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr,
 	int ret;
 
 	id_priv = container_of(id, struct rdma_id_private, id);
-	if (!cma_comp(id_priv, CMA_ADDR_BOUND) &&
-	    !cma_comp(id_priv, CMA_ADDR_RESOLVED))
+	if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) &&
+	    !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED))
 		return -EINVAL;
 
 	mc = kmalloc(sizeof *mc, GFP_KERNEL);
@@ -3190,19 +3254,19 @@ static void cma_add_one(struct ib_device *device)
 static int cma_remove_id_dev(struct rdma_id_private *id_priv)
 {
 	struct rdma_cm_event event;
-	enum cma_state state;
+	enum rdma_cm_state state;
 	int ret = 0;
 
 	/* Record that we want to remove the device */
-	state = cma_exch(id_priv, CMA_DEVICE_REMOVAL);
-	if (state == CMA_DESTROYING)
+	state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL);
+	if (state == RDMA_CM_DESTROYING)
 		return 0;
 
 	cma_cancel_operation(id_priv, state);
 	mutex_lock(&id_priv->handler_mutex);
 
 	/* Check for destruction from another callback. */
-	if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL))
+	if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL))
 		goto out;
 
 	memset(&event, 0, sizeof event);
@@ -3257,6 +3321,100 @@ static void cma_remove_one(struct ib_device *device)
 	kfree(cma_dev);
 }
 
+static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb)
+{
+	struct nlmsghdr *nlh;
+	struct rdma_cm_id_stats *id_stats;
+	struct rdma_id_private *id_priv;
+	struct rdma_cm_id *id = NULL;
+	struct cma_device *cma_dev;
+	int i_dev = 0, i_id = 0;
+
+	/*
+	 * We export all of the IDs as a sequence of messages.  Each
+	 * ID gets its own netlink message.
+	 */
+	mutex_lock(&lock);
+
+	list_for_each_entry(cma_dev, &dev_list, list) {
+		if (i_dev < cb->args[0]) {
+			i_dev++;
+			continue;
+		}
+
+		i_id = 0;
+		list_for_each_entry(id_priv, &cma_dev->id_list, list) {
+			if (i_id < cb->args[1]) {
+				i_id++;
+				continue;
+			}
+
+			id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq,
+						sizeof *id_stats, RDMA_NL_RDMA_CM,
+						RDMA_NL_RDMA_CM_ID_STATS);
+			if (!id_stats)
+				goto out;
+
+			memset(id_stats, 0, sizeof *id_stats);
+			id = &id_priv->id;
+			id_stats->node_type = id->route.addr.dev_addr.dev_type;
+			id_stats->port_num = id->port_num;
+			id_stats->bound_dev_if =
+				id->route.addr.dev_addr.bound_dev_if;
+
+			if (id->route.addr.src_addr.ss_family == AF_INET) {
+				if (ibnl_put_attr(skb, nlh,
+						  sizeof(struct sockaddr_in),
+						  &id->route.addr.src_addr,
+						  RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) {
+					goto out;
+				}
+				if (ibnl_put_attr(skb, nlh,
+						  sizeof(struct sockaddr_in),
+						  &id->route.addr.dst_addr,
+						  RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) {
+					goto out;
+				}
+			} else if (id->route.addr.src_addr.ss_family == AF_INET6) {
+				if (ibnl_put_attr(skb, nlh,
+						  sizeof(struct sockaddr_in6),
+						  &id->route.addr.src_addr,
+						  RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) {
+					goto out;
+				}
+				if (ibnl_put_attr(skb, nlh,
+						  sizeof(struct sockaddr_in6),
+						  &id->route.addr.dst_addr,
+						  RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) {
+					goto out;
+				}
+			}
+
+			id_stats->pid		= id_priv->owner;
+			id_stats->port_space	= id->ps;
+			id_stats->cm_state	= id_priv->state;
+			id_stats->qp_num	= id_priv->qp_num;
+			id_stats->qp_type	= id->qp_type;
+
+			i_id++;
+		}
+
+		cb->args[1] = 0;
+		i_dev++;
+	}
+
+out:
+	mutex_unlock(&lock);
+	cb->args[0] = i_dev;
+	cb->args[1] = i_id;
+
+	return skb->len;
+}
+
+static const struct ibnl_client_cbs cma_cb_table[] = {
+	[RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats },
+};
+
 static int __init cma_init(void)
 {
 	int ret;
@@ -3272,6 +3430,10 @@ static int __init cma_init(void)
 	ret = ib_register_client(&cma_client);
 	if (ret)
 		goto err;
+
+	if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table))
+		printk(KERN_WARNING "RDMA CMA: failed to add netlink callback\n");
+
 	return 0;
 
 err:
@@ -3284,6 +3446,7 @@ err:
 
 static void __exit cma_cleanup(void)
 {
+	ibnl_remove_client(RDMA_NL_RDMA_CM);
 	ib_unregister_client(&cma_client);
 	unregister_netdevice_notifier(&cma_nb);
 	rdma_addr_unregister_client(&addr_client);
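The new rdma_set_reuseaddr() only succeeds while the id is still in RDMA_CM_IDLE, i.e. before rdma_bind_addr(), mirroring how SO_REUSEADDR must be set before bind(2). A minimal usage sketch, assuming the exported functions added above (my_bind_and_listen is a hypothetical caller, not part of this patch):

	static int my_bind_and_listen(struct rdma_cm_id *id, struct sockaddr *addr)
	{
		int ret;

		ret = rdma_set_reuseaddr(id, 1);	/* -EINVAL once bound */
		if (ret)
			return ret;

		ret = rdma_bind_addr(id, addr);
		if (ret)
			return ret;

		/* rdma_listen() re-checks the port via cma_bind_listen(), which
		 * calls cma_check_port() with reuseaddr forced to 0, so the
		 * listen fails with -EADDRINUSE while another id shares the
		 * same specific address and port. */
		return rdma_listen(id, 0);
	}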