diff options
Diffstat (limited to 'drivers/infiniband/core')
-rw-r--r--  drivers/infiniband/core/iwcm.c        |  1
-rw-r--r--  drivers/infiniband/core/mad.c         | 35
-rw-r--r--  drivers/infiniband/core/mad_priv.h    |  3
-rw-r--r--  drivers/infiniband/core/multicast.c   | 10
-rw-r--r--  drivers/infiniband/core/sa_query.c    |  7
-rw-r--r--  drivers/infiniband/core/smi.c         |  8
-rw-r--r--  drivers/infiniband/core/uverbs_main.c | 10
7 files changed, 51 insertions(+), 23 deletions(-)
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c index 8f9509e1ebf..55d093a36ae 100644 --- a/drivers/infiniband/core/iwcm.c +++ b/drivers/infiniband/core/iwcm.c @@ -362,6 +362,7 @@ static void destroy_cm_id(struct iw_cm_id *cm_id)  		 * In either case, must tell the provider to reject.  		 */  		cm_id_priv->state = IW_CM_STATE_DESTROYING; +		cm_id->device->iwcm->reject(cm_id, NULL, 0);  		break;  	case IW_CM_STATE_CONN_SENT:  	case IW_CM_STATE_DESTROYING: diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index de922a04ca2..7522008fda8 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -2,6 +2,7 @@   * Copyright (c) 2004-2007 Voltaire, Inc. All rights reserved.   * Copyright (c) 2005 Intel Corporation.  All rights reserved.   * Copyright (c) 2005 Mellanox Technologies Ltd.  All rights reserved. + * Copyright (c) 2009 HNR Consulting. All rights reserved.   *   * This software is available to you under a choice of one of two   * licenses.  
You may choose to be licensed under the terms of the GNU @@ -45,14 +46,21 @@ MODULE_DESCRIPTION("kernel IB MAD API");  MODULE_AUTHOR("Hal Rosenstock");  MODULE_AUTHOR("Sean Hefty"); +int mad_sendq_size = IB_MAD_QP_SEND_SIZE; +int mad_recvq_size = IB_MAD_QP_RECV_SIZE; + +module_param_named(send_queue_size, mad_sendq_size, int, 0444); +MODULE_PARM_DESC(send_queue_size, "Size of send queue in number of work requests"); +module_param_named(recv_queue_size, mad_recvq_size, int, 0444); +MODULE_PARM_DESC(recv_queue_size, "Size of receive queue in number of work requests"); +  static struct kmem_cache *ib_mad_cache;  static struct list_head ib_mad_port_list;  static u32 ib_mad_client_id = 0;  /* Port list lock */ -static spinlock_t ib_mad_port_list_lock; - +static DEFINE_SPINLOCK(ib_mad_port_list_lock);  /* Forward declarations */  static int method_in_use(struct ib_mad_mgmt_method_table **method, @@ -1974,7 +1982,7 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)  	unsigned long delay;  	if (list_empty(&mad_agent_priv->wait_list)) { -		cancel_delayed_work(&mad_agent_priv->timed_work); +		__cancel_delayed_work(&mad_agent_priv->timed_work);  	} else {  		mad_send_wr = list_entry(mad_agent_priv->wait_list.next,  					 struct ib_mad_send_wr_private, @@ -1983,7 +1991,7 @@ static void adjust_timeout(struct ib_mad_agent_private *mad_agent_priv)  		if (time_after(mad_agent_priv->timeout,  			       mad_send_wr->timeout)) {  			mad_agent_priv->timeout = mad_send_wr->timeout; -			cancel_delayed_work(&mad_agent_priv->timed_work); +			__cancel_delayed_work(&mad_agent_priv->timed_work);  			delay = mad_send_wr->timeout - jiffies;  			if ((long)delay <= 0)  				delay = 1; @@ -2023,7 +2031,7 @@ static void wait_for_response(struct ib_mad_send_wr_private *mad_send_wr)  	/* Reschedule a work item if we have a shorter timeout */  	if (mad_agent_priv->wait_list.next == &mad_send_wr->agent_list) { -		cancel_delayed_work(&mad_agent_priv->timed_work); +		
__cancel_delayed_work(&mad_agent_priv->timed_work);  		queue_delayed_work(mad_agent_priv->qp_info->port_priv->wq,  				   &mad_agent_priv->timed_work, delay);  	} @@ -2736,8 +2744,8 @@ static int create_mad_qp(struct ib_mad_qp_info *qp_info,  	qp_init_attr.send_cq = qp_info->port_priv->cq;  	qp_init_attr.recv_cq = qp_info->port_priv->cq;  	qp_init_attr.sq_sig_type = IB_SIGNAL_ALL_WR; -	qp_init_attr.cap.max_send_wr = IB_MAD_QP_SEND_SIZE; -	qp_init_attr.cap.max_recv_wr = IB_MAD_QP_RECV_SIZE; +	qp_init_attr.cap.max_send_wr = mad_sendq_size; +	qp_init_attr.cap.max_recv_wr = mad_recvq_size;  	qp_init_attr.cap.max_send_sge = IB_MAD_SEND_REQ_MAX_SG;  	qp_init_attr.cap.max_recv_sge = IB_MAD_RECV_REQ_MAX_SG;  	qp_init_attr.qp_type = qp_type; @@ -2752,8 +2760,8 @@ static int create_mad_qp(struct ib_mad_qp_info *qp_info,  		goto error;  	}  	/* Use minimum queue sizes unless the CQ is resized */ -	qp_info->send_queue.max_active = IB_MAD_QP_SEND_SIZE; -	qp_info->recv_queue.max_active = IB_MAD_QP_RECV_SIZE; +	qp_info->send_queue.max_active = mad_sendq_size; +	qp_info->recv_queue.max_active = mad_recvq_size;  	return 0;  error: @@ -2792,7 +2800,7 @@ static int ib_mad_port_open(struct ib_device *device,  	init_mad_qp(port_priv, &port_priv->qp_info[0]);  	init_mad_qp(port_priv, &port_priv->qp_info[1]); -	cq_size = (IB_MAD_QP_SEND_SIZE + IB_MAD_QP_RECV_SIZE) * 2; +	cq_size = (mad_sendq_size + mad_recvq_size) * 2;  	port_priv->cq = ib_create_cq(port_priv->device,  				     ib_mad_thread_completion_handler,  				     NULL, port_priv, cq_size, 0); @@ -2984,7 +2992,11 @@ static int __init ib_mad_init_module(void)  {  	int ret; -	spin_lock_init(&ib_mad_port_list_lock); +	mad_recvq_size = min(mad_recvq_size, IB_MAD_QP_MAX_SIZE); +	mad_recvq_size = max(mad_recvq_size, IB_MAD_QP_MIN_SIZE); + +	mad_sendq_size = min(mad_sendq_size, IB_MAD_QP_MAX_SIZE); +	mad_sendq_size = max(mad_sendq_size, IB_MAD_QP_MIN_SIZE);  	ib_mad_cache = kmem_cache_create("ib_mad",  					 sizeof(struct 
ib_mad_private), @@ -3021,4 +3033,3 @@ static void __exit ib_mad_cleanup_module(void)  module_init(ib_mad_init_module);  module_exit(ib_mad_cleanup_module); - diff --git a/drivers/infiniband/core/mad_priv.h b/drivers/infiniband/core/mad_priv.h index 05ce331733b..9430ab4969c 100644 --- a/drivers/infiniband/core/mad_priv.h +++ b/drivers/infiniband/core/mad_priv.h @@ -2,6 +2,7 @@   * Copyright (c) 2004, 2005, Voltaire, Inc. All rights reserved.   * Copyright (c) 2005 Intel Corporation. All rights reserved.   * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. + * Copyright (c) 2009 HNR Consulting. All rights reserved.   *   * This software is available to you under a choice of one of two   * licenses.  You may choose to be licensed under the terms of the GNU @@ -49,6 +50,8 @@  /* QP and CQ parameters */  #define IB_MAD_QP_SEND_SIZE	128  #define IB_MAD_QP_RECV_SIZE	512 +#define IB_MAD_QP_MIN_SIZE	64 +#define IB_MAD_QP_MAX_SIZE	8192  #define IB_MAD_SEND_REQ_MAX_SG	2  #define IB_MAD_RECV_REQ_MAX_SG	1 diff --git a/drivers/infiniband/core/multicast.c b/drivers/infiniband/core/multicast.c index 107f170c57c..8d82ba17135 100644 --- a/drivers/infiniband/core/multicast.c +++ b/drivers/infiniband/core/multicast.c @@ -106,6 +106,8 @@ struct mcast_group {  	struct ib_sa_query	*query;  	int			query_id;  	u16			pkey_index; +	u8			leave_state; +	int			retries;  };  struct mcast_member { @@ -350,6 +352,7 @@ static int send_leave(struct mcast_group *group, u8 leave_state)  	rec = group->rec;  	rec.join_state = leave_state; +	group->leave_state = leave_state;  	ret = ib_sa_mcmember_rec_query(&sa_client, port->dev->device,  				       port->port_num, IB_SA_METHOD_DELETE, &rec, @@ -542,7 +545,11 @@ static void leave_handler(int status, struct ib_sa_mcmember_rec *rec,  {  	struct mcast_group *group = context; -	mcast_work_handler(&group->work); +	if (status && group->retries > 0 && +	    !send_leave(group, group->leave_state)) +		group->retries--; +	else +		
mcast_work_handler(&group->work);  }  static struct mcast_group *acquire_group(struct mcast_port *port, @@ -565,6 +572,7 @@ static struct mcast_group *acquire_group(struct mcast_port *port,  	if (!group)  		return NULL; +	group->retries = 3;  	group->port = port;  	group->rec.mgid = *mgid;  	group->pkey_index = MCAST_INVALID_PKEY_INDEX; diff --git a/drivers/infiniband/core/sa_query.c b/drivers/infiniband/core/sa_query.c index 1865049e80f..82543716d59 100644 --- a/drivers/infiniband/core/sa_query.c +++ b/drivers/infiniband/core/sa_query.c @@ -109,10 +109,10 @@ static struct ib_client sa_client = {  	.remove = ib_sa_remove_one  }; -static spinlock_t idr_lock; +static DEFINE_SPINLOCK(idr_lock);  static DEFINE_IDR(query_idr); -static spinlock_t tid_lock; +static DEFINE_SPINLOCK(tid_lock);  static u32 tid;  #define PATH_REC_FIELD(field) \ @@ -1077,9 +1077,6 @@ static int __init ib_sa_init(void)  {  	int ret; -	spin_lock_init(&idr_lock); -	spin_lock_init(&tid_lock); -  	get_random_bytes(&tid, sizeof tid);  	ret = ib_register_client(&sa_client); diff --git a/drivers/infiniband/core/smi.c b/drivers/infiniband/core/smi.c index 87236753bce..5855e4405d9 100644 --- a/drivers/infiniband/core/smi.c +++ b/drivers/infiniband/core/smi.c @@ -52,6 +52,10 @@ enum smi_action smi_handle_dr_smp_send(struct ib_smp *smp,  	hop_cnt = smp->hop_cnt;  	/* See section 14.2.2.2, Vol 1 IB spec */ +	/* C14-6 -- valid hop_cnt values are from 0 to 63 */ +	if (hop_cnt >= IB_SMP_MAX_PATH_HOPS) +		return IB_SMI_DISCARD; +  	if (!ib_get_smp_direction(smp)) {  		/* C14-9:1 */  		if (hop_cnt && hop_ptr == 0) { @@ -133,6 +137,10 @@ enum smi_action smi_handle_dr_smp_recv(struct ib_smp *smp, u8 node_type,  	hop_cnt = smp->hop_cnt;  	/* See section 14.2.2.2, Vol 1 IB spec */ +	/* C14-6 -- valid hop_cnt values are from 0 to 63 */ +	if (hop_cnt >= IB_SMP_MAX_PATH_HOPS) +		return IB_SMI_DISCARD; +  	if (!ib_get_smp_direction(smp)) {  		/* C14-9:1 -- sender should have incremented hop_ptr */  		if (hop_cnt && 
hop_ptr == 0) diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index eb36a81dd09..d3fff9e008a 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c @@ -73,7 +73,7 @@ DEFINE_IDR(ib_uverbs_cq_idr);  DEFINE_IDR(ib_uverbs_qp_idr);  DEFINE_IDR(ib_uverbs_srq_idr); -static spinlock_t map_lock; +static DEFINE_SPINLOCK(map_lock);  static struct ib_uverbs_device *dev_table[IB_UVERBS_MAX_DEVICES];  static DECLARE_BITMAP(dev_map, IB_UVERBS_MAX_DEVICES); @@ -584,14 +584,16 @@ static ssize_t ib_uverbs_write(struct file *filp, const char __user *buf,  	if (hdr.command < 0				||  	    hdr.command >= ARRAY_SIZE(uverbs_cmd_table) || -	    !uverbs_cmd_table[hdr.command]		|| -	    !(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command))) +	    !uverbs_cmd_table[hdr.command])  		return -EINVAL;  	if (!file->ucontext &&  	    hdr.command != IB_USER_VERBS_CMD_GET_CONTEXT)  		return -EINVAL; +	if (!(file->device->ib_dev->uverbs_cmd_mask & (1ull << hdr.command))) +		return -ENOSYS; +  	return uverbs_cmd_table[hdr.command](file, buf + sizeof hdr,  					     hdr.in_words * 4, hdr.out_words * 4);  } @@ -836,8 +838,6 @@ static int __init ib_uverbs_init(void)  {  	int ret; -	spin_lock_init(&map_lock); -  	ret = register_chrdev_region(IB_UVERBS_BASE_DEV, IB_UVERBS_MAX_DEVICES,  				     "infiniband_verbs");  	if (ret) {