Diffstat (limited to 'drivers/infiniband/hw')
39 files changed, 1336 insertions, 400 deletions
diff --git a/drivers/infiniband/hw/amso1100/c2.c b/drivers/infiniband/hw/amso1100/c2.c
index 5ce7b9e8bff..d53cf519f42 100644
--- a/drivers/infiniband/hw/amso1100/c2.c
+++ b/drivers/infiniband/hw/amso1100/c2.c
@@ -920,8 +920,7 @@ static struct net_device *c2_devinit(struct c2_dev *c2dev,
 	return netdev;
 }
 
-static int __devinit c2_probe(struct pci_dev *pcidev,
-			      const struct pci_device_id *ent)
+static int c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
 {
 	int ret = 0, i;
 	unsigned long reg0_start, reg0_flags, reg0_len;
@@ -1191,7 +1190,7 @@ static int __devinit c2_probe(struct pci_dev *pcidev,
 	return ret;
 }
 
-static void __devexit c2_remove(struct pci_dev *pcidev)
+static void c2_remove(struct pci_dev *pcidev)
 {
 	struct c2_dev *c2dev = pci_get_drvdata(pcidev);
 	struct net_device *netdev = c2dev->netdev;
@@ -1236,18 +1235,7 @@ static struct pci_driver c2_pci_driver = {
 	.name = DRV_NAME,
 	.id_table = c2_pci_table,
 	.probe = c2_probe,
-	.remove = __devexit_p(c2_remove),
+	.remove = c2_remove,
 };
 
-static int __init c2_init_module(void)
-{
-	return pci_register_driver(&c2_pci_driver);
-}
-
-static void __exit c2_exit_module(void)
-{
-	pci_unregister_driver(&c2_pci_driver);
-}
-
-module_init(c2_init_module);
-module_exit(c2_exit_module);
+module_pci_driver(c2_pci_driver);
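The removed init/exit pair above is exactly the boilerplate that module_pci_driver() generates. For reference only (this sketch is not part of the patch), the macro from include/linux/pci.h expands to roughly:

static int __init c2_pci_driver_init(void)
{
	return pci_register_driver(&c2_pci_driver);
}
module_init(c2_pci_driver_init);

static void __exit c2_pci_driver_exit(void)
{
	pci_unregister_driver(&c2_pci_driver);
}
module_exit(c2_pci_driver_exit);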
diff --git a/drivers/infiniband/hw/amso1100/c2.h b/drivers/infiniband/hw/amso1100/c2.h
index 6ae698e6877..ba7a1208ff9 100644
--- a/drivers/infiniband/hw/amso1100/c2.h
+++ b/drivers/infiniband/hw/amso1100/c2.h
@@ -498,16 +498,16 @@ extern int c2_post_send(struct ib_qp *ibqp, struct ib_send_wr *ib_wr,
 			struct ib_send_wr **bad_wr);
 extern int c2_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *ib_wr,
 			   struct ib_recv_wr **bad_wr);
-extern void __devinit c2_init_qp_table(struct c2_dev *c2dev);
-extern void __devexit c2_cleanup_qp_table(struct c2_dev *c2dev);
+extern void c2_init_qp_table(struct c2_dev *c2dev);
+extern void c2_cleanup_qp_table(struct c2_dev *c2dev);
 extern void c2_set_qp_state(struct c2_qp *, int);
 extern struct c2_qp *c2_find_qpn(struct c2_dev *c2dev, int qpn);
 
 /* PDs */
 extern int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd);
 extern void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd);
-extern int __devinit c2_init_pd_table(struct c2_dev *c2dev);
-extern void __devexit c2_cleanup_pd_table(struct c2_dev *c2dev);
+extern int c2_init_pd_table(struct c2_dev *c2dev);
+extern void c2_cleanup_pd_table(struct c2_dev *c2dev);
 
 /* CQs */
 extern int c2_init_cq(struct c2_dev *c2dev, int entries,
diff --git a/drivers/infiniband/hw/amso1100/c2_pd.c b/drivers/infiniband/hw/amso1100/c2_pd.c
index 161f2a28535..f3e81dc357b 100644
--- a/drivers/infiniband/hw/amso1100/c2_pd.c
+++ b/drivers/infiniband/hw/amso1100/c2_pd.c
@@ -70,7 +70,7 @@ void c2_pd_free(struct c2_dev *c2dev, struct c2_pd *pd)
 	spin_unlock(&c2dev->pd_table.lock);
 }
 
-int __devinit c2_init_pd_table(struct c2_dev *c2dev)
+int c2_init_pd_table(struct c2_dev *c2dev)
 {
 
 	c2dev->pd_table.last = 0;
@@ -84,7 +84,7 @@ int __devinit c2_init_pd_table(struct c2_dev *c2dev)
 	return 0;
 }
 
-void __devexit c2_cleanup_pd_table(struct c2_dev *c2dev)
+void c2_cleanup_pd_table(struct c2_dev *c2dev)
 {
 	kfree(c2dev->pd_table.table);
 }
diff --git a/drivers/infiniband/hw/amso1100/c2_qp.c b/drivers/infiniband/hw/amso1100/c2_qp.c
index 0d7b6f23caf..0ab826b280b 100644
--- a/drivers/infiniband/hw/amso1100/c2_qp.c
+++ b/drivers/infiniband/hw/amso1100/c2_qp.c
@@ -382,14 +382,17 @@ static int
 c2_alloc_qpn(struct c2_dev *c2dev, struct c2_qp *qp)
 {
 	int ret;
 
-        do {
-		spin_lock_irq(&c2dev->qp_table.lock);
-		ret = idr_get_new_above(&c2dev->qp_table.idr, qp,
-					c2dev->qp_table.last++, &qp->qpn);
-		spin_unlock_irq(&c2dev->qp_table.lock);
-        } while ((ret == -EAGAIN) &&
-	 	 idr_pre_get(&c2dev->qp_table.idr, GFP_KERNEL));
-	return ret;
+	idr_preload(GFP_KERNEL);
+	spin_lock_irq(&c2dev->qp_table.lock);
+
+	ret = idr_alloc(&c2dev->qp_table.idr, qp, c2dev->qp_table.last++, 0,
+			GFP_NOWAIT);
+	if (ret >= 0)
+		qp->qpn = ret;
+
+	spin_unlock_irq(&c2dev->qp_table.lock);
+	idr_preload_end();
+	return ret < 0 ? ret : 0;
 }
 
 static void c2_free_qpn(struct c2_dev *c2dev, int qpn)
@@ -1010,13 +1013,13 @@ out:
 	return err;
 }
 
-void __devinit c2_init_qp_table(struct c2_dev *c2dev)
+void c2_init_qp_table(struct c2_dev *c2dev)
 {
 	spin_lock_init(&c2dev->qp_table.lock);
 	idr_init(&c2dev->qp_table.idr);
 }
 
-void __devexit c2_cleanup_qp_table(struct c2_dev *c2dev)
+void c2_cleanup_qp_table(struct c2_dev *c2dev)
 {
 	idr_destroy(&c2dev->qp_table.idr);
 }
diff --git a/drivers/infiniband/hw/amso1100/c2_rnic.c b/drivers/infiniband/hw/amso1100/c2_rnic.c
index e4a73158fc7..b7c98699005 100644
--- a/drivers/infiniband/hw/amso1100/c2_rnic.c
+++ b/drivers/infiniband/hw/amso1100/c2_rnic.c
@@ -442,7 +442,7 @@ static int c2_rnic_close(struct c2_dev *c2dev)
  * involves initializing the various limits and resource pools that
  * comprise the RNIC instance.
  */
-int __devinit c2_rnic_init(struct c2_dev *c2dev)
+int c2_rnic_init(struct c2_dev *c2dev)
 {
 	int err;
 	u32 qsize, msgsize;
@@ -611,7 +611,7 @@ int __devinit c2_rnic_init(struct c2_dev *c2dev)
 /*
  * Called by c2_remove to cleanup the RNIC resources.
  */
-void __devexit c2_rnic_term(struct c2_dev *c2dev)
+void c2_rnic_term(struct c2_dev *c2dev)
 {
 
 	/* Close the open adapter instance */
diff --git a/drivers/infiniband/hw/cxgb3/iwch.h b/drivers/infiniband/hw/cxgb3/iwch.h
index a1c44578e03..837862287a2 100644
--- a/drivers/infiniband/hw/cxgb3/iwch.h
+++ b/drivers/infiniband/hw/cxgb3/iwch.h
@@ -153,19 +153,17 @@ static inline int insert_handle(struct iwch_dev *rhp, struct idr *idr,
 				void *handle, u32 id)
 {
 	int ret;
-	int newid;
-
-	do {
-		if (!idr_pre_get(idr, GFP_KERNEL)) {
-			return -ENOMEM;
-		}
-		spin_lock_irq(&rhp->lock);
-		ret = idr_get_new_above(idr, handle, id, &newid);
-		BUG_ON(newid != id);
-		spin_unlock_irq(&rhp->lock);
-	} while (ret == -EAGAIN);
+	idr_preload(GFP_KERNEL);
+	spin_lock_irq(&rhp->lock);
+
+	ret = idr_alloc(idr, handle, id, id + 1, GFP_NOWAIT);
+
+	spin_unlock_irq(&rhp->lock);
+	idr_preload_end();
 
-	return ret;
+	BUG_ON(ret == -ENOSPC);
+	return ret < 0 ? ret : 0;
 }
 
 static inline void remove_handle(struct iwch_dev *rhp, struct idr *idr, u32 id)
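The conversions above replace the old idr_pre_get()/idr_get_new_above() retry loop with the idr_preload()/idr_alloc() API: preloading may sleep and is done outside the spinlock, while the allocation itself runs locked with GFP_NOWAIT. A minimal sketch of the new pattern (driver names here are hypothetical):

#include <linux/idr.h>
#include <linux/spinlock.h>

static DEFINE_IDR(obj_idr);
static DEFINE_SPINLOCK(obj_lock);

static int obj_alloc_id(void *obj, int start)
{
	int id;

	idr_preload(GFP_KERNEL);	/* may sleep; preloads per-cpu nodes */
	spin_lock_irq(&obj_lock);

	/* end == 0 means "no upper limit"; returns the new id or -errno */
	id = idr_alloc(&obj_idr, obj, start, 0, GFP_NOWAIT);

	spin_unlock_irq(&obj_lock);
	idr_preload_end();

	return id;	/* callers above map id >= 0 to success (return 0) */
}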
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index 0bdf09aa6f4..9c12da0cbd3 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -738,7 +738,7 @@ static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
 	return ibmr;
 }
 
-static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
+static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)
 {
 	struct iwch_dev *rhp;
 	struct iwch_pd *php;
@@ -747,6 +747,9 @@ static struct ib_mw *iwch_alloc_mw(struct ib_pd *pd)
 	u32 stag = 0;
 	int ret;
 
+	if (type != IB_MW_TYPE_1)
+		return ERR_PTR(-EINVAL);
+
 	php = to_iwch_pd(pd);
 	rhp = php->rhp;
 	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL);
@@ -783,8 +786,8 @@ static int iwch_dealloc_mw(struct ib_mw *mw)
 	mmid = (mw->rkey) >> 8;
 	cxio_deallocate_window(&rhp->rdev, mhp->attr.stag);
 	remove_handle(rhp, &rhp->mmidr, mmid);
-	kfree(mhp);
 	PDBG("%s ib_mw %p mmid 0x%x ptr %p\n", __func__, mw, mmid, mhp);
+	kfree(mhp);
 	return 0;
 }
 
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c
index 6de8463f453..e5649e8b215 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_qp.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c
@@ -567,18 +567,19 @@ int iwch_bind_mw(struct ib_qp *qp,
 	if (mw_bind->send_flags & IB_SEND_SIGNALED)
 		t3_wr_flags = T3_COMPLETION_FLAG;
 
-	sgl.addr = mw_bind->addr;
-	sgl.lkey = mw_bind->mr->lkey;
-	sgl.length = mw_bind->length;
+	sgl.addr = mw_bind->bind_info.addr;
+	sgl.lkey = mw_bind->bind_info.mr->lkey;
+	sgl.length = mw_bind->bind_info.length;
 	wqe->bind.reserved = 0;
 	wqe->bind.type = TPT_VATO;
 
 	/* TBD: check perms */
-	wqe->bind.perms = iwch_ib_to_tpt_bind_access(mw_bind->mw_access_flags);
-	wqe->bind.mr_stag = cpu_to_be32(mw_bind->mr->lkey);
+	wqe->bind.perms = iwch_ib_to_tpt_bind_access(
+		mw_bind->bind_info.mw_access_flags);
+	wqe->bind.mr_stag = cpu_to_be32(mw_bind->bind_info.mr->lkey);
 	wqe->bind.mw_stag = cpu_to_be32(mw->rkey);
-	wqe->bind.mw_len = cpu_to_be32(mw_bind->length);
-	wqe->bind.mw_va = cpu_to_be64(mw_bind->addr);
+	wqe->bind.mw_len = cpu_to_be32(mw_bind->bind_info.length);
+	wqe->bind.mw_va = cpu_to_be64(mw_bind->bind_info.addr);
 	err = iwch_sgl2pbl_map(rhp, &sgl, 1, &pbl_addr, &page_size);
 	if (err) {
 		spin_unlock_irqrestore(&qhp->lock, flag);
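The alloc_mw verb now carries a memory-window type, and cxgb3 only implements type 1 windows; the bind parameters likewise moved into the ib_mw_bind_info sub-struct used above. A hedged sketch of the consumer side of the new signature:

struct ib_mw *mw;

mw = ib_alloc_mw(pd, IB_MW_TYPE_1);	/* cxgb3 returns -EINVAL for IB_MW_TYPE_2 */
if (IS_ERR(mw))
	return PTR_ERR(mw);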
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 5de86968379..565bfb161c1 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -38,10 +38,12 @@
 #include <linux/inetdevice.h>
 #include <linux/ip.h>
 #include <linux/tcp.h>
+#include <linux/if_vlan.h>
 
 #include <net/neighbour.h>
 #include <net/netevent.h>
 #include <net/route.h>
+#include <net/tcp.h>
 
 #include "iw_cxgb4.h"
 
@@ -61,6 +63,14 @@ static char *states[] = {
 	NULL,
 };
 
+static int nocong;
+module_param(nocong, int, 0644);
+MODULE_PARM_DESC(nocong, "Turn off congestion control (default=0)");
+
+static int enable_ecn;
+module_param(enable_ecn, int, 0644);
+MODULE_PARM_DESC(enable_ecn, "Enable ECN (default=0/disabled)");
+
 static int dack_mode = 1;
 module_param(dack_mode, int, 0644);
 MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");
@@ -133,14 +143,28 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status);
 static LIST_HEAD(timeout_list);
 static spinlock_t timeout_lock;
 
+static void deref_qp(struct c4iw_ep *ep)
+{
+	c4iw_qp_rem_ref(&ep->com.qp->ibqp);
+	clear_bit(QP_REFERENCED, &ep->com.flags);
+}
+
+static void ref_qp(struct c4iw_ep *ep)
+{
+	set_bit(QP_REFERENCED, &ep->com.flags);
+	c4iw_qp_add_ref(&ep->com.qp->ibqp);
+}
+
 static void start_ep_timer(struct c4iw_ep *ep)
 {
 	PDBG("%s ep %p\n", __func__, ep);
 	if (timer_pending(&ep->timer)) {
-		PDBG("%s stopped / restarted timer ep %p\n", __func__, ep);
-		del_timer_sync(&ep->timer);
-	} else
-		c4iw_get_ep(&ep->com);
+		pr_err("%s timer already started! ep %p\n",
+		       __func__, ep);
+		return;
+	}
+	clear_bit(TIMEOUT, &ep->com.flags);
+	c4iw_get_ep(&ep->com);
 	ep->timer.expires = jiffies + ep_timeout_secs * HZ;
 	ep->timer.data = (unsigned long)ep;
 	ep->timer.function = ep_timeout;
@@ -149,14 +173,10 @@ static void start_ep_timer(struct c4iw_ep *ep)
 
 static void stop_ep_timer(struct c4iw_ep *ep)
 {
-	PDBG("%s ep %p\n", __func__, ep);
-	if (!timer_pending(&ep->timer)) {
-		WARN(1, "%s timer stopped when its not running! "
-		       "ep %p state %u\n", __func__, ep, ep->com.state);
-		return;
-	}
+	PDBG("%s ep %p stopping\n", __func__, ep);
 	del_timer_sync(&ep->timer);
-	c4iw_put_ep(&ep->com);
+	if (!test_and_set_bit(TIMEOUT, &ep->com.flags))
+		c4iw_put_ep(&ep->com);
 }
 
 static int c4iw_l2t_send(struct c4iw_rdev *rdev, struct sk_buff *skb,
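The new TIMEOUT flag closes a race between stop_ep_timer() and a concurrently firing ep_timeout(): both paths funnel through an atomic test_and_set_bit(), so whichever runs first drops the timer's endpoint reference and the loser does nothing. The idiom in isolation (sketch, not part of the patch):

/* Called from both the cancel path and the timer callback;
 * the timer's reference is put exactly once. */
if (!test_and_set_bit(TIMEOUT, &ep->com.flags))
	c4iw_put_ep(&ep->com);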
@@ -261,7 +281,10 @@ void _c4iw_free_ep(struct kref *kref)
 
 	ep = container_of(kref, struct c4iw_ep, com.kref);
 	PDBG("%s ep %p state %s\n", __func__, ep, states[state_read(&ep->com)]);
+	if (test_bit(QP_REFERENCED, &ep->com.flags))
+		deref_qp(ep);
 	if (test_bit(RELEASE_RESOURCES, &ep->com.flags)) {
+		remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
 		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
 		dst_release(ep->dst);
 		cxgb4_l2t_release(ep->l2t);
@@ -441,6 +464,50 @@ static int send_abort(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 }
 
+#define VLAN_NONE 0xfff
+#define FILTER_SEL_VLAN_NONE 0xffff
+#define FILTER_SEL_WIDTH_P_FC (3+1) /* port uses 3 bits, FCoE one bit */
+#define FILTER_SEL_WIDTH_VIN_P_FC \
+	(6 + 7 + FILTER_SEL_WIDTH_P_FC) /* 6 bits are unused, VF uses 7 bits*/
+#define FILTER_SEL_WIDTH_TAG_P_FC \
+	(3 + FILTER_SEL_WIDTH_VIN_P_FC) /* PF uses 3 bits */
+#define FILTER_SEL_WIDTH_VLD_TAG_P_FC (1 + FILTER_SEL_WIDTH_TAG_P_FC)
+
+static unsigned int select_ntuple(struct c4iw_dev *dev, struct dst_entry *dst,
+				  struct l2t_entry *l2t)
+{
+	unsigned int ntuple = 0;
+	u32 viid;
+
+	switch (dev->rdev.lldi.filt_mode) {
+
+	/* default filter mode */
+	case HW_TPL_FR_MT_PR_IV_P_FC:
+		if (l2t->vlan == VLAN_NONE)
+			ntuple |= FILTER_SEL_VLAN_NONE << FILTER_SEL_WIDTH_P_FC;
+		else {
+			ntuple |= l2t->vlan << FILTER_SEL_WIDTH_P_FC;
+			ntuple |= 1 << FILTER_SEL_WIDTH_VLD_TAG_P_FC;
+		}
+		ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
+			  FILTER_SEL_WIDTH_VLD_TAG_P_FC;
+		break;
+	case HW_TPL_FR_MT_PR_OV_P_FC: {
+		viid = cxgb4_port_viid(l2t->neigh->dev);
+
+		ntuple |= FW_VIID_VIN_GET(viid) << FILTER_SEL_WIDTH_P_FC;
+		ntuple |= FW_VIID_PFN_GET(viid) << FILTER_SEL_WIDTH_VIN_P_FC;
+		ntuple |= FW_VIID_VIVLD_GET(viid) << FILTER_SEL_WIDTH_TAG_P_FC;
+		ntuple |= l2t->lport << S_PORT | IPPROTO_TCP <<
+			  FILTER_SEL_WIDTH_VLD_TAG_P_FC;
+		break;
+	}
+	default:
+		break;
+	}
+	return ntuple;
+}
+
 static int send_connect(struct c4iw_ep *ep)
 {
 	struct cpl_act_open_req *req;
@@ -463,7 +530,8 @@ static int send_connect(struct c4iw_ep *ep)
 	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
 	wscale = compute_wscale(rcv_win);
-	opt0 = KEEP_ALIVE(1) |
+	opt0 = (nocong ? NO_CONG(1) : 0) |
+	       KEEP_ALIVE(1) |
 	       DELACK(1) |
 	       WND_SCALE(wscale) |
 	       MSS_IDX(mtu_idx) |
@@ -474,6 +542,7 @@ static int send_connect(struct c4iw_ep *ep)
 	       ULP_MODE(ULP_MODE_TCPDDP) |
 	       RCV_BUFSIZ(rcv_win>>10);
 	opt2 = RX_CHANNEL(0) |
+	       CCTRL_ECN(enable_ecn) |
 	       RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid);
 	if (enable_tcp_timestamps)
 		opt2 |= TSTAMPS_EN(1);
@@ -492,8 +561,9 @@ static int send_connect(struct c4iw_ep *ep)
 	req->local_ip = ep->com.local_addr.sin_addr.s_addr;
 	req->peer_ip = ep->com.remote_addr.sin_addr.s_addr;
 	req->opt0 = cpu_to_be64(opt0);
-	req->params = 0;
+	req->params = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst, ep->l2t));
 	req->opt2 = cpu_to_be32(opt2);
+	set_bit(ACT_OPEN_REQ, &ep->com.history);
 	return c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
 }
 
@@ -629,7 +699,7 @@ static int send_mpa_reject(struct c4iw_ep *ep, const void *pdata, u8 plen)
 	memset(mpa, 0, sizeof(*mpa));
 	memcpy(mpa->key, MPA_KEY_REP, sizeof(mpa->key));
 	mpa->flags = MPA_REJECT;
-	mpa->revision = mpa_rev;
+	mpa->revision = ep->mpa_attr.version;
 	mpa->private_data_size = htons(plen);
 
 	if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
@@ -770,6 +840,7 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
 	/* setup the hwtid for this connection */
 	ep->hwtid = tid;
 	cxgb4_insert_tid(t, ep, tid);
+	insert_handle(dev, &dev->hwtid_idr, ep, ep->hwtid);
 
 	ep->snd_seq = be32_to_cpu(req->snd_isn);
 	ep->rcv_seq = be32_to_cpu(req->rcv_isn);
@@ -777,7 +848,9 @@ static int act_establish(struct c4iw_dev *dev, struct sk_buff *skb)
 	set_emss(ep, ntohs(req->tcp_opt));
 
 	/* dealloc the atid */
+	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
 	cxgb4_free_atid(t, atid);
+	set_bit(ACT_ESTAB, &ep->com.history);
 
 	/* start MPA negotiation */
 	send_flowc(ep, NULL);
@@ -802,7 +875,7 @@ static void close_complete_upcall(struct c4iw_ep *ep)
 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
 		ep->com.cm_id->rem_ref(ep->com.cm_id);
 		ep->com.cm_id = NULL;
-		ep->com.qp = NULL;
+		set_bit(CLOSE_UPCALL, &ep->com.history);
 	}
 }
 
@@ -811,6 +884,7 @@ static int abort_connection(struct c4iw_ep *ep, struct sk_buff *skb, gfp_t gfp)
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 	close_complete_upcall(ep);
 	state_set(&ep->com, ABORTING);
+	set_bit(ABORT_CONN, &ep->com.history);
 	return send_abort(ep, skb, gfp);
 }
 
@@ -825,6 +899,7 @@ static void peer_close_upcall(struct c4iw_ep *ep)
 		PDBG("peer close delivered ep %p cm_id %p tid %u\n",
 		     ep, ep->com.cm_id, ep->hwtid);
 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+		set_bit(DISCONN_UPCALL, &ep->com.history);
 	}
 }
 
@@ -842,7 +917,7 @@ static void peer_abort_upcall(struct c4iw_ep *ep)
 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
 		ep->com.cm_id->rem_ref(ep->com.cm_id);
 		ep->com.cm_id = NULL;
-		ep->com.qp = NULL;
+		set_bit(ABORT_UPCALL, &ep->com.history);
 	}
 }
 
@@ -875,12 +950,12 @@ static void connect_reply_upcall(struct c4iw_ep *ep, int status)
 
 	PDBG("%s ep %p tid %u status %d\n", __func__, ep,
 	     ep->hwtid, status);
+	set_bit(CONN_RPL_UPCALL, &ep->com.history);
 	ep->com.cm_id->event_handler(ep->com.cm_id, &event);
 
 	if (status < 0) {
 		ep->com.cm_id->rem_ref(ep->com.cm_id);
 		ep->com.cm_id = NULL;
-		ep->com.qp = NULL;
 	}
 }
 
@@ -915,6 +990,7 @@ static void connect_request_upcall(struct c4iw_ep *ep)
 						ep->parent_ep->com.cm_id,
 						&event);
 	}
+	set_bit(CONNREQ_UPCALL, &ep->com.history);
 	c4iw_put_ep(&ep->parent_ep->com);
 	ep->parent_ep = NULL;
 }
 
@@ -931,6 +1007,7 @@ static void established_upcall(struct c4iw_ep *ep)
 	if (ep->com.cm_id) {
 		PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 		ep->com.cm_id->event_handler(ep->com.cm_id, &event);
+		set_bit(ESTAB_UPCALL, &ep->com.history);
 	}
 }
 
@@ -1223,11 +1300,13 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 	if (mpa->revision > mpa_rev) {
 		printk(KERN_ERR MOD "%s MPA version mismatch. Local = %d,"
 		       " Received = %d\n", __func__, mpa_rev, mpa->revision);
+		stop_ep_timer(ep);
 		abort_connection(ep, skb, GFP_KERNEL);
 		return;
 	}
 
 	if (memcmp(mpa->key, MPA_KEY_REQ, sizeof(mpa->key))) {
+		stop_ep_timer(ep);
 		abort_connection(ep, skb, GFP_KERNEL);
 		return;
 	}
@@ -1238,6 +1317,7 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 	 * Fail if there's too much private data.
 	 */
 	if (plen > MPA_MAX_PRIVATE_DATA) {
+		stop_ep_timer(ep);
 		abort_connection(ep, skb, GFP_KERNEL);
 		return;
 	}
@@ -1246,6 +1326,7 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
 	 * If plen does not account for pkt size
 	 */
 	if (ep->mpa_pkt_len > (sizeof(*mpa) + plen)) {
+		stop_ep_timer(ep);
 		abort_connection(ep, skb, GFP_KERNEL);
 		return;
 	}
@@ -1316,36 +1397,40 @@ static int rx_data(struct c4iw_dev *dev, struct sk_buff *skb)
 	unsigned int dlen = ntohs(hdr->len);
 	unsigned int tid = GET_TID(hdr);
 	struct tid_info *t = dev->rdev.lldi.tids;
+	__u8 status = hdr->status;
 
 	ep = lookup_tid(t, tid);
 	PDBG("%s ep %p tid %u dlen %u\n", __func__, ep, ep->hwtid, dlen);
 	skb_pull(skb, sizeof(*hdr));
 	skb_trim(skb, dlen);
 
-	ep->rcv_seq += dlen;
-	BUG_ON(ep->rcv_seq != (ntohl(hdr->seq) + dlen));
-
 	/* update RX credits */
 	update_rx_credits(ep, dlen);
 
 	switch (state_read(&ep->com)) {
 	case MPA_REQ_SENT:
+		ep->rcv_seq += dlen;
 		process_mpa_reply(ep, skb);
 		break;
 	case MPA_REQ_WAIT:
+		ep->rcv_seq += dlen;
 		process_mpa_request(ep, skb);
 		break;
-	case MPA_REP_SENT:
+	case FPDU_MODE: {
+		struct c4iw_qp_attributes attrs;
+		BUG_ON(!ep->com.qp);
+		if (status)
+			pr_err("%s Unexpected streaming data." \
+			       " qpid %u ep %p state %d tid %u status %d\n",
+			       __func__, ep->com.qp->wq.sq.qid, ep,
+			       state_read(&ep->com), ep->hwtid, status);
+		attrs.next_state = C4IW_QP_STATE_ERROR;
+		c4iw_modify_qp(ep->com.qp->rhp, ep->com.qp,
+			       C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
+		c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
 		break;
+	}
 	default:
-		printk(KERN_ERR MOD "%s Unexpected streaming data."
-		       " ep %p state %d tid %u\n",
-		       __func__, ep, state_read(&ep->com), ep->hwtid);
-
-		/*
-		 * The ep will timeout and inform the ULP of the failure.
-		 * See ep_timeout().
-		 */
 		break;
 	}
 	return 0;
@@ -1368,6 +1453,7 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	mutex_lock(&ep->com.mutex);
 	switch (ep->com.state) {
 	case ABORTING:
+		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
 		__state_set(&ep->com, DEAD);
 		release = 1;
 		break;
@@ -1383,6 +1469,63 @@ static int abort_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	return 0;
 }
 
+static void send_fw_act_open_req(struct c4iw_ep *ep, unsigned int atid)
+{
+	struct sk_buff *skb;
+	struct fw_ofld_connection_wr *req;
+	unsigned int mtu_idx;
+	int wscale;
+
+	skb = get_skb(NULL, sizeof(*req), GFP_KERNEL);
+	req = (struct fw_ofld_connection_wr *)__skb_put(skb, sizeof(*req));
+	memset(req, 0, sizeof(*req));
+	req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR));
+	req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
+	req->le.filter = cpu_to_be32(select_ntuple(ep->com.dev, ep->dst,
+				     ep->l2t));
+	req->le.lport = ep->com.local_addr.sin_port;
+	req->le.pport = ep->com.remote_addr.sin_port;
+	req->le.u.ipv4.lip = ep->com.local_addr.sin_addr.s_addr;
+	req->le.u.ipv4.pip = ep->com.remote_addr.sin_addr.s_addr;
+	req->tcb.t_state_to_astid =
+			htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_SENT) |
+			V_FW_OFLD_CONNECTION_WR_ASTID(atid));
+	req->tcb.cplrxdataack_cplpassacceptrpl =
+			htons(F_FW_OFLD_CONNECTION_WR_CPLRXDATAACK);
+	req->tcb.tx_max = (__force __be32) jiffies;
+	req->tcb.rcv_adv = htons(1);
+	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
+	wscale = compute_wscale(rcv_win);
+	req->tcb.opt0 = (__force __be64) (TCAM_BYPASS(1) |
+		(nocong ? NO_CONG(1) : 0) |
+		KEEP_ALIVE(1) |
+		DELACK(1) |
+		WND_SCALE(wscale) |
+		MSS_IDX(mtu_idx) |
+		L2T_IDX(ep->l2t->idx) |
+		TX_CHAN(ep->tx_chan) |
+		SMAC_SEL(ep->smac_idx) |
+		DSCP(ep->tos) |
+		ULP_MODE(ULP_MODE_TCPDDP) |
+		RCV_BUFSIZ(rcv_win >> 10));
+	req->tcb.opt2 = (__force __be32) (PACE(1) |
+		TX_QUEUE(ep->com.dev->rdev.lldi.tx_modq[ep->tx_chan]) |
+		RX_CHANNEL(0) |
+		CCTRL_ECN(enable_ecn) |
+		RSS_QUEUE_VALID | RSS_QUEUE(ep->rss_qid));
+	if (enable_tcp_timestamps)
+		req->tcb.opt2 |= (__force __be32) TSTAMPS_EN(1);
+	if (enable_tcp_sack)
+		req->tcb.opt2 |= (__force __be32) SACK_EN(1);
+	if (wscale && enable_tcp_window_scaling)
+		req->tcb.opt2 |= (__force __be32) WND_SCALE_EN(1);
+	req->tcb.opt0 = cpu_to_be64((__force u64) req->tcb.opt0);
+	req->tcb.opt2 = cpu_to_be32((__force u32) req->tcb.opt2);
+	set_wr_txq(skb, CPL_PRIORITY_CONTROL, ep->ctrlq_idx);
+	set_bit(ACT_OFLD_CONN, &ep->com.history);
+	c4iw_l2t_send(&ep->com.dev->rdev, skb, ep->l2t);
+}
+
 /*
  * Return whether a failed active open has allocated a TID
  */
@@ -1392,6 +1535,111 @@ static inline int act_open_has_tid(int status)
 	       status != CPL_ERR_ARP_MISS;
 }
 
+#define ACT_OPEN_RETRY_COUNT 2
+
+static int c4iw_reconnect(struct c4iw_ep *ep)
+{
+	int err = 0;
+	struct rtable *rt;
+	struct port_info *pi;
+	struct net_device *pdev;
+	int step;
+	struct neighbour *neigh;
+
+	PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
+	init_timer(&ep->timer);
+
+	/*
+	 * Allocate an active TID to initiate a TCP connection.
+	 */
+	ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
+	if (ep->atid == -1) {
+		pr_err("%s - cannot alloc atid.\n", __func__);
+		err = -ENOMEM;
+		goto fail2;
+	}
+	insert_handle(ep->com.dev, &ep->com.dev->atid_idr, ep, ep->atid);
+
+	/* find a route */
+	rt = find_route(ep->com.dev,
+			ep->com.cm_id->local_addr.sin_addr.s_addr,
+			ep->com.cm_id->remote_addr.sin_addr.s_addr,
+			ep->com.cm_id->local_addr.sin_port,
+			ep->com.cm_id->remote_addr.sin_port, 0);
+	if (!rt) {
+		pr_err("%s - cannot find route.\n", __func__);
+		err = -EHOSTUNREACH;
+		goto fail3;
+	}
+	ep->dst = &rt->dst;
+
+	neigh = dst_neigh_lookup(ep->dst,
+			&ep->com.cm_id->remote_addr.sin_addr.s_addr);
+	/* get a l2t entry */
+	if (neigh->dev->flags & IFF_LOOPBACK) {
+		PDBG("%s LOOPBACK\n", __func__);
+		pdev = ip_dev_find(&init_net,
+				ep->com.cm_id->remote_addr.sin_addr.s_addr);
+		ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
+				neigh, pdev, 0);
+		pi = (struct port_info *)netdev_priv(pdev);
+		ep->mtu = pdev->mtu;
+		ep->tx_chan = cxgb4_port_chan(pdev);
+		ep->smac_idx = (cxgb4_port_viid(pdev) & 0x7F) << 1;
+		dev_put(pdev);
+	} else {
+		ep->l2t = cxgb4_l2t_get(ep->com.dev->rdev.lldi.l2t,
+				neigh, neigh->dev, 0);
+		pi = (struct port_info *)netdev_priv(neigh->dev);
+		ep->mtu = dst_mtu(ep->dst);
+		ep->tx_chan = cxgb4_port_chan(neigh->dev);
+		ep->smac_idx = (cxgb4_port_viid(neigh->dev) &
+				0x7F) << 1;
+	}
+
+	step = ep->com.dev->rdev.lldi.ntxq / ep->com.dev->rdev.lldi.nchan;
+	ep->txq_idx = pi->port_id * step;
+	ep->ctrlq_idx = pi->port_id;
+	step = ep->com.dev->rdev.lldi.nrxq / ep->com.dev->rdev.lldi.nchan;
+	ep->rss_qid = ep->com.dev->rdev.lldi.rxq_ids[pi->port_id * step];
+
+	if (!ep->l2t) {
+		pr_err("%s - cannot alloc l2e.\n", __func__);
+		err = -ENOMEM;
+		goto fail4;
+	}
+
+	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
+	     __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
+	     ep->l2t->idx);
+
+	state_set(&ep->com, CONNECTING);
+	ep->tos = 0;
+
+	/* send connect request to rnic */
+	err = send_connect(ep);
+	if (!err)
+		goto out;
+
+	cxgb4_l2t_release(ep->l2t);
+fail4:
+	dst_release(ep->dst);
+fail3:
+	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
+	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
+fail2:
+	/*
+	 * remember to send notification to upper layer.
+	 * We are in here so the upper layer is not aware that this is
+	 * re-connect attempt and so, upper layer is still waiting for
+	 * response of 1st connect request.
+	 */
+	connect_reply_upcall(ep, -ECONNRESET);
+	c4iw_put_ep(&ep->com);
+out:
+	return err;
+}
+
 static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 {
 	struct c4iw_ep *ep;
@@ -1412,6 +1660,8 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 		return 0;
 	}
 
+	set_bit(ACT_OPEN_RPL, &ep->com.history);
+
 	/*
 	 * Log interesting failures.
 	 */
@@ -1419,6 +1669,29 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	case CPL_ERR_CONN_RESET:
 	case CPL_ERR_CONN_TIMEDOUT:
 		break;
+	case CPL_ERR_TCAM_FULL:
+		if (dev->rdev.lldi.enable_fw_ofld_conn) {
+			mutex_lock(&dev->rdev.stats.lock);
+			dev->rdev.stats.tcam_full++;
+			mutex_unlock(&dev->rdev.stats.lock);
+			send_fw_act_open_req(ep,
+					     GET_TID_TID(GET_AOPEN_ATID(
+					     ntohl(rpl->atid_status))));
+			return 0;
+		}
+		break;
+	case CPL_ERR_CONN_EXIST:
+		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
+			set_bit(ACT_RETRY_INUSE, &ep->com.history);
+			remove_handle(ep->com.dev, &ep->com.dev->atid_idr,
+					atid);
+			cxgb4_free_atid(t, atid);
+			dst_release(ep->dst);
+			cxgb4_l2t_release(ep->l2t);
+			c4iw_reconnect(ep);
+			return 0;
+		}
+		break;
 	default:
 		printk(KERN_INFO MOD "Active open failure - "
 		       "atid %u status %u errno %d %pI4:%u->%pI4:%u\n",
@@ -1436,6 +1709,7 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	if (status && act_open_has_tid(status))
 		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, GET_TID(rpl));
 
+	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, atid);
 	cxgb4_free_atid(t, atid);
 	dst_release(ep->dst);
 	cxgb4_l2t_release(ep->l2t);
@@ -1452,13 +1726,14 @@ static int pass_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
 	struct c4iw_listen_ep *ep = lookup_stid(t, stid);
 
 	if (!ep) {
-		printk(KERN_ERR MOD "stid %d lookup failure!\n", stid);
-		return 0;
+		PDBG("%s stid %d lookup failure!\n", __func__, stid);
+		goto out;
 	}
 	PDBG("%s ep %p status %d error %d\n", __func__, ep,
 	     rpl->status, status2errno(rpl->status));
 	c4iw_wake_up(&ep->com.wr_wait, status2errno(rpl->status));
 
+out:
 	return 0;
 }
 
@@ -1510,14 +1785,15 @@ static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
 	skb_get(skb);
 	cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);
 	wscale = compute_wscale(rcv_win);
-	opt0 = KEEP_ALIVE(1) |
+	opt0 = (nocong ? NO_CONG(1) : 0) |
+	       KEEP_ALIVE(1) |
 	       DELACK(1) |
 	       WND_SCALE(wscale) |
 	       MSS_IDX(mtu_idx) |
 	       L2T_IDX(ep->l2t->idx) |
 	       TX_CHAN(ep->tx_chan) |
 	       SMAC_SEL(ep->smac_idx) |
-	       DSCP(ep->tos) |
+	       DSCP(ep->tos >> 2) |
 	       ULP_MODE(ULP_MODE_TCPDDP) |
 	       RCV_BUFSIZ(rcv_win>>10);
 	opt2 = RX_CHANNEL(0) |
@@ -1529,6 +1805,15 @@ static void accept_cr(struct c4iw_ep *ep, __be32 peer_ip, struct sk_buff *skb,
 		opt2 |= SACK_EN(1);
 	if (wscale && enable_tcp_window_scaling)
 		opt2 |= WND_SCALE_EN(1);
+	if (enable_ecn) {
+		const struct tcphdr *tcph;
+		u32 hlen = ntohl(req->hdr_len);
+
+		tcph = (const void *)(req + 1) + G_ETH_HDR_LEN(hlen) +
+			G_IP_HDR_LEN(hlen);
+		if (tcph->ece && tcph->cwr)
+			opt2 |= CCTRL_ECN(1);
+	}
 
 	rpl = cplhdr(skb);
 	INIT_TP_WR(rpl, ep->hwtid);
@@ -1645,22 +1930,30 @@ out:
 
 static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 {
-	struct c4iw_ep *child_ep, *parent_ep;
+	struct c4iw_ep *child_ep = NULL, *parent_ep;
 	struct cpl_pass_accept_req *req = cplhdr(skb);
 	unsigned int stid = GET_POPEN_TID(ntohl(req->tos_stid));
 	struct tid_info *t = dev->rdev.lldi.tids;
 	unsigned int hwtid = GET_TID(req);
 	struct dst_entry *dst;
 	struct rtable *rt;
-	__be32 local_ip, peer_ip;
+	__be32 local_ip, peer_ip = 0;
 	__be16 local_port, peer_port;
 	int err;
+	u16 peer_mss = ntohs(req->tcpopt.mss);
 
 	parent_ep = lookup_stid(t, stid);
-	PDBG("%s parent ep %p tid %u\n", __func__, parent_ep, hwtid);
-
+	if (!parent_ep) {
+		PDBG("%s connect request on invalid stid %d\n", __func__, stid);
+		goto reject;
+	}
 	get_4tuple(req, &local_ip, &peer_ip, &local_port, &peer_port);
+	PDBG("%s parent ep %p hwtid %u laddr 0x%x raddr 0x%x lport %d " \
+	     "rport %d peer_mss %d\n", __func__, parent_ep, hwtid,
+	     ntohl(local_ip), ntohl(peer_ip), ntohs(local_port),
+	     ntohs(peer_port), peer_mss);
+
 	if (state_read(&parent_ep->com) != LISTEN) {
 		printk(KERN_ERR "%s - listening ep not in LISTEN\n",
 		       __func__);
@@ -1694,6 +1987,9 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 		goto reject;
 	}
 
+	if (peer_mss && child_ep->mtu > (peer_mss + 40))
+		child_ep->mtu = peer_mss + 40;
+
 	state_set(&child_ep->com, CONNECTING);
 	child_ep->com.dev = dev;
 	child_ep->com.cm_id = NULL;
@@ -1714,7 +2010,9 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
 	init_timer(&child_ep->timer);
 	cxgb4_insert_tid(t, child_ep, hwtid);
+	insert_handle(dev, &dev->hwtid_idr, child_ep, child_ep->hwtid);
 	accept_cr(child_ep, peer_ip, skb, req);
+	set_bit(PASS_ACCEPT_REQ, &child_ep->com.history);
 	goto out;
 reject:
 	reject_cr(dev, hwtid, peer_ip, skb);
@@ -1734,12 +2032,16 @@ static int pass_establish(struct c4iw_dev *dev, struct sk_buff *skb)
 	ep->snd_seq = be32_to_cpu(req->snd_isn);
 	ep->rcv_seq = be32_to_cpu(req->rcv_isn);
 
+	PDBG("%s ep %p hwtid %u tcp_opt 0x%02x\n", __func__, ep, tid,
+	     ntohs(req->tcp_opt));
+
 	set_emss(ep, ntohs(req->tcp_opt));
 
 	dst_confirm(ep->dst);
 	state_set(&ep->com, MPA_REQ_WAIT);
 	start_ep_timer(ep);
 	send_flowc(ep, skb);
+	set_bit(PASS_ESTAB, &ep->com.history);
 
 	return 0;
 }
@@ -1759,6 +2061,7 @@ static int peer_close(struct c4iw_dev *dev, struct sk_buff *skb)
 	PDBG("%s ep %p tid %u\n", __func__, ep, ep->hwtid);
 	dst_confirm(ep->dst);
 
+	set_bit(PEER_CLOSE, &ep->com.history);
 	mutex_lock(&ep->com.mutex);
 	switch (ep->com.state) {
 	case MPA_REQ_WAIT:
@@ -1838,74 +2141,6 @@ static int is_neg_adv_abort(unsigned int status)
 	       status == CPL_ERR_PERSIST_NEG_ADVICE;
 }
 
-static int c4iw_reconnect(struct c4iw_ep *ep)
-{
-	struct rtable *rt;
-	int err = 0;
-
-	PDBG("%s qp %p cm_id %p\n", __func__, ep->com.qp, ep->com.cm_id);
-	init_timer(&ep->timer);
-
-	/*
-	 * Allocate an active TID to initiate a TCP connection.
-	 */
-	ep->atid = cxgb4_alloc_atid(ep->com.dev->rdev.lldi.tids, ep);
-	if (ep->atid == -1) {
-		printk(KERN_ERR MOD "%s - cannot alloc atid.\n", __func__);
-		err = -ENOMEM;
-		goto fail2;
-	}
-
-	/* find a route */
-	rt = find_route(ep->com.dev,
-			ep->com.cm_id->local_addr.sin_addr.s_addr,
-			ep->com.cm_id->remote_addr.sin_addr.s_addr,
-			ep->com.cm_id->local_addr.sin_port,
-			ep->com.cm_id->remote_addr.sin_port, 0);
-	if (!rt) {
-		printk(KERN_ERR MOD "%s - cannot find route.\n", __func__);
-		err = -EHOSTUNREACH;
-		goto fail3;
-	}
-	ep->dst = &rt->dst;
-
-	err = import_ep(ep, ep->com.cm_id->remote_addr.sin_addr.s_addr,
-			ep->dst, ep->com.dev, false);
-	if (err) {
-		printk(KERN_ERR MOD "%s - cannot alloc l2e.\n", __func__);
-		goto fail4;
-	}
-
-	PDBG("%s txq_idx %u tx_chan %u smac_idx %u rss_qid %u l2t_idx %u\n",
-	     __func__, ep->txq_idx, ep->tx_chan, ep->smac_idx, ep->rss_qid,
-	     ep->l2t->idx);
-
-	state_set(&ep->com, CONNECTING);
-	ep->tos = 0;
-
-	/* send connect request to rnic */
-	err = send_connect(ep);
-	if (!err)
-		goto out;
-
-	cxgb4_l2t_release(ep->l2t);
-fail4:
-	dst_release(ep->dst);
-fail3:
-	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
-fail2:
-	/*
-	 * remember to send notification to upper layer.
-	 * We are in here so the upper layer is not aware that this is
-	 * re-connect attempt and so, upper layer is still waiting for
-	 * response of 1st connect request.
-	 */
-	connect_reply_upcall(ep, -ECONNRESET);
-	c4iw_put_ep(&ep->com);
-out:
-	return err;
-}
-
 static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 {
 	struct cpl_abort_req_rss *req = cplhdr(skb);
@@ -1926,6 +2161,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 	}
 	PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
 	     ep->com.state);
+	set_bit(PEER_ABORT, &ep->com.history);
 
 	/*
 	 * Wake up any threads in rdma_init() or rdma_fini().
@@ -1944,7 +2180,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 		break;
 	case MPA_REQ_SENT:
 		stop_ep_timer(ep);
-		if (mpa_rev == 2 && ep->tried_with_mpa_v1)
+		if (mpa_rev == 1 || (mpa_rev == 2 && ep->tried_with_mpa_v1))
 			connect_reply_upcall(ep, -ECONNRESET);
 		else {
 			/*
@@ -2016,9 +2252,8 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 out:
 	if (release)
 		release_ep_resources(ep);
-
-	/* retry with mpa-v1 */
-	if (ep && ep->retry_with_mpa_v1) {
+	else if (ep->retry_with_mpa_v1) {
+		remove_handle(ep->com.dev, &ep->com.dev->hwtid_idr, ep->hwtid);
 		cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);
 		dst_release(ep->dst);
 		cxgb4_l2t_release(ep->l2t);
@@ -2140,6 +2375,7 @@ int c4iw_reject_cr(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
 		c4iw_put_ep(&ep->com);
 		return -ECONNRESET;
 	}
+	set_bit(ULP_REJECT, &ep->com.history);
 	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
 	if (mpa_rev == 0)
 		abort_connection(ep, NULL, GFP_KERNEL);
@@ -2169,6 +2405,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 
 	BUG_ON(state_read(&ep->com) != MPA_REQ_RCVD);
 	BUG_ON(!qp);
+	set_bit(ULP_ACCEPT, &ep->com.history);
 
 	if ((conn_param->ord > c4iw_max_read_depth) ||
 	    (conn_param->ird > c4iw_max_read_depth)) {
 		abort_connection(ep, NULL, GFP_KERNEL);
@@ -2209,6 +2446,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	cm_id->add_ref(cm_id);
 	ep->com.cm_id = cm_id;
 	ep->com.qp = qp;
+	ref_qp(ep);
 
 	/* bind QP to EP and move to RTS */
 	attrs.mpa_attr = ep->mpa_attr;
@@ -2239,7 +2477,6 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	return 0;
 err1:
 	ep->com.cm_id = NULL;
-	ep->com.qp = NULL;
 	cm_id->rem_ref(cm_id);
 err:
 	c4iw_put_ep(&ep->com);
@@ -2280,6 +2517,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 	ep->com.cm_id = cm_id;
 	ep->com.qp = get_qhp(dev, conn_param->qpn);
 	BUG_ON(!ep->com.qp);
+	ref_qp(ep);
 	PDBG("%s qpn 0x%x qp %p cm_id %p\n", __func__, conn_param->qpn,
 	     ep->com.qp, cm_id);
 
@@ -2292,6 +2530,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 		err = -ENOMEM;
 		goto fail2;
 	}
+	insert_handle(dev, &dev->atid_idr, ep, ep->atid);
 
 	PDBG("%s saddr 0x%x sport 0x%x raddr 0x%x rport 0x%x\n", __func__,
 	     ntohl(cm_id->local_addr.sin_addr.s_addr),
@@ -2337,6 +2576,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 fail4:
 	dst_release(ep->dst);
 fail3:
+	remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
 	cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
 fail2:
 	cm_id->rem_ref(cm_id);
@@ -2351,7 +2591,6 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
 	struct c4iw_dev *dev = to_c4iw_dev(cm_id->device);
 	struct c4iw_listen_ep *ep;
-
 	might_sleep();
 
 	ep = alloc_ep(sizeof(*ep), GFP_KERNEL);
@@ -2370,30 +2609,54 @@ int c4iw_create_listen(struct iw_cm_id *cm_id, int backlog)
 	/*
 	 * Allocate a server TID.
 	 */
-	ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
+	if (dev->rdev.lldi.enable_fw_ofld_conn)
+		ep->stid = cxgb4_alloc_sftid(dev->rdev.lldi.tids, PF_INET, ep);
+	else
+		ep->stid = cxgb4_alloc_stid(dev->rdev.lldi.tids, PF_INET, ep);
+
 	if (ep->stid == -1) {
 		printk(KERN_ERR MOD "%s - cannot alloc stid.\n", __func__);
 		err = -ENOMEM;
 		goto fail2;
 	}
-
+	insert_handle(dev, &dev->stid_idr, ep, ep->stid);
 	state_set(&ep->com, LISTEN);
-	c4iw_init_wr_wait(&ep->com.wr_wait);
-	err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0], ep->stid,
-				  ep->com.local_addr.sin_addr.s_addr,
-				  ep->com.local_addr.sin_port,
-				  ep->com.dev->rdev.lldi.rxq_ids[0]);
-	if (err)
-		goto fail3;
-
-	/* wait for pass_open_rpl */
-	err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
-				  __func__);
+	if (dev->rdev.lldi.enable_fw_ofld_conn) {
+		do {
+			err = cxgb4_create_server_filter(
+				ep->com.dev->rdev.lldi.ports[0], ep->stid,
+				ep->com.local_addr.sin_addr.s_addr,
+				ep->com.local_addr.sin_port,
+				0,
+				ep->com.dev->rdev.lldi.rxq_ids[0],
+				0,
+				0);
+			if (err == -EBUSY) {
+				set_current_state(TASK_UNINTERRUPTIBLE);
+				schedule_timeout(usecs_to_jiffies(100));
+			}
+		} while (err == -EBUSY);
+	} else {
+		c4iw_init_wr_wait(&ep->com.wr_wait);
+		err = cxgb4_create_server(ep->com.dev->rdev.lldi.ports[0],
+				ep->stid, ep->com.local_addr.sin_addr.s_addr,
+				ep->com.local_addr.sin_port,
+				0,
+				ep->com.dev->rdev.lldi.rxq_ids[0]);
+		if (!err)
+			err = c4iw_wait_for_reply(&ep->com.dev->rdev,
+						  &ep->com.wr_wait,
+						  0, 0, __func__);
+	}
 	if (!err) {
 		cm_id->provider_data = ep;
 		goto out;
 	}
-fail3:
+	pr_err("%s cxgb4_create_server/filter failed err %d " \
+	       "stid %d laddr %08x lport %d\n", \
+	       __func__, err, ep->stid,
+	       ntohl(ep->com.local_addr.sin_addr.s_addr),
+	       ntohs(ep->com.local_addr.sin_port));
 	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
 fail2:
 	cm_id->rem_ref(cm_id);
@@ -2412,12 +2675,19 @@ int c4iw_destroy_listen(struct iw_cm_id *cm_id)
 	might_sleep();
 	state_set(&ep->com, DEAD);
-	c4iw_init_wr_wait(&ep->com.wr_wait);
-	err = listen_stop(ep);
-	if (err)
-		goto done;
-	err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait, 0, 0,
-				  __func__);
+	if (ep->com.dev->rdev.lldi.enable_fw_ofld_conn) {
+		err = cxgb4_remove_server_filter(
+			ep->com.dev->rdev.lldi.ports[0], ep->stid,
+			ep->com.dev->rdev.lldi.rxq_ids[0], 0);
+	} else {
+		c4iw_init_wr_wait(&ep->com.wr_wait);
+		err = listen_stop(ep);
+		if (err)
+			goto done;
+		err = c4iw_wait_for_reply(&ep->com.dev->rdev, &ep->com.wr_wait,
+					  0, 0, __func__);
+	}
+	remove_handle(ep->com.dev, &ep->com.dev->stid_idr, ep->stid);
 	cxgb4_free_stid(ep->com.dev->rdev.lldi.tids, ep->stid, PF_INET);
 done:
 	cm_id->rem_ref(cm_id);
@@ -2481,10 +2751,13 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 
 	if (close) {
 		if (abrupt) {
+			set_bit(EP_DISC_ABORT, &ep->com.history);
 			close_complete_upcall(ep);
 			ret = send_abort(ep, NULL, gfp);
-		} else
+		} else {
+			set_bit(EP_DISC_CLOSE, &ep->com.history);
 			ret = send_halfclose(ep, gfp);
+		}
 		if (ret)
 			fatal = 1;
 	}
@@ -2494,10 +2767,331 @@ int c4iw_ep_disconnect(struct c4iw_ep *ep, int abrupt, gfp_t gfp)
 	return ret;
 }
 
-static int async_event(struct c4iw_dev *dev, struct sk_buff *skb)
+static void active_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
+			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
+{
+	struct c4iw_ep *ep;
+	int atid = be32_to_cpu(req->tid);
+
+	ep = (struct c4iw_ep *)lookup_atid(dev->rdev.lldi.tids,
+					   (__force u32) req->tid);
+	if (!ep)
+		return;
+
+	switch (req->retval) {
+	case FW_ENOMEM:
+		set_bit(ACT_RETRY_NOMEM, &ep->com.history);
+		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
+			send_fw_act_open_req(ep, atid);
+			return;
+		}
+	case FW_EADDRINUSE:
+		set_bit(ACT_RETRY_INUSE, &ep->com.history);
+		if (ep->retry_count++ < ACT_OPEN_RETRY_COUNT) {
+			send_fw_act_open_req(ep, atid);
+			return;
+		}
+		break;
+	default:
+		pr_info("%s unexpected ofld conn wr retval %d\n",
+		       __func__, req->retval);
+		break;
+	}
+	pr_err("active ofld_connect_wr failure %d atid %d\n",
+	       req->retval, atid);
+	mutex_lock(&dev->rdev.stats.lock);
+	dev->rdev.stats.act_ofld_conn_fails++;
+	mutex_unlock(&dev->rdev.stats.lock);
+	connect_reply_upcall(ep, status2errno(req->retval));
+	state_set(&ep->com, DEAD);
+	remove_handle(dev, &dev->atid_idr, atid);
+	cxgb4_free_atid(dev->rdev.lldi.tids, atid);
+	dst_release(ep->dst);
+	cxgb4_l2t_release(ep->l2t);
+	c4iw_put_ep(&ep->com);
+}
+
+static void passive_ofld_conn_reply(struct c4iw_dev *dev, struct sk_buff *skb,
+			struct cpl_fw6_msg_ofld_connection_wr_rpl *req)
+{
+	struct sk_buff *rpl_skb;
+	struct cpl_pass_accept_req *cpl;
+	int ret;
+
+	rpl_skb = (struct sk_buff *)(unsigned long)req->cookie;
+	BUG_ON(!rpl_skb);
+	if (req->retval) {
+		PDBG("%s passive open failure %d\n", __func__, req->retval);
+		mutex_lock(&dev->rdev.stats.lock);
+		dev->rdev.stats.pas_ofld_conn_fails++;
+		mutex_unlock(&dev->rdev.stats.lock);
+		kfree_skb(rpl_skb);
+	} else {
+		cpl = (struct cpl_pass_accept_req *)cplhdr(rpl_skb);
+		OPCODE_TID(cpl) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ,
+					(__force u32) htonl(
+					(__force u32) req->tid)));
+		ret = pass_accept_req(dev, rpl_skb);
+		if (!ret)
+			kfree_skb(rpl_skb);
+	}
+	return;
+}
+
+static int deferred_fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
 {
 	struct cpl_fw6_msg *rpl = cplhdr(skb);
-	c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
+	struct cpl_fw6_msg_ofld_connection_wr_rpl *req;
+
+	switch (rpl->type) {
+	case FW6_TYPE_CQE:
+		c4iw_ev_dispatch(dev, (struct t4_cqe *)&rpl->data[0]);
+		break;
+	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
+		req = (struct cpl_fw6_msg_ofld_connection_wr_rpl *)rpl->data;
+		switch (req->t_state) {
+		case TCP_SYN_SENT:
+			active_ofld_conn_reply(dev, skb, req);
+			break;
+		case TCP_SYN_RECV:
+			passive_ofld_conn_reply(dev, skb, req);
+			break;
+		default:
+			pr_err("%s unexpected ofld conn wr state %d\n",
+			       __func__, req->t_state);
+			break;
+		}
+		break;
+	}
+	return 0;
+}
+
+static void build_cpl_pass_accept_req(struct sk_buff *skb, int stid, u8 tos)
+{
+	u32 l2info;
+	u16 vlantag, len, hdr_len;
+	u8 intf;
+	struct cpl_rx_pkt *cpl = cplhdr(skb);
+	struct cpl_pass_accept_req *req;
+	struct tcp_options_received tmp_opt;
+
+	/* Store values from cpl_rx_pkt in temporary location. */
+	vlantag = (__force u16) cpl->vlan;
+	len = (__force u16) cpl->len;
+	l2info  = (__force u32) cpl->l2info;
+	hdr_len = (__force u16) cpl->hdr_len;
+	intf = cpl->iff;
+
+	__skb_pull(skb, sizeof(*req) + sizeof(struct rss_header));
+
+	/*
+	 * We need to parse the TCP options from the SYN packet
+	 * to generate cpl_pass_accept_req.
+	 */
+	memset(&tmp_opt, 0, sizeof(tmp_opt));
+	tcp_clear_options(&tmp_opt);
+	tcp_parse_options(skb, &tmp_opt, NULL, 0, NULL);
+
+	req = (struct cpl_pass_accept_req *)__skb_push(skb, sizeof(*req));
+	memset(req, 0, sizeof(*req));
+	req->l2info = cpu_to_be16(V_SYN_INTF(intf) |
+			 V_SYN_MAC_IDX(G_RX_MACIDX(
+			 (__force int) htonl(l2info))) |
+			 F_SYN_XACT_MATCH);
+	req->hdr_len = cpu_to_be32(V_SYN_RX_CHAN(G_RX_CHAN(
+					(__force int) htonl(l2info))) |
+				   V_TCP_HDR_LEN(G_RX_TCPHDR_LEN(
+					(__force int) htons(hdr_len))) |
+				   V_IP_HDR_LEN(G_RX_IPHDR_LEN(
+					(__force int) htons(hdr_len))) |
+				   V_ETH_HDR_LEN(G_RX_ETHHDR_LEN(
+					(__force int) htonl(l2info))));
+	req->vlan = (__force __be16) vlantag;
+	req->len = (__force __be16) len;
+	req->tos_stid = cpu_to_be32(PASS_OPEN_TID(stid) |
+				    PASS_OPEN_TOS(tos));
+	req->tcpopt.mss = htons(tmp_opt.mss_clamp);
+	if (tmp_opt.wscale_ok)
+		req->tcpopt.wsf = tmp_opt.snd_wscale;
+	req->tcpopt.tstamp = tmp_opt.saw_tstamp;
+	if (tmp_opt.sack_ok)
+		req->tcpopt.sack = 1;
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_ACCEPT_REQ, 0));
+	return;
+}
+
+static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
+				  __be32 laddr, __be16 lport,
+				  __be32 raddr, __be16 rport,
+				  u32 rcv_isn, u32 filter, u16 window,
+				  u32 rss_qid, u8 port_id)
+{
+	struct sk_buff *req_skb;
+	struct fw_ofld_connection_wr *req;
+	struct cpl_pass_accept_req *cpl = cplhdr(skb);
+
+	req_skb = alloc_skb(sizeof(struct fw_ofld_connection_wr), GFP_KERNEL);
+	req = (struct fw_ofld_connection_wr *)__skb_put(req_skb, sizeof(*req));
+	memset(req, 0, sizeof(*req));
+	req->op_compl = htonl(V_WR_OP(FW_OFLD_CONNECTION_WR) | FW_WR_COMPL(1));
+	req->len16_pkd = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*req), 16)));
+	req->le.version_cpl = htonl(F_FW_OFLD_CONNECTION_WR_CPL);
+	req->le.filter = (__force __be32) filter;
+	req->le.lport = lport;
+	req->le.pport = rport;
+	req->le.u.ipv4.lip = laddr;
+	req->le.u.ipv4.pip = raddr;
+	req->tcb.rcv_nxt = htonl(rcv_isn + 1);
+	req->tcb.rcv_adv = htons(window);
+	req->tcb.t_state_to_astid =
+		 htonl(V_FW_OFLD_CONNECTION_WR_T_STATE(TCP_SYN_RECV) |
+			V_FW_OFLD_CONNECTION_WR_RCV_SCALE(cpl->tcpopt.wsf) |
+			V_FW_OFLD_CONNECTION_WR_ASTID(
+			GET_PASS_OPEN_TID(ntohl(cpl->tos_stid))));
+
+	/*
+	 * We store the qid in opt2 which will be used by the firmware
+	 * to send us the wr response.
+	 */
+	req->tcb.opt2 = htonl(V_RSS_QUEUE(rss_qid));
+
+	/*
+	 * We initialize the MSS index in TCB to 0xF.
+	 * So that when driver sends cpl_pass_accept_rpl
+	 * TCB picks up the correct value. If this was 0
+	 * TP will ignore any value > 0 for MSS index.
+	 */
+	req->tcb.opt0 = cpu_to_be64(V_MSS_IDX(0xF));
+	req->cookie = (unsigned long)skb;
+
+	set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
+	cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
+}
+
+/*
+ * Handler for CPL_RX_PKT message. Need to handle cpl_rx_pkt
+ * messages when a filter is being used instead of server to
+ * redirect a syn packet. When packets hit filter they are redirected
+ * to the offload queue and driver tries to establish the connection
+ * using firmware work request.
+ */
+static int rx_pkt(struct c4iw_dev *dev, struct sk_buff *skb)
+{
+	int stid;
+	unsigned int filter;
+	struct ethhdr *eh = NULL;
+	struct vlan_ethhdr *vlan_eh = NULL;
+	struct iphdr *iph;
+	struct tcphdr *tcph;
+	struct rss_header *rss = (void *)skb->data;
+	struct cpl_rx_pkt *cpl = (void *)skb->data;
+	struct cpl_pass_accept_req *req = (void *)(rss + 1);
+	struct l2t_entry *e;
+	struct dst_entry *dst;
+	struct rtable *rt;
+	struct c4iw_ep *lep;
+	u16 window;
+	struct port_info *pi;
+	struct net_device *pdev;
+	u16 rss_qid;
+	int step;
+	u32 tx_chan;
+	struct neighbour *neigh;
+
+	/* Drop all non-SYN packets */
+	if (!(cpl->l2info & cpu_to_be32(F_RXF_SYN)))
+		goto reject;
+
+	/*
+	 * Drop all packets which did not hit the filter.
+	 * Unlikely to happen.
+	 */
+	if (!(rss->filter_hit && rss->filter_tid))
+		goto reject;
+
+	/*
+	 * Calculate the server tid from filter hit index from cpl_rx_pkt.
+	 */
+	stid = (__force int) cpu_to_be32((__force u32) rss->hash_val)
+					  - dev->rdev.lldi.tids->sftid_base
+					  + dev->rdev.lldi.tids->nstids;
+
+	lep = (struct c4iw_ep *)lookup_stid(dev->rdev.lldi.tids, stid);
+	if (!lep) {
+		PDBG("%s connect request on invalid stid %d\n", __func__, stid);
+		goto reject;
+	}
+
+	if (G_RX_ETHHDR_LEN(ntohl(cpl->l2info)) == ETH_HLEN) {
+		eh = (struct ethhdr *)(req + 1);
+		iph = (struct iphdr *)(eh + 1);
+	} else {
+		vlan_eh = (struct vlan_ethhdr *)(req + 1);
+		iph = (struct iphdr *)(vlan_eh + 1);
+		skb->vlan_tci = ntohs(cpl->vlan);
+	}
+
+	if (iph->version != 0x4)
+		goto reject;
+
+	tcph = (struct tcphdr *)(iph + 1);
+	skb_set_network_header(skb, (void *)iph - (void *)rss);
+	skb_set_transport_header(skb, (void *)tcph - (void *)rss);
+	skb_get(skb);
+
+	PDBG("%s lip 0x%x lport %u pip 0x%x pport %u tos %d\n", __func__,
+	     ntohl(iph->daddr), ntohs(tcph->dest), ntohl(iph->saddr),
+	     ntohs(tcph->source), iph->tos);
+
+	rt = find_route(dev, iph->daddr, iph->saddr, tcph->dest, tcph->source,
+			iph->tos);
+	if (!rt) {
+		pr_err("%s - failed to find dst entry!\n",
+		       __func__);
+		goto reject;
+	}
+	dst = &rt->dst;
+	neigh = dst_neigh_lookup_skb(dst, skb);
+
+	if (neigh->dev->flags & IFF_LOOPBACK) {
+		pdev = ip_dev_find(&init_net, iph->daddr);
+		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
+				    pdev, 0);
+		pi = (struct port_info *)netdev_priv(pdev);
+		tx_chan = cxgb4_port_chan(pdev);
+		dev_put(pdev);
+	} else {
+		e = cxgb4_l2t_get(dev->rdev.lldi.l2t, neigh,
+					neigh->dev, 0);
+		pi = (struct port_info *)netdev_priv(neigh->dev);
+		tx_chan = cxgb4_port_chan(neigh->dev);
+	}
+	if (!e) {
+		pr_err("%s - failed to allocate l2t entry!\n",
+		       __func__);
+		goto free_dst;
+	}
+
+	step = dev->rdev.lldi.nrxq / dev->rdev.lldi.nchan;
+	rss_qid = dev->rdev.lldi.rxq_ids[pi->port_id * step];
+	window = (__force u16) htons((__force u16)tcph->window);
+
+	/* Calculate filter portion for LE region. */
+	filter = (__force unsigned int) cpu_to_be32(select_ntuple(dev, dst, e));
+
+	/*
+	 * Synthesize the cpl_pass_accept_req. We have everything except the
+	 * TID. Once firmware sends a reply with TID we update the TID field
+	 * in cpl and pass it through the regular cpl_pass_accept_req path.
+	 */
+	build_cpl_pass_accept_req(skb, stid, iph->tos);
+	send_fw_pass_open_req(dev, skb, iph->daddr, tcph->dest, iph->saddr,
+			      tcph->source, ntohl(tcph->seq), filter, window,
+			      rss_qid, pi->port_id);
+	cxgb4_l2t_release(e);
+free_dst:
+	dst_release(dst);
+reject:
 	return 0;
 }
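rx_pkt() above is the entry point for the new filter-based passive open: a SYN that hits a server filter arrives as CPL_RX_PKT, the driver synthesizes a cpl_pass_accept_req and asks the firmware, via fw_ofld_connection_wr, to create the real tid, after which the reply is fed back through the normal pass_accept_req() path. The stid recovery from the filter hit, shown in isolation (a sketch of the arithmetic used above, without the __force casts):

/* rss->hash_val carries the filter index; map it back into the
 * server-tid space that lookup_stid() expects. */
stid = be32_to_cpu(rss->hash_val) - tids->sftid_base + tids->nstids;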
@@ -2520,7 +3114,8 @@ static c4iw_handler_func work_handlers[NUM_CPL_CMDS] = {
 	[CPL_CLOSE_CON_RPL] = close_con_rpl,
 	[CPL_RDMA_TERMINATE] = terminate,
 	[CPL_FW4_ACK] = fw4_ack,
-	[CPL_FW6_MSG] = async_event
+	[CPL_FW6_MSG] = deferred_fw6_msg,
+	[CPL_RX_PKT] = rx_pkt
 };
 
 static void process_timeout(struct c4iw_ep *ep)
@@ -2531,6 +3126,7 @@ static void process_timeout(struct c4iw_ep *ep)
 	mutex_lock(&ep->com.mutex);
 	PDBG("%s ep %p tid %u state %d\n", __func__, ep, ep->hwtid,
 	     ep->com.state);
+	set_bit(TIMEDOUT, &ep->com.history);
 	switch (ep->com.state) {
 	case MPA_REQ_SENT:
 		__state_set(&ep->com, ABORTING);
@@ -2604,11 +3200,16 @@ static DECLARE_WORK(skb_work, process_work);
 static void ep_timeout(unsigned long arg)
 {
 	struct c4iw_ep *ep = (struct c4iw_ep *)arg;
+	int kickit = 0;
 
 	spin_lock(&timeout_lock);
-	list_add_tail(&ep->entry, &timeout_list);
+	if (!test_and_set_bit(TIMEOUT, &ep->com.flags)) {
+		list_add_tail(&ep->entry, &timeout_list);
+		kickit = 1;
+	}
 	spin_unlock(&timeout_lock);
-	queue_work(workq, &skb_work);
+	if (kickit)
+		queue_work(workq, &skb_work);
 }
 
 /*
@@ -2651,7 +3252,7 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
 
 	PDBG("%s type %u\n", __func__, rpl->type);
 
 	switch (rpl->type) {
-	case 1:
+	case FW6_TYPE_WR_RPL:
 		ret = (int)((be64_to_cpu(rpl->data[0]) >> 8) & 0xff);
 		wr_waitp = (struct c4iw_wr_wait *)(__force unsigned long) rpl->data[1];
 		PDBG("%s wr_waitp %p ret %u\n", __func__, wr_waitp, ret);
@@ -2659,7 +3260,8 @@ static int fw6_msg(struct c4iw_dev *dev, struct sk_buff *skb)
 			c4iw_wake_up(wr_waitp, ret ? -ret : 0);
 		kfree_skb(skb);
 		break;
-	case 2:
+	case FW6_TYPE_CQE:
+	case FW6_TYPE_OFLD_CONNECTION_WR_RPL:
 		sched(dev, skb);
 		break;
 	default:
@@ -2696,8 +3298,14 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
 
 	/*
 	 * Wake up any threads in rdma_init() or rdma_fini().
+	 * However, if we are on MPAv2 and want to retry with MPAv1
+	 * then, don't wake up yet.
 	 */
-	c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+	if (mpa_rev == 2 && !ep->tried_with_mpa_v1) {
+		if (ep->com.state != MPA_REQ_SENT)
+			c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
+	} else
+		c4iw_wake_up(&ep->com.wr_wait, -ECONNRESET);
 	sched(dev, skb);
 	return 0;
 }
@@ -2722,7 +3330,8 @@ c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS] = {
 	[CPL_RDMA_TERMINATE] = sched,
 	[CPL_FW4_ACK] = sched,
 	[CPL_SET_TCB_RPL] = set_tcb_rpl,
-	[CPL_FW6_MSG] = fw6_msg
+	[CPL_FW6_MSG] = fw6_msg,
+	[CPL_RX_PKT] = sched
 };
 
 int __init c4iw_cm_init(void)
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index cb4ecd78370..80069ad595c 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -279,6 +279,11 @@ static int stats_show(struct seq_file *seq, void *v)
 	seq_printf(seq, " DB State: %s Transitions %llu\n",
 		   db_state_str[dev->db_state],
 		   dev->rdev.stats.db_state_transitions);
+	seq_printf(seq, "TCAM_FULL: %10llu\n", dev->rdev.stats.tcam_full);
+	seq_printf(seq, "ACT_OFLD_CONN_FAILS: %10llu\n",
+		   dev->rdev.stats.act_ofld_conn_fails);
+	seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
+		   dev->rdev.stats.pas_ofld_conn_fails);
 	return 0;
 }
 
@@ -309,6 +314,9 @@ static ssize_t stats_clear(struct file *file, const char __user *buf,
 	dev->rdev.stats.db_empty = 0;
 	dev->rdev.stats.db_drop = 0;
 	dev->rdev.stats.db_state_transitions = 0;
+	dev->rdev.stats.tcam_full = 0;
+	dev->rdev.stats.act_ofld_conn_fails = 0;
+	dev->rdev.stats.pas_ofld_conn_fails = 0;
 	mutex_unlock(&dev->rdev.stats.lock);
 	return count;
 }
@@ -322,6 +330,113 @@ static const struct file_operations stats_debugfs_fops = {
 	.write   = stats_clear,
 };
 
+static int dump_ep(int id, void *p, void *data)
+{
+	struct c4iw_ep *ep = p;
+	struct c4iw_debugfs_data *epd = data;
+	int space;
+	int cc;
+
+	space = epd->bufsize - epd->pos - 1;
+	if (space == 0)
+		return 1;
+
+	cc = snprintf(epd->buf + epd->pos, space,
+			"ep %p cm_id %p qp %p state %d flags 0x%lx history 0x%lx "
+			"hwtid %d atid %d %pI4:%d <-> %pI4:%d\n",
+			ep, ep->com.cm_id, ep->com.qp, (int)ep->com.state,
+			ep->com.flags, ep->com.history, ep->hwtid, ep->atid,
+			&ep->com.local_addr.sin_addr.s_addr,
+			ntohs(ep->com.local_addr.sin_port),
+			&ep->com.remote_addr.sin_addr.s_addr,
+			ntohs(ep->com.remote_addr.sin_port));
+	if (cc < space)
+		epd->pos += cc;
+	return 0;
+}
+
+static int dump_listen_ep(int id, void *p, void *data)
+{
+	struct c4iw_listen_ep *ep = p;
+	struct c4iw_debugfs_data *epd = data;
+	int space;
+	int cc;
+
+	space = epd->bufsize - epd->pos - 1;
+	if (space == 0)
+		return 1;
+
+	cc = snprintf(epd->buf + epd->pos, space,
+			"ep %p cm_id %p state %d flags 0x%lx stid %d backlog %d "
+			"%pI4:%d\n", ep, ep->com.cm_id, (int)ep->com.state,
+			ep->com.flags, ep->stid, ep->backlog,
+			&ep->com.local_addr.sin_addr.s_addr,
+			ntohs(ep->com.local_addr.sin_port));
+	if (cc < space)
+		epd->pos += cc;
+	return 0;
+}
+
+static int ep_release(struct inode *inode, struct file *file)
+{
+	struct c4iw_debugfs_data *epd = file->private_data;
+	if (!epd) {
+		pr_info("%s null qpd?\n", __func__);
+		return 0;
+	}
+	vfree(epd->buf);
+	kfree(epd);
+	return 0;
+}
+
+static int ep_open(struct inode *inode, struct file *file)
+{
+	struct c4iw_debugfs_data *epd;
+	int ret = 0;
+	int count = 1;
+
+	epd = kmalloc(sizeof(*epd), GFP_KERNEL);
+	if (!epd) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	epd->devp = inode->i_private;
+	epd->pos = 0;
+
+	spin_lock_irq(&epd->devp->lock);
+	idr_for_each(&epd->devp->hwtid_idr, count_idrs, &count);
+	idr_for_each(&epd->devp->atid_idr, count_idrs, &count);
+	idr_for_each(&epd->devp->stid_idr, count_idrs, &count);
+	spin_unlock_irq(&epd->devp->lock);
+
+	epd->bufsize = count * 160;
+	epd->buf = vmalloc(epd->bufsize);
+	if (!epd->buf) {
+		ret = -ENOMEM;
+		goto err1;
+	}
+
+	spin_lock_irq(&epd->devp->lock);
+	idr_for_each(&epd->devp->hwtid_idr, dump_ep, epd);
+	idr_for_each(&epd->devp->atid_idr, dump_ep, epd);
+	idr_for_each(&epd->devp->stid_idr, dump_listen_ep, epd);
+	spin_unlock_irq(&epd->devp->lock);
+
+	file->private_data = epd;
+	goto out;
+err1:
+	kfree(epd);
+out:
+	return ret;
+}
+
+static const struct file_operations ep_debugfs_fops = {
+	.owner   = THIS_MODULE,
+	.open    = ep_open,
+	.release = ep_release,
+	.read    = debugfs_read,
+};
+
 static int setup_debugfs(struct c4iw_dev *devp)
 {
 	struct dentry *de;
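The new "eps" debugfs file uses a two-pass dump: under the device lock, pass one counts the idr entries to size the buffer at roughly 160 bytes per endpoint, and pass two formats each entry with dump_ep()/dump_listen_ep(), which also print the ep->com.history breadcrumb bits set throughout cm.c. A sketch of the counting callback, assuming the idr_for_each() callback signature and the count_idrs() helper already used by the other dumpers in this file:

static int count_idrs(int id, void *p, void *data)
{
	int *countp = data;

	*countp = *countp + 1;	/* visit every entry; always continue */
	return 0;
}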
+	 */ +	skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) + +			sizeof(struct rss_header) - pktshift, GFP_ATOMIC); +	if (unlikely(!skb)) +		return NULL; + +	 __skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) + +		   sizeof(struct rss_header) - pktshift); + +	/* +	 * This skb will contain: +	 *   rss_header from the rspq descriptor (1 flit) +	 *   cpl_rx_pkt struct from the rspq descriptor (2 flits) +	 *   space for the difference between the size of an +	 *      rx_pkt and pass_accept_req cpl (1 flit) +	 *   the packet data from the gl +	 */ +	skb_copy_to_linear_data(skb, rsp, sizeof(struct cpl_pass_accept_req) + +				sizeof(struct rss_header)); +	skb_copy_to_linear_data_offset(skb, sizeof(struct rss_header) + +				       sizeof(struct cpl_pass_accept_req), +				       gl->va + pktshift, +				       gl->tot_len - pktshift); +	return skb; +} + +static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl, +			   const __be64 *rsp) +{ +	unsigned int opcode = *(u8 *)rsp; +	struct sk_buff *skb; + +	if (opcode != CPL_RX_PKT) +		goto out; + +	skb = copy_gl_to_skb_pkt(gl , rsp, dev->rdev.lldi.sge_pktshift); +	if (skb == NULL) +		goto out; + +	if (c4iw_handlers[opcode] == NULL) { +		pr_info("%s no handler opcode 0x%x...\n", __func__, +		       opcode); +		kfree_skb(skb); +		goto out; +	} +	c4iw_handlers[opcode](dev, skb); +	return 1; +out: +	return 0; +} +  static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,  			const struct pkt_gl *gl)  {  	struct uld_ctx *ctx = handle;  	struct c4iw_dev *dev = ctx->dev;  	struct sk_buff *skb; -	const struct cpl_act_establish *rpl; -	unsigned int opcode; +	u8 opcode;  	if (gl == NULL) {  		/* omit RSS and rsp_ctrl at end of descriptor */ @@ -601,19 +789,30 @@ static int c4iw_uld_rx_handler(void *handle, const __be64 *rsp,  		u32 qid = be32_to_cpu(rc->pldbuflen_qid);  		c4iw_ev_handler(dev, qid);  		return 0; +	} else if (unlikely(*(u8 *)rsp != *(u8 *)gl->va)) { +		if (recv_rx_pkt(dev, gl, rsp)) +			return 0; + +		pr_info("%s: unexpected FL contents at %p, " \ +		       "RSS %#llx, FL %#llx, len %u\n", +		       pci_name(ctx->lldi.pdev), gl->va, +		       (unsigned long long)be64_to_cpu(*rsp), +		       (unsigned long long)be64_to_cpu( +		       *(__force __be64 *)gl->va), +		       gl->tot_len); + +		return 0;  	} else {  		skb = cxgb4_pktgl_to_skb(gl, 128, 128);  		if (unlikely(!skb))  			goto nomem;  	} -	rpl = cplhdr(skb); -	opcode = rpl->ot.opcode; - +	opcode = *(u8 *)rsp;  	if (c4iw_handlers[opcode])  		c4iw_handlers[opcode](dev, skb);  	else -		printk(KERN_INFO "%s no handler opcode 0x%x...\n", __func__, +		pr_info("%s no handler opcode 0x%x...\n", __func__,  		       opcode);  	return 0; diff --git a/drivers/infiniband/hw/cxgb4/ev.c b/drivers/infiniband/hw/cxgb4/ev.c index cf2f6b47617..1a840b2211d 100644 --- a/drivers/infiniband/hw/cxgb4/ev.c +++ b/drivers/infiniband/hw/cxgb4/ev.c @@ -46,9 +46,11 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,  	if ((qhp->attr.state == C4IW_QP_STATE_ERROR) ||  	    (qhp->attr.state == C4IW_QP_STATE_TERMINATE)) { -		PDBG("%s AE received after RTS - " -		     "qp state %d qpid 0x%x status 0x%x\n", __func__, -		     qhp->attr.state, qhp->wq.sq.qid, CQE_STATUS(err_cqe)); +		pr_err("%s AE after RTS - qpid 0x%x opcode %d status 0x%x "\ +		       "type %d wrid.hi 0x%x wrid.lo 0x%x\n", +		       __func__, CQE_QPID(err_cqe), CQE_OPCODE(err_cqe), +		       CQE_STATUS(err_cqe), CQE_TYPE(err_cqe), +		       CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));  		
return;  	} diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 9beb3a9f033..7eec5e13fa8 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h @@ -130,6 +130,9 @@ struct c4iw_stats {  	u64  db_empty;  	u64  db_drop;  	u64  db_state_transitions; +	u64  tcam_full; +	u64  act_ofld_conn_fails; +	u64  pas_ofld_conn_fails;  };  struct c4iw_rdev { @@ -223,6 +226,9 @@ struct c4iw_dev {  	struct dentry *debugfs_root;  	enum db_state db_state;  	int qpcnt; +	struct idr hwtid_idr; +	struct idr atid_idr; +	struct idr stid_idr;  };  static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev) @@ -254,20 +260,21 @@ static inline int _insert_handle(struct c4iw_dev *rhp, struct idr *idr,  				 void *handle, u32 id, int lock)  {  	int ret; -	int newid; -	do { -		if (!idr_pre_get(idr, lock ? GFP_KERNEL : GFP_ATOMIC)) -			return -ENOMEM; -		if (lock) -			spin_lock_irq(&rhp->lock); -		ret = idr_get_new_above(idr, handle, id, &newid); -		BUG_ON(!ret && newid != id); -		if (lock) -			spin_unlock_irq(&rhp->lock); -	} while (ret == -EAGAIN); +	if (lock) { +		idr_preload(GFP_KERNEL); +		spin_lock_irq(&rhp->lock); +	} -	return ret; +	ret = idr_alloc(idr, handle, id, id + 1, GFP_ATOMIC); + +	if (lock) { +		spin_unlock_irq(&rhp->lock); +		idr_preload_end(); +	} + +	BUG_ON(ret == -ENOSPC); +	return ret < 0 ? ret : 0;  }  static inline int insert_handle(struct c4iw_dev *rhp, struct idr *idr, @@ -710,6 +717,33 @@ enum c4iw_ep_flags {  	ABORT_REQ_IN_PROGRESS	= 1,  	RELEASE_RESOURCES	= 2,  	CLOSE_SENT		= 3, +	TIMEOUT                 = 4, +	QP_REFERENCED           = 5, +}; + +enum c4iw_ep_history { +	ACT_OPEN_REQ            = 0, +	ACT_OFLD_CONN           = 1, +	ACT_OPEN_RPL            = 2, +	ACT_ESTAB               = 3, +	PASS_ACCEPT_REQ         = 4, +	PASS_ESTAB              = 5, +	ABORT_UPCALL            = 6, +	ESTAB_UPCALL            = 7, +	CLOSE_UPCALL            = 8, +	ULP_ACCEPT              = 9, +	ULP_REJECT              = 10, +	TIMEDOUT                = 11, +	PEER_ABORT              = 12, +	PEER_CLOSE              = 13, +	CONNREQ_UPCALL          = 14, +	ABORT_CONN              = 15, +	DISCONN_UPCALL          = 16, +	EP_DISC_CLOSE           = 17, +	EP_DISC_ABORT           = 18, +	CONN_RPL_UPCALL         = 19, +	ACT_RETRY_NOMEM         = 20, +	ACT_RETRY_INUSE         = 21  };  struct c4iw_ep_common { @@ -723,6 +757,7 @@ struct c4iw_ep_common {  	struct sockaddr_in remote_addr;  	struct c4iw_wr_wait wr_wait;  	unsigned long flags; +	unsigned long history;  };  struct c4iw_listen_ep { @@ -760,6 +795,7 @@ struct c4iw_ep {  	u8 tos;  	u8 retry_with_mpa_v1;  	u8 tried_with_mpa_v1; +	unsigned int retry_count;  };  static inline struct c4iw_ep *to_ep(struct iw_cm_id *cm_id) @@ -833,7 +869,7 @@ struct ib_fast_reg_page_list *c4iw_alloc_fastreg_pbl(  					int page_list_len);  struct ib_mr *c4iw_alloc_fast_reg_mr(struct ib_pd *pd, int pbl_depth);  int c4iw_dealloc_mw(struct ib_mw *mw); -struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd); +struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);  struct ib_mr *c4iw_reg_user_mr(struct ib_pd *pd, u64 start,  					   u64 length, u64 virt, int acc,  					   struct ib_udata *udata); diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c index afd81790ab3..903a92d6f91 100644 --- a/drivers/infiniband/hw/cxgb4/mem.c +++ b/drivers/infiniband/hw/cxgb4/mem.c @@ -650,7 +650,7 @@ err:  	return ERR_PTR(err);  } -struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd) 
+struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)  {  	struct c4iw_dev *rhp;  	struct c4iw_pd *php; @@ -659,6 +659,9 @@ struct ib_mw *c4iw_alloc_mw(struct ib_pd *pd)  	u32 stag = 0;  	int ret; +	if (type != IB_MW_TYPE_1) +		return ERR_PTR(-EINVAL); +  	php = to_c4iw_pd(pd);  	rhp = php->rhp;  	mhp = kzalloc(sizeof(*mhp), GFP_KERNEL); diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c index 05bfe53bff6..17ba4f8bc12 100644 --- a/drivers/infiniband/hw/cxgb4/qp.c +++ b/drivers/infiniband/hw/cxgb4/qp.c @@ -1383,6 +1383,7 @@ err:  	qhp->ep = NULL;  	set_state(qhp, C4IW_QP_STATE_ERROR);  	free = 1; +	abort = 1;  	wake_up(&qhp->wait);  	BUG_ON(!ep);  	flush_qp(qhp); diff --git a/drivers/infiniband/hw/ehca/ehca_cq.c b/drivers/infiniband/hw/ehca/ehca_cq.c index 8f5290147e8..212150c25ea 100644 --- a/drivers/infiniband/hw/ehca/ehca_cq.c +++ b/drivers/infiniband/hw/ehca/ehca_cq.c @@ -128,7 +128,7 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,  	void *vpage;  	u32 counter;  	u64 rpage, cqx_fec, h_ret; -	int ipz_rc, ret, i; +	int ipz_rc, i;  	unsigned long flags;  	if (cqe >= 0xFFFFFFFF - 64 - additional_cqe) @@ -163,32 +163,19 @@ struct ib_cq *ehca_create_cq(struct ib_device *device, int cqe, int comp_vector,  	adapter_handle = shca->ipz_hca_handle;  	param.eq_handle = shca->eq.ipz_eq_handle; -	do { -		if (!idr_pre_get(&ehca_cq_idr, GFP_KERNEL)) { -			cq = ERR_PTR(-ENOMEM); -			ehca_err(device, "Can't reserve idr nr. device=%p", -				 device); -			goto create_cq_exit1; -		} - -		write_lock_irqsave(&ehca_cq_idr_lock, flags); -		ret = idr_get_new(&ehca_cq_idr, my_cq, &my_cq->token); -		write_unlock_irqrestore(&ehca_cq_idr_lock, flags); -	} while (ret == -EAGAIN); +	idr_preload(GFP_KERNEL); +	write_lock_irqsave(&ehca_cq_idr_lock, flags); +	my_cq->token = idr_alloc(&ehca_cq_idr, my_cq, 0, 0x2000000, GFP_NOWAIT); +	write_unlock_irqrestore(&ehca_cq_idr_lock, flags); +	idr_preload_end(); -	if (ret) { +	if (my_cq->token < 0) {  		cq = ERR_PTR(-ENOMEM);  		ehca_err(device, "Can't allocate new idr entry. device=%p",  			 device);  		goto create_cq_exit1;  	} -	if (my_cq->token > 0x1FFFFFF) { -		cq = ERR_PTR(-ENOMEM); -		ehca_err(device, "Invalid number of cq. device=%p", device); -		goto create_cq_exit2; -	} -  	/*  	 * CQs maximum depth is 4GB-64, but we need additional 20 as buffer  	 * for receiving errors CQEs. 
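The idr hunks in this series (iw_cxgb4's _insert_handle above, ehca's CQ and QP creation, and the ipath, mlx4, ocrdma, and qib conversions below) all apply the same mechanical change: the old idr_pre_get()/idr_get_new() retry loop becomes idr_preload() + idr_alloc() + idr_preload_end(). A minimal sketch of the new pattern, assuming a hypothetical example_idr, example_lock, and example_alloc_id; only the idr_* calls themselves are the real kernel API:

	#include <linux/idr.h>
	#include <linux/spinlock.h>

	static DEFINE_IDR(example_idr);
	static DEFINE_SPINLOCK(example_lock);

	static int example_alloc_id(void *handle)
	{
		int id;

		idr_preload(GFP_KERNEL);	/* preallocate nodes while sleeping is allowed */
		spin_lock(&example_lock);

		/* allocate the lowest free id in [0, 0x2000000) */
		id = idr_alloc(&example_idr, handle, 0, 0x2000000, GFP_NOWAIT);

		spin_unlock(&example_lock);
		idr_preload_end();

		return id;	/* >= 0 on success, -ENOMEM or -ENOSPC on failure */
	}

Because idr_alloc() takes an exclusive upper bound and fails with -ENOSPC rather than handing back an out-of-range id, post-hoc checks such as the removed "my_cq->token > 0x1FFFFFF" test become unnecessary.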
diff --git a/drivers/infiniband/hw/ehca/ehca_iverbs.h b/drivers/infiniband/hw/ehca/ehca_iverbs.h index 8f7f282ead6..22f79afa7fc 100644 --- a/drivers/infiniband/hw/ehca/ehca_iverbs.h +++ b/drivers/infiniband/hw/ehca/ehca_iverbs.h @@ -95,7 +95,7 @@ int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr);  int ehca_dereg_mr(struct ib_mr *mr); -struct ib_mw *ehca_alloc_mw(struct ib_pd *pd); +struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type);  int ehca_bind_mw(struct ib_qp *qp, struct ib_mw *mw,  		 struct ib_mw_bind *mw_bind); diff --git a/drivers/infiniband/hw/ehca/ehca_main.c b/drivers/infiniband/hw/ehca/ehca_main.c index 832e7a7d0ae..f8a62918a88 100644 --- a/drivers/infiniband/hw/ehca/ehca_main.c +++ b/drivers/infiniband/hw/ehca/ehca_main.c @@ -713,8 +713,8 @@ static struct attribute_group ehca_dev_attr_grp = {  	.attrs = ehca_dev_attrs  }; -static int __devinit ehca_probe(struct platform_device *dev, -				const struct of_device_id *id) +static int ehca_probe(struct platform_device *dev, +		      const struct of_device_id *id)  {  	struct ehca_shca *shca;  	const u64 *handle; @@ -879,7 +879,7 @@ probe1:  	return -EINVAL;  } -static int __devexit ehca_remove(struct platform_device *dev) +static int ehca_remove(struct platform_device *dev)  {  	struct ehca_shca *shca = dev_get_drvdata(&dev->dev);  	unsigned long flags; diff --git a/drivers/infiniband/hw/ehca/ehca_mrmw.c b/drivers/infiniband/hw/ehca/ehca_mrmw.c index 87844869dcc..bcfb0c18362 100644 --- a/drivers/infiniband/hw/ehca/ehca_mrmw.c +++ b/drivers/infiniband/hw/ehca/ehca_mrmw.c @@ -688,7 +688,7 @@ dereg_mr_exit0:  /*----------------------------------------------------------------------*/ -struct ib_mw *ehca_alloc_mw(struct ib_pd *pd) +struct ib_mw *ehca_alloc_mw(struct ib_pd *pd, enum ib_mw_type type)  {  	struct ib_mw *ib_mw;  	u64 h_ret; @@ -698,6 +698,9 @@ struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)  		container_of(pd->device, struct ehca_shca, ib_device);  	struct ehca_mw_hipzout_parms hipzout; +	if (type != IB_MW_TYPE_1) +		return ERR_PTR(-EINVAL); +  	e_mw = ehca_mw_new();  	if (!e_mw) {  		ib_mw = ERR_PTR(-ENOMEM); diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c index 149393915ae..00d6861a6a1 100644 --- a/drivers/infiniband/hw/ehca/ehca_qp.c +++ b/drivers/infiniband/hw/ehca/ehca_qp.c @@ -636,30 +636,26 @@ static struct ehca_qp *internal_create_qp(  		my_qp->send_cq =  			container_of(init_attr->send_cq, struct ehca_cq, ib_cq); -	do { -		if (!idr_pre_get(&ehca_qp_idr, GFP_KERNEL)) { -			ret = -ENOMEM; -			ehca_err(pd->device, "Can't reserve idr resources."); -			goto create_qp_exit0; -		} +	idr_preload(GFP_KERNEL); +	write_lock_irqsave(&ehca_qp_idr_lock, flags); -		write_lock_irqsave(&ehca_qp_idr_lock, flags); -		ret = idr_get_new(&ehca_qp_idr, my_qp, &my_qp->token); -		write_unlock_irqrestore(&ehca_qp_idr_lock, flags); -	} while (ret == -EAGAIN); +	ret = idr_alloc(&ehca_qp_idr, my_qp, 0, 0x2000000, GFP_NOWAIT); +	if (ret >= 0) +		my_qp->token = ret; -	if (ret) { -		ret = -ENOMEM; -		ehca_err(pd->device, "Can't allocate new idr entry."); +	write_unlock_irqrestore(&ehca_qp_idr_lock, flags); +	idr_preload_end(); +	if (ret < 0) { +		if (ret == -ENOSPC) { +			ret = -EINVAL; +			ehca_err(pd->device, "Invalid number of qp"); +		} else { +			ret = -ENOMEM; +			ehca_err(pd->device, "Can't allocate new idr entry."); +		}  		goto create_qp_exit0;  	} -	if (my_qp->token > 0x1FFFFFF) { -		ret = -EINVAL; -		ehca_err(pd->device, "Invalid number of qp"); -		goto 
create_qp_exit1; -	} -  	if (has_srq)  		parms.srq_token = my_qp->token; diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c index bfca37b2432..bd0caedafe9 100644 --- a/drivers/infiniband/hw/ipath/ipath_driver.c +++ b/drivers/infiniband/hw/ipath/ipath_driver.c @@ -127,9 +127,8 @@ const char *ipath_ibcstatus_str[] = {  	"LTState1C", "LTState1D", "LTState1E", "LTState1F"  }; -static void __devexit ipath_remove_one(struct pci_dev *); -static int __devinit ipath_init_one(struct pci_dev *, -				    const struct pci_device_id *); +static void ipath_remove_one(struct pci_dev *); +static int ipath_init_one(struct pci_dev *, const struct pci_device_id *);  /* Only needed for registration, nothing else needs this info */  #define PCI_VENDOR_ID_PATHSCALE 0x1fc1 @@ -148,7 +147,7 @@ MODULE_DEVICE_TABLE(pci, ipath_pci_tbl);  static struct pci_driver ipath_driver = {  	.name = IPATH_DRV_NAME,  	.probe = ipath_init_one, -	.remove = __devexit_p(ipath_remove_one), +	.remove = ipath_remove_one,  	.id_table = ipath_pci_tbl,  	.driver = {  		.groups = ipath_driver_attr_groups, @@ -195,11 +194,6 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)  	struct ipath_devdata *dd;  	int ret; -	if (!idr_pre_get(&unit_table, GFP_KERNEL)) { -		dd = ERR_PTR(-ENOMEM); -		goto bail; -	} -  	dd = vzalloc(sizeof(*dd));  	if (!dd) {  		dd = ERR_PTR(-ENOMEM); @@ -207,9 +201,10 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)  	}  	dd->ipath_unit = -1; +	idr_preload(GFP_KERNEL);  	spin_lock_irqsave(&ipath_devs_lock, flags); -	ret = idr_get_new(&unit_table, dd, &dd->ipath_unit); +	ret = idr_alloc(&unit_table, dd, 0, 0, GFP_NOWAIT);  	if (ret < 0) {  		printk(KERN_ERR IPATH_DRV_NAME  		       ": Could not allocate unit ID: error %d\n", -ret); @@ -217,6 +212,7 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)  		dd = ERR_PTR(ret);  		goto bail_unlock;  	} +	dd->ipath_unit = ret;  	dd->pcidev = pdev;  	pci_set_drvdata(pdev, dd); @@ -225,7 +221,7 @@ static struct ipath_devdata *ipath_alloc_devdata(struct pci_dev *pdev)  bail_unlock:  	spin_unlock_irqrestore(&ipath_devs_lock, flags); - +	idr_preload_end();  bail:  	return dd;  } @@ -392,8 +388,7 @@ done:  static void cleanup_device(struct ipath_devdata *dd); -static int __devinit ipath_init_one(struct pci_dev *pdev, -				    const struct pci_device_id *ent) +static int ipath_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)  {  	int ret, len, j;  	struct ipath_devdata *dd; @@ -737,7 +732,7 @@ static void cleanup_device(struct ipath_devdata *dd)  	kfree(tmp);  } -static void __devexit ipath_remove_one(struct pci_dev *pdev) +static void ipath_remove_one(struct pci_dev *pdev)  {  	struct ipath_devdata *dd = pci_get_drvdata(pdev); @@ -2505,11 +2500,6 @@ static int __init infinipath_init(void)  	 * the PCI subsystem.  	 
*/  	idr_init(&unit_table); -	if (!idr_pre_get(&unit_table, GFP_KERNEL)) { -		printk(KERN_ERR IPATH_DRV_NAME ": idr_pre_get() failed\n"); -		ret = -ENOMEM; -		goto bail; -	}  	ret = pci_register_driver(&ipath_driver);  	if (ret < 0) { diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c index 3eb7e454849..aed8afee56d 100644 --- a/drivers/infiniband/hw/ipath/ipath_file_ops.c +++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c @@ -1864,9 +1864,9 @@ static int ipath_assign_port(struct file *fp,  		goto done_chk_sdma;  	} -	i_minor = iminor(fp->f_path.dentry->d_inode) - IPATH_USER_MINOR_BASE; +	i_minor = iminor(file_inode(fp)) - IPATH_USER_MINOR_BASE;  	ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n", -		   (long)fp->f_path.dentry->d_inode->i_rdev, i_minor); +		   (long)file_inode(fp)->i_rdev, i_minor);  	if (i_minor)  		ret = find_free_port(i_minor - 1, fp, uinfo); diff --git a/drivers/infiniband/hw/ipath/ipath_fs.c b/drivers/infiniband/hw/ipath/ipath_fs.c index a4de9d58e9b..a479375a8fd 100644 --- a/drivers/infiniband/hw/ipath/ipath_fs.c +++ b/drivers/infiniband/hw/ipath/ipath_fs.c @@ -113,7 +113,7 @@ static ssize_t atomic_counters_read(struct file *file, char __user *buf,  	struct infinipath_counters counters;  	struct ipath_devdata *dd; -	dd = file->f_path.dentry->d_inode->i_private; +	dd = file_inode(file)->i_private;  	dd->ipath_f_read_counters(dd, &counters);  	return simple_read_from_buffer(buf, count, ppos, &counters, @@ -154,7 +154,7 @@ static ssize_t flash_read(struct file *file, char __user *buf,  		goto bail;  	} -	dd = file->f_path.dentry->d_inode->i_private; +	dd = file_inode(file)->i_private;  	if (ipath_eeprom_read(dd, pos, tmp, count)) {  		ipath_dev_err(dd, "failed to read from flash\n");  		ret = -ENXIO; @@ -207,7 +207,7 @@ static ssize_t flash_write(struct file *file, const char __user *buf,  		goto bail_tmp;  	} -	dd = file->f_path.dentry->d_inode->i_private; +	dd = file_inode(file)->i_private;  	if (ipath_eeprom_write(dd, pos, tmp, count)) {  		ret = -ENXIO;  		ipath_dev_err(dd, "failed to write to flash\n"); diff --git a/drivers/infiniband/hw/mlx4/cm.c b/drivers/infiniband/hw/mlx4/cm.c index dbc99d41605..e0d79b2395e 100644 --- a/drivers/infiniband/hw/mlx4/cm.c +++ b/drivers/infiniband/hw/mlx4/cm.c @@ -203,7 +203,7 @@ static void sl_id_map_add(struct ib_device *ibdev, struct id_map_entry *new)  static struct id_map_entry *  id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)  { -	int ret, id; +	int ret;  	static int next_id;  	struct id_map_entry *ent;  	struct mlx4_ib_sriov *sriov = &to_mdev(ibdev)->sriov; @@ -220,25 +220,23 @@ id_map_alloc(struct ib_device *ibdev, int slave_id, u32 sl_cm_id)  	ent->dev = to_mdev(ibdev);  	INIT_DELAYED_WORK(&ent->timeout, id_map_ent_timeout); -	do { -		spin_lock(&to_mdev(ibdev)->sriov.id_map_lock); -		ret = idr_get_new_above(&sriov->pv_id_table, ent, -					next_id, &id); -		if (!ret) { -			next_id = ((unsigned) id + 1) & MAX_IDR_MASK; -			ent->pv_cm_id = (u32)id; -			sl_id_map_add(ibdev, ent); -		} +	idr_preload(GFP_KERNEL); +	spin_lock(&to_mdev(ibdev)->sriov.id_map_lock); -		spin_unlock(&sriov->id_map_lock); -	} while (ret == -EAGAIN && idr_pre_get(&sriov->pv_id_table, GFP_KERNEL)); -	/*the function idr_get_new_above can return -ENOSPC, so don't insert in that case.*/ -	if (!ret) { -		spin_lock(&sriov->id_map_lock); +	ret = idr_alloc(&sriov->pv_id_table, ent, next_id, 0, GFP_NOWAIT); +	if (ret >= 0) { +		next_id = max(ret + 1, 0); +		ent->pv_cm_id = (u32)ret; +		
sl_id_map_add(ibdev, ent);  		list_add_tail(&ent->list, &sriov->cm_list); -		spin_unlock(&sriov->id_map_lock); -		return ent;  	} + +	spin_unlock(&sriov->id_map_lock); +	idr_preload_end(); + +	if (ret >= 0) +		return ent; +  	/*error flow*/  	kfree(ent);  	mlx4_ib_warn(ibdev, "No more space in the idr (err:0x%x)\n", ret); diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c index 0a903c129f0..934792c477b 100644 --- a/drivers/infiniband/hw/mlx4/mad.c +++ b/drivers/infiniband/hw/mlx4/mad.c @@ -1999,16 +1999,17 @@ int mlx4_ib_init_sriov(struct mlx4_ib_dev *dev)  			goto demux_err;  		err = mlx4_ib_alloc_demux_ctx(dev, &dev->sriov.demux[i], i + 1);  		if (err) -			goto demux_err; +			goto free_pv;  	}  	mlx4_ib_master_tunnels(dev, 1);  	return 0; +free_pv: +	free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);  demux_err: -	while (i > 0) { +	while (--i >= 0) {  		free_pv_object(dev, mlx4_master_func_num(dev->dev), i + 1);  		mlx4_ib_free_demux_ctx(&dev->sriov.demux[i]); -		--i;  	}  	mlx4_ib_device_unregister_sysfs(dev); diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index e7d81c0d1ac..23d734349d8 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -137,6 +137,14 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,  		props->device_cap_flags |= IB_DEVICE_MEM_MGT_EXTENSIONS;  	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC)  		props->device_cap_flags |= IB_DEVICE_XRC; +	if (dev->dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW) +		props->device_cap_flags |= IB_DEVICE_MEM_WINDOW; +	if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) { +		if (dev->dev->caps.bmme_flags & MLX4_BMME_FLAG_WIN_TYPE_2B) +			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2B; +		else +			props->device_cap_flags |= IB_DEVICE_MEM_WINDOW_TYPE_2A; +	}  	props->vendor_id	   = be32_to_cpup((__be32 *) (out_mad->data + 36)) &  		0xffffff; @@ -1434,6 +1442,17 @@ static void *mlx4_ib_add(struct mlx4_dev *dev)  		ibdev->ib_dev.dealloc_fmr	= mlx4_ib_fmr_dealloc;  	} +	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW || +	    dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN) { +		ibdev->ib_dev.alloc_mw = mlx4_ib_alloc_mw; +		ibdev->ib_dev.bind_mw = mlx4_ib_bind_mw; +		ibdev->ib_dev.dealloc_mw = mlx4_ib_dealloc_mw; + +		ibdev->ib_dev.uverbs_cmd_mask |= +			(1ull << IB_USER_VERBS_CMD_ALLOC_MW) | +			(1ull << IB_USER_VERBS_CMD_DEALLOC_MW); +	} +  	if (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) {  		ibdev->ib_dev.alloc_xrcd = mlx4_ib_alloc_xrcd;  		ibdev->ib_dev.dealloc_xrcd = mlx4_ib_dealloc_xrcd; @@ -1601,8 +1620,7 @@ static void do_slave_init(struct mlx4_ib_dev *ibdev, int slave, int do_init)  		spin_unlock_irqrestore(&ibdev->sriov.going_down_lock, flags);  	}  out: -	if (dm) -		kfree(dm); +	kfree(dm);  	return;  } diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index dcd845bc30f..f61ec26500c 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h @@ -116,6 +116,11 @@ struct mlx4_ib_mr {  	struct ib_umem	       *umem;  }; +struct mlx4_ib_mw { +	struct ib_mw		ibmw; +	struct mlx4_mw		mmw; +}; +  struct mlx4_ib_fast_reg_page_list {  	struct ib_fast_reg_page_list	ibfrpl;  	__be64			       *mapped_page_list; @@ -533,6 +538,11 @@ static inline struct mlx4_ib_mr *to_mmr(struct ib_mr *ibmr)  	return container_of(ibmr, struct mlx4_ib_mr, ibmr);  } +static inline struct mlx4_ib_mw *to_mmw(struct ib_mw *ibmw) +{ +	return container_of(ibmw, 
struct mlx4_ib_mw, ibmw); +} +  static inline struct mlx4_ib_fast_reg_page_list *to_mfrpl(struct ib_fast_reg_page_list *ibfrpl)  {  	return container_of(ibfrpl, struct mlx4_ib_fast_reg_page_list, ibfrpl); @@ -581,6 +591,10 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,  				  u64 virt_addr, int access_flags,  				  struct ib_udata *udata);  int mlx4_ib_dereg_mr(struct ib_mr *mr); +struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type); +int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw, +		    struct ib_mw_bind *mw_bind); +int mlx4_ib_dealloc_mw(struct ib_mw *mw);  struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,  					int max_page_list_len);  struct ib_fast_reg_page_list *mlx4_ib_alloc_fast_reg_page_list(struct ib_device *ibdev, @@ -652,12 +666,12 @@ int __mlx4_ib_query_gid(struct ib_device *ibdev, u8 port, int index,  int mlx4_ib_resolve_grh(struct mlx4_ib_dev *dev, const struct ib_ah_attr *ah_attr,  			u8 *mac, int *is_mcast, u8 port); -static inline int mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah) +static inline bool mlx4_ib_ah_grh_present(struct mlx4_ib_ah *ah)  {  	u8 port = be32_to_cpu(ah->av.ib.port_pd) >> 24 & 3;  	if (rdma_port_get_link_layer(ah->ibah.device, port) == IB_LINK_LAYER_ETHERNET) -		return 1; +		return true;  	return !!(ah->av.ib.g_slid & 0x80);  } diff --git a/drivers/infiniband/hw/mlx4/mr.c b/drivers/infiniband/hw/mlx4/mr.c index bbaf6176f20..e471f089ff0 100644 --- a/drivers/infiniband/hw/mlx4/mr.c +++ b/drivers/infiniband/hw/mlx4/mr.c @@ -41,9 +41,19 @@ static u32 convert_access(int acc)  	       (acc & IB_ACCESS_REMOTE_WRITE  ? MLX4_PERM_REMOTE_WRITE : 0) |  	       (acc & IB_ACCESS_REMOTE_READ   ? MLX4_PERM_REMOTE_READ  : 0) |  	       (acc & IB_ACCESS_LOCAL_WRITE   ? MLX4_PERM_LOCAL_WRITE  : 0) | +	       (acc & IB_ACCESS_MW_BIND	      ? 
MLX4_PERM_BIND_MW      : 0) |  	       MLX4_PERM_LOCAL_READ;  } +static enum mlx4_mw_type to_mlx4_type(enum ib_mw_type type) +{ +	switch (type) { +	case IB_MW_TYPE_1:	return MLX4_MW_TYPE_1; +	case IB_MW_TYPE_2:	return MLX4_MW_TYPE_2; +	default:		return -1; +	} +} +  struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)  {  	struct mlx4_ib_mr *mr; @@ -68,7 +78,7 @@ struct ib_mr *mlx4_ib_get_dma_mr(struct ib_pd *pd, int acc)  	return &mr->ibmr;  err_mr: -	mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr); +	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);  err_free:  	kfree(mr); @@ -163,7 +173,7 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,  	return &mr->ibmr;  err_mr: -	mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr); +	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &mr->mmr);  err_umem:  	ib_umem_release(mr->umem); @@ -177,8 +187,11 @@ err_free:  int mlx4_ib_dereg_mr(struct ib_mr *ibmr)  {  	struct mlx4_ib_mr *mr = to_mmr(ibmr); +	int ret; -	mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr); +	ret = mlx4_mr_free(to_mdev(ibmr->device)->dev, &mr->mmr); +	if (ret) +		return ret;  	if (mr->umem)  		ib_umem_release(mr->umem);  	kfree(mr); @@ -186,6 +199,70 @@ int mlx4_ib_dereg_mr(struct ib_mr *ibmr)  	return 0;  } +struct ib_mw *mlx4_ib_alloc_mw(struct ib_pd *pd, enum ib_mw_type type) +{ +	struct mlx4_ib_dev *dev = to_mdev(pd->device); +	struct mlx4_ib_mw *mw; +	int err; + +	mw = kmalloc(sizeof(*mw), GFP_KERNEL); +	if (!mw) +		return ERR_PTR(-ENOMEM); + +	err = mlx4_mw_alloc(dev->dev, to_mpd(pd)->pdn, +			    to_mlx4_type(type), &mw->mmw); +	if (err) +		goto err_free; + +	err = mlx4_mw_enable(dev->dev, &mw->mmw); +	if (err) +		goto err_mw; + +	mw->ibmw.rkey = mw->mmw.key; + +	return &mw->ibmw; + +err_mw: +	mlx4_mw_free(dev->dev, &mw->mmw); + +err_free: +	kfree(mw); + +	return ERR_PTR(err); +} + +int mlx4_ib_bind_mw(struct ib_qp *qp, struct ib_mw *mw, +		    struct ib_mw_bind *mw_bind) +{ +	struct ib_send_wr  wr; +	struct ib_send_wr *bad_wr; +	int ret; + +	memset(&wr, 0, sizeof(wr)); +	wr.opcode               = IB_WR_BIND_MW; +	wr.wr_id                = mw_bind->wr_id; +	wr.send_flags           = mw_bind->send_flags; +	wr.wr.bind_mw.mw        = mw; +	wr.wr.bind_mw.bind_info = mw_bind->bind_info; +	wr.wr.bind_mw.rkey      = ib_inc_rkey(mw->rkey); + +	ret = mlx4_ib_post_send(qp, &wr, &bad_wr); +	if (!ret) +		mw->rkey = wr.wr.bind_mw.rkey; + +	return ret; +} + +int mlx4_ib_dealloc_mw(struct ib_mw *ibmw) +{ +	struct mlx4_ib_mw *mw = to_mmw(ibmw); + +	mlx4_mw_free(to_mdev(ibmw->device)->dev, &mw->mmw); +	kfree(mw); + +	return 0; +} +  struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,  					int max_page_list_len)  { @@ -212,7 +289,7 @@ struct ib_mr *mlx4_ib_alloc_fast_reg_mr(struct ib_pd *pd,  	return &mr->ibmr;  err_mr: -	mlx4_mr_free(dev->dev, &mr->mmr); +	(void) mlx4_mr_free(dev->dev, &mr->mmr);  err_free:  	kfree(mr); @@ -291,7 +368,7 @@ struct ib_fmr *mlx4_ib_fmr_alloc(struct ib_pd *pd, int acc,  	return &fmr->ibfmr;  err_mr: -	mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr); +	(void) mlx4_mr_free(to_mdev(pd->device)->dev, &fmr->mfmr.mr);  err_free:  	kfree(fmr); diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 19e0637220b..35cced2a4da 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c @@ -104,6 +104,7 @@ static const __be32 mlx4_ib_opcode[] = {  	[IB_WR_FAST_REG_MR]			= cpu_to_be32(MLX4_OPCODE_FMR),  	[IB_WR_MASKED_ATOMIC_CMP_AND_SWP]	= 
cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_CS),  	[IB_WR_MASKED_ATOMIC_FETCH_AND_ADD]	= cpu_to_be32(MLX4_OPCODE_MASKED_ATOMIC_FA), +	[IB_WR_BIND_MW]				= cpu_to_be32(MLX4_OPCODE_BIND_MW),  };  static struct mlx4_ib_sqp *to_msqp(struct mlx4_ib_qp *mqp) @@ -1746,11 +1747,11 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, struct ib_send_wr *wr,  	int header_size;  	int spc;  	int i; -	int is_eth; -	int is_vlan = 0; -	int is_grh; -	u16 vlan;  	int err = 0; +	u16 vlan = 0xffff; +	bool is_eth; +	bool is_vlan = false; +	bool is_grh;  	send_size = 0;  	for (i = 0; i < wr->num_sge; ++i) @@ -1953,9 +1954,12 @@ static int mlx4_wq_overflow(struct mlx4_ib_wq *wq, int nreq, struct ib_cq *ib_cq  static __be32 convert_access(int acc)  { -	return (acc & IB_ACCESS_REMOTE_ATOMIC ? cpu_to_be32(MLX4_WQE_FMR_PERM_ATOMIC)       : 0) | -	       (acc & IB_ACCESS_REMOTE_WRITE  ? cpu_to_be32(MLX4_WQE_FMR_PERM_REMOTE_WRITE) : 0) | -	       (acc & IB_ACCESS_REMOTE_READ   ? cpu_to_be32(MLX4_WQE_FMR_PERM_REMOTE_READ)  : 0) | +	return (acc & IB_ACCESS_REMOTE_ATOMIC ? +		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC)       : 0) | +	       (acc & IB_ACCESS_REMOTE_WRITE  ? +		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE) : 0) | +	       (acc & IB_ACCESS_REMOTE_READ   ? +		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ)  : 0) |  	       (acc & IB_ACCESS_LOCAL_WRITE   ? cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_WRITE)  : 0) |  		cpu_to_be32(MLX4_WQE_FMR_PERM_LOCAL_READ);  } @@ -1981,12 +1985,28 @@ static void set_fmr_seg(struct mlx4_wqe_fmr_seg *fseg, struct ib_send_wr *wr)  	fseg->reserved[1]	= 0;  } +static void set_bind_seg(struct mlx4_wqe_bind_seg *bseg, struct ib_send_wr *wr) +{ +	bseg->flags1 = +		convert_access(wr->wr.bind_mw.bind_info.mw_access_flags) & +		cpu_to_be32(MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_READ  | +			    MLX4_WQE_FMR_AND_BIND_PERM_REMOTE_WRITE | +			    MLX4_WQE_FMR_AND_BIND_PERM_ATOMIC); +	bseg->flags2 = 0; +	if (wr->wr.bind_mw.mw->type == IB_MW_TYPE_2) +		bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_TYPE_2); +	if (wr->wr.bind_mw.bind_info.mw_access_flags & IB_ZERO_BASED) +		bseg->flags2 |= cpu_to_be32(MLX4_WQE_BIND_ZERO_BASED); +	bseg->new_rkey = cpu_to_be32(wr->wr.bind_mw.rkey); +	bseg->lkey = cpu_to_be32(wr->wr.bind_mw.bind_info.mr->lkey); +	bseg->addr = cpu_to_be64(wr->wr.bind_mw.bind_info.addr); +	bseg->length = cpu_to_be64(wr->wr.bind_mw.bind_info.length); +} +  static void set_local_inv_seg(struct mlx4_wqe_local_inval_seg *iseg, u32 rkey)  { -	iseg->flags	= 0; -	iseg->mem_key	= cpu_to_be32(rkey); -	iseg->guest_id	= 0; -	iseg->pa	= 0; +	memset(iseg, 0, sizeof(*iseg)); +	iseg->mem_key = cpu_to_be32(rkey);  }  static __always_inline void set_raddr_seg(struct mlx4_wqe_raddr_seg *rseg, @@ -2291,6 +2311,13 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,  				size += sizeof (struct mlx4_wqe_fmr_seg) / 16;  				break; +			case IB_WR_BIND_MW: +				ctrl->srcrb_flags |= +					cpu_to_be32(MLX4_WQE_CTRL_STRONG_ORDER); +				set_bind_seg(wqe, wr); +				wqe  += sizeof(struct mlx4_wqe_bind_seg); +				size += sizeof(struct mlx4_wqe_bind_seg) / 16; +				break;  			default:  				/* No extra segments required for sends */  				break; diff --git a/drivers/infiniband/hw/mlx4/sysfs.c b/drivers/infiniband/hw/mlx4/sysfs.c index 5b2a01dfb90..97516eb363b 100644 --- a/drivers/infiniband/hw/mlx4/sysfs.c +++ b/drivers/infiniband/hw/mlx4/sysfs.c @@ -732,7 +732,7 @@ int mlx4_ib_device_register_sysfs(struct mlx4_ib_dev *dev)  	dev->ports_parent =  		kobject_create_and_add("ports",  				       
kobject_get(dev->iov_parent)); -	if (!dev->iov_parent) { +	if (!dev->ports_parent) {  		ret = -ENOMEM;  		goto err_ports;  	} diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c index aa12a533ae9..87897b95666 100644 --- a/drivers/infiniband/hw/mthca/mthca_main.c +++ b/drivers/infiniband/hw/mthca/mthca_main.c @@ -130,7 +130,7 @@ static int log_mtts_per_seg = ilog2(MTHCA_MTT_SEG_SIZE / 8);  module_param_named(log_mtts_per_seg, log_mtts_per_seg, int, 0444);  MODULE_PARM_DESC(log_mtts_per_seg, "Log2 number of MTT entries per segment (1-5)"); -static char mthca_version[] __devinitdata = +static char mthca_version[] =  	DRV_NAME ": Mellanox InfiniBand HCA driver v"  	DRV_VERSION " (" DRV_RELDATE ")\n"; @@ -1139,8 +1139,7 @@ int __mthca_restart_one(struct pci_dev *pdev)  	return __mthca_init_one(pdev, hca_type);  } -static int __devinit mthca_init_one(struct pci_dev *pdev, -				    const struct pci_device_id *id) +static int mthca_init_one(struct pci_dev *pdev, const struct pci_device_id *id)  {  	int ret; @@ -1162,7 +1161,7 @@ static int __devinit mthca_init_one(struct pci_dev *pdev,  	return ret;  } -static void __devexit mthca_remove_one(struct pci_dev *pdev) +static void mthca_remove_one(struct pci_dev *pdev)  {  	mutex_lock(&mthca_device_mutex);  	__mthca_remove_one(pdev); @@ -1199,7 +1198,7 @@ static struct pci_driver mthca_driver = {  	.name		= DRV_NAME,  	.id_table	= mthca_pci_table,  	.probe		= mthca_init_one, -	.remove		= __devexit_p(mthca_remove_one) +	.remove		= mthca_remove_one,  };  static void __init __mthca_check_profile_val(const char *name, int *pval, diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c index 748db2d3e46..429141078ee 100644 --- a/drivers/infiniband/hw/nes/nes.c +++ b/drivers/infiniband/hw/nes/nes.c @@ -135,6 +135,7 @@ static int nes_inetaddr_event(struct notifier_block *notifier,  	struct net_device *event_netdev = ifa->ifa_dev->dev;  	struct nes_device *nesdev;  	struct net_device *netdev; +	struct net_device *upper_dev;  	struct nes_vnic *nesvnic;  	unsigned int is_bonded; @@ -145,8 +146,9 @@ static int nes_inetaddr_event(struct notifier_block *notifier,  				nesdev, nesdev->netdev[0]->name);  		netdev = nesdev->netdev[0];  		nesvnic = netdev_priv(netdev); +		upper_dev = netdev_master_upper_dev_get(netdev);  		is_bonded = netif_is_bond_slave(netdev) && -			    (netdev->master == event_netdev); +			    (upper_dev == event_netdev);  		if ((netdev == event_netdev) || is_bonded) {  			if (nesvnic->rdma_enabled == 0) {  				nes_debug(NES_DBG_NETDEV, "Returning without processing event for %s since" @@ -179,9 +181,9 @@ static int nes_inetaddr_event(struct notifier_block *notifier,  					/* fall through */  				case NETDEV_CHANGEADDR:  					/* Add the address to the IP table */ -					if (netdev->master) +					if (upper_dev)  						nesvnic->local_ipaddr = -							((struct in_device *)netdev->master->ip_ptr)->ifa_list->ifa_address; +							((struct in_device *)upper_dev->ip_ptr)->ifa_list->ifa_address;  					else  						nesvnic->local_ipaddr = ifa->ifa_address; @@ -444,7 +446,7 @@ static irqreturn_t nes_interrupt(int irq, void *dev_id)  /**   * nes_probe - Device initialization   */ -static int __devinit nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent) +static int nes_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)  {  	struct net_device *netdev = NULL;  	struct nes_device *nesdev = NULL; @@ -749,7 +751,7 @@ static int __devinit nes_probe(struct pci_dev *pcidev, 
const struct pci_device_i  /**   * nes_remove - unload from kernel   */ -static void __devexit nes_remove(struct pci_dev *pcidev) +static void nes_remove(struct pci_dev *pcidev)  {  	struct nes_device *nesdev = pci_get_drvdata(pcidev);  	struct net_device *netdev; @@ -810,7 +812,7 @@ static struct pci_driver nes_pci_driver = {  	.name = DRV_NAME,  	.id_table = nes_pci_table,  	.probe = nes_probe, -	.remove = __devexit_p(nes_remove), +	.remove = nes_remove,  };  static ssize_t nes_show_adapter(struct device_driver *ddp, char *buf) diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c index 22ea67eea5d..24b9f1a0107 100644 --- a/drivers/infiniband/hw/nes/nes_cm.c +++ b/drivers/infiniband/hw/nes/nes_cm.c @@ -1340,7 +1340,7 @@ static int nes_addr_resolve_neigh(struct nes_vnic *nesvnic, u32 dst_ip, int arpi  	}  	if (netif_is_bond_slave(nesvnic->netdev)) -		netdev = nesvnic->netdev->master; +		netdev = netdev_master_upper_dev_get(nesvnic->netdev);  	else  		netdev = nesvnic->netdev; diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c index 9542e1644a5..85cf4d1ac44 100644 --- a/drivers/infiniband/hw/nes/nes_nic.c +++ b/drivers/infiniband/hw/nes/nes_nic.c @@ -1317,11 +1317,13 @@ static void nes_netdev_get_drvinfo(struct net_device *netdev,  	struct nes_vnic *nesvnic = netdev_priv(netdev);  	struct nes_adapter *nesadapter = nesvnic->nesdev->nesadapter; -	strcpy(drvinfo->driver, DRV_NAME); -	strcpy(drvinfo->bus_info, pci_name(nesvnic->nesdev->pcidev)); -	sprintf(drvinfo->fw_version, "%u.%u", nesadapter->firmware_version>>16, -				nesadapter->firmware_version & 0x000000ff); -	strcpy(drvinfo->version, DRV_VERSION); +	strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); +	strlcpy(drvinfo->bus_info, pci_name(nesvnic->nesdev->pcidev), +		sizeof(drvinfo->bus_info)); +	snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), +		 "%u.%u", nesadapter->firmware_version >> 16, +		 nesadapter->firmware_version & 0x000000ff); +	strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));  	drvinfo->testinfo_len = 0;  	drvinfo->eedump_len = 0;  	drvinfo->regdump_len = 0; @@ -1703,7 +1705,6 @@ struct net_device *nes_netdev_init(struct nes_device *nesdev,  	netdev->dev_addr[3] = (u8)(u64temp>>16);  	netdev->dev_addr[4] = (u8)(u64temp>>8);  	netdev->dev_addr[5] = (u8)u64temp; -	memcpy(netdev->perm_addr, netdev->dev_addr, 6);  	netdev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM | NETIF_F_HW_VLAN_RX;  	if ((nesvnic->logical_port < 2) || (nesdev->nesadapter->hw_rev != NE020_REV)) diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c index 07e4fbad987..8f67fe2e91e 100644 --- a/drivers/infiniband/hw/nes/nes_verbs.c +++ b/drivers/infiniband/hw/nes/nes_verbs.c @@ -55,7 +55,8 @@ static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);  /**   * nes_alloc_mw   */ -static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd) { +static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd, enum ib_mw_type type) +{  	struct nes_pd *nespd = to_nespd(ibpd);  	struct nes_vnic *nesvnic = to_nesvnic(ibpd->device);  	struct nes_device *nesdev = nesvnic->nesdev; @@ -71,6 +72,9 @@ static struct ib_mw *nes_alloc_mw(struct ib_pd *ibpd) {  	u32 driver_key = 0;  	u8 stag_key = 0; +	if (type != IB_MW_TYPE_1) +		return ERR_PTR(-EINVAL); +  	get_random_bytes(&next_stag_index, sizeof(next_stag_index));  	stag_key = (u8)next_stag_index; @@ -244,20 +248,19 @@ static int nes_bind_mw(struct ib_qp *ibqp, struct ib_mw 
*ibmw,  	if (ibmw_bind->send_flags & IB_SEND_SIGNALED)  		wqe_misc |= NES_IWARP_SQ_WQE_SIGNALED_COMPL; -	if (ibmw_bind->mw_access_flags & IB_ACCESS_REMOTE_WRITE) { +	if (ibmw_bind->bind_info.mw_access_flags & IB_ACCESS_REMOTE_WRITE)  		wqe_misc |= NES_CQP_STAG_RIGHTS_REMOTE_WRITE; -	} -	if (ibmw_bind->mw_access_flags & IB_ACCESS_REMOTE_READ) { +	if (ibmw_bind->bind_info.mw_access_flags & IB_ACCESS_REMOTE_READ)  		wqe_misc |= NES_CQP_STAG_RIGHTS_REMOTE_READ; -	}  	set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_WQE_MISC_IDX, wqe_misc); -	set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MR_IDX, ibmw_bind->mr->lkey); +	set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MR_IDX, +			    ibmw_bind->bind_info.mr->lkey);  	set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_MW_IDX, ibmw->rkey);  	set_wqe_32bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_LENGTH_LOW_IDX, -			ibmw_bind->length); +			ibmw_bind->bind_info.length);  	wqe->wqe_words[NES_IWARP_SQ_BIND_WQE_LENGTH_HIGH_IDX] = 0; -	u64temp = (u64)ibmw_bind->addr; +	u64temp = (u64)ibmw_bind->bind_info.addr;  	set_wqe_64bit_value(wqe->wqe_words, NES_IWARP_SQ_BIND_WQE_VA_FBO_LOW_IDX, u64temp);  	head++; diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c index c4e0131f1b5..48928c8e777 100644 --- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c +++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c @@ -51,18 +51,6 @@ static DEFINE_IDR(ocrdma_dev_id);  static union ib_gid ocrdma_zero_sgid; -static int ocrdma_get_instance(void) -{ -	int instance = 0; - -	/* Assign an unused number */ -	if (!idr_pre_get(&ocrdma_dev_id, GFP_KERNEL)) -		return -1; -	if (idr_get_new(&ocrdma_dev_id, NULL, &instance)) -		return -1; -	return instance; -} -  void ocrdma_get_guid(struct ocrdma_dev *dev, u8 *guid)  {  	u8 mac_addr[6]; @@ -416,7 +404,7 @@ static struct ocrdma_dev *ocrdma_add(struct be_dev_info *dev_info)  		goto idr_err;  	memcpy(&dev->nic_info, dev_info, sizeof(*dev_info)); -	dev->id = ocrdma_get_instance(); +	dev->id = idr_alloc(&ocrdma_dev_id, NULL, 0, 0, GFP_KERNEL);  	if (dev->id < 0)  		goto idr_err; diff --git a/drivers/infiniband/hw/qib/qib_file_ops.c b/drivers/infiniband/hw/qib/qib_file_ops.c index 959a5c4ff81..4f7aa301b3b 100644 --- a/drivers/infiniband/hw/qib/qib_file_ops.c +++ b/drivers/infiniband/hw/qib/qib_file_ops.c @@ -1524,7 +1524,7 @@ static int qib_assign_ctxt(struct file *fp, const struct qib_user_info *uinfo)  		}  	} -	i_minor = iminor(fp->f_dentry->d_inode) - QIB_USER_MINOR_BASE; +	i_minor = iminor(file_inode(fp)) - QIB_USER_MINOR_BASE;  	if (i_minor)  		ret = find_free_ctxt(i_minor - 1, fp, uinfo);  	else diff --git a/drivers/infiniband/hw/qib/qib_fs.c b/drivers/infiniband/hw/qib/qib_fs.c index 65a2a23f6f8..644bd6f6467 100644 --- a/drivers/infiniband/hw/qib/qib_fs.c +++ b/drivers/infiniband/hw/qib/qib_fs.c @@ -45,7 +45,7 @@  static struct super_block *qib_super; -#define private2dd(file) ((file)->f_dentry->d_inode->i_private) +#define private2dd(file) (file_inode(file)->i_private)  static int qibfs_mknod(struct inode *dir, struct dentry *dentry,  		       umode_t mode, const struct file_operations *fops, @@ -171,7 +171,7 @@ static const struct file_operations cntr_ops[] = {  };  /* - * Could use file->f_dentry->d_inode->i_ino to figure out which file, + * Could use file_inode(file)->i_ino to figure out which file,   * instead of separate routine for each, but for now, this works...   
*/ diff --git a/drivers/infiniband/hw/qib/qib_init.c b/drivers/infiniband/hw/qib/qib_init.c index 4443adfcd9e..50e33aa0b4e 100644 --- a/drivers/infiniband/hw/qib/qib_init.c +++ b/drivers/infiniband/hw/qib/qib_init.c @@ -1060,22 +1060,23 @@ struct qib_devdata *qib_alloc_devdata(struct pci_dev *pdev, size_t extra)  	struct qib_devdata *dd;  	int ret; -	if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) { -		dd = ERR_PTR(-ENOMEM); -		goto bail; -	} -  	dd = (struct qib_devdata *) ib_alloc_device(sizeof(*dd) + extra);  	if (!dd) {  		dd = ERR_PTR(-ENOMEM);  		goto bail;  	} +	idr_preload(GFP_KERNEL);  	spin_lock_irqsave(&qib_devs_lock, flags); -	ret = idr_get_new(&qib_unit_table, dd, &dd->unit); -	if (ret >= 0) + +	ret = idr_alloc(&qib_unit_table, dd, 0, 0, GFP_NOWAIT); +	if (ret >= 0) { +		dd->unit = ret;  		list_add(&dd->list, &qib_dev_list); +	} +  	spin_unlock_irqrestore(&qib_devs_lock, flags); +	idr_preload_end();  	if (ret < 0) {  		qib_early_err(&pdev->dev, @@ -1134,9 +1135,8 @@ void qib_disable_after_error(struct qib_devdata *dd)  		*dd->devstatusp |= QIB_STATUS_HWERROR;  } -static void __devexit qib_remove_one(struct pci_dev *); -static int __devinit qib_init_one(struct pci_dev *, -				  const struct pci_device_id *); +static void qib_remove_one(struct pci_dev *); +static int qib_init_one(struct pci_dev *, const struct pci_device_id *);  #define DRIVER_LOAD_MSG "QLogic " QIB_DRV_NAME " loaded: "  #define PFX QIB_DRV_NAME ": " @@ -1153,7 +1153,7 @@ MODULE_DEVICE_TABLE(pci, qib_pci_tbl);  struct pci_driver qib_driver = {  	.name = QIB_DRV_NAME,  	.probe = qib_init_one, -	.remove = __devexit_p(qib_remove_one), +	.remove = qib_remove_one,  	.id_table = qib_pci_tbl,  	.err_handler = &qib_pci_err_handler,  }; @@ -1181,11 +1181,6 @@ static int __init qlogic_ib_init(void)  	 * the PCI subsystem.  	 
*/  	idr_init(&qib_unit_table); -	if (!idr_pre_get(&qib_unit_table, GFP_KERNEL)) { -		pr_err("idr_pre_get() failed\n"); -		ret = -ENOMEM; -		goto bail_cq_wq; -	}  	ret = pci_register_driver(&qib_driver);  	if (ret < 0) { @@ -1200,7 +1195,6 @@ static int __init qlogic_ib_init(void)  bail_unit:  	idr_destroy(&qib_unit_table); -bail_cq_wq:  	destroy_workqueue(qib_cq_wq);  bail_dev:  	qib_dev_cleanup(); @@ -1342,8 +1336,7 @@ static void qib_postinit_cleanup(struct qib_devdata *dd)  	qib_free_devdata(dd);  } -static int __devinit qib_init_one(struct pci_dev *pdev, -				  const struct pci_device_id *ent) +static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)  {  	int ret, j, pidx, initfail;  	struct qib_devdata *dd = NULL; @@ -1448,7 +1441,7 @@ bail:  	return ret;  } -static void __devexit qib_remove_one(struct pci_dev *pdev) +static void qib_remove_one(struct pci_dev *pdev)  {  	struct qib_devdata *dd = pci_get_drvdata(pdev);  	int ret; diff --git a/drivers/infiniband/hw/qib/qib_qp.c b/drivers/infiniband/hw/qib/qib_qp.c index 4850d03870c..a6a2cc2ba26 100644 --- a/drivers/infiniband/hw/qib/qib_qp.c +++ b/drivers/infiniband/hw/qib/qib_qp.c @@ -263,20 +263,16 @@ static void remove_qp(struct qib_ibdev *dev, struct qib_qp *qp)  		struct qib_qp __rcu **qpp;  		qpp = &dev->qp_table[n]; -		q = rcu_dereference_protected(*qpp, -			lockdep_is_held(&dev->qpt_lock)); -		for (; q; qpp = &q->next) { +		for (; (q = rcu_dereference_protected(*qpp, +				lockdep_is_held(&dev->qpt_lock))) != NULL; +				qpp = &q->next)  			if (q == qp) {  				atomic_dec(&qp->refcount); -				*qpp = qp->next; -				rcu_assign_pointer(qp->next, NULL); -				q = rcu_dereference_protected(*qpp, -					lockdep_is_held(&dev->qpt_lock)); +				rcu_assign_pointer(*qpp, +					rcu_dereference_protected(qp->next, +					 lockdep_is_held(&dev->qpt_lock)));  				break;  			} -			q = rcu_dereference_protected(*qpp, -				lockdep_is_held(&dev->qpt_lock)); -		}  	}  	spin_unlock_irqrestore(&dev->qpt_lock, flags);  |
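The qib_qp.c hunk above tightens remove_qp() so that the victim's successor is published with rcu_assign_pointer() rather than a plain store, and each link is re-read under the writer lock. A minimal sketch of that unlink shape on a generic RCU-protected singly linked list, assuming hypothetical struct node, head, and list_lock; the rcu_* and lockdep calls are the real API:

	#include <linux/rcupdate.h>
	#include <linux/spinlock.h>

	struct node {
		struct node __rcu *next;
	};

	static struct node __rcu *head;
	static DEFINE_SPINLOCK(list_lock);

	static void remove_node(struct node *target)
	{
		struct node __rcu **pp = &head;
		struct node *n;

		spin_lock(&list_lock);
		for (; (n = rcu_dereference_protected(*pp,
				lockdep_is_held(&list_lock))) != NULL;
				pp = &n->next)
			if (n == target) {
				/* splice target out; readers under
				 * rcu_read_lock() see either the old or the
				 * new link, never a torn pointer */
				rcu_assign_pointer(*pp,
					rcu_dereference_protected(target->next,
						lockdep_is_held(&list_lock)));
				break;
			}
		spin_unlock(&list_lock);
		/* the caller must still wait out a grace period
		 * (e.g. via kfree_rcu()) before freeing target */
	}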