diff options
Diffstat (limited to 'drivers/infiniband/hw/ehca/ehca_qp.c')
| -rw-r--r-- | drivers/infiniband/hw/ehca/ehca_qp.c | 112 | 
1 file changed, 58 insertions(+), 54 deletions(-)
diff --git a/drivers/infiniband/hw/ehca/ehca_qp.c b/drivers/infiniband/hw/ehca/ehca_qp.c index 00c10815971..0338f1fabe8 100644 --- a/drivers/infiniband/hw/ehca/ehca_qp.c +++ b/drivers/infiniband/hw/ehca/ehca_qp.c @@ -461,7 +461,7 @@ static struct ehca_qp *internal_create_qp(  					      ib_device);  	struct ib_ucontext *context = NULL;  	u64 h_ret; -	int is_llqp = 0, has_srq = 0; +	int is_llqp = 0, has_srq = 0, is_user = 0;  	int qp_type, max_send_sge, max_recv_sge, ret;  	/* h_call's out parameters */ @@ -609,9 +609,6 @@ static struct ehca_qp *internal_create_qp(  		}  	} -	if (pd->uobject && udata) -		context = pd->uobject->context; -  	my_qp = kmem_cache_zalloc(qp_cache, GFP_KERNEL);  	if (!my_qp) {  		ehca_err(pd->device, "pd=%p not enough memory to alloc qp", pd); @@ -619,6 +616,11 @@ static struct ehca_qp *internal_create_qp(  		return ERR_PTR(-ENOMEM);  	} +	if (pd->uobject && udata) { +		is_user = 1; +		context = pd->uobject->context; +	} +  	atomic_set(&my_qp->nr_events, 0);  	init_waitqueue_head(&my_qp->wait_completion);  	spin_lock_init(&my_qp->spinlock_s); @@ -707,7 +709,7 @@ static struct ehca_qp *internal_create_qp(  			(parms.squeue.is_small || parms.rqueue.is_small);  	} -	h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms); +	h_ret = hipz_h_alloc_resource_qp(shca->ipz_hca_handle, &parms, is_user);  	if (h_ret != H_SUCCESS) {  		ehca_err(pd->device, "h_alloc_resource_qp() failed h_ret=%lli",  			 h_ret); @@ -769,18 +771,20 @@ static struct ehca_qp *internal_create_qp(  			goto create_qp_exit2;  		} -		my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length / -			 my_qp->ipz_squeue.qe_size; -		my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries * -					sizeof(struct ehca_qmap_entry)); -		if (!my_qp->sq_map.map) { -			ehca_err(pd->device, "Couldn't allocate squeue " -				 "map ret=%i", ret); -			goto create_qp_exit3; +		if (!is_user) { +			my_qp->sq_map.entries = my_qp->ipz_squeue.queue_length / +				my_qp->ipz_squeue.qe_size; +			
my_qp->sq_map.map = vmalloc(my_qp->sq_map.entries * +						    sizeof(struct ehca_qmap_entry)); +			if (!my_qp->sq_map.map) { +				ehca_err(pd->device, "Couldn't allocate squeue " +					 "map ret=%i", ret); +				goto create_qp_exit3; +			} +			INIT_LIST_HEAD(&my_qp->sq_err_node); +			/* to avoid the generation of bogus flush CQEs */ +			reset_queue_map(&my_qp->sq_map);  		} -		INIT_LIST_HEAD(&my_qp->sq_err_node); -		/* to avoid the generation of bogus flush CQEs */ -		reset_queue_map(&my_qp->sq_map);  	}  	if (HAS_RQ(my_qp)) { @@ -792,20 +796,21 @@ static struct ehca_qp *internal_create_qp(  				 "and pages ret=%i", ret);  			goto create_qp_exit4;  		} - -		my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length / -			my_qp->ipz_rqueue.qe_size; -		my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries * -				sizeof(struct ehca_qmap_entry)); -		if (!my_qp->rq_map.map) { -			ehca_err(pd->device, "Couldn't allocate squeue " -					"map ret=%i", ret); -			goto create_qp_exit5; +		if (!is_user) { +			my_qp->rq_map.entries = my_qp->ipz_rqueue.queue_length / +				my_qp->ipz_rqueue.qe_size; +			my_qp->rq_map.map = vmalloc(my_qp->rq_map.entries * +						    sizeof(struct ehca_qmap_entry)); +			if (!my_qp->rq_map.map) { +				ehca_err(pd->device, "Couldn't allocate squeue " +					 "map ret=%i", ret); +				goto create_qp_exit5; +			} +			INIT_LIST_HEAD(&my_qp->rq_err_node); +			/* to avoid the generation of bogus flush CQEs */ +			reset_queue_map(&my_qp->rq_map);  		} -		INIT_LIST_HEAD(&my_qp->rq_err_node); -		/* to avoid the generation of bogus flush CQEs */ -		reset_queue_map(&my_qp->rq_map); -	} else if (init_attr->srq) { +	} else if (init_attr->srq && !is_user) {  		/* this is a base QP, use the queue map of the SRQ */  		my_qp->rq_map = my_srq->rq_map;  		INIT_LIST_HEAD(&my_qp->rq_err_node); @@ -918,7 +923,7 @@ create_qp_exit7:  	kfree(my_qp->mod_qp_parm);  create_qp_exit6: -	if (HAS_RQ(my_qp)) +	if (HAS_RQ(my_qp) && !is_user)  		vfree(my_qp->rq_map.map);  create_qp_exit5: @@ 
-926,7 +931,7 @@ create_qp_exit5:  		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue);  create_qp_exit4: -	if (HAS_SQ(my_qp)) +	if (HAS_SQ(my_qp) && !is_user)  		vfree(my_qp->sq_map.map);  create_qp_exit3: @@ -1244,6 +1249,7 @@ static int internal_modify_qp(struct ib_qp *ibqp,  	u64 update_mask;  	u64 h_ret;  	int bad_wqe_cnt = 0; +	int is_user = 0;  	int squeue_locked = 0;  	unsigned long flags = 0; @@ -1266,6 +1272,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,  		ret = ehca2ib_return_code(h_ret);  		goto modify_qp_exit1;  	} +	if (ibqp->uobject) +		is_user = 1;  	qp_cur_state = ehca2ib_qp_state(mqpcb->qp_state); @@ -1728,7 +1736,8 @@ static int internal_modify_qp(struct ib_qp *ibqp,  			goto modify_qp_exit2;  		}  	} -	if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR)) { +	if ((qp_new_state == IB_QPS_ERR) && (qp_cur_state != IB_QPS_ERR) +	    && !is_user) {  		ret = check_for_left_cqes(my_qp, shca);  		if (ret)  			goto modify_qp_exit2; @@ -1738,16 +1747,17 @@ static int internal_modify_qp(struct ib_qp *ibqp,  		ipz_qeit_reset(&my_qp->ipz_rqueue);  		ipz_qeit_reset(&my_qp->ipz_squeue); -		if (qp_cur_state == IB_QPS_ERR) { +		if (qp_cur_state == IB_QPS_ERR && !is_user) {  			del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);  			if (HAS_RQ(my_qp))  				del_from_err_list(my_qp->recv_cq,  						  &my_qp->rq_err_node);  		} -		reset_queue_map(&my_qp->sq_map); +		if (!is_user) +			reset_queue_map(&my_qp->sq_map); -		if (HAS_RQ(my_qp)) +		if (HAS_RQ(my_qp) && !is_user)  			reset_queue_map(&my_qp->rq_map);  	} @@ -1952,19 +1962,13 @@ int ehca_query_qp(struct ib_qp *qp,  	qp_attr->cap.max_inline_data = my_qp->sq_max_inline_data_size;  	qp_attr->dest_qp_num = qpcb->dest_qp_nr; -	qp_attr->pkey_index = -		EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->prim_p_key_idx); - -	qp_attr->port_num = -		EHCA_BMASK_GET(MQPCB_PRIM_PHYS_PORT, qpcb->prim_phys_port); - +	qp_attr->pkey_index = qpcb->prim_p_key_idx; +	qp_attr->port_num = qpcb->prim_phys_port;  	
qp_attr->timeout = qpcb->timeout;  	qp_attr->retry_cnt = qpcb->retry_count;  	qp_attr->rnr_retry = qpcb->rnr_retry_count; -	qp_attr->alt_pkey_index = -		EHCA_BMASK_GET(MQPCB_PRIM_P_KEY_IDX, qpcb->alt_p_key_idx); - +	qp_attr->alt_pkey_index = qpcb->alt_p_key_idx;  	qp_attr->alt_port_num = qpcb->alt_phys_port;  	qp_attr->alt_timeout = qpcb->timeout_al; @@ -2051,8 +2055,7 @@ int ehca_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,  		update_mask |=  			EHCA_BMASK_SET(MQPCB_MASK_CURR_SRQ_LIMIT, 1)  			| EHCA_BMASK_SET(MQPCB_MASK_QP_AFF_ASYN_EV_LOG_REG, 1); -		mqpcb->curr_srq_limit = -			EHCA_BMASK_SET(MQPCB_CURR_SRQ_LIMIT, attr->srq_limit); +		mqpcb->curr_srq_limit = attr->srq_limit;  		mqpcb->qp_aff_asyn_ev_log_reg =  			EHCA_BMASK_SET(QPX_AAELOG_RESET_SRQ_LIMIT, 1);  	} @@ -2115,8 +2118,7 @@ int ehca_query_srq(struct ib_srq *srq, struct ib_srq_attr *srq_attr)  	srq_attr->max_wr = qpcb->max_nr_outst_recv_wr - 1;  	srq_attr->max_sge = 3; -	srq_attr->srq_limit = EHCA_BMASK_GET( -		MQPCB_CURR_SRQ_LIMIT, qpcb->curr_srq_limit); +	srq_attr->srq_limit = qpcb->curr_srq_limit;  	if (ehca_debug_level >= 2)  		ehca_dmp(qpcb, 4*70, "qp_num=%x", my_qp->real_qp_num); @@ -2138,10 +2140,12 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,  	int ret;  	u64 h_ret;  	u8 port_num; +	int is_user = 0;  	enum ib_qp_type	qp_type;  	unsigned long flags;  	if (uobject) { +		is_user = 1;  		if (my_qp->mm_count_galpa ||  		    my_qp->mm_count_rqueue || my_qp->mm_count_squeue) {  			ehca_err(dev, "Resources still referenced in " @@ -2168,10 +2172,10 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,  	 * SRQs will never get into an error list and do not have a recv_cq,  	 * so we need to skip them here.  	 
*/ -	if (HAS_RQ(my_qp) && !IS_SRQ(my_qp)) +	if (HAS_RQ(my_qp) && !IS_SRQ(my_qp) && !is_user)  		del_from_err_list(my_qp->recv_cq, &my_qp->rq_err_node); -	if (HAS_SQ(my_qp)) +	if (HAS_SQ(my_qp) && !is_user)  		del_from_err_list(my_qp->send_cq, &my_qp->sq_err_node);  	/* now wait until all pending events have completed */ @@ -2209,13 +2213,13 @@ static int internal_destroy_qp(struct ib_device *dev, struct ehca_qp *my_qp,  	if (HAS_RQ(my_qp)) {  		ipz_queue_dtor(my_pd, &my_qp->ipz_rqueue); - -		vfree(my_qp->rq_map.map); +		if (!is_user) +			vfree(my_qp->rq_map.map);  	}  	if (HAS_SQ(my_qp)) {  		ipz_queue_dtor(my_pd, &my_qp->ipz_squeue); - -		vfree(my_qp->sq_map.map); +		if (!is_user) +			vfree(my_qp->sq_map.map);  	}  	kmem_cache_free(qp_cache, my_qp);  	atomic_dec(&shca->num_qps);  |