| author | Arnd Bergmann <arnd@arndb.de> | 2013-05-06 23:38:00 +0200 | 
|---|---|---|
| committer | Arnd Bergmann <arnd@arndb.de> | 2013-05-06 23:38:00 +0200 | 
| commit | 4183bef2e093a2f0aab45f2d5fed82b0e02aeacf (patch) | |
| tree | e84c484db3746a0f3ead0fb7a2bb9e5a42b89e85 /drivers/vhost/tcm_vhost.c | |
| parent | 662478d060a39b8faf2b1fe2fbbb212556c2e052 (diff) | |
| parent | 775c4f66fd855e68a98fc5049003810fe98e2e20 (diff) | |
| download | olio-linux-3.10-4183bef2e093a2f0aab45f2d5fed82b0e02aeacf.tar.xz, olio-linux-3.10-4183bef2e093a2f0aab45f2d5fed82b0e02aeacf.zip | |
Merge branch 'late/dt' into next/dt2
This is support for the ARM Chromebook, originally scheduled
as a "late" pull request. Since it's already late now, we
can combine this into the existing next/dt2 branch.
* late/dt:
  ARM: exynos: dts: cros5250: add EC device
  ARM: dts: Add sbs-battery for exynos5250-snow
  ARM: dts: Add i2c-arbitrator bus for exynos5250-snow
  ARM: dts: Add chip-id controller node on Exynos4/5 SoC
  ARM: EXYNOS: Create virtual I/O mapping for Chip-ID controller using device tree
Diffstat (limited to 'drivers/vhost/tcm_vhost.c')
| -rw-r--r-- | drivers/vhost/tcm_vhost.c | 198 |
|---|---|---|

1 file changed, 130 insertions(+), 68 deletions(-)
```diff
diff --git a/drivers/vhost/tcm_vhost.c b/drivers/vhost/tcm_vhost.c
index 2968b493465..957a0b98a5d 100644
--- a/drivers/vhost/tcm_vhost.c
+++ b/drivers/vhost/tcm_vhost.c
@@ -74,9 +74,8 @@ enum {
 
 struct vhost_scsi {
 	/* Protected by vhost_scsi->dev.mutex */
-	struct tcm_vhost_tpg *vs_tpg[VHOST_SCSI_MAX_TARGET];
+	struct tcm_vhost_tpg **vs_tpg;
 	char vs_vhost_wwpn[TRANSPORT_IQN_LEN];
-	bool vs_endpoint;
 
 	struct vhost_dev dev;
 	struct vhost_virtqueue vqs[VHOST_SCSI_MAX_VQ];
@@ -579,9 +578,27 @@ static void tcm_vhost_submission_work(struct work_struct *work)
 	}
 }
 
+static void vhost_scsi_send_bad_target(struct vhost_scsi *vs,
+	struct vhost_virtqueue *vq, int head, unsigned out)
+{
+	struct virtio_scsi_cmd_resp __user *resp;
+	struct virtio_scsi_cmd_resp rsp;
+	int ret;
+
+	memset(&rsp, 0, sizeof(rsp));
+	rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
+	resp = vq->iov[out].iov_base;
+	ret = __copy_to_user(resp, &rsp, sizeof(rsp));
+	if (!ret)
+		vhost_add_used_and_signal(&vs->dev, vq, head, 0);
+	else
+		pr_err("Faulted on virtio_scsi_cmd_resp\n");
+}
+
 static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 	struct vhost_virtqueue *vq)
 {
+	struct tcm_vhost_tpg **vs_tpg;
 	struct virtio_scsi_cmd_req v_req;
 	struct tcm_vhost_tpg *tv_tpg;
 	struct tcm_vhost_cmd *tv_cmd;
@@ -590,8 +607,16 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 	int head, ret;
 	u8 target;
 
-	/* Must use ioctl VHOST_SCSI_SET_ENDPOINT */
-	if (unlikely(!vs->vs_endpoint))
+	/*
+	 * We can handle the vq only after the endpoint is setup by calling the
+	 * VHOST_SCSI_SET_ENDPOINT ioctl.
+	 *
+	 * TODO: Check that we are running from vhost_worker which acts
+	 * as read-side critical section for vhost kind of RCU.
+	 * See the comments in struct vhost_virtqueue in drivers/vhost/vhost.h
+	 */
+	vs_tpg = rcu_dereference_check(vq->private_data, 1);
+	if (!vs_tpg)
 		return;
 
 	mutex_lock(&vq->mutex);
@@ -661,23 +686,11 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 
 		/* Extract the tpgt */
 		target = v_req.lun[1];
-		tv_tpg = vs->vs_tpg[target];
+		tv_tpg = ACCESS_ONCE(vs_tpg[target]);
 
 		/* Target does not exist, fail the request */
 		if (unlikely(!tv_tpg)) {
-			struct virtio_scsi_cmd_resp __user *resp;
-			struct virtio_scsi_cmd_resp rsp;
-
-			memset(&rsp, 0, sizeof(rsp));
-			rsp.response = VIRTIO_SCSI_S_BAD_TARGET;
-			resp = vq->iov[out].iov_base;
-			ret = __copy_to_user(resp, &rsp, sizeof(rsp));
-			if (!ret)
-				vhost_add_used_and_signal(&vs->dev,
-							  vq, head, 0);
-			else
-				pr_err("Faulted on virtio_scsi_cmd_resp\n");
-
+			vhost_scsi_send_bad_target(vs, vq, head, out);
 			continue;
 		}
 
@@ -690,22 +703,13 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 		if (IS_ERR(tv_cmd)) {
 			vq_err(vq, "vhost_scsi_allocate_cmd failed %ld\n",
 					PTR_ERR(tv_cmd));
-			break;
+			goto err_cmd;
 		}
 		pr_debug("Allocated tv_cmd: %p exp_data_len: %d, data_direction"
 			": %d\n", tv_cmd, exp_data_len, data_direction);
 
 		tv_cmd->tvc_vhost = vs;
 		tv_cmd->tvc_vq = vq;
-
-		if (unlikely(vq->iov[out].iov_len !=
-				sizeof(struct virtio_scsi_cmd_resp))) {
-			vq_err(vq, "Expecting virtio_scsi_cmd_resp, got %zu"
-				" bytes, out: %d, in: %d\n",
-				vq->iov[out].iov_len, out, in);
-			break;
-		}
-
 		tv_cmd->tvc_resp = vq->iov[out].iov_base;
 
 		/*
@@ -725,7 +729,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 				" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
 				scsi_command_size(tv_cmd->tvc_cdb),
 				TCM_VHOST_MAX_CDB_SIZE);
-			break; /* TODO */
+			goto err_free;
 		}
 
 		tv_cmd->tvc_lun = ((v_req.lun[2] << 8) | v_req.lun[3]) & 0x3FFF;
@@ -738,7 +742,7 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 					data_direction == DMA_TO_DEVICE);
 			if (unlikely(ret)) {
 				vq_err(vq, "Failed to map iov to sgl\n");
-				break; /* TODO */
+				goto err_free;
 			}
 		}
 
@@ -759,6 +763,13 @@ static void vhost_scsi_handle_vq(struct vhost_scsi *vs,
 	}
 
 	mutex_unlock(&vq->mutex);
+	return;
+
+err_free:
+	vhost_scsi_free_cmd(tv_cmd);
+err_cmd:
+	vhost_scsi_send_bad_target(vs, vq, head, out);
+	mutex_unlock(&vq->mutex);
 }
 
 static void vhost_scsi_ctl_handle_kick(struct vhost_work *work)
@@ -780,6 +791,20 @@ static void vhost_scsi_handle_kick(struct vhost_work *work)
 	vhost_scsi_handle_vq(vs, vq);
 }
 
+static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
+{
+	vhost_poll_flush(&vs->dev.vqs[index].poll);
+}
+
+static void vhost_scsi_flush(struct vhost_scsi *vs)
+{
+	int i;
+
+	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
+		vhost_scsi_flush_vq(vs, i);
+	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
+}
+
 /*
  * Called from vhost_scsi_ioctl() context to walk the list of available
  * tcm_vhost_tpg with an active struct tcm_vhost_nexus
@@ -790,8 +815,10 @@ static int vhost_scsi_set_endpoint(
 {
 	struct tcm_vhost_tport *tv_tport;
 	struct tcm_vhost_tpg *tv_tpg;
+	struct tcm_vhost_tpg **vs_tpg;
+	struct vhost_virtqueue *vq;
+	int index, ret, i, len;
 	bool match = false;
-	int index, ret;
 
 	mutex_lock(&vs->dev.mutex);
 	/* Verify that ring has been setup correctly. */
@@ -803,6 +830,15 @@ static int vhost_scsi_set_endpoint(
 		}
 	}
 
+	len = sizeof(vs_tpg[0]) * VHOST_SCSI_MAX_TARGET;
+	vs_tpg = kzalloc(len, GFP_KERNEL);
+	if (!vs_tpg) {
+		mutex_unlock(&vs->dev.mutex);
+		return -ENOMEM;
+	}
+	if (vs->vs_tpg)
+		memcpy(vs_tpg, vs->vs_tpg, len);
+
 	mutex_lock(&tcm_vhost_mutex);
 	list_for_each_entry(tv_tpg, &tcm_vhost_list, tv_tpg_list) {
 		mutex_lock(&tv_tpg->tv_tpg_mutex);
@@ -817,14 +853,15 @@ static int vhost_scsi_set_endpoint(
 
 		tv_tport = tv_tpg->tport;
 		if (!strcmp(tv_tport->tport_name, t->vhost_wwpn)) {
-			if (vs->vs_tpg[tv_tpg->tport_tpgt]) {
+			if (vs->vs_tpg && vs->vs_tpg[tv_tpg->tport_tpgt]) {
 				mutex_unlock(&tv_tpg->tv_tpg_mutex);
 				mutex_unlock(&tcm_vhost_mutex);
 				mutex_unlock(&vs->dev.mutex);
+				kfree(vs_tpg);
 				return -EEXIST;
 			}
 			tv_tpg->tv_tpg_vhost_count++;
-			vs->vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
+			vs_tpg[tv_tpg->tport_tpgt] = tv_tpg;
 			smp_mb__after_atomic_inc();
 			match = true;
 		}
@@ -835,12 +872,27 @@ static int vhost_scsi_set_endpoint(
 	if (match) {
 		memcpy(vs->vs_vhost_wwpn, t->vhost_wwpn,
 		       sizeof(vs->vs_vhost_wwpn));
-		vs->vs_endpoint = true;
+		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+			vq = &vs->vqs[i];
+			/* Flushing the vhost_work acts as synchronize_rcu */
+			mutex_lock(&vq->mutex);
+			rcu_assign_pointer(vq->private_data, vs_tpg);
+			vhost_init_used(vq);
+			mutex_unlock(&vq->mutex);
+		}
 		ret = 0;
 	} else {
 		ret = -EEXIST;
 	}
 
+	/*
+	 * Act as synchronize_rcu to make sure access to
+	 * old vs->vs_tpg is finished.
+	 */
+	vhost_scsi_flush(vs);
+	kfree(vs->vs_tpg);
+	vs->vs_tpg = vs_tpg;
+
 	mutex_unlock(&vs->dev.mutex);
 	return ret;
 }
@@ -851,6 +903,8 @@ static int vhost_scsi_clear_endpoint(
 {
 	struct tcm_vhost_tport *tv_tport;
 	struct tcm_vhost_tpg *tv_tpg;
+	struct vhost_virtqueue *vq;
+	bool match = false;
 	int index, ret, i;
 	u8 target;
 
@@ -862,9 +916,14 @@ static int vhost_scsi_clear_endpoint(
 			goto err_dev;
 		}
 	}
+
+	if (!vs->vs_tpg) {
+		mutex_unlock(&vs->dev.mutex);
+		return 0;
+	}
+
 	for (i = 0; i < VHOST_SCSI_MAX_TARGET; i++) {
 		target = i;
-
 		tv_tpg = vs->vs_tpg[target];
 		if (!tv_tpg)
 			continue;
@@ -886,10 +945,27 @@ static int vhost_scsi_clear_endpoint(
 		}
 		tv_tpg->tv_tpg_vhost_count--;
 		vs->vs_tpg[target] = NULL;
-		vs->vs_endpoint = false;
+		match = true;
 		mutex_unlock(&tv_tpg->tv_tpg_mutex);
 	}
+	if (match) {
+		for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
+			vq = &vs->vqs[i];
+			/* Flushing the vhost_work acts as synchronize_rcu */
+			mutex_lock(&vq->mutex);
+			rcu_assign_pointer(vq->private_data, NULL);
+			mutex_unlock(&vq->mutex);
+		}
+	}
+	/*
+	 * Act as synchronize_rcu to make sure access to
+	 * old vs->vs_tpg is finished.
+	 */
+	vhost_scsi_flush(vs);
+	kfree(vs->vs_tpg);
+	vs->vs_tpg = NULL;
 	mutex_unlock(&vs->dev.mutex);
+
 	return 0;
 
 err_tpg:
@@ -899,6 +975,24 @@ err_dev:
 	return ret;
 }
 
+static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
+{
+	if (features & ~VHOST_SCSI_FEATURES)
+		return -EOPNOTSUPP;
+
+	mutex_lock(&vs->dev.mutex);
+	if ((features & (1 << VHOST_F_LOG_ALL)) &&
+	    !vhost_log_access_ok(&vs->dev)) {
+		mutex_unlock(&vs->dev.mutex);
+		return -EFAULT;
+	}
+	vs->dev.acked_features = features;
+	smp_wmb();
+	vhost_scsi_flush(vs);
+	mutex_unlock(&vs->dev.mutex);
+	return 0;
+}
+
 static int vhost_scsi_open(struct inode *inode, struct file *f)
 {
 	struct vhost_scsi *s;
@@ -939,38 +1033,6 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
 	return 0;
 }
 
-static void vhost_scsi_flush_vq(struct vhost_scsi *vs, int index)
-{
-	vhost_poll_flush(&vs->dev.vqs[index].poll);
-}
-
-static void vhost_scsi_flush(struct vhost_scsi *vs)
-{
-	int i;
-
-	for (i = 0; i < VHOST_SCSI_MAX_VQ; i++)
-		vhost_scsi_flush_vq(vs, i);
-	vhost_work_flush(&vs->dev, &vs->vs_completion_work);
-}
-
-static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
-{
-	if (features & ~VHOST_SCSI_FEATURES)
-		return -EOPNOTSUPP;
-
-	mutex_lock(&vs->dev.mutex);
-	if ((features & (1 << VHOST_F_LOG_ALL)) &&
-	    !vhost_log_access_ok(&vs->dev)) {
-		mutex_unlock(&vs->dev.mutex);
-		return -EFAULT;
-	}
-	vs->dev.acked_features = features;
-	smp_wmb();
-	vhost_scsi_flush(vs);
-	mutex_unlock(&vs->dev.mutex);
-	return 0;
-}
-
 static long vhost_scsi_ioctl(struct file *f, unsigned int ioctl,
 				unsigned long arg)
 {
```
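The core of this patch is the vhost-style "RCU" lifecycle: the endpoint table is published into `vq->private_data` with `rcu_assign_pointer()` under the vq mutex, readers dereference it only from the vhost worker, and flushing the worker via `vhost_scsi_flush()` stands in for `synchronize_rcu()` before the old table is freed. Below is a minimal userspace sketch of that publish/flush/free ordering, not the kernel code: every name in it (`ep_table`, `worker_flush`, `set_endpoint`) is invented for illustration, and a plain mutex stands in for `vhost_work_flush()`.

```c
/*
 * Userspace analogue of the update/read protocol in this patch.
 * A single worker thread plays the vhost worker; worker_flush()
 * plays vhost_scsi_flush(): once it returns, no reader can still
 * be using a table pointer loaded before the flush began.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_TARGET	8
#define NUM_REQUESTS	100

struct ep_table {
	int tpg[MAX_TARGET];	/* stands in for the tcm_vhost_tpg pointers */
};

static _Atomic(struct ep_table *) active;	/* like vq->private_data */
static pthread_mutex_t work_mu = PTHREAD_MUTEX_INITIALIZER;

/* Read side: runs only on the worker, like vhost_scsi_handle_vq(). */
static void handle_request(int target)
{
	struct ep_table *t = atomic_load_explicit(&active, memory_order_acquire);

	if (!t || !t->tpg[target])	/* endpoint not set up: drop request */
		return;
	printf("target %d -> tpg %d\n", target, t->tpg[target]);
}

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < NUM_REQUESTS; i++) {
		/* The table is only dereferenced while work_mu is held. */
		pthread_mutex_lock(&work_mu);
		handle_request(i % MAX_TARGET);
		pthread_mutex_unlock(&work_mu);
	}
	return NULL;
}

/* Acquiring and releasing work_mu waits out any in-flight reader. */
static void worker_flush(void)
{
	pthread_mutex_lock(&work_mu);
	pthread_mutex_unlock(&work_mu);
}

/*
 * Update side: copy, modify, publish, flush, then free -- the same order
 * vhost_scsi_set_endpoint() uses (kzalloc/memcpy, rcu_assign_pointer,
 * vhost_scsi_flush, kfree).
 */
static void set_endpoint(int target, int tpg)
{
	struct ep_table *old = atomic_load(&active);
	struct ep_table *new = calloc(1, sizeof(*new));

	if (old)
		*new = *old;		/* carry over existing entries */
	new->tpg[target] = tpg;
	atomic_store_explicit(&active, new, memory_order_release);
	worker_flush();			/* acts as synchronize_rcu() */
	free(old);			/* safe: no reader can still see it */
}

int main(void)
{
	pthread_t w;

	pthread_create(&w, NULL, worker, NULL);
	set_endpoint(1, 42);
	set_endpoint(2, 7);
	pthread_join(w, NULL);
	free(atomic_load(&active));
	return 0;
}
```

The ordering mirrored from the patch is publish, then flush, then free: without the flush, `free(old)` could race with a reader that loaded the old pointer just before the store, which is exactly why both `vhost_scsi_set_endpoint()` and `vhost_scsi_clear_endpoint()` call `vhost_scsi_flush()` before `kfree(vs->vs_tpg)`.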