Diffstat (limited to 'drivers/target')
41 files changed, 1254 insertions, 2527 deletions
diff --git a/drivers/target/Makefile b/drivers/target/Makefile index 1060c7b7f80..62e54053bcd 100644 --- a/drivers/target/Makefile +++ b/drivers/target/Makefile @@ -6,7 +6,6 @@ target_core_mod-y		:= target_core_configfs.o \  				   target_core_hba.o \  				   target_core_pr.o \  				   target_core_alua.o \ -				   target_core_scdb.o \  				   target_core_tmr.o \  				   target_core_tpg.o \  				   target_core_transport.o \ diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c index c24fb10de60..4d01768fcd9 100644 --- a/drivers/target/iscsi/iscsi_target.c +++ b/drivers/target/iscsi/iscsi_target.c @@ -765,7 +765,7 @@ static int iscsit_allocate_iovecs(struct iscsi_cmd *cmd)  	u32 iov_count = (cmd->se_cmd.t_data_nents == 0) ? 1 :  				cmd->se_cmd.t_data_nents; -	iov_count += TRANSPORT_IOV_DATA_BUFFER; +	iov_count += ISCSI_IOV_DATA_BUFFER;  	cmd->iov_data = kzalloc(iov_count * sizeof(struct kvec), GFP_KERNEL);  	if (!cmd->iov_data) { @@ -2243,7 +2243,6 @@ static int iscsit_handle_snack(  	case 0:  		return iscsit_handle_recovery_datain_or_r2t(conn, buf,  			hdr->itt, hdr->ttt, hdr->begrun, hdr->runlength); -		return 0;  	case ISCSI_FLAG_SNACK_TYPE_STATUS:  		return iscsit_handle_status_snack(conn, hdr->itt, hdr->ttt,  			hdr->begrun, hdr->runlength); @@ -3539,16 +3538,8 @@ get_immediate:  				spin_lock_bh(&conn->cmd_lock);  				list_del(&cmd->i_list);  				spin_unlock_bh(&conn->cmd_lock); -				/* -				 * Determine if a struct se_cmd is assoicated with -				 * this struct iscsi_cmd. -				 */ -				if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) && -				    !(cmd->tmr_req)) -					iscsit_release_cmd(cmd); -				else -					transport_generic_free_cmd(&cmd->se_cmd, -								1, 0); + +				iscsit_free_cmd(cmd);  				goto get_immediate;  			case ISTATE_SEND_NOPIN_WANT_RESPONSE:  				spin_unlock_bh(&cmd->istate_lock); @@ -3941,7 +3932,6 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)  {  	struct iscsi_cmd *cmd = NULL, *cmd_tmp = NULL;  	struct iscsi_session *sess = conn->sess; -	struct se_cmd *se_cmd;  	/*  	 * We expect this function to only ever be called from either RX or TX  	 * thread context via iscsit_close_connection() once the other context @@ -3949,35 +3939,13 @@ static void iscsit_release_commands_from_conn(struct iscsi_conn *conn)  	 */  	spin_lock_bh(&conn->cmd_lock);  	list_for_each_entry_safe(cmd, cmd_tmp, &conn->conn_cmd_list, i_list) { -		if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD)) { -			list_del(&cmd->i_list); -			spin_unlock_bh(&conn->cmd_lock); -			iscsit_increment_maxcmdsn(cmd, sess); -			se_cmd = &cmd->se_cmd; -			/* -			 * Special cases for active iSCSI TMR, and -			 * transport_lookup_cmd_lun() failing from -			 * iscsit_get_lun_for_cmd() in iscsit_handle_scsi_cmd(). 
-			 */ -			if (cmd->tmr_req && se_cmd->transport_wait_for_tasks) -				se_cmd->transport_wait_for_tasks(se_cmd, 1, 1); -			else if (cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) -				transport_release_cmd(se_cmd); -			else -				iscsit_release_cmd(cmd); - -			spin_lock_bh(&conn->cmd_lock); -			continue; -		}  		list_del(&cmd->i_list);  		spin_unlock_bh(&conn->cmd_lock);  		iscsit_increment_maxcmdsn(cmd, sess); -		se_cmd = &cmd->se_cmd; -		if (se_cmd->transport_wait_for_tasks) -			se_cmd->transport_wait_for_tasks(se_cmd, 1, 1); +		iscsit_free_cmd(cmd);  		spin_lock_bh(&conn->cmd_lock);  	} diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c index 11fd7430781..beb39469e7f 100644 --- a/drivers/target/iscsi/iscsi_target_auth.c +++ b/drivers/target/iscsi/iscsi_target_auth.c @@ -18,6 +18,7 @@   * GNU General Public License for more details.   ******************************************************************************/ +#include <linux/kernel.h>  #include <linux/string.h>  #include <linux/crypto.h>  #include <linux/err.h> @@ -27,40 +28,11 @@  #include "iscsi_target_nego.h"  #include "iscsi_target_auth.h" -static unsigned char chap_asciihex_to_binaryhex(unsigned char val[2]) -{ -	unsigned char result = 0; -	/* -	 * MSB -	 */ -	if ((val[0] >= 'a') && (val[0] <= 'f')) -		result = ((val[0] - 'a' + 10) & 0xf) << 4; -	else -		if ((val[0] >= 'A') && (val[0] <= 'F')) -			result = ((val[0] - 'A' + 10) & 0xf) << 4; -		else /* digit */ -			result = ((val[0] - '0') & 0xf) << 4; -	/* -	 * LSB -	 */ -	if ((val[1] >= 'a') && (val[1] <= 'f')) -		result |= ((val[1] - 'a' + 10) & 0xf); -	else -		if ((val[1] >= 'A') && (val[1] <= 'F')) -			result |= ((val[1] - 'A' + 10) & 0xf); -		else /* digit */ -			result |= ((val[1] - '0') & 0xf); - -	return result; -} -  static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)  { -	int i, j = 0; +	int j = DIV_ROUND_UP(len, 2); -	for (i = 0; i < len; i += 2) { -		dst[j++] = (unsigned char) chap_asciihex_to_binaryhex(&src[i]); -	} +	hex2bin(dst, src, j);  	dst[j] = '\0';  	return j; diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c index f095e65b1cc..f1643dbf6a9 100644 --- a/drivers/target/iscsi/iscsi_target_configfs.c +++ b/drivers/target/iscsi/iscsi_target_configfs.c @@ -268,7 +268,7 @@ struct se_tpg_np *lio_target_call_addnptotpg(  				ISCSI_TCP);  	if (IS_ERR(tpg_np)) {  		iscsit_put_tpg(tpg); -		return ERR_PTR(PTR_ERR(tpg_np)); +		return ERR_CAST(tpg_np);  	}  	pr_debug("LIO_Target_ConfigFS: addnptotpg done!\n"); @@ -1285,7 +1285,7 @@ struct se_wwn *lio_target_call_coreaddtiqn(  	tiqn = iscsit_add_tiqn((unsigned char *)name);  	if (IS_ERR(tiqn)) -		return ERR_PTR(PTR_ERR(tiqn)); +		return ERR_CAST(tiqn);  	/*  	 * Setup struct iscsi_wwn_stat_grps for se_wwn->fabric_stat_group.  	 
*/ diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h index 470ed551eeb..3723d90d5ae 100644 --- a/drivers/target/iscsi/iscsi_target_core.h +++ b/drivers/target/iscsi/iscsi_target_core.h @@ -57,6 +57,9 @@  #define TA_PROD_MODE_WRITE_PROTECT	0  #define TA_CACHE_CORE_NPS		0 + +#define ISCSI_IOV_DATA_BUFFER		5 +  enum tpg_np_network_transport_table {  	ISCSI_TCP				= 0,  	ISCSI_SCTP_TCP				= 1, @@ -425,7 +428,6 @@ struct iscsi_cmd {  	/* Number of times struct iscsi_cmd is present in immediate queue */  	atomic_t		immed_queue_count;  	atomic_t		response_queue_count; -	atomic_t		transport_sent;  	spinlock_t		datain_lock;  	spinlock_t		dataout_timeout_lock;  	/* spinlock for protecting struct iscsi_cmd->i_state */ diff --git a/drivers/target/iscsi/iscsi_target_erl1.c b/drivers/target/iscsi/iscsi_target_erl1.c index 980650792cf..c4c68da3e50 100644 --- a/drivers/target/iscsi/iscsi_target_erl1.c +++ b/drivers/target/iscsi/iscsi_target_erl1.c @@ -834,7 +834,7 @@ static int iscsit_attach_ooo_cmdsn(  			 */  			list_for_each_entry(ooo_tmp, &sess->sess_ooo_cmdsn_list,  						ooo_list) { -				while (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn) +				if (ooo_tmp->cmdsn < ooo_cmdsn->cmdsn)  					continue;  				list_add(&ooo_cmdsn->ooo_list, diff --git a/drivers/target/iscsi/iscsi_target_erl2.c b/drivers/target/iscsi/iscsi_target_erl2.c index 91a4d170bda..0b8404c3012 100644 --- a/drivers/target/iscsi/iscsi_target_erl2.c +++ b/drivers/target/iscsi/iscsi_target_erl2.c @@ -143,12 +143,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)  			list_del(&cmd->i_list);  			cmd->conn = NULL;  			spin_unlock(&cr->conn_recovery_cmd_lock); -			if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) || -			    !(cmd->se_cmd.transport_wait_for_tasks)) -				iscsit_release_cmd(cmd); -			else -				cmd->se_cmd.transport_wait_for_tasks( -						&cmd->se_cmd, 1, 1); +			iscsit_free_cmd(cmd);  			spin_lock(&cr->conn_recovery_cmd_lock);  		}  		spin_unlock(&cr->conn_recovery_cmd_lock); @@ -170,12 +165,7 @@ void iscsit_free_connection_recovery_entires(struct iscsi_session *sess)  			list_del(&cmd->i_list);  			cmd->conn = NULL;  			spin_unlock(&cr->conn_recovery_cmd_lock); -			if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) || -			    !(cmd->se_cmd.transport_wait_for_tasks)) -				iscsit_release_cmd(cmd); -			else -				cmd->se_cmd.transport_wait_for_tasks( -						&cmd->se_cmd, 1, 1); +			iscsit_free_cmd(cmd);  			spin_lock(&cr->conn_recovery_cmd_lock);  		}  		spin_unlock(&cr->conn_recovery_cmd_lock); @@ -260,12 +250,7 @@ void iscsit_discard_cr_cmds_by_expstatsn(  		iscsit_remove_cmd_from_connection_recovery(cmd, sess);  		spin_unlock(&cr->conn_recovery_cmd_lock); -		if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) || -		    !(cmd->se_cmd.transport_wait_for_tasks)) -			iscsit_release_cmd(cmd); -		else -			cmd->se_cmd.transport_wait_for_tasks( -					&cmd->se_cmd, 1, 0); +		iscsit_free_cmd(cmd);  		spin_lock(&cr->conn_recovery_cmd_lock);  	}  	spin_unlock(&cr->conn_recovery_cmd_lock); @@ -319,12 +304,7 @@ int iscsit_discard_unacknowledged_ooo_cmdsns_for_conn(struct iscsi_conn *conn)  		list_del(&cmd->i_list);  		spin_unlock_bh(&conn->cmd_lock); -		if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) || -		    !(cmd->se_cmd.transport_wait_for_tasks)) -			iscsit_release_cmd(cmd); -		else -			cmd->se_cmd.transport_wait_for_tasks( -					&cmd->se_cmd, 1, 1); +		iscsit_free_cmd(cmd);  		spin_lock_bh(&conn->cmd_lock);  	}  	spin_unlock_bh(&conn->cmd_lock); @@ -377,13 +357,7 @@ int 
iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)  			list_del(&cmd->i_list);  			spin_unlock_bh(&conn->cmd_lock); - -			if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) || -			    !(cmd->se_cmd.transport_wait_for_tasks)) -				iscsit_release_cmd(cmd); -			else -				cmd->se_cmd.transport_wait_for_tasks( -						&cmd->se_cmd, 1, 0); +			iscsit_free_cmd(cmd);  			spin_lock_bh(&conn->cmd_lock);  			continue;  		} @@ -403,13 +377,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)  		     (cmd->cmd_sn >= conn->sess->exp_cmd_sn)) {  			list_del(&cmd->i_list);  			spin_unlock_bh(&conn->cmd_lock); - -			if (!(cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) || -			    !(cmd->se_cmd.transport_wait_for_tasks)) -				iscsit_release_cmd(cmd); -			else -				cmd->se_cmd.transport_wait_for_tasks( -						&cmd->se_cmd, 1, 1); +			iscsit_free_cmd(cmd);  			spin_lock_bh(&conn->cmd_lock);  			continue;  		} @@ -434,10 +402,7 @@ int iscsit_prepare_cmds_for_realligance(struct iscsi_conn *conn)  		iscsit_free_all_datain_reqs(cmd); -		if ((cmd->se_cmd.se_cmd_flags & SCF_SE_LUN_CMD) && -		     cmd->se_cmd.transport_wait_for_tasks) -			cmd->se_cmd.transport_wait_for_tasks(&cmd->se_cmd, -					0, 0); +		transport_wait_for_tasks(&cmd->se_cmd);  		/*  		 * Add the struct iscsi_cmd to the connection recovery cmd list  		 */ diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c index bcaf82f4703..daad362a93c 100644 --- a/drivers/target/iscsi/iscsi_target_login.c +++ b/drivers/target/iscsi/iscsi_target_login.c @@ -1013,19 +1013,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)  					ISCSI_LOGIN_STATUS_TARGET_ERROR);  			goto new_sess_out;  		} -#if 0 -		if (!iscsi_ntop6((const unsigned char *) -				&sock_in6.sin6_addr.in6_u, -				(char *)&conn->ipv6_login_ip[0], -				IPV6_ADDRESS_SPACE)) { -			pr_err("iscsi_ntop6() failed\n"); -			iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR, -					ISCSI_LOGIN_STATUS_TARGET_ERROR); -			goto new_sess_out; -		} -#else -		pr_debug("Skipping iscsi_ntop6()\n"); -#endif +		snprintf(conn->login_ip, sizeof(conn->login_ip), "%pI6c", +				&sock_in6.sin6_addr.in6_u); +		conn->login_port = ntohs(sock_in6.sin6_port);  	} else {  		memset(&sock_in, 0, sizeof(struct sockaddr_in)); diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c index 4d087ac1106..426cd4bf6a9 100644 --- a/drivers/target/iscsi/iscsi_target_nego.c +++ b/drivers/target/iscsi/iscsi_target_nego.c @@ -504,7 +504,7 @@ static int iscsi_target_do_authentication(  		break;  	case 1:  		pr_debug("iSCSI security negotiation" -			" completed sucessfully.\n"); +			" completed successfully.\n");  		login->auth_complete = 1;  		if ((login_req->flags & ISCSI_FLAG_LOGIN_NEXT_STAGE1) &&  		    (login_req->flags & ISCSI_FLAG_LOGIN_TRANSIT)) { diff --git a/drivers/target/iscsi/iscsi_target_parameters.c b/drivers/target/iscsi/iscsi_target_parameters.c index 252e246cf51..5b773160200 100644 --- a/drivers/target/iscsi/iscsi_target_parameters.c +++ b/drivers/target/iscsi/iscsi_target_parameters.c @@ -545,13 +545,13 @@ int iscsi_copy_param_list(  	struct iscsi_param_list *src_param_list,  	int leading)  { -	struct iscsi_param *new_param = NULL, *param = NULL; +	struct iscsi_param *param = NULL; +	struct iscsi_param *new_param = NULL;  	struct iscsi_param_list *param_list = NULL;  	param_list = kzalloc(sizeof(struct iscsi_param_list), GFP_KERNEL);  	if (!param_list) { -		pr_err("Unable to allocate memory for" -				" struct 
iscsi_param_list.\n"); +		pr_err("Unable to allocate memory for struct iscsi_param_list.\n");  		goto err_out;  	}  	INIT_LIST_HEAD(¶m_list->param_list); @@ -567,8 +567,17 @@ int iscsi_copy_param_list(  		new_param = kzalloc(sizeof(struct iscsi_param), GFP_KERNEL);  		if (!new_param) { -			pr_err("Unable to allocate memory for" -				" struct iscsi_param.\n"); +			pr_err("Unable to allocate memory for struct iscsi_param.\n"); +			goto err_out; +		} + +		new_param->name = kstrdup(param->name, GFP_KERNEL); +		new_param->value = kstrdup(param->value, GFP_KERNEL); +		if (!new_param->value || !new_param->name) { +			kfree(new_param->value); +			kfree(new_param->name); +			kfree(new_param); +			pr_err("Unable to allocate memory for parameter name/value.\n");  			goto err_out;  		} @@ -580,32 +589,12 @@ int iscsi_copy_param_list(  		new_param->use = param->use;  		new_param->type_range = param->type_range; -		new_param->name = kzalloc(strlen(param->name) + 1, GFP_KERNEL); -		if (!new_param->name) { -			pr_err("Unable to allocate memory for" -				" parameter name.\n"); -			goto err_out; -		} - -		new_param->value = kzalloc(strlen(param->value) + 1, -				GFP_KERNEL); -		if (!new_param->value) { -			pr_err("Unable to allocate memory for" -				" parameter value.\n"); -			goto err_out; -		} - -		memcpy(new_param->name, param->name, strlen(param->name)); -		new_param->name[strlen(param->name)] = '\0'; -		memcpy(new_param->value, param->value, strlen(param->value)); -		new_param->value[strlen(param->value)] = '\0'; -  		list_add_tail(&new_param->p_list, ¶m_list->param_list);  	} -	if (!list_empty(¶m_list->param_list)) +	if (!list_empty(¶m_list->param_list)) {  		*dst_param_list = param_list; -	else { +	} else {  		pr_err("No parameters allocated.\n");  		goto err_out;  	} @@ -1441,7 +1430,7 @@ static int iscsi_enforce_integrity_rules(  	u8 DataSequenceInOrder = 0;  	u8 ErrorRecoveryLevel = 0, SessionType = 0;  	u8 IFMarker = 0, OFMarker = 0; -	u8 IFMarkInt_Reject = 0, OFMarkInt_Reject = 0; +	u8 IFMarkInt_Reject = 1, OFMarkInt_Reject = 1;  	u32 FirstBurstLength = 0, MaxBurstLength = 0;  	struct iscsi_param *param = NULL; diff --git a/drivers/target/iscsi/iscsi_target_tmr.c b/drivers/target/iscsi/iscsi_target_tmr.c index db1fe1ec84d..490207eacde 100644 --- a/drivers/target/iscsi/iscsi_target_tmr.c +++ b/drivers/target/iscsi/iscsi_target_tmr.c @@ -250,7 +250,7 @@ static int iscsit_task_reassign_complete_write(  	 * so if we have received all DataOUT we can safety ignore Initiator.  	 
*/  	if (cmd->cmd_flags & ICF_GOT_LAST_DATAOUT) { -		if (!atomic_read(&cmd->transport_sent)) { +		if (!atomic_read(&cmd->se_cmd.t_transport_sent)) {  			pr_debug("WRITE ITT: 0x%08x: t_state: %d"  				" never sent to transport\n",  				cmd->init_task_tag, cmd->se_cmd.t_state); @@ -314,11 +314,11 @@ static int iscsit_task_reassign_complete_read(  		cmd->acked_data_sn = (tmr_req->exp_data_sn - 1);  	} -	if (!atomic_read(&cmd->transport_sent)) { +	if (!atomic_read(&cmd->se_cmd.t_transport_sent)) {  		pr_debug("READ ITT: 0x%08x: t_state: %d never sent to"  			" transport\n", cmd->init_task_tag,  			cmd->se_cmd.t_state); -		transport_generic_handle_cdb(se_cmd); +		transport_handle_cdb_direct(se_cmd);  		return 0;  	} diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c index a1acb016790..02348f727bd 100644 --- a/drivers/target/iscsi/iscsi_target_util.c +++ b/drivers/target/iscsi/iscsi_target_util.c @@ -243,7 +243,7 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(  	if (!cmd->tmr_req) {  		pr_err("Unable to allocate memory for"  			" Task Management command!\n"); -		return NULL; +		goto out;  	}  	/*  	 * TASK_REASSIGN for ERL=2 / connection stays inside of @@ -289,7 +289,8 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(  	}  	se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, -				(void *)cmd->tmr_req, tcm_function); +				(void *)cmd->tmr_req, tcm_function, +				GFP_KERNEL);  	if (!se_cmd->se_tmr_req)  		goto out; @@ -298,8 +299,6 @@ struct iscsi_cmd *iscsit_allocate_se_cmd_for_tmr(  	return cmd;  out:  	iscsit_release_cmd(cmd); -	if (se_cmd) -		transport_free_se_cmd(se_cmd);  	return NULL;  } @@ -841,6 +840,23 @@ void iscsit_release_cmd(struct iscsi_cmd *cmd)  	kmem_cache_free(lio_cmd_cache, cmd);  } +void iscsit_free_cmd(struct iscsi_cmd *cmd) +{ +	/* +	 * Determine if a struct se_cmd is assoicated with +	 * this struct iscsi_cmd. +	 */ +	switch (cmd->iscsi_opcode) { +	case ISCSI_OP_SCSI_CMD: +	case ISCSI_OP_SCSI_TMFUNC: +		transport_generic_free_cmd(&cmd->se_cmd, 1); +		break; +	default: +		iscsit_release_cmd(cmd); +		break; +	} +} +  int iscsit_check_session_usage_count(struct iscsi_session *sess)  {  	spin_lock_bh(&sess->session_usage_lock); @@ -877,40 +893,6 @@ void iscsit_inc_session_usage_count(struct iscsi_session *sess)  }  /* - *	Used before iscsi_do[rx,tx]_data() to determine iov and [rx,tx]_marker - *	array counts needed for sync and steering. - */ -static int iscsit_determine_sync_and_steering_counts( -	struct iscsi_conn *conn, -	struct iscsi_data_count *count) -{ -	u32 length = count->data_length; -	u32 marker, markint; - -	count->sync_and_steering = 1; - -	marker = (count->type == ISCSI_RX_DATA) ? -			conn->of_marker : conn->if_marker; -	markint = (count->type == ISCSI_RX_DATA) ? -			(conn->conn_ops->OFMarkInt * 4) : -			(conn->conn_ops->IFMarkInt * 4); -	count->ss_iov_count = count->iov_count; - -	while (length > 0) { -		if (length >= marker) { -			count->ss_iov_count += 3; -			count->ss_marker_count += 2; - -			length -= marker; -			marker = markint; -		} else -			length = 0; -	} - -	return 0; -} - -/*   *	Setup conn->if_marker and conn->of_marker values based upon   *	the initial marker-less interval. 
(see iSCSI v19 A.2)   */ @@ -1292,7 +1274,7 @@ int iscsit_fe_sendpage_sg(  	struct kvec iov;  	u32 tx_hdr_size, data_len;  	u32 offset = cmd->first_data_sg_off; -	int tx_sent; +	int tx_sent, iov_off;  send_hdr:  	tx_hdr_size = ISCSI_HDR_LEN; @@ -1312,9 +1294,19 @@ send_hdr:  	}  	data_len = cmd->tx_size - tx_hdr_size - cmd->padding; -	if (conn->conn_ops->DataDigest) +	/* +	 * Set iov_off used by padding and data digest tx_data() calls below +	 * in order to determine proper offset into cmd->iov_data[] +	 */ +	if (conn->conn_ops->DataDigest) {  		data_len -= ISCSI_CRC_LEN; - +		if (cmd->padding) +			iov_off = (cmd->iov_data_count - 2); +		else +			iov_off = (cmd->iov_data_count - 1); +	} else { +		iov_off = (cmd->iov_data_count - 1); +	}  	/*  	 * Perform sendpage() for each page in the scatterlist  	 */ @@ -1343,8 +1335,7 @@ send_pg:  send_padding:  	if (cmd->padding) { -		struct kvec *iov_p = -			&cmd->iov_data[cmd->iov_data_count-1]; +		struct kvec *iov_p = &cmd->iov_data[iov_off++];  		tx_sent = tx_data(conn, iov_p, 1, cmd->padding);  		if (cmd->padding != tx_sent) { @@ -1358,8 +1349,7 @@ send_padding:  send_datacrc:  	if (conn->conn_ops->DataDigest) { -		struct kvec *iov_d = -			&cmd->iov_data[cmd->iov_data_count]; +		struct kvec *iov_d = &cmd->iov_data[iov_off];  		tx_sent = tx_data(conn, iov_d, 1, ISCSI_CRC_LEN);  		if (ISCSI_CRC_LEN != tx_sent) { @@ -1433,8 +1423,7 @@ static int iscsit_do_rx_data(  	struct iscsi_data_count *count)  {  	int data = count->data_length, rx_loop = 0, total_rx = 0, iov_len; -	u32 rx_marker_val[count->ss_marker_count], rx_marker_iov = 0; -	struct kvec iov[count->ss_iov_count], *iov_p; +	struct kvec *iov_p;  	struct msghdr msg;  	if (!conn || !conn->sock || !conn->conn_ops) @@ -1442,93 +1431,8 @@ static int iscsit_do_rx_data(  	memset(&msg, 0, sizeof(struct msghdr)); -	if (count->sync_and_steering) { -		int size = 0; -		u32 i, orig_iov_count = 0; -		u32 orig_iov_len = 0, orig_iov_loc = 0; -		u32 iov_count = 0, per_iov_bytes = 0; -		u32 *rx_marker, old_rx_marker = 0; -		struct kvec *iov_record; - -		memset(&rx_marker_val, 0, -				count->ss_marker_count * sizeof(u32)); -		memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec)); - -		iov_record = count->iov; -		orig_iov_count = count->iov_count; -		rx_marker = &conn->of_marker; - -		i = 0; -		size = data; -		orig_iov_len = iov_record[orig_iov_loc].iov_len; -		while (size > 0) { -			pr_debug("rx_data: #1 orig_iov_len %u," -			" orig_iov_loc %u\n", orig_iov_len, orig_iov_loc); -			pr_debug("rx_data: #2 rx_marker %u, size" -				" %u\n", *rx_marker, size); - -			if (orig_iov_len >= *rx_marker) { -				iov[iov_count].iov_len = *rx_marker; -				iov[iov_count++].iov_base = -					(iov_record[orig_iov_loc].iov_base + -						per_iov_bytes); - -				iov[iov_count].iov_len = (MARKER_SIZE / 2); -				iov[iov_count++].iov_base = -					&rx_marker_val[rx_marker_iov++]; -				iov[iov_count].iov_len = (MARKER_SIZE / 2); -				iov[iov_count++].iov_base = -					&rx_marker_val[rx_marker_iov++]; -				old_rx_marker = *rx_marker; - -				/* -				 * OFMarkInt is in 32-bit words. 
-				 */ -				*rx_marker = (conn->conn_ops->OFMarkInt * 4); -				size -= old_rx_marker; -				orig_iov_len -= old_rx_marker; -				per_iov_bytes += old_rx_marker; - -				pr_debug("rx_data: #3 new_rx_marker" -					" %u, size %u\n", *rx_marker, size); -			} else { -				iov[iov_count].iov_len = orig_iov_len; -				iov[iov_count++].iov_base = -					(iov_record[orig_iov_loc].iov_base + -						per_iov_bytes); - -				per_iov_bytes = 0; -				*rx_marker -= orig_iov_len; -				size -= orig_iov_len; - -				if (size) -					orig_iov_len = -					iov_record[++orig_iov_loc].iov_len; - -				pr_debug("rx_data: #4 new_rx_marker" -					" %u, size %u\n", *rx_marker, size); -			} -		} -		data += (rx_marker_iov * (MARKER_SIZE / 2)); - -		iov_p	= &iov[0]; -		iov_len	= iov_count; - -		if (iov_count > count->ss_iov_count) { -			pr_err("iov_count: %d, count->ss_iov_count:" -				" %d\n", iov_count, count->ss_iov_count); -			return -1; -		} -		if (rx_marker_iov > count->ss_marker_count) { -			pr_err("rx_marker_iov: %d, count->ss_marker" -				"_count: %d\n", rx_marker_iov, -				count->ss_marker_count); -			return -1; -		} -	} else { -		iov_p = count->iov; -		iov_len	= count->iov_count; -	} +	iov_p = count->iov; +	iov_len	= count->iov_count;  	while (total_rx < data) {  		rx_loop = kernel_recvmsg(conn->sock, &msg, iov_p, iov_len, @@ -1543,16 +1447,6 @@ static int iscsit_do_rx_data(  				rx_loop, total_rx, data);  	} -	if (count->sync_and_steering) { -		int j; -		for (j = 0; j < rx_marker_iov; j++) { -			pr_debug("rx_data: #5 j: %d, offset: %d\n", -				j, rx_marker_val[j]); -			conn->of_marker_offset = rx_marker_val[j]; -		} -		total_rx -= (rx_marker_iov * (MARKER_SIZE / 2)); -	} -  	return total_rx;  } @@ -1561,8 +1455,7 @@ static int iscsit_do_tx_data(  	struct iscsi_data_count *count)  {  	int data = count->data_length, total_tx = 0, tx_loop = 0, iov_len; -	u32 tx_marker_val[count->ss_marker_count], tx_marker_iov = 0; -	struct kvec iov[count->ss_iov_count], *iov_p; +	struct kvec *iov_p;  	struct msghdr msg;  	if (!conn || !conn->sock || !conn->conn_ops) @@ -1575,98 +1468,8 @@ static int iscsit_do_tx_data(  	memset(&msg, 0, sizeof(struct msghdr)); -	if (count->sync_and_steering) { -		int size = 0; -		u32 i, orig_iov_count = 0; -		u32 orig_iov_len = 0, orig_iov_loc = 0; -		u32 iov_count = 0, per_iov_bytes = 0; -		u32 *tx_marker, old_tx_marker = 0; -		struct kvec *iov_record; - -		memset(&tx_marker_val, 0, -			count->ss_marker_count * sizeof(u32)); -		memset(&iov, 0, count->ss_iov_count * sizeof(struct kvec)); - -		iov_record = count->iov; -		orig_iov_count = count->iov_count; -		tx_marker = &conn->if_marker; - -		i = 0; -		size = data; -		orig_iov_len = iov_record[orig_iov_loc].iov_len; -		while (size > 0) { -			pr_debug("tx_data: #1 orig_iov_len %u," -			" orig_iov_loc %u\n", orig_iov_len, orig_iov_loc); -			pr_debug("tx_data: #2 tx_marker %u, size" -				" %u\n", *tx_marker, size); - -			if (orig_iov_len >= *tx_marker) { -				iov[iov_count].iov_len = *tx_marker; -				iov[iov_count++].iov_base = -					(iov_record[orig_iov_loc].iov_base + -						per_iov_bytes); - -				tx_marker_val[tx_marker_iov] = -						(size - *tx_marker); -				iov[iov_count].iov_len = (MARKER_SIZE / 2); -				iov[iov_count++].iov_base = -					&tx_marker_val[tx_marker_iov++]; -				iov[iov_count].iov_len = (MARKER_SIZE / 2); -				iov[iov_count++].iov_base = -					&tx_marker_val[tx_marker_iov++]; -				old_tx_marker = *tx_marker; - -				/* -				 * IFMarkInt is in 32-bit words. 
-				 */ -				*tx_marker = (conn->conn_ops->IFMarkInt * 4); -				size -= old_tx_marker; -				orig_iov_len -= old_tx_marker; -				per_iov_bytes += old_tx_marker; - -				pr_debug("tx_data: #3 new_tx_marker" -					" %u, size %u\n", *tx_marker, size); -				pr_debug("tx_data: #4 offset %u\n", -					tx_marker_val[tx_marker_iov-1]); -			} else { -				iov[iov_count].iov_len = orig_iov_len; -				iov[iov_count++].iov_base -					= (iov_record[orig_iov_loc].iov_base + -						per_iov_bytes); - -				per_iov_bytes = 0; -				*tx_marker -= orig_iov_len; -				size -= orig_iov_len; - -				if (size) -					orig_iov_len = -					iov_record[++orig_iov_loc].iov_len; - -				pr_debug("tx_data: #5 new_tx_marker" -					" %u, size %u\n", *tx_marker, size); -			} -		} - -		data += (tx_marker_iov * (MARKER_SIZE / 2)); - -		iov_p = &iov[0]; -		iov_len = iov_count; - -		if (iov_count > count->ss_iov_count) { -			pr_err("iov_count: %d, count->ss_iov_count:" -				" %d\n", iov_count, count->ss_iov_count); -			return -1; -		} -		if (tx_marker_iov > count->ss_marker_count) { -			pr_err("tx_marker_iov: %d, count->ss_marker" -				"_count: %d\n", tx_marker_iov, -				count->ss_marker_count); -			return -1; -		} -	} else { -		iov_p = count->iov; -		iov_len = count->iov_count; -	} +	iov_p = count->iov; +	iov_len = count->iov_count;  	while (total_tx < data) {  		tx_loop = kernel_sendmsg(conn->sock, &msg, iov_p, iov_len, @@ -1681,9 +1484,6 @@ static int iscsit_do_tx_data(  					tx_loop, total_tx, data);  	} -	if (count->sync_and_steering) -		total_tx -= (tx_marker_iov * (MARKER_SIZE / 2)); -  	return total_tx;  } @@ -1704,12 +1504,6 @@ int rx_data(  	c.data_length = data;  	c.type = ISCSI_RX_DATA; -	if (conn->conn_ops->OFMarker && -	   (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) { -		if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0) -			return -1; -	} -  	return iscsit_do_rx_data(conn, &c);  } @@ -1730,12 +1524,6 @@ int tx_data(  	c.data_length = data;  	c.type = ISCSI_TX_DATA; -	if (conn->conn_ops->IFMarker && -	   (conn->conn_state >= TARG_CONN_STATE_LOGGED_IN)) { -		if (iscsit_determine_sync_and_steering_counts(conn, &c) < 0) -			return -1; -	} -  	return iscsit_do_tx_data(conn, &c);  } diff --git a/drivers/target/iscsi/iscsi_target_util.h b/drivers/target/iscsi/iscsi_target_util.h index 2cd49d607bd..835bf7de028 100644 --- a/drivers/target/iscsi/iscsi_target_util.h +++ b/drivers/target/iscsi/iscsi_target_util.h @@ -30,6 +30,7 @@ extern struct iscsi_queue_req *iscsit_get_cmd_from_response_queue(struct iscsi_c  extern void iscsit_remove_cmd_from_tx_queues(struct iscsi_cmd *, struct iscsi_conn *);  extern void iscsit_free_queue_reqs_for_conn(struct iscsi_conn *);  extern void iscsit_release_cmd(struct iscsi_cmd *); +extern void iscsit_free_cmd(struct iscsi_cmd *);  extern int iscsit_check_session_usage_count(struct iscsi_session *);  extern void iscsit_dec_session_usage_count(struct iscsi_session *);  extern void iscsit_inc_session_usage_count(struct iscsi_session *); diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c index aa2d6799723..b15d8cbf630 100644 --- a/drivers/target/loopback/tcm_loop.c +++ b/drivers/target/loopback/tcm_loop.c @@ -200,7 +200,7 @@ static void tcm_loop_check_stop_free(struct se_cmd *se_cmd)  	 * Release the struct se_cmd, which will make a callback to release  	 * struct tcm_loop_cmd * in tcm_loop_deallocate_core_cmd()  	 */ -	transport_generic_free_cmd(se_cmd, 0, 0); +	transport_generic_free_cmd(se_cmd, 0);  }  static void tcm_loop_release_cmd(struct se_cmd 
*se_cmd) @@ -290,6 +290,15 @@ static int tcm_loop_queuecommand(  	 */  	tl_hba = *(struct tcm_loop_hba **)shost_priv(sc->device->host);  	tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; +	/* +	 * Ensure that this tl_tpg reference from the incoming sc->device->id +	 * has already been configured via tcm_loop_make_naa_tpg(). +	 */ +	if (!tl_tpg->tl_hba) { +		set_host_byte(sc, DID_NO_CONNECT); +		sc->scsi_done(sc); +		return 0; +	}  	se_tpg = &tl_tpg->tl_se_tpg;  	/*  	 * Determine the SAM Task Attribute and allocate tl_cmd and @@ -366,7 +375,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)  	 * Allocate the LUN_RESET TMR  	 */  	se_cmd->se_tmr_req = core_tmr_alloc_req(se_cmd, tl_tmr, -				TMR_LUN_RESET); +						TMR_LUN_RESET, GFP_KERNEL);  	if (IS_ERR(se_cmd->se_tmr_req))  		goto release;  	/* @@ -388,7 +397,7 @@ static int tcm_loop_device_reset(struct scsi_cmnd *sc)  		SUCCESS : FAILED;  release:  	if (se_cmd) -		transport_generic_free_cmd(se_cmd, 1, 0); +		transport_generic_free_cmd(se_cmd, 1);  	else  		kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);  	kfree(tl_tmr); @@ -1245,6 +1254,9 @@ void tcm_loop_drop_naa_tpg(  	 */  	core_tpg_deregister(se_tpg); +	tl_tpg->tl_hba = NULL; +	tl_tpg->tl_tpgt = 0; +  	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"  		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),  		config_item_name(&wwn->wwn_group.cg_item), tpgt); diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index 98c98a3a025..8f4447749c7 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -24,7 +24,6 @@   *   ******************************************************************************/ -#include <linux/version.h>  #include <linux/slab.h>  #include <linux/spinlock.h>  #include <linux/configfs.h> @@ -68,6 +67,15 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)  	unsigned char *buf;  	u32 rd_len = 0, off = 4; /* Skip over RESERVED area to first  				    Target port group descriptor */ +	/* +	 * Need at least 4 bytes of response data or else we can't +	 * even fit the return data length. +	 */ +	if (cmd->data_length < 4) { +		pr_warn("REPORT TARGET PORT GROUPS allocation length %u" +			" too small\n", cmd->data_length); +		return -EINVAL; +	}  	buf = transport_kmap_first_data_page(cmd); @@ -75,6 +83,17 @@ int core_emulate_report_target_port_groups(struct se_cmd *cmd)  	list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,  			tg_pt_gp_list) {  		/* +		 * Check if the Target port group and Target port descriptor list +		 * based on tg_pt_gp_members count will fit into the response payload. +		 * Otherwise, bump rd_len to let the initiator know we have exceeded +		 * the allocation length and the response is truncated. +		 */ +		if ((off + 8 + (tg_pt_gp->tg_pt_gp_members * 4)) > +		     cmd->data_length) { +			rd_len += 8 + (tg_pt_gp->tg_pt_gp_members * 4); +			continue; +		} +		/*  		 * PREF: Preferred target port bit, determine if this  		 * bit should be set for port group.  		 
*/ diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index 8ae09a1bdf7..38535eb1392 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c @@ -24,6 +24,7 @@   */  #include <linux/kernel.h> +#include <linux/module.h>  #include <asm/unaligned.h>  #include <scsi/scsi.h> @@ -67,6 +68,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd)  {  	struct se_lun *lun = cmd->se_lun;  	struct se_device *dev = cmd->se_dev; +	struct se_portal_group *tpg = lun->lun_sep->sep_tpg;  	unsigned char *buf;  	/* @@ -81,9 +83,13 @@ target_emulate_inquiry_std(struct se_cmd *cmd)  	buf = transport_kmap_first_data_page(cmd); -	buf[0] = dev->transport->get_device_type(dev); -	if (buf[0] == TYPE_TAPE) -		buf[1] = 0x80; +	if (dev == tpg->tpg_virt_lun0.lun_se_dev) { +		buf[0] = 0x3f; /* Not connected */ +	} else { +		buf[0] = dev->transport->get_device_type(dev); +		if (buf[0] == TYPE_TAPE) +			buf[1] = 0x80; +	}  	buf[2] = dev->transport->get_device_rev(dev);  	/* @@ -149,6 +155,37 @@ target_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)  	return 0;  } +static void +target_parse_naa_6h_vendor_specific(struct se_device *dev, unsigned char *buf) +{ +	unsigned char *p = &dev->se_sub_dev->t10_wwn.unit_serial[0]; +	int cnt; +	bool next = true; + +	/* +	 * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on +	 * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field +	 * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION +	 * to complete the payload.  These are based from VPD=0x80 PRODUCT SERIAL +	 * NUMBER set via vpd_unit_serial in target_core_configfs.c to ensure +	 * per device uniqeness. +	 */ +	for (cnt = 0; *p && cnt < 13; p++) { +		int val = hex_to_bin(*p); + +		if (val < 0) +			continue; + +		if (next) { +			next = false; +			buf[cnt++] |= val; +		} else { +			next = true; +			buf[cnt] = val << 4; +		} +	} +} +  /*   * Device identification VPD, for a complete list of   * DESIGNATOR TYPEs see spc4r17 Table 459. @@ -214,8 +251,7 @@ target_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)  	 * VENDOR_SPECIFIC_IDENTIFIER and  	 * VENDOR_SPECIFIC_IDENTIFIER_EXTENTION  	 */ -	buf[off++] |= hex_to_bin(dev->se_sub_dev->t10_wwn.unit_serial[0]); -	hex2bin(&buf[off], &dev->se_sub_dev->t10_wwn.unit_serial[1], 12); +	target_parse_naa_6h_vendor_specific(dev, &buf[off]);  	len = 20;  	off = (len + 4); @@ -915,8 +951,8 @@ target_emulate_modesense(struct se_cmd *cmd, int ten)  		length += target_modesense_control(dev, &buf[offset+length]);  		break;  	default: -		pr_err("Got Unknown Mode Page: 0x%02x\n", -				cdb[2] & 0x3f); +		pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n", +		       cdb[2] & 0x3f, cdb[3]);  		return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;  	}  	offset += length; @@ -1072,8 +1108,6 @@ target_emulate_unmap(struct se_task *task)  		size -= 16;  	} -	task->task_scsi_status = GOOD; -	transport_complete_task(task, 1);  err:  	transport_kunmap_first_data_page(cmd); @@ -1085,24 +1119,17 @@ err:   * Note this is not used for TCM/pSCSI passthrough   */  static int -target_emulate_write_same(struct se_task *task, int write_same32) +target_emulate_write_same(struct se_task *task, u32 num_blocks)  {  	struct se_cmd *cmd = task->task_se_cmd;  	struct se_device *dev = cmd->se_dev;  	sector_t range;  	sector_t lba = cmd->t_task_lba; -	unsigned int num_blocks;  	int ret;  	/* -	 * Extract num_blocks from the WRITE_SAME_* CDB.  
Then use the explict -	 * range when non zero is supplied, otherwise calculate the remaining -	 * range based on ->get_blocks() - starting LBA. +	 * Use the explicit range when non zero is supplied, otherwise calculate +	 * the remaining range based on ->get_blocks() - starting LBA.  	 */ -	if (write_same32) -		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[28]); -	else -		num_blocks = get_unaligned_be32(&cmd->t_task_cdb[10]); -  	if (num_blocks != 0)  		range = num_blocks;  	else @@ -1117,8 +1144,6 @@ target_emulate_write_same(struct se_task *task, int write_same32)  		return ret;  	} -	task->task_scsi_status = GOOD; -	transport_complete_task(task, 1);  	return 0;  } @@ -1165,13 +1190,23 @@ transport_emulate_control_cdb(struct se_task *task)  		}  		ret = target_emulate_unmap(task);  		break; +	case WRITE_SAME: +		if (!dev->transport->do_discard) { +			pr_err("WRITE_SAME emulation not supported" +					" for: %s\n", dev->transport->name); +			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE; +		} +		ret = target_emulate_write_same(task, +				get_unaligned_be16(&cmd->t_task_cdb[7])); +		break;  	case WRITE_SAME_16:  		if (!dev->transport->do_discard) {  			pr_err("WRITE_SAME_16 emulation not supported"  					" for: %s\n", dev->transport->name);  			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;  		} -		ret = target_emulate_write_same(task, 0); +		ret = target_emulate_write_same(task, +				get_unaligned_be32(&cmd->t_task_cdb[10]));  		break;  	case VARIABLE_LENGTH_CMD:  		service_action = @@ -1184,7 +1219,8 @@ transport_emulate_control_cdb(struct se_task *task)  					dev->transport->name);  				return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;  			} -			ret = target_emulate_write_same(task, 1); +			ret = target_emulate_write_same(task, +				get_unaligned_be32(&cmd->t_task_cdb[28]));  			break;  		default:  			pr_err("Unsupported VARIABLE_LENGTH_CMD SA:" @@ -1219,8 +1255,63 @@ transport_emulate_control_cdb(struct se_task *task)  	if (ret < 0)  		return ret; -	task->task_scsi_status = GOOD; -	transport_complete_task(task, 1); +	/* +	 * Handle the successful completion here unless a caller +	 * has explictly requested an asychronous completion. +	 */ +	if (!(cmd->se_cmd_flags & SCF_EMULATE_CDB_ASYNC)) { +		task->task_scsi_status = GOOD; +		transport_complete_task(task, 1); +	}  	return PYX_TRANSPORT_SENT_TO_TRANSPORT;  } + +/* + * Write a CDB into @cdb that is based on the one the intiator sent us, + * but updated to only cover the sectors that the current task handles. 
+ */ +void target_get_task_cdb(struct se_task *task, unsigned char *cdb) +{ +	struct se_cmd *cmd = task->task_se_cmd; +	unsigned int cdb_len = scsi_command_size(cmd->t_task_cdb); + +	memcpy(cdb, cmd->t_task_cdb, cdb_len); +	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { +		unsigned long long lba = task->task_lba; +		u32 sectors = task->task_sectors; + +		switch (cdb_len) { +		case 6: +			/* 21-bit LBA and 8-bit sectors */ +			cdb[1] = (lba >> 16) & 0x1f; +			cdb[2] = (lba >> 8) & 0xff; +			cdb[3] = lba & 0xff; +			cdb[4] = sectors & 0xff; +			break; +		case 10: +			/* 32-bit LBA and 16-bit sectors */ +			put_unaligned_be32(lba, &cdb[2]); +			put_unaligned_be16(sectors, &cdb[7]); +			break; +		case 12: +			/* 32-bit LBA and 32-bit sectors */ +			put_unaligned_be32(lba, &cdb[2]); +			put_unaligned_be32(sectors, &cdb[6]); +			break; +		case 16: +			/* 64-bit LBA and 32-bit sectors */ +			put_unaligned_be64(lba, &cdb[2]); +			put_unaligned_be32(sectors, &cdb[10]); +			break; +		case 32: +			/* 64-bit LBA and 32-bit sectors, extended CDB */ +			put_unaligned_be64(lba, &cdb[12]); +			put_unaligned_be32(sectors, &cdb[28]); +			break; +		default: +			BUG(); +		} +	} +} +EXPORT_SYMBOL(target_get_task_cdb); diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c index b2575d8568c..e0c1e8a8dd4 100644 --- a/drivers/target/target_core_configfs.c +++ b/drivers/target/target_core_configfs.c @@ -23,7 +23,6 @@  #include <linux/module.h>  #include <linux/moduleparam.h> -#include <linux/version.h>  #include <generated/utsrelease.h>  #include <linux/utsname.h>  #include <linux/init.h> @@ -133,14 +132,6 @@ static struct config_group *target_core_register_fabric(  	pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"  			" %s\n", group, name);  	/* -	 * Ensure that TCM subsystem plugins are loaded at this point for -	 * using the RAMDISK_DR virtual LUN 0 and all other struct se_port -	 * LUN symlinks. -	 */ -	if (transport_subsystem_check_init() < 0) -		return ERR_PTR(-EINVAL); - -	/*  	 * Below are some hardcoded request_module() calls to automatically  	 * local fabric modules when the following is called:  	 * @@ -725,9 +716,6 @@ SE_DEV_ATTR_RO(hw_queue_depth);  DEF_DEV_ATTRIB(queue_depth);  SE_DEV_ATTR(queue_depth, S_IRUGO | S_IWUSR); -DEF_DEV_ATTRIB(task_timeout); -SE_DEV_ATTR(task_timeout, S_IRUGO | S_IWUSR); -  DEF_DEV_ATTRIB(max_unmap_lba_count);  SE_DEV_ATTR(max_unmap_lba_count, S_IRUGO | S_IWUSR); @@ -761,7 +749,6 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {  	&target_core_dev_attrib_optimal_sectors.attr,  	&target_core_dev_attrib_hw_queue_depth.attr,  	&target_core_dev_attrib_queue_depth.attr, -	&target_core_dev_attrib_task_timeout.attr,  	&target_core_dev_attrib_max_unmap_lba_count.attr,  	&target_core_dev_attrib_max_unmap_block_desc_count.attr,  	&target_core_dev_attrib_unmap_granularity.attr, @@ -3080,8 +3067,7 @@ static struct config_group *target_core_call_addhbatotarget(  	/*  	 * Load up TCM subsystem plugins if they have not already been loaded.  	 
*/ -	if (transport_subsystem_check_init() < 0) -		return ERR_PTR(-EINVAL); +	transport_subsystem_check_init();  	hba = core_alloc_hba(se_plugin_str, plugin_dep_id, 0);  	if (IS_ERR(hba)) diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c index b38b6c993e6..f870c3bcfd8 100644 --- a/drivers/target/target_core_device.c +++ b/drivers/target/target_core_device.c @@ -472,9 +472,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)  	struct se_dev_entry *deve;  	u32 i; -	spin_lock_bh(&tpg->acl_node_lock); +	spin_lock_irq(&tpg->acl_node_lock);  	list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) { -		spin_unlock_bh(&tpg->acl_node_lock); +		spin_unlock_irq(&tpg->acl_node_lock);  		spin_lock_irq(&nacl->device_list_lock);  		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) { @@ -491,9 +491,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)  		}  		spin_unlock_irq(&nacl->device_list_lock); -		spin_lock_bh(&tpg->acl_node_lock); +		spin_lock_irq(&tpg->acl_node_lock);  	} -	spin_unlock_bh(&tpg->acl_node_lock); +	spin_unlock_irq(&tpg->acl_node_lock);  }  static struct se_port *core_alloc_port(struct se_device *dev) @@ -839,6 +839,24 @@ int se_dev_check_shutdown(struct se_device *dev)  	return ret;  } +u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size) +{ +	u32 tmp, aligned_max_sectors; +	/* +	 * Limit max_sectors to a PAGE_SIZE aligned value for modern +	 * transport_allocate_data_tasks() operation. +	 */ +	tmp = rounddown((max_sectors * block_size), PAGE_SIZE); +	aligned_max_sectors = (tmp / block_size); +	if (max_sectors != aligned_max_sectors) { +		printk(KERN_INFO "Rounding down aligned max_sectors from %u" +				" to %u\n", max_sectors, aligned_max_sectors); +		return aligned_max_sectors; +	} + +	return max_sectors; +} +  void se_dev_set_default_attribs(  	struct se_device *dev,  	struct se_dev_limits *dev_limits) @@ -878,6 +896,11 @@ void se_dev_set_default_attribs(  	 * max_sectors is based on subsystem plugin dependent requirements.  	 
*/  	dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors; +	/* +	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() +	 */ +	limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors, +						limits->logical_block_size);  	dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;  	/*  	 * Set optimal_sectors from max_sectors, which can be lowered via @@ -891,21 +914,6 @@ void se_dev_set_default_attribs(  	dev->se_sub_dev->se_dev_attrib.queue_depth = dev_limits->queue_depth;  } -int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout) -{ -	if (task_timeout > DA_TASK_TIMEOUT_MAX) { -		pr_err("dev[%p]: Passed task_timeout: %u larger then" -			" DA_TASK_TIMEOUT_MAX\n", dev, task_timeout); -		return -EINVAL; -	} else { -		dev->se_sub_dev->se_dev_attrib.task_timeout = task_timeout; -		pr_debug("dev[%p]: Set SE Device task_timeout: %u\n", -			dev, task_timeout); -	} - -	return 0; -} -  int se_dev_set_max_unmap_lba_count(  	struct se_device *dev,  	u32 max_unmap_lba_count) @@ -949,36 +957,24 @@ int se_dev_set_unmap_granularity_alignment(  int se_dev_set_emulate_dpo(struct se_device *dev, int flag)  { -	if ((flag != 0) && (flag != 1)) { +	if (flag != 0 && flag != 1) {  		pr_err("Illegal value %d\n", flag);  		return -EINVAL;  	} -	if (dev->transport->dpo_emulated == NULL) { -		pr_err("dev->transport->dpo_emulated is NULL\n"); -		return -EINVAL; -	} -	if (dev->transport->dpo_emulated(dev) == 0) { -		pr_err("dev->transport->dpo_emulated not supported\n"); -		return -EINVAL; -	} -	dev->se_sub_dev->se_dev_attrib.emulate_dpo = flag; -	pr_debug("dev[%p]: SE Device Page Out (DPO) Emulation" -			" bit: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_dpo); -	return 0; + +	pr_err("dpo_emulated not supported\n"); +	return -EINVAL;  }  int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)  { -	if ((flag != 0) && (flag != 1)) { +	if (flag != 0 && flag != 1) {  		pr_err("Illegal value %d\n", flag);  		return -EINVAL;  	} -	if (dev->transport->fua_write_emulated == NULL) { -		pr_err("dev->transport->fua_write_emulated is NULL\n"); -		return -EINVAL; -	} -	if (dev->transport->fua_write_emulated(dev) == 0) { -		pr_err("dev->transport->fua_write_emulated not supported\n"); + +	if (dev->transport->fua_write_emulated == 0) { +		pr_err("fua_write_emulated not supported\n");  		return -EINVAL;  	}  	dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag; @@ -989,36 +985,23 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)  int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)  { -	if ((flag != 0) && (flag != 1)) { +	if (flag != 0 && flag != 1) {  		pr_err("Illegal value %d\n", flag);  		return -EINVAL;  	} -	if (dev->transport->fua_read_emulated == NULL) { -		pr_err("dev->transport->fua_read_emulated is NULL\n"); -		return -EINVAL; -	} -	if (dev->transport->fua_read_emulated(dev) == 0) { -		pr_err("dev->transport->fua_read_emulated not supported\n"); -		return -EINVAL; -	} -	dev->se_sub_dev->se_dev_attrib.emulate_fua_read = flag; -	pr_debug("dev[%p]: SE Device Forced Unit Access READs: %d\n", -			dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_read); -	return 0; + +	pr_err("ua read emulated not supported\n"); +	return -EINVAL;  }  int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)  { -	if ((flag != 0) && (flag != 1)) { +	if (flag != 0 && flag != 1) {  		pr_err("Illegal value %d\n", flag);  		return -EINVAL;  	} -	if (dev->transport->write_cache_emulated == NULL) { -		
pr_err("dev->transport->write_cache_emulated is NULL\n"); -		return -EINVAL; -	} -	if (dev->transport->write_cache_emulated(dev) == 0) { -		pr_err("dev->transport->write_cache_emulated not supported\n"); +	if (dev->transport->write_cache_emulated == 0) { +		pr_err("write_cache_emulated not supported\n");  		return -EINVAL;  	}  	dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag; @@ -1242,6 +1225,11 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)  			return -EINVAL;  		}  	} +	/* +	 * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks() +	 */ +	max_sectors = se_dev_align_max_sectors(max_sectors, +				dev->se_sub_dev->se_dev_attrib.block_size);  	dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;  	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n", @@ -1344,15 +1332,17 @@ struct se_lun *core_dev_add_lun(  	 */  	if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {  		struct se_node_acl *acl; -		spin_lock_bh(&tpg->acl_node_lock); +		spin_lock_irq(&tpg->acl_node_lock);  		list_for_each_entry(acl, &tpg->acl_node_list, acl_list) { -			if (acl->dynamic_node_acl) { -				spin_unlock_bh(&tpg->acl_node_lock); +			if (acl->dynamic_node_acl && +			    (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only || +			     !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) { +				spin_unlock_irq(&tpg->acl_node_lock);  				core_tpg_add_node_to_devs(acl, tpg); -				spin_lock_bh(&tpg->acl_node_lock); +				spin_lock_irq(&tpg->acl_node_lock);  			}  		} -		spin_unlock_bh(&tpg->acl_node_lock); +		spin_unlock_irq(&tpg->acl_node_lock);  	}  	return lun_p; diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c index f1654694f4e..09b6f8729f9 100644 --- a/drivers/target/target_core_fabric_configfs.c +++ b/drivers/target/target_core_fabric_configfs.c @@ -22,7 +22,6 @@  #include <linux/module.h>  #include <linux/moduleparam.h> -#include <linux/version.h>  #include <generated/utsrelease.h>  #include <linux/utsname.h>  #include <linux/init.h> @@ -481,7 +480,7 @@ static struct config_group *target_fabric_make_nodeacl(  	se_nacl = tf->tf_ops.fabric_make_nodeacl(se_tpg, group, name);  	if (IS_ERR(se_nacl)) -		return ERR_PTR(PTR_ERR(se_nacl)); +		return ERR_CAST(se_nacl);  	nacl_cg = &se_nacl->acl_group;  	nacl_cg->default_groups = se_nacl->acl_default_groups; diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c index c4ea3a9a555..39f021b855e 100644 --- a/drivers/target/target_core_fabric_lib.c +++ b/drivers/target/target_core_fabric_lib.c @@ -63,6 +63,7 @@ u32 sas_get_pr_transport_id(  	unsigned char *buf)  {  	unsigned char *ptr; +	int ret;  	/*  	 * Set PROTOCOL IDENTIFIER to 6h for SAS @@ -74,7 +75,9 @@ u32 sas_get_pr_transport_id(  	 */  	ptr = &se_nacl->initiatorname[4]; /* Skip over 'naa. 
prefix */ -	hex2bin(&buf[4], ptr, 8); +	ret = hex2bin(&buf[4], ptr, 8); +	if (ret < 0) +		pr_debug("sas transport_id: invalid hex string\n");  	/*  	 * The SAS Transport ID is a hardcoded 24-byte length @@ -156,8 +159,9 @@ u32 fc_get_pr_transport_id(  	unsigned char *buf)  {  	unsigned char *ptr; -	int i; +	int i, ret;  	u32 off = 8; +  	/*  	 * PROTOCOL IDENTIFIER is 0h for FCP-2  	 * @@ -174,7 +178,9 @@ u32 fc_get_pr_transport_id(  			i++;  			continue;  		} -		hex2bin(&buf[off++], &ptr[i], 1); +		ret = hex2bin(&buf[off++], &ptr[i], 1); +		if (ret < 0) +			pr_debug("fc transport_id: invalid hex string\n");  		i += 2;  	}  	/* diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c index bc1b33639b8..19a0be9c657 100644 --- a/drivers/target/target_core_file.c +++ b/drivers/target/target_core_file.c @@ -26,7 +26,6 @@   *   ******************************************************************************/ -#include <linux/version.h>  #include <linux/string.h>  #include <linux/parser.h>  #include <linux/timer.h> @@ -273,13 +272,14 @@ fd_alloc_task(unsigned char *cdb)  static int fd_do_readv(struct se_task *task)  {  	struct fd_request *req = FILE_REQ(task); -	struct fd_dev *dev = req->fd_task.se_dev->dev_ptr; +	struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev; +	struct fd_dev *dev = se_dev->dev_ptr;  	struct file *fd = dev->fd_file;  	struct scatterlist *sg = task->task_sg;  	struct iovec *iov;  	mm_segment_t old_fs;  	loff_t pos = (task->task_lba * -		      task->se_dev->se_sub_dev->se_dev_attrib.block_size); +		      se_dev->se_sub_dev->se_dev_attrib.block_size);  	int ret = 0, i;  	iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL); @@ -325,13 +325,14 @@ static int fd_do_readv(struct se_task *task)  static int fd_do_writev(struct se_task *task)  {  	struct fd_request *req = FILE_REQ(task); -	struct fd_dev *dev = req->fd_task.se_dev->dev_ptr; +	struct se_device *se_dev = req->fd_task.task_se_cmd->se_dev; +	struct fd_dev *dev = se_dev->dev_ptr;  	struct file *fd = dev->fd_file;  	struct scatterlist *sg = task->task_sg;  	struct iovec *iov;  	mm_segment_t old_fs;  	loff_t pos = (task->task_lba * -		      task->se_dev->se_sub_dev->se_dev_attrib.block_size); +		      se_dev->se_sub_dev->se_dev_attrib.block_size);  	int ret, i = 0;  	iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL); @@ -399,33 +400,6 @@ static void fd_emulate_sync_cache(struct se_task *task)  }  /* - * Tell TCM Core that we are capable of WriteCache emulation for - * an underlying struct se_device. - */ -static int fd_emulated_write_cache(struct se_device *dev) -{ -	return 1; -} - -static int fd_emulated_dpo(struct se_device *dev) -{ -	return 0; -} -/* - * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs - * for TYPE_DISK. - */ -static int fd_emulated_fua_write(struct se_device *dev) -{ -	return 1; -} - -static int fd_emulated_fua_read(struct se_device *dev) -{ -	return 0; -} - -/*   * WRITE Force Unit Access (FUA) emulation on a per struct se_task   * LBA range basis..   
*/ @@ -608,17 +582,6 @@ static ssize_t fd_show_configfs_dev_params(  	return bl;  } -/*	fd_get_cdb(): (Part of se_subsystem_api_t template) - * - * - */ -static unsigned char *fd_get_cdb(struct se_task *task) -{ -	struct fd_request *req = FILE_REQ(task); - -	return req->fd_scsi_cdb; -} -  /*	fd_get_device_rev(): (Part of se_subsystem_api_t template)   *   * @@ -650,15 +613,13 @@ static struct se_subsystem_api fileio_template = {  	.name			= "fileio",  	.owner			= THIS_MODULE,  	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV, +	.write_cache_emulated	= 1, +	.fua_write_emulated	= 1,  	.attach_hba		= fd_attach_hba,  	.detach_hba		= fd_detach_hba,  	.allocate_virtdevice	= fd_allocate_virtdevice,  	.create_virtdevice	= fd_create_virtdevice,  	.free_device		= fd_free_device, -	.dpo_emulated		= fd_emulated_dpo, -	.fua_write_emulated	= fd_emulated_fua_write, -	.fua_read_emulated	= fd_emulated_fua_read, -	.write_cache_emulated	= fd_emulated_write_cache,  	.alloc_task		= fd_alloc_task,  	.do_task		= fd_do_task,  	.do_sync_cache		= fd_emulate_sync_cache, @@ -666,7 +627,6 @@ static struct se_subsystem_api fileio_template = {  	.check_configfs_dev_params = fd_check_configfs_dev_params,  	.set_configfs_dev_params = fd_set_configfs_dev_params,  	.show_configfs_dev_params = fd_show_configfs_dev_params, -	.get_cdb		= fd_get_cdb,  	.get_device_rev		= fd_get_device_rev,  	.get_device_type	= fd_get_device_type,  	.get_blocks		= fd_get_blocks, diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h index daebd710b89..59e6e73106c 100644 --- a/drivers/target/target_core_file.h +++ b/drivers/target/target_core_file.h @@ -14,9 +14,7 @@  struct fd_request {  	struct se_task	fd_task; -	/* SCSI CDB from iSCSI Command PDU */ -	unsigned char	fd_scsi_cdb[TCM_MAX_COMMAND_SIZE]; -} ____cacheline_aligned; +};  #define FBDF_HAS_PATH		0x01  #define FBDF_HAS_SIZE		0x02 diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c index 7e123410544..41ad02b5fb8 100644 --- a/drivers/target/target_core_iblock.c +++ b/drivers/target/target_core_iblock.c @@ -27,7 +27,6 @@   *   ******************************************************************************/ -#include <linux/version.h>  #include <linux/string.h>  #include <linux/parser.h>  #include <linux/timer.h> @@ -314,104 +313,42 @@ static unsigned long long iblock_emulate_read_cap_with_block_size(  	return blocks_long;  } +static void iblock_end_io_flush(struct bio *bio, int err) +{ +	struct se_cmd *cmd = bio->bi_private; + +	if (err) +		pr_err("IBLOCK: cache flush failed: %d\n", err); + +	if (cmd) +		transport_complete_sync_cache(cmd, err == 0); +	bio_put(bio); +} +  /* - * Emulate SYCHRONIZE_CACHE_* + * Implement SYCHRONIZE CACHE.  Note that we can't handle lba ranges and must + * always flush the whole cache.   */  static void iblock_emulate_sync_cache(struct se_task *task)  {  	struct se_cmd *cmd = task->task_se_cmd;  	struct iblock_dev *ib_dev = cmd->se_dev->dev_ptr;  	int immed = (cmd->t_task_cdb[1] & 0x2); -	sector_t error_sector; -	int ret; +	struct bio *bio;  	/*  	 * If the Immediate bit is set, queue up the GOOD response -	 * for this SYNCHRONIZE_CACHE op +	 * for this SYNCHRONIZE_CACHE op.  	 */  	if (immed)  		transport_complete_sync_cache(cmd, 1); -	/* -	 * blkdev_issue_flush() does not support a specifying a range, so -	 * we have to flush the entire cache. 
-	 */ -	ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector); -	if (ret != 0) { -		pr_err("IBLOCK: block_issue_flush() failed: %d " -			" error_sector: %llu\n", ret, -			(unsigned long long)error_sector); -	} - +	bio = bio_alloc(GFP_KERNEL, 0); +	bio->bi_end_io = iblock_end_io_flush; +	bio->bi_bdev = ib_dev->ibd_bd;  	if (!immed) -		transport_complete_sync_cache(cmd, ret == 0); -} - -/* - * Tell TCM Core that we are capable of WriteCache emulation for - * an underlying struct se_device. - */ -static int iblock_emulated_write_cache(struct se_device *dev) -{ -	return 1; -} - -static int iblock_emulated_dpo(struct se_device *dev) -{ -	return 0; -} - -/* - * Tell TCM Core that we will be emulating Forced Unit Access (FUA) for WRITEs - * for TYPE_DISK. - */ -static int iblock_emulated_fua_write(struct se_device *dev) -{ -	return 1; -} - -static int iblock_emulated_fua_read(struct se_device *dev) -{ -	return 0; -} - -static int iblock_do_task(struct se_task *task) -{ -	struct se_device *dev = task->task_se_cmd->se_dev; -	struct iblock_req *req = IBLOCK_REQ(task); -	struct bio *bio = req->ib_bio, *nbio = NULL; -	struct blk_plug plug; -	int rw; - -	if (task->task_data_direction == DMA_TO_DEVICE) { -		/* -		 * Force data to disk if we pretend to not have a volatile -		 * write cache, or the initiator set the Force Unit Access bit. -		 */ -		if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 || -		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && -		     task->task_se_cmd->t_tasks_fua)) -			rw = WRITE_FUA; -		else -			rw = WRITE; -	} else { -		rw = READ; -	} - -	blk_start_plug(&plug); -	while (bio) { -		nbio = bio->bi_next; -		bio->bi_next = NULL; -		pr_debug("Calling submit_bio() task: %p bio: %p" -			" bio->bi_sector: %llu\n", task, bio, -			 (unsigned long long)bio->bi_sector); - -		submit_bio(rw, bio); -		bio = nbio; -	} -	blk_finish_plug(&plug); - -	return PYX_TRANSPORT_SENT_TO_TRANSPORT; +		bio->bi_private = cmd; +	submit_bio(WRITE_FLUSH, bio);  }  static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range) @@ -425,20 +362,7 @@ static int iblock_do_discard(struct se_device *dev, sector_t lba, u32 range)  static void iblock_free_task(struct se_task *task)  { -	struct iblock_req *req = IBLOCK_REQ(task); -	struct bio *bio, *hbio = req->ib_bio; -	/* -	 * We only release the bio(s) here if iblock_bio_done() has not called -	 * bio_put() -> iblock_bio_destructor(). 
-	 */ -	while (hbio != NULL) { -		bio = hbio; -		hbio = hbio->bi_next; -		bio->bi_next = NULL; -		bio_put(bio); -	} - -	kfree(req); +	kfree(IBLOCK_REQ(task));  }  enum { @@ -552,25 +476,21 @@ static ssize_t iblock_show_configfs_dev_params(  static void iblock_bio_destructor(struct bio *bio)  {  	struct se_task *task = bio->bi_private; -	struct iblock_dev *ib_dev = task->se_dev->dev_ptr; +	struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr;  	bio_free(bio, ib_dev->ibd_bio_set);  } -static struct bio *iblock_get_bio( -	struct se_task *task, -	struct iblock_req *ib_req, -	struct iblock_dev *ib_dev, -	int *ret, -	sector_t lba, -	u32 sg_num) +static struct bio * +iblock_get_bio(struct se_task *task, sector_t lba, u32 sg_num)  { +	struct iblock_dev *ib_dev = task->task_se_cmd->se_dev->dev_ptr; +	struct iblock_req *ib_req = IBLOCK_REQ(task);  	struct bio *bio;  	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);  	if (!bio) {  		pr_err("Unable to allocate memory for bio\n"); -		*ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;  		return NULL;  	} @@ -591,17 +511,33 @@ static struct bio *iblock_get_bio(  	return bio;  } -static int iblock_map_data_SG(struct se_task *task) +static int iblock_do_task(struct se_task *task)  {  	struct se_cmd *cmd = task->task_se_cmd;  	struct se_device *dev = cmd->se_dev; -	struct iblock_dev *ib_dev = task->se_dev->dev_ptr; -	struct iblock_req *ib_req = IBLOCK_REQ(task); -	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL; +	struct bio *bio; +	struct bio_list list;  	struct scatterlist *sg; -	int ret = 0;  	u32 i, sg_num = task->task_sg_nents;  	sector_t block_lba; +	struct blk_plug plug; +	int rw; + +	if (task->task_data_direction == DMA_TO_DEVICE) { +		/* +		 * Force data to disk if we pretend to not have a volatile +		 * write cache, or the initiator set the Force Unit Access bit. +		 */ +		if (dev->se_sub_dev->se_dev_attrib.emulate_write_cache == 0 || +		    (dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && +		     task->task_se_cmd->t_tasks_fua)) +			rw = WRITE_FUA; +		else +			rw = WRITE; +	} else { +		rw = READ; +	} +  	/*  	 * Do starting conversion up from non 512-byte blocksize with  	 * struct se_task SCSI blocksize into Linux/Block 512 units for BIO. @@ -620,68 +556,43 @@ static int iblock_map_data_SG(struct se_task *task)  		return PYX_TRANSPORT_LU_COMM_FAILURE;  	} -	bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num); +	bio = iblock_get_bio(task, block_lba, sg_num);  	if (!bio) -		return ret; +		return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES; -	ib_req->ib_bio = bio; -	hbio = tbio = bio; -	/* -	 * Use fs/bio.c:bio_add_pages() to setup the bio_vec maplist -	 * from task->task_sg -> struct scatterlist memory. 
-	 */ -	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { -		pr_debug("task: %p bio: %p Calling bio_add_page(): page:" -			" %p len: %u offset: %u\n", task, bio, sg_page(sg), -				sg->length, sg->offset); -again: -		ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset); -		if (ret != sg->length) { - -			pr_debug("*** Set bio->bi_sector: %llu\n", -				 (unsigned long long)bio->bi_sector); -			pr_debug("** task->task_size: %u\n", -					task->task_size); -			pr_debug("*** bio->bi_max_vecs: %u\n", -					bio->bi_max_vecs); -			pr_debug("*** bio->bi_vcnt: %u\n", -					bio->bi_vcnt); +	bio_list_init(&list); +	bio_list_add(&list, bio); -			bio = iblock_get_bio(task, ib_req, ib_dev, &ret, -						block_lba, sg_num); +	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) { +		/* +		 * XXX: if the length the device accepts is shorter than the +		 *	length of the S/G list entry this will cause and +		 *	endless loop.  Better hope no driver uses huge pages. +		 */ +		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset) +				!= sg->length) { +			bio = iblock_get_bio(task, block_lba, sg_num);  			if (!bio)  				goto fail; - -			tbio = tbio->bi_next = bio; -			pr_debug("-----------------> Added +1 bio: %p to" -				" list, Going to again\n", bio); -			goto again; +			bio_list_add(&list, bio);  		} +  		/* Always in 512 byte units for Linux/Block */  		block_lba += sg->length >> IBLOCK_LBA_SHIFT;  		sg_num--; -		pr_debug("task: %p bio-add_page() passed!, decremented" -			" sg_num to %u\n", task, sg_num); -		pr_debug("task: %p bio_add_page() passed!, increased lba" -			 " to %llu\n", task, (unsigned long long)block_lba); -		pr_debug("task: %p bio_add_page() passed!, bio->bi_vcnt:" -				" %u\n", task, bio->bi_vcnt);  	} -	return 0; +	blk_start_plug(&plug); +	while ((bio = bio_list_pop(&list))) +		submit_bio(rw, bio); +	blk_finish_plug(&plug); + +	return PYX_TRANSPORT_SENT_TO_TRANSPORT; +  fail: -	while (hbio) { -		bio = hbio; -		hbio = hbio->bi_next; -		bio->bi_next = NULL; +	while ((bio = bio_list_pop(&list)))  		bio_put(bio); -	} -	return ret; -} - -static unsigned char *iblock_get_cdb(struct se_task *task) -{ -	return IBLOCK_REQ(task)->ib_scsi_cdb; +	return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;  }  static u32 iblock_get_device_rev(struct se_device *dev) @@ -707,6 +618,7 @@ static void iblock_bio_done(struct bio *bio, int err)  {  	struct se_task *task = bio->bi_private;  	struct iblock_req *ibr = IBLOCK_REQ(task); +  	/*  	 * Set -EIO if !BIO_UPTODATE and the passed is still err=0  	 */ @@ -721,50 +633,31 @@ static void iblock_bio_done(struct bio *bio, int err)  		 */  		atomic_inc(&ibr->ib_bio_err_cnt);  		smp_mb__after_atomic_inc(); -		bio_put(bio); -		/* -		 * Wait to complete the task until the last bio as completed. -		 */ -		if (!atomic_dec_and_test(&ibr->ib_bio_cnt)) -			return; - -		ibr->ib_bio = NULL; -		transport_complete_task(task, 0); -		return;  	} -	pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n", -		 task, bio, task->task_lba, (unsigned long long)bio->bi_sector, err); -	/* -	 * bio_put() will call iblock_bio_destructor() to release the bio back -	 * to ibr->ib_bio_set. -	 */ +  	bio_put(bio); -	/* -	 * Wait to complete the task until the last bio as completed. -	 */ +  	if (!atomic_dec_and_test(&ibr->ib_bio_cnt))  		return; -	/* -	 * Return GOOD status for task if zero ib_bio_err_cnt exists. 
-	 */ -	ibr->ib_bio = NULL; -	transport_complete_task(task, (!atomic_read(&ibr->ib_bio_err_cnt))); + +	pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n", +		 task, bio, task->task_lba, +		 (unsigned long long)bio->bi_sector, err); + +	transport_complete_task(task, !atomic_read(&ibr->ib_bio_err_cnt));  }  static struct se_subsystem_api iblock_template = {  	.name			= "iblock",  	.owner			= THIS_MODULE,  	.transport_type		= TRANSPORT_PLUGIN_VHBA_PDEV, -	.map_data_SG		= iblock_map_data_SG, +	.write_cache_emulated	= 1, +	.fua_write_emulated	= 1,  	.attach_hba		= iblock_attach_hba,  	.detach_hba		= iblock_detach_hba,  	.allocate_virtdevice	= iblock_allocate_virtdevice,  	.create_virtdevice	= iblock_create_virtdevice,  	.free_device		= iblock_free_device, -	.dpo_emulated		= iblock_emulated_dpo, -	.fua_write_emulated	= iblock_emulated_fua_write, -	.fua_read_emulated	= iblock_emulated_fua_read, -	.write_cache_emulated	= iblock_emulated_write_cache,  	.alloc_task		= iblock_alloc_task,  	.do_task		= iblock_do_task,  	.do_discard		= iblock_do_discard, @@ -773,7 +666,6 @@ static struct se_subsystem_api iblock_template = {  	.check_configfs_dev_params = iblock_check_configfs_dev_params,  	.set_configfs_dev_params = iblock_set_configfs_dev_params,  	.show_configfs_dev_params = iblock_show_configfs_dev_params, -	.get_cdb		= iblock_get_cdb,  	.get_device_rev		= iblock_get_device_rev,  	.get_device_type	= iblock_get_device_type,  	.get_blocks		= iblock_get_blocks, diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h index a121cd1b657..5cf1860c10d 100644 --- a/drivers/target/target_core_iblock.h +++ b/drivers/target/target_core_iblock.h @@ -8,10 +8,8 @@  struct iblock_req {  	struct se_task ib_task; -	unsigned char ib_scsi_cdb[TCM_MAX_COMMAND_SIZE];  	atomic_t ib_bio_cnt;  	atomic_t ib_bio_err_cnt; -	struct bio *ib_bio;  } ____cacheline_aligned;  #define IBDF_HAS_UDEV_PATH		0x01 diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c index 1c1b849cd4f..0c4f783f924 100644 --- a/drivers/target/target_core_pr.c +++ b/drivers/target/target_core_pr.c @@ -25,7 +25,6 @@   *   ******************************************************************************/ -#include <linux/version.h>  #include <linux/slab.h>  #include <linux/spinlock.h>  #include <linux/list.h> @@ -1598,14 +1597,14 @@ static int core_scsi3_decode_spec_i_port(  			 * from the decoded fabric module specific TransportID  			 * at *i_str.  			 
*/ -			spin_lock_bh(&tmp_tpg->acl_node_lock); +			spin_lock_irq(&tmp_tpg->acl_node_lock);  			dest_node_acl = __core_tpg_get_initiator_node_acl(  						tmp_tpg, i_str);  			if (dest_node_acl) {  				atomic_inc(&dest_node_acl->acl_pr_ref_count);  				smp_mb__after_atomic_inc();  			} -			spin_unlock_bh(&tmp_tpg->acl_node_lock); +			spin_unlock_irq(&tmp_tpg->acl_node_lock);  			if (!dest_node_acl) {  				core_scsi3_tpg_undepend_item(tmp_tpg); @@ -3496,14 +3495,14 @@ after_iport_check:  	/*  	 * Locate the destination struct se_node_acl from the received Transport ID  	 */ -	spin_lock_bh(&dest_se_tpg->acl_node_lock); +	spin_lock_irq(&dest_se_tpg->acl_node_lock);  	dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg,  				initiator_str);  	if (dest_node_acl) {  		atomic_inc(&dest_node_acl->acl_pr_ref_count);  		smp_mb__after_atomic_inc();  	} -	spin_unlock_bh(&dest_se_tpg->acl_node_lock); +	spin_unlock_irq(&dest_se_tpg->acl_node_lock);  	if (!dest_node_acl) {  		pr_err("Unable to locate %s dest_node_acl for" diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c index 2b7b0da9146..dad671dee9e 100644 --- a/drivers/target/target_core_pscsi.c +++ b/drivers/target/target_core_pscsi.c @@ -26,7 +26,6 @@   *   ******************************************************************************/ -#include <linux/version.h>  #include <linux/string.h>  #include <linux/parser.h>  #include <linux/timer.h> @@ -567,7 +566,7 @@ static struct se_device *pscsi_create_virtdevice(  			if (IS_ERR(sh)) {  				pr_err("pSCSI: Unable to locate"  					" pdv_host_id: %d\n", pdv->pdv_host_id); -				return (struct se_device *) sh; +				return ERR_CAST(sh);  			}  		}  	} else { @@ -677,7 +676,7 @@ static inline struct pscsi_plugin_task *PSCSI_TASK(struct se_task *task)   */  static int pscsi_transport_complete(struct se_task *task)  { -	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; +	struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr;  	struct scsi_device *sd = pdv->pdv_sd;  	int result;  	struct pscsi_plugin_task *pt = PSCSI_TASK(task); @@ -777,95 +776,6 @@ pscsi_alloc_task(unsigned char *cdb)  	return &pt->pscsi_task;  } -static inline void pscsi_blk_init_request( -	struct se_task *task, -	struct pscsi_plugin_task *pt, -	struct request *req, -	int bidi_read) -{ -	/* -	 * Defined as "scsi command" in include/linux/blkdev.h. -	 */ -	req->cmd_type = REQ_TYPE_BLOCK_PC; -	/* -	 * For the extra BIDI-COMMAND READ struct request we do not -	 * need to setup the remaining structure members -	 */ -	if (bidi_read) -		return; -	/* -	 * Setup the done function pointer for struct request, -	 * also set the end_io_data pointer.to struct se_task. -	 */ -	req->end_io = pscsi_req_done; -	req->end_io_data = task; -	/* -	 * Load the referenced struct se_task's SCSI CDB into -	 * include/linux/blkdev.h:struct request->cmd -	 */ -	req->cmd_len = scsi_command_size(pt->pscsi_cdb); -	req->cmd = &pt->pscsi_cdb[0]; -	/* -	 * Setup pointer for outgoing sense data. 
-	 */ -	req->sense = &pt->pscsi_sense[0]; -	req->sense_len = 0; -} - -/* - * Used for pSCSI data payloads for all *NON* SCF_SCSI_DATA_SG_IO_CDB -*/ -static int pscsi_blk_get_request(struct se_task *task) -{ -	struct pscsi_plugin_task *pt = PSCSI_TASK(task); -	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; - -	pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue, -			(task->task_data_direction == DMA_TO_DEVICE), -			GFP_KERNEL); -	if (!pt->pscsi_req || IS_ERR(pt->pscsi_req)) { -		pr_err("PSCSI: blk_get_request() failed: %ld\n", -				IS_ERR(pt->pscsi_req)); -		return PYX_TRANSPORT_LU_COMM_FAILURE; -	} -	/* -	 * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC, -	 * and setup rq callback, CDB and sense. -	 */ -	pscsi_blk_init_request(task, pt, pt->pscsi_req, 0); -	return 0; -} - -/*      pscsi_do_task(): (Part of se_subsystem_api_t template) - * - * - */ -static int pscsi_do_task(struct se_task *task) -{ -	struct pscsi_plugin_task *pt = PSCSI_TASK(task); -	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; -	/* -	 * Set the struct request->timeout value based on peripheral -	 * device type from SCSI. -	 */ -	if (pdv->pdv_sd->type == TYPE_DISK) -		pt->pscsi_req->timeout = PS_TIMEOUT_DISK; -	else -		pt->pscsi_req->timeout = PS_TIMEOUT_OTHER; - -	pt->pscsi_req->retries = PS_RETRY; -	/* -	 * Queue the struct request into the struct scsi_device->request_queue. -	 * Also check for HEAD_OF_QUEUE SAM TASK attr from received se_cmd -	 * descriptor -	 */ -	blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, pt->pscsi_req, -			(task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG), -			pscsi_req_done); - -	return PYX_TRANSPORT_SENT_TO_TRANSPORT; -} -  static void pscsi_free_task(struct se_task *task)  {  	struct pscsi_plugin_task *pt = PSCSI_TASK(task); @@ -1049,15 +959,12 @@ static inline struct bio *pscsi_get_bio(int sg_num)  	return bio;  } -static int __pscsi_map_SG( -	struct se_task *task, -	struct scatterlist *task_sg, -	u32 task_sg_num, -	int bidi_read) +static int pscsi_map_sg(struct se_task *task, struct scatterlist *task_sg, +		struct bio **hbio)  { -	struct pscsi_plugin_task *pt = PSCSI_TASK(task); -	struct pscsi_dev_virt *pdv = task->se_dev->dev_ptr; -	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL; +	struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; +	u32 task_sg_num = task->task_sg_nents; +	struct bio *bio = NULL, *tbio = NULL;  	struct page *page;  	struct scatterlist *sg;  	u32 data_len = task->task_size, i, len, bytes, off; @@ -1066,19 +973,8 @@ static int __pscsi_map_SG(  	int nr_vecs = 0, rc, ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;  	int rw = (task->task_data_direction == DMA_TO_DEVICE); -	if (!task->task_size) -		return 0; -	/* -	 * For SCF_SCSI_DATA_SG_IO_CDB, Use fs/bio.c:bio_add_page() to setup -	 * the bio_vec maplist from task->task_sg -> -	 * struct scatterlist memory.  The struct se_task->task_sg[] currently needs -	 * to be attached to struct bios for submission to Linux/SCSI using -	 * struct request to struct scsi_device->request_queue. -	 * -	 * Note that this will be changing post v2.6.28 as Target_Core_Mod/pSCSI -	 * is ported to upstream SCSI passthrough functionality that accepts -	 * struct scatterlist->page_link or struct page as a paraemeter. 
-	 */ +	*hbio = NULL; +  	pr_debug("PSCSI: nr_pages: %d\n", nr_pages);  	for_each_sg(task_sg, sg, task_sg_num, i) { @@ -1115,8 +1011,8 @@ static int __pscsi_map_SG(  				 * bios need to be added to complete a given  				 * struct se_task  				 */ -				if (!hbio) -					hbio = tbio = bio; +				if (!*hbio) +					*hbio = tbio = bio;  				else  					tbio = tbio->bi_next = bio;  			} @@ -1152,92 +1048,82 @@ static int __pscsi_map_SG(  			off = 0;  		}  	} -	/* -	 * Setup the primary pt->pscsi_req used for non BIDI and BIDI-COMMAND -	 * primary SCSI WRITE poayload mapped for struct se_task->task_sg[] -	 */ -	if (!bidi_read) { -		/* -		 * Starting with v2.6.31, call blk_make_request() passing in *hbio to -		 * allocate the pSCSI task a struct request. -		 */ -		pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue, -					hbio, GFP_KERNEL); -		if (!pt->pscsi_req) { -			pr_err("pSCSI: blk_make_request() failed\n"); -			goto fail; -		} -		/* -		 * Setup the newly allocated struct request for REQ_TYPE_BLOCK_PC, -		 * and setup rq callback, CDB and sense. -		 */ -		pscsi_blk_init_request(task, pt, pt->pscsi_req, 0); - -		return task->task_sg_nents; -	} -	/* -	 * Setup the secondary pt->pscsi_req->next_rq used for the extra BIDI-COMMAND -	 * SCSI READ paylaod mapped for struct se_task->task_sg_bidi[] -	 */ -	pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue, -					hbio, GFP_KERNEL); -	if (!pt->pscsi_req->next_rq) { -		pr_err("pSCSI: blk_make_request() failed for BIDI\n"); -		goto fail; -	} -	pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1);  	return task->task_sg_nents;  fail: -	while (hbio) { -		bio = hbio; -		hbio = hbio->bi_next; +	while (*hbio) { +		bio = *hbio; +		*hbio = (*hbio)->bi_next;  		bio->bi_next = NULL; -		bio_endio(bio, 0); +		bio_endio(bio, 0);	/* XXX: should be error */  	}  	return ret;  } -/* - * pSCSI maps both ->map_control_SG() and ->map_data_SG() to a single call. - */ -static int pscsi_map_SG(struct se_task *task) +static int pscsi_do_task(struct se_task *task)  { +	struct pscsi_dev_virt *pdv = task->task_se_cmd->se_dev->dev_ptr; +	struct pscsi_plugin_task *pt = PSCSI_TASK(task); +	struct request *req; +	struct bio *hbio;  	int ret; -	/* -	 * Setup the main struct request for the task->task_sg[] payload -	 */ +	target_get_task_cdb(task, pt->pscsi_cdb); + +	if (task->task_se_cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { +		req = blk_get_request(pdv->pdv_sd->request_queue, +				(task->task_data_direction == DMA_TO_DEVICE), +				GFP_KERNEL); +		if (!req || IS_ERR(req)) { +			pr_err("PSCSI: blk_get_request() failed: %ld\n", +					req ? IS_ERR(req) : -ENOMEM); +			return PYX_TRANSPORT_LU_COMM_FAILURE; +		} +	} else { +		BUG_ON(!task->task_size); -	ret = __pscsi_map_SG(task, task->task_sg, task->task_sg_nents, 0); -	if (ret >= 0 && task->task_sg_bidi) {  		/* -		 * If present, set up the extra BIDI-COMMAND SCSI READ -		 * struct request and payload. 
+		 * Setup the main struct request for the task->task_sg[] payload  		 */ -		ret = __pscsi_map_SG(task, task->task_sg_bidi, -					task->task_sg_nents, 1); +		ret = pscsi_map_sg(task, task->task_sg, &hbio); +		if (ret < 0) +			return PYX_TRANSPORT_LU_COMM_FAILURE; + +		req = blk_make_request(pdv->pdv_sd->request_queue, hbio, +				       GFP_KERNEL); +		if (!req) { +			pr_err("pSCSI: blk_make_request() failed\n"); +			goto fail; +		}  	} -	if (ret < 0) -		return PYX_TRANSPORT_LU_COMM_FAILURE; -	return 0; -} +	req->cmd_type = REQ_TYPE_BLOCK_PC; +	req->end_io = pscsi_req_done; +	req->end_io_data = task; +	req->cmd_len = scsi_command_size(pt->pscsi_cdb); +	req->cmd = &pt->pscsi_cdb[0]; +	req->sense = &pt->pscsi_sense[0]; +	req->sense_len = 0; +	if (pdv->pdv_sd->type == TYPE_DISK) +		req->timeout = PS_TIMEOUT_DISK; +	else +		req->timeout = PS_TIMEOUT_OTHER; +	req->retries = PS_RETRY; -static int pscsi_CDB_none(struct se_task *task) -{ -	return pscsi_blk_get_request(task); -} +	blk_execute_rq_nowait(pdv->pdv_sd->request_queue, NULL, req, +			(task->task_se_cmd->sam_task_attr == MSG_HEAD_TAG), +			pscsi_req_done); -/*	pscsi_get_cdb(): - * - * - */ -static unsigned char *pscsi_get_cdb(struct se_task *task) -{ -	struct pscsi_plugin_task *pt = PSCSI_TASK(task); +	return PYX_TRANSPORT_SENT_TO_TRANSPORT; -	return pt->pscsi_cdb; +fail: +	while (hbio) { +		struct bio *bio = hbio; +		hbio = hbio->bi_next; +		bio->bi_next = NULL; +		bio_endio(bio, 0);	/* XXX: should be error */ +	} +	return PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;  }  /*	pscsi_get_sense_buffer(): @@ -1328,23 +1214,13 @@ static void pscsi_req_done(struct request *req, int uptodate)  	pt->pscsi_resid = req->resid_len;  	pscsi_process_SAM_status(task, pt); -	/* -	 * Release BIDI-READ if present -	 */ -	if (req->next_rq != NULL) -		__blk_put_request(req->q, req->next_rq); -  	__blk_put_request(req->q, req); -	pt->pscsi_req = NULL;  }  static struct se_subsystem_api pscsi_template = {  	.name			= "pscsi",  	.owner			= THIS_MODULE,  	.transport_type		= TRANSPORT_PLUGIN_PHBA_PDEV, -	.cdb_none		= pscsi_CDB_none, -	.map_control_SG		= pscsi_map_SG, -	.map_data_SG		= pscsi_map_SG,  	.attach_hba		= pscsi_attach_hba,  	.detach_hba		= pscsi_detach_hba,  	.pmode_enable_hba	= pscsi_pmode_enable_hba, @@ -1358,7 +1234,6 @@ static struct se_subsystem_api pscsi_template = {  	.check_configfs_dev_params = pscsi_check_configfs_dev_params,  	.set_configfs_dev_params = pscsi_set_configfs_dev_params,  	.show_configfs_dev_params = pscsi_show_configfs_dev_params, -	.get_cdb		= pscsi_get_cdb,  	.get_sense_buffer	= pscsi_get_sense_buffer,  	.get_device_rev		= pscsi_get_device_rev,  	.get_device_type	= pscsi_get_device_type, diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h index ebf4f1ae2c8..fdc17b6aefb 100644 --- a/drivers/target/target_core_pscsi.h +++ b/drivers/target/target_core_pscsi.h @@ -27,7 +27,6 @@ struct pscsi_plugin_task {  	int	pscsi_direction;  	int	pscsi_result;  	u32	pscsi_resid; -	struct request *pscsi_req;  	unsigned char pscsi_cdb[0];  } ____cacheline_aligned; diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c index 3dd81d24d9a..5158d3846f1 100644 --- a/drivers/target/target_core_rd.c +++ b/drivers/target/target_core_rd.c @@ -27,7 +27,6 @@   *   ******************************************************************************/ -#include <linux/version.h>  #include <linux/string.h>  #include <linux/parser.h>  #include <linux/timer.h> @@ -351,7 +350,7 @@ static struct rd_dev_sg_table 
*rd_get_sg_table(struct rd_dev *rd_dev, u32 page)  static int rd_MEMCPY_read(struct rd_request *req)  {  	struct se_task *task = &req->rd_task; -	struct rd_dev *dev = req->rd_task.se_dev->dev_ptr; +	struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;  	struct rd_dev_sg_table *table;  	struct scatterlist *sg_d, *sg_s;  	void *dst, *src; @@ -390,12 +389,10 @@ static int rd_MEMCPY_read(struct rd_request *req)  				length = req->rd_size;  			dst = sg_virt(&sg_d[i++]) + dst_offset; -			if (!dst) -				BUG(); +			BUG_ON(!dst);  			src = sg_virt(&sg_s[j]) + src_offset; -			if (!src) -				BUG(); +			BUG_ON(!src);  			dst_offset = 0;  			src_offset = length; @@ -415,8 +412,7 @@ static int rd_MEMCPY_read(struct rd_request *req)  				length = req->rd_size;  			dst = sg_virt(&sg_d[i]) + dst_offset; -			if (!dst) -				BUG(); +			BUG_ON(!dst);  			if (sg_d[i].length == length) {  				i++; @@ -425,8 +421,7 @@ static int rd_MEMCPY_read(struct rd_request *req)  				dst_offset = length;  			src = sg_virt(&sg_s[j++]) + src_offset; -			if (!src) -				BUG(); +			BUG_ON(!src);  			src_offset = 0;  			page_end = 1; @@ -471,7 +466,7 @@ static int rd_MEMCPY_read(struct rd_request *req)  static int rd_MEMCPY_write(struct rd_request *req)  {  	struct se_task *task = &req->rd_task; -	struct rd_dev *dev = req->rd_task.se_dev->dev_ptr; +	struct rd_dev *dev = req->rd_task.task_se_cmd->se_dev->dev_ptr;  	struct rd_dev_sg_table *table;  	struct scatterlist *sg_d, *sg_s;  	void *dst, *src; @@ -510,12 +505,10 @@ static int rd_MEMCPY_write(struct rd_request *req)  				length = req->rd_size;  			src = sg_virt(&sg_s[i++]) + src_offset; -			if (!src) -				BUG(); +			BUG_ON(!src);  			dst = sg_virt(&sg_d[j]) + dst_offset; -			if (!dst) -				BUG(); +			BUG_ON(!dst);  			src_offset = 0;  			dst_offset = length; @@ -535,8 +528,7 @@ static int rd_MEMCPY_write(struct rd_request *req)  				length = req->rd_size;  			src = sg_virt(&sg_s[i]) + src_offset; -			if (!src) -				BUG(); +			BUG_ON(!src);  			if (sg_s[i].length == length) {  				i++; @@ -545,8 +537,7 @@ static int rd_MEMCPY_write(struct rd_request *req)  				src_offset = length;  			dst = sg_virt(&sg_d[j++]) + dst_offset; -			if (!dst) -				BUG(); +			BUG_ON(!dst);  			dst_offset = 0;  			page_end = 1; @@ -590,7 +581,7 @@ static int rd_MEMCPY_write(struct rd_request *req)   */  static int rd_MEMCPY_do_task(struct se_task *task)  { -	struct se_device *dev = task->se_dev; +	struct se_device *dev = task->task_se_cmd->se_dev;  	struct rd_request *req = RD_REQ(task);  	unsigned long long lba;  	int ret; @@ -700,17 +691,6 @@ static ssize_t rd_show_configfs_dev_params(  	return bl;  } -/*	rd_get_cdb(): (Part of se_subsystem_api_t template) - * - * - */ -static unsigned char *rd_get_cdb(struct se_task *task) -{ -	struct rd_request *req = RD_REQ(task); - -	return req->rd_scsi_cdb; -} -  static u32 rd_get_device_rev(struct se_device *dev)  {  	return SCSI_SPC_2; /* Returns SPC-3 in Initiator Data */ @@ -744,7 +724,6 @@ static struct se_subsystem_api rd_mcp_template = {  	.check_configfs_dev_params = rd_check_configfs_dev_params,  	.set_configfs_dev_params = rd_set_configfs_dev_params,  	.show_configfs_dev_params = rd_show_configfs_dev_params, -	.get_cdb		= rd_get_cdb,  	.get_device_rev		= rd_get_device_rev,  	.get_device_type	= rd_get_device_type,  	.get_blocks		= rd_get_blocks, diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h index 0d027732cd0..784e56a0410 100644 --- a/drivers/target/target_core_rd.h +++ b/drivers/target/target_core_rd.h @@ 
-22,8 +22,6 @@ void rd_module_exit(void);  struct rd_request {  	struct se_task	rd_task; -	/* SCSI CDB from iSCSI Command PDU */ -	unsigned char	rd_scsi_cdb[TCM_MAX_COMMAND_SIZE];  	/* Offset from start of page */  	u32		rd_offset;  	/* Starting page in Ramdisk for request */ diff --git a/drivers/target/target_core_scdb.c b/drivers/target/target_core_scdb.c deleted file mode 100644 index 72843441d4f..00000000000 --- a/drivers/target/target_core_scdb.c +++ /dev/null @@ -1,105 +0,0 @@ -/******************************************************************************* - * Filename:  target_core_scdb.c - * - * This file contains the generic target engine Split CDB related functions. - * - * Copyright (c) 2004-2005 PyX Technologies, Inc. - * Copyright (c) 2005, 2006, 2007 SBE, Inc. - * Copyright (c) 2007-2010 Rising Tide Systems - * Copyright (c) 2008-2010 Linux-iSCSI.org - * - * Nicholas A. Bellinger <nab@kernel.org> - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by - * the Free Software Foundation; either version 2 of the License, or - * (at your option) any later version. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the - * GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. - * - ******************************************************************************/ - -#include <linux/net.h> -#include <linux/string.h> -#include <scsi/scsi.h> -#include <asm/unaligned.h> - -#include <target/target_core_base.h> -#include <target/target_core_transport.h> - -#include "target_core_scdb.h" - -/*	split_cdb_XX_6(): - * - *      21-bit LBA w/ 8-bit SECTORS - */ -void split_cdb_XX_6( -	unsigned long long lba, -	u32 sectors, -	unsigned char *cdb) -{ -	cdb[1] = (lba >> 16) & 0x1f; -	cdb[2] = (lba >> 8) & 0xff; -	cdb[3] = lba & 0xff; -	cdb[4] = sectors & 0xff; -} - -/*	split_cdb_XX_10(): - * - *	32-bit LBA w/ 16-bit SECTORS - */ -void split_cdb_XX_10( -	unsigned long long lba, -	u32 sectors, -	unsigned char *cdb) -{ -	put_unaligned_be32(lba, &cdb[2]); -	put_unaligned_be16(sectors, &cdb[7]); -} - -/*	split_cdb_XX_12(): - * - *	32-bit LBA w/ 32-bit SECTORS - */ -void split_cdb_XX_12( -	unsigned long long lba, -	u32 sectors, -	unsigned char *cdb) -{ -	put_unaligned_be32(lba, &cdb[2]); -	put_unaligned_be32(sectors, &cdb[6]); -} - -/*	split_cdb_XX_16(): - * - *	64-bit LBA w/ 32-bit SECTORS - */ -void split_cdb_XX_16( -	unsigned long long lba, -	u32 sectors, -	unsigned char *cdb) -{ -	put_unaligned_be64(lba, &cdb[2]); -	put_unaligned_be32(sectors, &cdb[10]); -} - -/* - *	split_cdb_XX_32(): - * - * 	64-bit LBA w/ 32-bit SECTORS such as READ_32, WRITE_32 and emulated XDWRITEREAD_32 - */ -void split_cdb_XX_32( -	unsigned long long lba, -	u32 sectors, -	unsigned char *cdb) -{ -	put_unaligned_be64(lba, &cdb[12]); -	put_unaligned_be32(sectors, &cdb[28]); -} diff --git a/drivers/target/target_core_scdb.h b/drivers/target/target_core_scdb.h deleted file mode 100644 index 48e9ccc9585..00000000000 --- a/drivers/target/target_core_scdb.h +++ /dev/null @@ -1,10 +0,0 @@ -#ifndef TARGET_CORE_SCDB_H -#define TARGET_CORE_SCDB_H - -extern void split_cdb_XX_6(unsigned long long, 
u32, unsigned char *); -extern void split_cdb_XX_10(unsigned long long, u32, unsigned char *); -extern void split_cdb_XX_12(unsigned long long, u32, unsigned char *); -extern void split_cdb_XX_16(unsigned long long, u32, unsigned char *); -extern void split_cdb_XX_32(unsigned long long, u32, unsigned char *); - -#endif /* TARGET_CORE_SCDB_H */ diff --git a/drivers/target/target_core_stat.c b/drivers/target/target_core_stat.c index a8d6e1dee93..874152aed94 100644 --- a/drivers/target/target_core_stat.c +++ b/drivers/target/target_core_stat.c @@ -32,7 +32,6 @@  #include <linux/delay.h>  #include <linux/timer.h>  #include <linux/string.h> -#include <linux/version.h>  #include <generated/utsrelease.h>  #include <linux/utsname.h>  #include <linux/proc_fs.h> diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c index 27d4925e51c..570b144a1ed 100644 --- a/drivers/target/target_core_tmr.c +++ b/drivers/target/target_core_tmr.c @@ -24,7 +24,6 @@   *   ******************************************************************************/ -#include <linux/version.h>  #include <linux/slab.h>  #include <linux/spinlock.h>  #include <linux/list.h> @@ -44,12 +43,12 @@  struct se_tmr_req *core_tmr_alloc_req(  	struct se_cmd *se_cmd,  	void *fabric_tmr_ptr, -	u8 function) +	u8 function, +	gfp_t gfp_flags)  {  	struct se_tmr_req *tmr; -	tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ? -					GFP_ATOMIC : GFP_KERNEL); +	tmr = kmem_cache_zalloc(se_tmr_req_cache, gfp_flags);  	if (!tmr) {  		pr_err("Unable to allocate struct se_tmr_req\n");  		return ERR_PTR(-ENOMEM); @@ -67,15 +66,16 @@ void core_tmr_release_req(  	struct se_tmr_req *tmr)  {  	struct se_device *dev = tmr->tmr_dev; +	unsigned long flags;  	if (!dev) {  		kmem_cache_free(se_tmr_req_cache, tmr);  		return;  	} -	spin_lock_irq(&dev->se_tmr_lock); +	spin_lock_irqsave(&dev->se_tmr_lock, flags);  	list_del(&tmr->tmr_list); -	spin_unlock_irq(&dev->se_tmr_lock); +	spin_unlock_irqrestore(&dev->se_tmr_lock, flags);  	kmem_cache_free(se_tmr_req_cache, tmr);  } @@ -100,54 +100,20 @@ static void core_tmr_handle_tas_abort(  	transport_cmd_finish_abort(cmd, 0);  } -int core_tmr_lun_reset( +static void core_tmr_drain_tmr_list(  	struct se_device *dev,  	struct se_tmr_req *tmr, -	struct list_head *preempt_and_abort_list, -	struct se_cmd *prout_cmd) +	struct list_head *preempt_and_abort_list)  { -	struct se_cmd *cmd, *tcmd; -	struct se_node_acl *tmr_nacl = NULL; -	struct se_portal_group *tmr_tpg = NULL; -	struct se_queue_obj *qobj = &dev->dev_queue_obj; +	LIST_HEAD(drain_tmr_list);  	struct se_tmr_req *tmr_p, *tmr_pp; -	struct se_task *task, *task_tmp; +	struct se_cmd *cmd;  	unsigned long flags; -	int fe_count, tas; -	/* -	 * TASK_ABORTED status bit, this is configurable via ConfigFS -	 * struct se_device attributes.  spc4r17 section 7.4.6 Control mode page -	 * -	 * A task aborted status (TAS) bit set to zero specifies that aborted -	 * tasks shall be terminated by the device server without any response -	 * to the application client. A TAS bit set to one specifies that tasks -	 * aborted by the actions of an I_T nexus other than the I_T nexus on -	 * which the command was received shall be completed with TASK ABORTED -	 * status (see SAM-4). -	 */ -	tas = dev->se_sub_dev->se_dev_attrib.emulate_tas; -	/* -	 * Determine if this se_tmr is coming from a $FABRIC_MOD -	 * or struct se_device passthrough.. 
-	 */ -	if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) { -		tmr_nacl = tmr->task_cmd->se_sess->se_node_acl; -		tmr_tpg = tmr->task_cmd->se_sess->se_tpg; -		if (tmr_nacl && tmr_tpg) { -			pr_debug("LUN_RESET: TMR caller fabric: %s" -				" initiator port %s\n", -				tmr_tpg->se_tpg_tfo->get_fabric_name(), -				tmr_nacl->initiatorname); -		} -	} -	pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n", -		(preempt_and_abort_list) ? "Preempt" : "TMR", -		dev->transport->name, tas);  	/*  	 * Release all pending and outgoing TMRs aside from the received  	 * LUN_RESET tmr..  	 */ -	spin_lock_irq(&dev->se_tmr_lock); +	spin_lock_irqsave(&dev->se_tmr_lock, flags);  	list_for_each_entry_safe(tmr_p, tmr_pp, &dev->dev_tmr_list, tmr_list) {  		/*  		 * Allow the received TMR to return with FUNCTION_COMPLETE. @@ -169,29 +135,48 @@ int core_tmr_lun_reset(  		    (core_scsi3_check_cdb_abort_and_preempt(  					preempt_and_abort_list, cmd) != 0))  			continue; -		spin_unlock_irq(&dev->se_tmr_lock); -		spin_lock_irqsave(&cmd->t_state_lock, flags); +		spin_lock(&cmd->t_state_lock);  		if (!atomic_read(&cmd->t_transport_active)) { -			spin_unlock_irqrestore(&cmd->t_state_lock, flags); -			spin_lock_irq(&dev->se_tmr_lock); +			spin_unlock(&cmd->t_state_lock);  			continue;  		}  		if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) { -			spin_unlock_irqrestore(&cmd->t_state_lock, flags); -			spin_lock_irq(&dev->se_tmr_lock); +			spin_unlock(&cmd->t_state_lock);  			continue;  		} +		spin_unlock(&cmd->t_state_lock); + +		list_move_tail(&tmr->tmr_list, &drain_tmr_list); +	} +	spin_unlock_irqrestore(&dev->se_tmr_lock, flags); + +	while (!list_empty(&drain_tmr_list)) { +		tmr = list_entry(drain_tmr_list.next, struct se_tmr_req, tmr_list); +		list_del(&tmr->tmr_list); +		cmd = tmr_p->task_cmd; +  		pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"  			" Response: 0x%02x, t_state: %d\n", -			(preempt_and_abort_list) ? "Preempt" : "", tmr_p, -			tmr_p->function, tmr_p->response, cmd->t_state); -		spin_unlock_irqrestore(&cmd->t_state_lock, flags); +			(preempt_and_abort_list) ? "Preempt" : "", tmr, +			tmr->function, tmr->response, cmd->t_state); -		transport_cmd_finish_abort_tmr(cmd); -		spin_lock_irq(&dev->se_tmr_lock); +		transport_cmd_finish_abort(cmd, 1);  	} -	spin_unlock_irq(&dev->se_tmr_lock); +} + +static void core_tmr_drain_task_list( +	struct se_device *dev, +	struct se_cmd *prout_cmd, +	struct se_node_acl *tmr_nacl, +	int tas, +	struct list_head *preempt_and_abort_list) +{ +	LIST_HEAD(drain_task_list); +	struct se_cmd *cmd; +	struct se_task *task, *task_tmp; +	unsigned long flags; +	int fe_count;  	/*  	 * Complete outstanding struct se_task CDBs with TASK_ABORTED SAM status.  	 
* This is following sam4r17, section 5.6 Aborting commands, Table 38 @@ -236,18 +221,28 @@ int core_tmr_lun_reset(  		if (prout_cmd == cmd)  			continue; -		list_del(&task->t_state_list); +		list_move_tail(&task->t_state_list, &drain_task_list);  		atomic_set(&task->task_state_active, 0); -		spin_unlock_irqrestore(&dev->execute_task_lock, flags); +		/* +		 * Remove from task execute list before processing drain_task_list +		 */ +		if (!list_empty(&task->t_execute_list)) +			__transport_remove_task_from_execute_queue(task, dev); +	} +	spin_unlock_irqrestore(&dev->execute_task_lock, flags); + +	while (!list_empty(&drain_task_list)) { +		task = list_entry(drain_task_list.next, struct se_task, t_state_list); +		list_del(&task->t_state_list); +		cmd = task->task_se_cmd; -		spin_lock_irqsave(&cmd->t_state_lock, flags);  		pr_debug("LUN_RESET: %s cmd: %p task: %p" -			" ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/" -			"def_t_state: %d/%d cdb: 0x%02x\n", +			" ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state: %d" +			"cdb: 0x%02x\n",  			(preempt_and_abort_list) ? "Preempt" : "", cmd, task,  			cmd->se_tfo->get_task_tag(cmd), 0,  			cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, -			cmd->deferred_t_state, cmd->t_task_cdb[0]); +			cmd->t_task_cdb[0]);  		pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"  			" t_task_cdbs: %d t_task_cdbs_left: %d"  			" t_task_cdbs_sent: %d -- t_transport_active: %d" @@ -260,35 +255,24 @@ int core_tmr_lun_reset(  			atomic_read(&cmd->t_transport_stop),  			atomic_read(&cmd->t_transport_sent)); -		if (atomic_read(&task->task_active)) { -			atomic_set(&task->task_stop, 1); -			spin_unlock_irqrestore( -				&cmd->t_state_lock, flags); - -			pr_debug("LUN_RESET: Waiting for task: %p to shutdown" -				" for dev: %p\n", task, dev); -			wait_for_completion(&task->task_stop_comp); -			pr_debug("LUN_RESET Completed task: %p shutdown for" -				" dev: %p\n", task, dev); -			spin_lock_irqsave(&cmd->t_state_lock, flags); -			atomic_dec(&cmd->t_task_cdbs_left); +		/* +		 * If the command may be queued onto a workqueue cancel it now. +		 * +		 * This is equivalent to removal from the execute queue in the +		 * loop above, but we do it down here given that +		 * cancel_work_sync may block. 
+		 */ +		if (cmd->t_state == TRANSPORT_COMPLETE) +			cancel_work_sync(&cmd->work); -			atomic_set(&task->task_active, 0); -			atomic_set(&task->task_stop, 0); -		} else { -			if (atomic_read(&task->task_execute_queue) != 0) -				transport_remove_task_from_execute_queue(task, dev); -		} -		__transport_stop_task_timer(task, &flags); +		spin_lock_irqsave(&cmd->t_state_lock, flags); +		target_stop_task(task, &flags);  		if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) { -			spin_unlock_irqrestore( -					&cmd->t_state_lock, flags); +			spin_unlock_irqrestore(&cmd->t_state_lock, flags);  			pr_debug("LUN_RESET: Skipping task: %p, dev: %p for"  				" t_task_cdbs_ex_left: %d\n", task, dev,  				atomic_read(&cmd->t_task_cdbs_ex_left)); - -			spin_lock_irqsave(&dev->execute_task_lock, flags);  			continue;  		}  		fe_count = atomic_read(&cmd->t_fe_count); @@ -298,22 +282,31 @@ int core_tmr_lun_reset(  				" task: %p, t_fe_count: %d dev: %p\n", task,  				fe_count, dev);  			atomic_set(&cmd->t_transport_aborted, 1); -			spin_unlock_irqrestore(&cmd->t_state_lock, -						flags); -			core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); +			spin_unlock_irqrestore(&cmd->t_state_lock, flags); -			spin_lock_irqsave(&dev->execute_task_lock, flags); +			core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);  			continue;  		}  		pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"  			" t_fe_count: %d dev: %p\n", task, fe_count, dev);  		atomic_set(&cmd->t_transport_aborted, 1);  		spin_unlock_irqrestore(&cmd->t_state_lock, flags); -		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count); -		spin_lock_irqsave(&dev->execute_task_lock, flags); +		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas, fe_count);  	} -	spin_unlock_irqrestore(&dev->execute_task_lock, flags); +} + +static void core_tmr_drain_cmd_list( +	struct se_device *dev, +	struct se_cmd *prout_cmd, +	struct se_node_acl *tmr_nacl, +	int tas, +	struct list_head *preempt_and_abort_list) +{ +	LIST_HEAD(drain_cmd_list); +	struct se_queue_obj *qobj = &dev->dev_queue_obj; +	struct se_cmd *cmd, *tcmd; +	unsigned long flags;  	/*  	 * Release all commands remaining in the struct se_device cmd queue.  	 * @@ -337,11 +330,26 @@ int core_tmr_lun_reset(  		 */  		if (prout_cmd == cmd)  			continue; +		/* +		 * Skip direct processing of TRANSPORT_FREE_CMD_INTR for +		 * HW target mode fabrics. +		 */ +		spin_lock(&cmd->t_state_lock); +		if (cmd->t_state == TRANSPORT_FREE_CMD_INTR) { +			spin_unlock(&cmd->t_state_lock); +			continue; +		} +		spin_unlock(&cmd->t_state_lock); -		atomic_dec(&cmd->t_transport_queue_active); +		atomic_set(&cmd->t_transport_queue_active, 0);  		atomic_dec(&qobj->queue_cnt); -		list_del(&cmd->se_queue_node); -		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); +		list_move_tail(&cmd->se_queue_node, &drain_cmd_list); +	} +	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); + +	while (!list_empty(&drain_cmd_list)) { +		cmd = list_entry(drain_cmd_list.next, struct se_cmd, se_queue_node); +		list_del_init(&cmd->se_queue_node);  		pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:"  			" %d t_fe_count: %d\n", (preempt_and_abort_list) ? 
@@ -354,9 +362,53 @@ int core_tmr_lun_reset(  		core_tmr_handle_tas_abort(tmr_nacl, cmd, tas,  				atomic_read(&cmd->t_fe_count)); -		spin_lock_irqsave(&qobj->cmd_queue_lock, flags);  	} -	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); +} + +int core_tmr_lun_reset( +        struct se_device *dev, +        struct se_tmr_req *tmr, +        struct list_head *preempt_and_abort_list, +        struct se_cmd *prout_cmd) +{ +	struct se_node_acl *tmr_nacl = NULL; +	struct se_portal_group *tmr_tpg = NULL; +	int tas; +        /* +	 * TASK_ABORTED status bit, this is configurable via ConfigFS +	 * struct se_device attributes.  spc4r17 section 7.4.6 Control mode page +	 * +	 * A task aborted status (TAS) bit set to zero specifies that aborted +	 * tasks shall be terminated by the device server without any response +	 * to the application client. A TAS bit set to one specifies that tasks +	 * aborted by the actions of an I_T nexus other than the I_T nexus on +	 * which the command was received shall be completed with TASK ABORTED +	 * status (see SAM-4). +	 */ +	tas = dev->se_sub_dev->se_dev_attrib.emulate_tas; +	/* +	 * Determine if this se_tmr is coming from a $FABRIC_MOD +	 * or struct se_device passthrough.. +	 */ +	if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) { +		tmr_nacl = tmr->task_cmd->se_sess->se_node_acl; +		tmr_tpg = tmr->task_cmd->se_sess->se_tpg; +		if (tmr_nacl && tmr_tpg) { +			pr_debug("LUN_RESET: TMR caller fabric: %s" +				" initiator port %s\n", +				tmr_tpg->se_tpg_tfo->get_fabric_name(), +				tmr_nacl->initiatorname); +		} +	} +	pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n", +		(preempt_and_abort_list) ? "Preempt" : "TMR", +		dev->transport->name, tas); + +	core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list); +	core_tmr_drain_task_list(dev, prout_cmd, tmr_nacl, tas, +				preempt_and_abort_list); +	core_tmr_drain_cmd_list(dev, prout_cmd, tmr_nacl, tas, +				preempt_and_abort_list);  	/*  	 * Clear any legacy SPC-2 reservation when called during  	 * LOGICAL UNIT RESET @@ -379,3 +431,4 @@ int core_tmr_lun_reset(  			dev->transport->name);  	return 0;  } + diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c index 4f1ba4c5ef1..49fd0a9b0a5 100644 --- a/drivers/target/target_core_tpg.c +++ b/drivers/target/target_core_tpg.c @@ -137,15 +137,15 @@ struct se_node_acl *core_tpg_get_initiator_node_acl(  {  	struct se_node_acl *acl; -	spin_lock_bh(&tpg->acl_node_lock); +	spin_lock_irq(&tpg->acl_node_lock);  	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {  		if (!strcmp(acl->initiatorname, initiatorname) &&  		    !acl->dynamic_node_acl) { -			spin_unlock_bh(&tpg->acl_node_lock); +			spin_unlock_irq(&tpg->acl_node_lock);  			return acl;  		}  	} -	spin_unlock_bh(&tpg->acl_node_lock); +	spin_unlock_irq(&tpg->acl_node_lock);  	return NULL;  } @@ -298,13 +298,21 @@ struct se_node_acl *core_tpg_check_initiator_node_acl(  		tpg->se_tpg_tfo->tpg_release_fabric_acl(tpg, acl);  		return NULL;  	} +	/* +	 * Here we only create demo-mode MappedLUNs from the active +	 * TPG LUNs if the fabric is not explictly asking for +	 * tpg_check_demo_mode_login_only() == 1. 
+	 */ +	if ((tpg->se_tpg_tfo->tpg_check_demo_mode_login_only != NULL) && +	    (tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg) == 1)) +		do { ; } while (0); +	else +		core_tpg_add_node_to_devs(acl, tpg); -	core_tpg_add_node_to_devs(acl, tpg); - -	spin_lock_bh(&tpg->acl_node_lock); +	spin_lock_irq(&tpg->acl_node_lock);  	list_add_tail(&acl->acl_list, &tpg->acl_node_list);  	tpg->num_node_acls++; -	spin_unlock_bh(&tpg->acl_node_lock); +	spin_unlock_irq(&tpg->acl_node_lock);  	pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"  		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(), @@ -354,7 +362,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(  {  	struct se_node_acl *acl = NULL; -	spin_lock_bh(&tpg->acl_node_lock); +	spin_lock_irq(&tpg->acl_node_lock);  	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);  	if (acl) {  		if (acl->dynamic_node_acl) { @@ -362,7 +370,7 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(  			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"  				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),  				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname); -			spin_unlock_bh(&tpg->acl_node_lock); +			spin_unlock_irq(&tpg->acl_node_lock);  			/*  			 * Release the locally allocated struct se_node_acl  			 * because * core_tpg_add_initiator_node_acl() returned @@ -378,10 +386,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(  			" Node %s already exists for TPG %u, ignoring"  			" request.\n",  tpg->se_tpg_tfo->get_fabric_name(),  			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); -		spin_unlock_bh(&tpg->acl_node_lock); +		spin_unlock_irq(&tpg->acl_node_lock);  		return ERR_PTR(-EEXIST);  	} -	spin_unlock_bh(&tpg->acl_node_lock); +	spin_unlock_irq(&tpg->acl_node_lock);  	if (!se_nacl) {  		pr_err("struct se_node_acl pointer is NULL\n"); @@ -418,10 +426,10 @@ struct se_node_acl *core_tpg_add_initiator_node_acl(  		return ERR_PTR(-EINVAL);  	} -	spin_lock_bh(&tpg->acl_node_lock); +	spin_lock_irq(&tpg->acl_node_lock);  	list_add_tail(&acl->acl_list, &tpg->acl_node_list);  	tpg->num_node_acls++; -	spin_unlock_bh(&tpg->acl_node_lock); +	spin_unlock_irq(&tpg->acl_node_lock);  done:  	pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s" @@ -445,14 +453,14 @@ int core_tpg_del_initiator_node_acl(  	struct se_session *sess, *sess_tmp;  	int dynamic_acl = 0; -	spin_lock_bh(&tpg->acl_node_lock); +	spin_lock_irq(&tpg->acl_node_lock);  	if (acl->dynamic_node_acl) {  		acl->dynamic_node_acl = 0;  		dynamic_acl = 1;  	}  	list_del(&acl->acl_list);  	tpg->num_node_acls--; -	spin_unlock_bh(&tpg->acl_node_lock); +	spin_unlock_irq(&tpg->acl_node_lock);  	spin_lock_bh(&tpg->session_lock);  	list_for_each_entry_safe(sess, sess_tmp, @@ -503,21 +511,21 @@ int core_tpg_set_initiator_node_queue_depth(  	struct se_node_acl *acl;  	int dynamic_acl = 0; -	spin_lock_bh(&tpg->acl_node_lock); +	spin_lock_irq(&tpg->acl_node_lock);  	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);  	if (!acl) {  		pr_err("Access Control List entry for %s Initiator"  			" Node %s does not exists for TPG %hu, ignoring"  			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),  			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg)); -		spin_unlock_bh(&tpg->acl_node_lock); +		spin_unlock_irq(&tpg->acl_node_lock);  		return -ENODEV;  	}  	if (acl->dynamic_node_acl) {  		acl->dynamic_node_acl = 0;  		dynamic_acl = 1;  	} -	spin_unlock_bh(&tpg->acl_node_lock); +	spin_unlock_irq(&tpg->acl_node_lock);  	spin_lock_bh(&tpg->session_lock);  	
list_for_each_entry(sess, &tpg->tpg_sess_list, sess_list) { @@ -533,10 +541,10 @@ int core_tpg_set_initiator_node_queue_depth(  				tpg->se_tpg_tfo->get_fabric_name(), initiatorname);  			spin_unlock_bh(&tpg->session_lock); -			spin_lock_bh(&tpg->acl_node_lock); +			spin_lock_irq(&tpg->acl_node_lock);  			if (dynamic_acl)  				acl->dynamic_node_acl = 1; -			spin_unlock_bh(&tpg->acl_node_lock); +			spin_unlock_irq(&tpg->acl_node_lock);  			return -EEXIST;  		}  		/* @@ -571,10 +579,10 @@ int core_tpg_set_initiator_node_queue_depth(  		if (init_sess)  			tpg->se_tpg_tfo->close_session(init_sess); -		spin_lock_bh(&tpg->acl_node_lock); +		spin_lock_irq(&tpg->acl_node_lock);  		if (dynamic_acl)  			acl->dynamic_node_acl = 1; -		spin_unlock_bh(&tpg->acl_node_lock); +		spin_unlock_irq(&tpg->acl_node_lock);  		return -EINVAL;  	}  	spin_unlock_bh(&tpg->session_lock); @@ -585,15 +593,15 @@ int core_tpg_set_initiator_node_queue_depth(  	if (init_sess)  		tpg->se_tpg_tfo->close_session(init_sess); -	pr_debug("Successfuly changed queue depth to: %d for Initiator" +	pr_debug("Successfully changed queue depth to: %d for Initiator"  		" Node: %s on %s Target Portal Group: %u\n", queue_depth,  		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),  		tpg->se_tpg_tfo->tpg_get_tag(tpg)); -	spin_lock_bh(&tpg->acl_node_lock); +	spin_lock_irq(&tpg->acl_node_lock);  	if (dynamic_acl)  		acl->dynamic_node_acl = 1; -	spin_unlock_bh(&tpg->acl_node_lock); +	spin_unlock_irq(&tpg->acl_node_lock);  	return 0;  } @@ -717,20 +725,20 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)  	 * not been released because of TFO->tpg_check_demo_mode_cache() == 1  	 * in transport_deregister_session().  	 */ -	spin_lock_bh(&se_tpg->acl_node_lock); +	spin_lock_irq(&se_tpg->acl_node_lock);  	list_for_each_entry_safe(nacl, nacl_tmp, &se_tpg->acl_node_list,  			acl_list) {  		list_del(&nacl->acl_list);  		se_tpg->num_node_acls--; -		spin_unlock_bh(&se_tpg->acl_node_lock); +		spin_unlock_irq(&se_tpg->acl_node_lock);  		core_tpg_wait_for_nacl_pr_ref(nacl);  		core_free_device_list_for_node(nacl, se_tpg);  		se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg, nacl); -		spin_lock_bh(&se_tpg->acl_node_lock); +		spin_lock_irq(&se_tpg->acl_node_lock);  	} -	spin_unlock_bh(&se_tpg->acl_node_lock); +	spin_unlock_irq(&se_tpg->acl_node_lock);  	if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)  		core_tpg_release_virtual_lun0(se_tpg); diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 89760329d5d..d7525580448 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -26,7 +26,6 @@   *   ******************************************************************************/ -#include <linux/version.h>  #include <linux/net.h>  #include <linux/delay.h>  #include <linux/string.h> @@ -55,11 +54,11 @@  #include "target_core_alua.h"  #include "target_core_hba.h"  #include "target_core_pr.h" -#include "target_core_scdb.h"  #include "target_core_ua.h"  static int sub_api_initialized; +static struct workqueue_struct *target_completion_wq;  static struct kmem_cache *se_cmd_cache;  static struct kmem_cache *se_sess_cache;  struct kmem_cache *se_tmr_req_cache; @@ -70,30 +69,19 @@ struct kmem_cache *t10_alua_lu_gp_mem_cache;  struct kmem_cache *t10_alua_tg_pt_gp_cache;  struct kmem_cache *t10_alua_tg_pt_gp_mem_cache; -/* Used for transport_dev_get_map_*() */ -typedef int (*map_func_t)(struct se_task *, u32); -  static int transport_generic_write_pending(struct se_cmd 
*);  static int transport_processing_thread(void *param);  static int __transport_execute_tasks(struct se_device *dev);  static void transport_complete_task_attr(struct se_cmd *cmd); -static int transport_complete_qf(struct se_cmd *cmd);  static void transport_handle_queue_full(struct se_cmd *cmd, -		struct se_device *dev, int (*qf_callback)(struct se_cmd *)); -static void transport_direct_request_timeout(struct se_cmd *cmd); +		struct se_device *dev);  static void transport_free_dev_tasks(struct se_cmd *cmd); -static u32 transport_allocate_tasks(struct se_cmd *cmd, -		unsigned long long starting_lba, -		enum dma_data_direction data_direction, -		struct scatterlist *sgl, unsigned int nents);  static int transport_generic_get_mem(struct se_cmd *cmd); -static int transport_generic_remove(struct se_cmd *cmd, -		int session_reinstatement); -static void transport_release_fe_cmd(struct se_cmd *cmd); -static void transport_remove_cmd_from_queue(struct se_cmd *cmd, -		struct se_queue_obj *qobj); +static void transport_put_cmd(struct se_cmd *cmd); +static void transport_remove_cmd_from_queue(struct se_cmd *cmd);  static int transport_set_sense_codes(struct se_cmd *cmd, u8 asc, u8 ascq); -static void transport_stop_all_task_timers(struct se_cmd *cmd); +static void transport_generic_request_failure(struct se_cmd *, int, int); +static void target_complete_ok_work(struct work_struct *work);  int init_se_kmem_caches(void)  { @@ -109,7 +97,7 @@ int init_se_kmem_caches(void)  	if (!se_tmr_req_cache) {  		pr_err("kmem_cache_create() for struct se_tmr_req"  				" failed\n"); -		goto out; +		goto out_free_cmd_cache;  	}  	se_sess_cache = kmem_cache_create("se_sess_cache",  			sizeof(struct se_session), __alignof__(struct se_session), @@ -117,14 +105,14 @@ int init_se_kmem_caches(void)  	if (!se_sess_cache) {  		pr_err("kmem_cache_create() for struct se_session"  				" failed\n"); -		goto out; +		goto out_free_tmr_req_cache;  	}  	se_ua_cache = kmem_cache_create("se_ua_cache",  			sizeof(struct se_ua), __alignof__(struct se_ua),  			0, NULL);  	if (!se_ua_cache) {  		pr_err("kmem_cache_create() for struct se_ua failed\n"); -		goto out; +		goto out_free_sess_cache;  	}  	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",  			sizeof(struct t10_pr_registration), @@ -132,7 +120,7 @@ int init_se_kmem_caches(void)  	if (!t10_pr_reg_cache) {  		pr_err("kmem_cache_create() for struct t10_pr_registration"  				" failed\n"); -		goto out; +		goto out_free_ua_cache;  	}  	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",  			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp), @@ -140,7 +128,7 @@ int init_se_kmem_caches(void)  	if (!t10_alua_lu_gp_cache) {  		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"  				" failed\n"); -		goto out; +		goto out_free_pr_reg_cache;  	}  	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",  			sizeof(struct t10_alua_lu_gp_member), @@ -148,7 +136,7 @@ int init_se_kmem_caches(void)  	if (!t10_alua_lu_gp_mem_cache) {  		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"  				"cache failed\n"); -		goto out; +		goto out_free_lu_gp_cache;  	}  	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",  			sizeof(struct t10_alua_tg_pt_gp), @@ -156,7 +144,7 @@ int init_se_kmem_caches(void)  	if (!t10_alua_tg_pt_gp_cache) {  		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"  				"cache failed\n"); -		goto out; +		goto out_free_lu_gp_mem_cache;  	}  	t10_alua_tg_pt_gp_mem_cache = kmem_cache_create(  			
"t10_alua_tg_pt_gp_mem_cache", @@ -166,34 +154,41 @@ int init_se_kmem_caches(void)  	if (!t10_alua_tg_pt_gp_mem_cache) {  		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"  				"mem_t failed\n"); -		goto out; +		goto out_free_tg_pt_gp_cache;  	} +	target_completion_wq = alloc_workqueue("target_completion", +					       WQ_MEM_RECLAIM, 0); +	if (!target_completion_wq) +		goto out_free_tg_pt_gp_mem_cache; +  	return 0; + +out_free_tg_pt_gp_mem_cache: +	kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache); +out_free_tg_pt_gp_cache: +	kmem_cache_destroy(t10_alua_tg_pt_gp_cache); +out_free_lu_gp_mem_cache: +	kmem_cache_destroy(t10_alua_lu_gp_mem_cache); +out_free_lu_gp_cache: +	kmem_cache_destroy(t10_alua_lu_gp_cache); +out_free_pr_reg_cache: +	kmem_cache_destroy(t10_pr_reg_cache); +out_free_ua_cache: +	kmem_cache_destroy(se_ua_cache); +out_free_sess_cache: +	kmem_cache_destroy(se_sess_cache); +out_free_tmr_req_cache: +	kmem_cache_destroy(se_tmr_req_cache); +out_free_cmd_cache: +	kmem_cache_destroy(se_cmd_cache);  out: -	if (se_cmd_cache) -		kmem_cache_destroy(se_cmd_cache); -	if (se_tmr_req_cache) -		kmem_cache_destroy(se_tmr_req_cache); -	if (se_sess_cache) -		kmem_cache_destroy(se_sess_cache); -	if (se_ua_cache) -		kmem_cache_destroy(se_ua_cache); -	if (t10_pr_reg_cache) -		kmem_cache_destroy(t10_pr_reg_cache); -	if (t10_alua_lu_gp_cache) -		kmem_cache_destroy(t10_alua_lu_gp_cache); -	if (t10_alua_lu_gp_mem_cache) -		kmem_cache_destroy(t10_alua_lu_gp_mem_cache); -	if (t10_alua_tg_pt_gp_cache) -		kmem_cache_destroy(t10_alua_tg_pt_gp_cache); -	if (t10_alua_tg_pt_gp_mem_cache) -		kmem_cache_destroy(t10_alua_tg_pt_gp_mem_cache);  	return -ENOMEM;  }  void release_se_kmem_caches(void)  { +	destroy_workqueue(target_completion_wq);  	kmem_cache_destroy(se_cmd_cache);  	kmem_cache_destroy(se_tmr_req_cache);  	kmem_cache_destroy(se_sess_cache); @@ -234,10 +229,13 @@ void transport_init_queue_obj(struct se_queue_obj *qobj)  }  EXPORT_SYMBOL(transport_init_queue_obj); -static int transport_subsystem_reqmods(void) +void transport_subsystem_check_init(void)  {  	int ret; +	if (sub_api_initialized) +		return; +  	ret = request_module("target_core_iblock");  	if (ret != 0)  		pr_err("Unable to load target_core_iblock\n"); @@ -254,24 +252,8 @@ static int transport_subsystem_reqmods(void)  	if (ret != 0)  		pr_err("Unable to load target_core_stgt\n"); -	return 0; -} - -int transport_subsystem_check_init(void) -{ -	int ret; - -	if (sub_api_initialized) -		return 0; -	/* -	 * Request the loading of known TCM subsystem plugins.. 
-	 */ -	ret = transport_subsystem_reqmods(); -	if (ret < 0) -		return ret; -  	sub_api_initialized = 1; -	return 0; +	return;  }  struct se_session *transport_init_session(void) @@ -389,17 +371,18 @@ void transport_deregister_session(struct se_session *se_sess)  {  	struct se_portal_group *se_tpg = se_sess->se_tpg;  	struct se_node_acl *se_nacl; +	unsigned long flags;  	if (!se_tpg) {  		transport_free_session(se_sess);  		return;  	} -	spin_lock_bh(&se_tpg->session_lock); +	spin_lock_irqsave(&se_tpg->session_lock, flags);  	list_del(&se_sess->sess_list);  	se_sess->se_tpg = NULL;  	se_sess->fabric_sess_ptr = NULL; -	spin_unlock_bh(&se_tpg->session_lock); +	spin_unlock_irqrestore(&se_tpg->session_lock, flags);  	/*  	 * Determine if we need to do extra work for this initiator node's @@ -407,22 +390,22 @@ void transport_deregister_session(struct se_session *se_sess)  	 */  	se_nacl = se_sess->se_node_acl;  	if (se_nacl) { -		spin_lock_bh(&se_tpg->acl_node_lock); +		spin_lock_irqsave(&se_tpg->acl_node_lock, flags);  		if (se_nacl->dynamic_node_acl) {  			if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(  					se_tpg)) {  				list_del(&se_nacl->acl_list);  				se_tpg->num_node_acls--; -				spin_unlock_bh(&se_tpg->acl_node_lock); +				spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);  				core_tpg_wait_for_nacl_pr_ref(se_nacl);  				core_free_device_list_for_node(se_nacl, se_tpg);  				se_tpg->se_tpg_tfo->tpg_release_fabric_acl(se_tpg,  						se_nacl); -				spin_lock_bh(&se_tpg->acl_node_lock); +				spin_lock_irqsave(&se_tpg->acl_node_lock, flags);  			}  		} -		spin_unlock_bh(&se_tpg->acl_node_lock); +		spin_unlock_irqrestore(&se_tpg->acl_node_lock, flags);  	}  	transport_free_session(se_sess); @@ -437,16 +420,15 @@ EXPORT_SYMBOL(transport_deregister_session);   */  static void transport_all_task_dev_remove_state(struct se_cmd *cmd)  { -	struct se_device *dev; +	struct se_device *dev = cmd->se_dev;  	struct se_task *task;  	unsigned long flags; -	list_for_each_entry(task, &cmd->t_task_list, t_list) { -		dev = task->se_dev; -		if (!dev) -			continue; +	if (!dev) +		return; -		if (atomic_read(&task->task_active)) +	list_for_each_entry(task, &cmd->t_task_list, t_list) { +		if (task->task_flags & TF_ACTIVE)  			continue;  		if (!atomic_read(&task->task_state_active)) @@ -488,8 +470,6 @@ static int transport_cmd_check_stop(  			" == TRUE for ITT: 0x%08x\n", __func__, __LINE__,  			cmd->se_tfo->get_task_tag(cmd)); -		cmd->deferred_t_state = cmd->t_state; -		cmd->t_state = TRANSPORT_DEFERRED_CMD;  		atomic_set(&cmd->t_transport_active, 0);  		if (transport_off == 2)  			transport_all_task_dev_remove_state(cmd); @@ -507,8 +487,6 @@ static int transport_cmd_check_stop(  			" TRUE for ITT: 0x%08x\n", __func__, __LINE__,  			cmd->se_tfo->get_task_tag(cmd)); -		cmd->deferred_t_state = cmd->t_state; -		cmd->t_state = TRANSPORT_DEFERRED_CMD;  		if (transport_off == 2)  			transport_all_task_dev_remove_state(cmd); @@ -593,35 +571,24 @@ check_lun:  void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)  { -	transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); -	transport_lun_remove_cmd(cmd); - -	if (transport_cmd_check_stop_to_fabric(cmd)) -		return; -	if (remove) -		transport_generic_remove(cmd, 0); -} - -void transport_cmd_finish_abort_tmr(struct se_cmd *cmd) -{ -	transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); +	if (!cmd->se_tmr_req) +		transport_lun_remove_cmd(cmd);  	if (transport_cmd_check_stop_to_fabric(cmd))  		return; - -	
transport_generic_remove(cmd, 0); +	if (remove) { +		transport_remove_cmd_from_queue(cmd); +		transport_put_cmd(cmd); +	}  } -static void transport_add_cmd_to_queue( -	struct se_cmd *cmd, -	int t_state) +static void transport_add_cmd_to_queue(struct se_cmd *cmd, int t_state, +		bool at_head)  {  	struct se_device *dev = cmd->se_dev;  	struct se_queue_obj *qobj = &dev->dev_queue_obj;  	unsigned long flags; -	INIT_LIST_HEAD(&cmd->se_queue_node); -  	if (t_state) {  		spin_lock_irqsave(&cmd->t_state_lock, flags);  		cmd->t_state = t_state; @@ -630,15 +597,20 @@ static void transport_add_cmd_to_queue(  	}  	spin_lock_irqsave(&qobj->cmd_queue_lock, flags); -	if (cmd->se_cmd_flags & SCF_EMULATE_QUEUE_FULL) { -		cmd->se_cmd_flags &= ~SCF_EMULATE_QUEUE_FULL; + +	/* If the cmd is already on the list, remove it before we add it */ +	if (!list_empty(&cmd->se_queue_node)) +		list_del(&cmd->se_queue_node); +	else +		atomic_inc(&qobj->queue_cnt); + +	if (at_head)  		list_add(&cmd->se_queue_node, &qobj->qobj_list); -	} else +	else  		list_add_tail(&cmd->se_queue_node, &qobj->qobj_list); -	atomic_inc(&cmd->t_transport_queue_active); +	atomic_set(&cmd->t_transport_queue_active, 1);  	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); -	atomic_inc(&qobj->queue_cnt);  	wake_up_interruptible(&qobj->thread_wq);  } @@ -655,19 +627,18 @@ transport_get_cmd_from_queue(struct se_queue_obj *qobj)  	}  	cmd = list_first_entry(&qobj->qobj_list, struct se_cmd, se_queue_node); -	atomic_dec(&cmd->t_transport_queue_active); +	atomic_set(&cmd->t_transport_queue_active, 0); -	list_del(&cmd->se_queue_node); +	list_del_init(&cmd->se_queue_node);  	atomic_dec(&qobj->queue_cnt);  	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);  	return cmd;  } -static void transport_remove_cmd_from_queue(struct se_cmd *cmd, -		struct se_queue_obj *qobj) +static void transport_remove_cmd_from_queue(struct se_cmd *cmd)  { -	struct se_cmd *t; +	struct se_queue_obj *qobj = &cmd->se_dev->dev_queue_obj;  	unsigned long flags;  	spin_lock_irqsave(&qobj->cmd_queue_lock, flags); @@ -675,14 +646,9 @@ static void transport_remove_cmd_from_queue(struct se_cmd *cmd,  		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);  		return;  	} - -	list_for_each_entry(t, &qobj->qobj_list, se_queue_node) -		if (t == cmd) { -			atomic_dec(&cmd->t_transport_queue_active); -			atomic_dec(&qobj->queue_cnt); -			list_del(&cmd->se_queue_node); -			break; -		} +	atomic_set(&cmd->t_transport_queue_active, 0); +	atomic_dec(&qobj->queue_cnt); +	list_del_init(&cmd->se_queue_node);  	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);  	if (atomic_read(&cmd->t_transport_queue_active)) { @@ -715,6 +681,13 @@ void transport_complete_sync_cache(struct se_cmd *cmd, int good)  }  EXPORT_SYMBOL(transport_complete_sync_cache); +static void target_complete_failure_work(struct work_struct *work) +{ +	struct se_cmd *cmd = container_of(work, struct se_cmd, work); + +	transport_generic_request_failure(cmd, 1, 1); +} +  /*	transport_complete_task():   *   *	Called from interrupt and non interrupt context depending @@ -723,8 +696,7 @@ EXPORT_SYMBOL(transport_complete_sync_cache);  void transport_complete_task(struct se_task *task, int success)  {  	struct se_cmd *cmd = task->task_se_cmd; -	struct se_device *dev = task->se_dev; -	int t_state; +	struct se_device *dev = cmd->se_dev;  	unsigned long flags;  #if 0  	pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task, @@ -734,7 +706,7 @@ void transport_complete_task(struct se_task *task, int success)  		atomic_inc(&dev->depth_left);  	
spin_lock_irqsave(&cmd->t_state_lock, flags); -	atomic_set(&task->task_active, 0); +	task->task_flags &= ~TF_ACTIVE;  	/*  	 * See if any sense data exists, if so set the TASK_SENSE flag. @@ -753,68 +725,39 @@ void transport_complete_task(struct se_task *task, int success)  	 * See if we are waiting for outstanding struct se_task  	 * to complete for an exception condition  	 */ -	if (atomic_read(&task->task_stop)) { -		/* -		 * Decrement cmd->t_se_count if this task had -		 * previously thrown its timeout exception handler. -		 */ -		if (atomic_read(&task->task_timeout)) { -			atomic_dec(&cmd->t_se_count); -			atomic_set(&task->task_timeout, 0); -		} +	if (task->task_flags & TF_REQUEST_STOP) {  		spin_unlock_irqrestore(&cmd->t_state_lock, flags); -  		complete(&task->task_stop_comp);  		return;  	}  	/* -	 * If the task's timeout handler has fired, use the t_task_cdbs_timeout -	 * left counter to determine when the struct se_cmd is ready to be queued to -	 * the processing thread. -	 */ -	if (atomic_read(&task->task_timeout)) { -		if (!atomic_dec_and_test( -				&cmd->t_task_cdbs_timeout_left)) { -			spin_unlock_irqrestore(&cmd->t_state_lock, -				flags); -			return; -		} -		t_state = TRANSPORT_COMPLETE_TIMEOUT; -		spin_unlock_irqrestore(&cmd->t_state_lock, flags); - -		transport_add_cmd_to_queue(cmd, t_state); -		return; -	} -	atomic_dec(&cmd->t_task_cdbs_timeout_left); - -	/*  	 * Decrement the outstanding t_task_cdbs_left count.  The last  	 * struct se_task from struct se_cmd will complete itself into the  	 * device queue depending upon int success.  	 */  	if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) { -		if (!success) -			cmd->t_tasks_failed = 1; -  		spin_unlock_irqrestore(&cmd->t_state_lock, flags);  		return;  	}  	if (!success || cmd->t_tasks_failed) { -		t_state = TRANSPORT_COMPLETE_FAILURE;  		if (!task->task_error_status) {  			task->task_error_status =  				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;  			cmd->transport_error_status =  				PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;  		} +		INIT_WORK(&cmd->work, target_complete_failure_work);  	} else {  		atomic_set(&cmd->t_transport_complete, 1); -		t_state = TRANSPORT_COMPLETE_OK; +		INIT_WORK(&cmd->work, target_complete_ok_work);  	} + +	cmd->t_state = TRANSPORT_COMPLETE; +	atomic_set(&cmd->t_transport_active, 1);  	spin_unlock_irqrestore(&cmd->t_state_lock, flags); -	transport_add_cmd_to_queue(cmd, t_state); +	queue_work(target_completion_wq, &cmd->work);  }  EXPORT_SYMBOL(transport_complete_task); @@ -901,14 +844,12 @@ static void __transport_add_task_to_execute_queue(  static void transport_add_tasks_to_state_queue(struct se_cmd *cmd)  { -	struct se_device *dev; +	struct se_device *dev = cmd->se_dev;  	struct se_task *task;  	unsigned long flags;  	spin_lock_irqsave(&cmd->t_state_lock, flags);  	list_for_each_entry(task, &cmd->t_task_list, t_list) { -		dev = task->se_dev; -  		if (atomic_read(&task->task_state_active))  			continue; @@ -933,38 +874,36 @@ static void transport_add_tasks_from_cmd(struct se_cmd *cmd)  	spin_lock_irqsave(&dev->execute_task_lock, flags);  	list_for_each_entry(task, &cmd->t_task_list, t_list) { -		if (atomic_read(&task->task_execute_queue)) +		if (!list_empty(&task->t_execute_list))  			continue;  		/*  		 * __transport_add_task_to_execute_queue() handles the  		 * SAM Task Attribute emulation if enabled  		 */  		__transport_add_task_to_execute_queue(task, task_prev, dev); -		atomic_set(&task->task_execute_queue, 1);  		task_prev = task;  	}  	spin_unlock_irqrestore(&dev->execute_task_lock, flags);  } -/*	
transport_remove_task_from_execute_queue(): - * - * - */ +void __transport_remove_task_from_execute_queue(struct se_task *task, +		struct se_device *dev) +{ +	list_del_init(&task->t_execute_list); +	atomic_dec(&dev->execute_tasks); +} +  void transport_remove_task_from_execute_queue(  	struct se_task *task,  	struct se_device *dev)  {  	unsigned long flags; -	if (atomic_read(&task->task_execute_queue) == 0) { -		dump_stack(); +	if (WARN_ON(list_empty(&task->t_execute_list)))  		return; -	}  	spin_lock_irqsave(&dev->execute_task_lock, flags); -	list_del(&task->t_execute_list); -	atomic_set(&task->task_execute_queue, 0); -	atomic_dec(&dev->execute_tasks); +	__transport_remove_task_from_execute_queue(task, dev);  	spin_unlock_irqrestore(&dev->execute_task_lock, flags);  } @@ -976,30 +915,26 @@ static void target_qf_do_work(struct work_struct *work)  {  	struct se_device *dev = container_of(work, struct se_device,  					qf_work_queue); +	LIST_HEAD(qf_cmd_list);  	struct se_cmd *cmd, *cmd_tmp;  	spin_lock_irq(&dev->qf_cmd_lock); -	list_for_each_entry_safe(cmd, cmd_tmp, &dev->qf_cmd_list, se_qf_node) { +	list_splice_init(&dev->qf_cmd_list, &qf_cmd_list); +	spin_unlock_irq(&dev->qf_cmd_lock); +	list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {  		list_del(&cmd->se_qf_node);  		atomic_dec(&dev->dev_qf_count);  		smp_mb__after_atomic_dec(); -		spin_unlock_irq(&dev->qf_cmd_lock);  		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"  			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, -			(cmd->t_state == TRANSPORT_COMPLETE_OK) ? "COMPLETE_OK" : +			(cmd->t_state == TRANSPORT_COMPLETE_QF_OK) ? "COMPLETE_OK" :  			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"  			: "UNKNOWN"); -		/* -		 * The SCF_EMULATE_QUEUE_FULL flag will be cleared once se_cmd -		 * has been added to head of queue -		 */ -		transport_add_cmd_to_queue(cmd, cmd->t_state); -		spin_lock_irq(&dev->qf_cmd_lock); +		transport_add_cmd_to_queue(cmd, cmd->t_state, true);  	} -	spin_unlock_irq(&dev->qf_cmd_lock);  }  unsigned char *transport_dump_cmd_direction(struct se_cmd *cmd) @@ -1053,41 +988,6 @@ void transport_dump_dev_state(  	*bl += sprintf(b + *bl, "        ");  } -/*	transport_release_all_cmds(): - * - * - */ -static void transport_release_all_cmds(struct se_device *dev) -{ -	struct se_cmd *cmd, *tcmd; -	int bug_out = 0, t_state; -	unsigned long flags; - -	spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags); -	list_for_each_entry_safe(cmd, tcmd, &dev->dev_queue_obj.qobj_list, -				se_queue_node) { -		t_state = cmd->t_state; -		list_del(&cmd->se_queue_node); -		spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, -				flags); - -		pr_err("Releasing ITT: 0x%08x, i_state: %u," -			" t_state: %u directly\n", -			cmd->se_tfo->get_task_tag(cmd), -			cmd->se_tfo->get_cmd_state(cmd), t_state); - -		transport_release_fe_cmd(cmd); -		bug_out = 1; - -		spin_lock_irqsave(&dev->dev_queue_obj.cmd_queue_lock, flags); -	} -	spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock, flags); -#if 0 -	if (bug_out) -		BUG(); -#endif -} -  void transport_dump_vpd_proto_id(  	struct t10_vpd *vpd,  	unsigned char *p_buf, @@ -1573,7 +1473,6 @@ transport_generic_get_task(struct se_cmd *cmd,  	INIT_LIST_HEAD(&task->t_state_list);  	init_completion(&task->task_stop_comp);  	task->task_se_cmd = cmd; -	task->se_dev = dev;  	task->task_data_direction = data_direction;  	return task; @@ -1598,6 +1497,7 @@ void transport_init_se_cmd(  	INIT_LIST_HEAD(&cmd->se_delayed_node);  	
INIT_LIST_HEAD(&cmd->se_ordered_node);  	INIT_LIST_HEAD(&cmd->se_qf_node); +	INIT_LIST_HEAD(&cmd->se_queue_node);  	INIT_LIST_HEAD(&cmd->t_task_list);  	init_completion(&cmd->transport_lun_fe_stop_comp); @@ -1641,21 +1541,6 @@ static int transport_check_alloc_task_attr(struct se_cmd *cmd)  	return 0;  } -void transport_free_se_cmd( -	struct se_cmd *se_cmd) -{ -	if (se_cmd->se_tmr_req) -		core_tmr_release_req(se_cmd->se_tmr_req); -	/* -	 * Check and free any extended CDB buffer that was allocated -	 */ -	if (se_cmd->t_task_cdb != se_cmd->__t_task_cdb) -		kfree(se_cmd->t_task_cdb); -} -EXPORT_SYMBOL(transport_free_se_cmd); - -static void transport_generic_wait_for_tasks(struct se_cmd *, int, int); -  /*	transport_generic_allocate_tasks():   *   *	Called from fabric RX Thread. @@ -1667,12 +1552,6 @@ int transport_generic_allocate_tasks(  	int ret;  	transport_generic_prepare_cdb(cdb); - -	/* -	 * This is needed for early exceptions. -	 */ -	cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; -  	/*  	 * Ensure that the received CDB is less than the max (252 + 8) bytes  	 * for VARIABLE_LENGTH_CMD @@ -1730,26 +1609,6 @@ int transport_generic_allocate_tasks(  EXPORT_SYMBOL(transport_generic_allocate_tasks);  /* - * Used by fabric module frontends not defining a TFO->new_cmd_map() - * to queue up a newly setup se_cmd w/ TRANSPORT_NEW_CMD statis - */ -int transport_generic_handle_cdb( -	struct se_cmd *cmd) -{ -	if (!cmd->se_lun) { -		dump_stack(); -		pr_err("cmd->se_lun is NULL\n"); -		return -EINVAL; -	} - -	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD); -	return 0; -} -EXPORT_SYMBOL(transport_generic_handle_cdb); - -static void transport_generic_request_failure(struct se_cmd *, -			struct se_device *, int, int); -/*   * Used by fabric module frontends to queue tasks directly.   * Many only be used from process context only   */ @@ -1773,7 +1632,7 @@ int transport_handle_cdb_direct(  	 * Set TRANSPORT_NEW_CMD state and cmd->t_transport_active=1 following  	 * transport_generic_handle_cdb*() -> transport_add_cmd_to_queue()  	 * in existing usage to ensure that outstanding descriptors are handled -	 * correctly during shutdown via transport_generic_wait_for_tasks() +	 * correctly during shutdown via transport_wait_for_tasks()  	 *  	 * Also, we don't take cmd->t_state_lock here as we only expect  	 * this to be called for initial descriptor submission. @@ -1790,7 +1649,7 @@ int transport_handle_cdb_direct(  		return 0;  	else if (ret < 0) {  		cmd->transport_error_status = ret; -		transport_generic_request_failure(cmd, NULL, 0, +		transport_generic_request_failure(cmd, 0,  				(cmd->data_direction != DMA_TO_DEVICE));  	}  	return 0; @@ -1811,7 +1670,7 @@ int transport_generic_handle_cdb_map(  		return -EINVAL;  	} -	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP); +	transport_add_cmd_to_queue(cmd, TRANSPORT_NEW_CMD_MAP, false);  	return 0;  }  EXPORT_SYMBOL(transport_generic_handle_cdb_map); @@ -1841,7 +1700,7 @@ int transport_generic_handle_data(  	if (transport_check_aborted_status(cmd, 1) != 0)  		return 0; -	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE); +	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_WRITE, false);  	return 0;  }  EXPORT_SYMBOL(transport_generic_handle_data); @@ -1853,12 +1712,7 @@ EXPORT_SYMBOL(transport_generic_handle_data);  int transport_generic_handle_tmr(  	struct se_cmd *cmd)  { -	/* -	 * This is needed for early exceptions. 
-	 */ -	cmd->transport_wait_for_tasks = &transport_generic_wait_for_tasks; - -	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR); +	transport_add_cmd_to_queue(cmd, TRANSPORT_PROCESS_TMR, false);  	return 0;  }  EXPORT_SYMBOL(transport_generic_handle_tmr); @@ -1866,10 +1720,36 @@ EXPORT_SYMBOL(transport_generic_handle_tmr);  void transport_generic_free_cmd_intr(  	struct se_cmd *cmd)  { -	transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR); +	transport_add_cmd_to_queue(cmd, TRANSPORT_FREE_CMD_INTR, false);  }  EXPORT_SYMBOL(transport_generic_free_cmd_intr); +/* + * If the task is active, request it to be stopped and sleep until it + * has completed. + */ +bool target_stop_task(struct se_task *task, unsigned long *flags) +{ +	struct se_cmd *cmd = task->task_se_cmd; +	bool was_active = false; + +	if (task->task_flags & TF_ACTIVE) { +		task->task_flags |= TF_REQUEST_STOP; +		spin_unlock_irqrestore(&cmd->t_state_lock, *flags); + +		pr_debug("Task %p waiting to complete\n", task); +		wait_for_completion(&task->task_stop_comp); +		pr_debug("Task %p stopped successfully\n", task); + +		spin_lock_irqsave(&cmd->t_state_lock, *flags); +		atomic_dec(&cmd->t_task_cdbs_left); +		task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP); +		was_active = true; +	} + +	return was_active; +} +  static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)  {  	struct se_task *task, *task_tmp; @@ -1885,51 +1765,26 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)  	spin_lock_irqsave(&cmd->t_state_lock, flags);  	list_for_each_entry_safe(task, task_tmp,  				&cmd->t_task_list, t_list) { -		pr_debug("task_no[%d] - Processing task %p\n", -				task->task_no, task); +		pr_debug("Processing task %p\n", task);  		/*  		 * If the struct se_task has not been sent and is not active,  		 * remove the struct se_task from the execution queue.  		 */ -		if (!atomic_read(&task->task_sent) && -		    !atomic_read(&task->task_active)) { +		if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) {  			spin_unlock_irqrestore(&cmd->t_state_lock,  					flags);  			transport_remove_task_from_execute_queue(task, -					task->se_dev); +					cmd->se_dev); -			pr_debug("task_no[%d] - Removed from execute queue\n", -				task->task_no); +			pr_debug("Task %p removed from execute queue\n", task);  			spin_lock_irqsave(&cmd->t_state_lock, flags);  			continue;  		} -		/* -		 * If the struct se_task is active, sleep until it is returned -		 * from the plugin. 
-		 */ -		if (atomic_read(&task->task_active)) { -			atomic_set(&task->task_stop, 1); -			spin_unlock_irqrestore(&cmd->t_state_lock, -					flags); - -			pr_debug("task_no[%d] - Waiting to complete\n", -				task->task_no); -			wait_for_completion(&task->task_stop_comp); -			pr_debug("task_no[%d] - Stopped successfully\n", -				task->task_no); - -			spin_lock_irqsave(&cmd->t_state_lock, flags); -			atomic_dec(&cmd->t_task_cdbs_left); - -			atomic_set(&task->task_active, 0); -			atomic_set(&task->task_stop, 0); -		} else { -			pr_debug("task_no[%d] - Did nothing\n", task->task_no); +		if (!target_stop_task(task, &flags)) { +			pr_debug("Task %p - did nothing\n", task);  			ret++;  		} - -		__transport_stop_task_timer(task, &flags);  	}  	spin_unlock_irqrestore(&cmd->t_state_lock, flags); @@ -1941,7 +1796,6 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)   */  static void transport_generic_request_failure(  	struct se_cmd *cmd, -	struct se_device *dev,  	int complete,  	int sc)  { @@ -1950,10 +1804,9 @@ static void transport_generic_request_failure(  	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"  		" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),  		cmd->t_task_cdb[0]); -	pr_debug("-----[ i_state: %d t_state/def_t_state:" -		" %d/%d transport_error_status: %d\n", +	pr_debug("-----[ i_state: %d t_state: %d transport_error_status: %d\n",  		cmd->se_tfo->get_cmd_state(cmd), -		cmd->t_state, cmd->deferred_t_state, +		cmd->t_state,  		cmd->transport_error_status);  	pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"  		" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --" @@ -1966,10 +1819,6 @@ static void transport_generic_request_failure(  		atomic_read(&cmd->t_transport_stop),  		atomic_read(&cmd->t_transport_sent)); -	transport_stop_all_task_timers(cmd); - -	if (dev) -		atomic_inc(&dev->depth_left);  	/*  	 * For SAM Task Attribute emulation for failed struct se_cmd  	 */ @@ -1977,7 +1826,6 @@ static void transport_generic_request_failure(  		transport_complete_task_attr(cmd);  	if (complete) { -		transport_direct_request_timeout(cmd);  		cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE;  	} @@ -2053,8 +1901,14 @@ static void transport_generic_request_failure(  		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;  		break;  	} - -	if (!sc) +	/* +	 * If a fabric does not define a cmd->se_tfo->new_cmd_map caller, +	 * make the call to transport_send_check_condition_and_sense() +	 * directly.  Otherwise expect the fabric to make the call to +	 * transport_send_check_condition_and_sense() after handling +	 * possible unsoliticied write data payloads. 
+	 */ +	if (!sc && !cmd->se_tfo->new_cmd_map)  		transport_new_cmd_failure(cmd);  	else {  		ret = transport_send_check_condition_and_sense(cmd, @@ -2070,46 +1924,8 @@ check_stop:  	return;  queue_full: -	cmd->t_state = TRANSPORT_COMPLETE_OK; -	transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf); -} - -static void transport_direct_request_timeout(struct se_cmd *cmd) -{ -	unsigned long flags; - -	spin_lock_irqsave(&cmd->t_state_lock, flags); -	if (!atomic_read(&cmd->t_transport_timeout)) { -		spin_unlock_irqrestore(&cmd->t_state_lock, flags); -		return; -	} -	if (atomic_read(&cmd->t_task_cdbs_timeout_left)) { -		spin_unlock_irqrestore(&cmd->t_state_lock, flags); -		return; -	} - -	atomic_sub(atomic_read(&cmd->t_transport_timeout), -		   &cmd->t_se_count); -	spin_unlock_irqrestore(&cmd->t_state_lock, flags); -} - -static void transport_generic_request_timeout(struct se_cmd *cmd) -{ -	unsigned long flags; - -	/* -	 * Reset cmd->t_se_count to allow transport_generic_remove() -	 * to allow last call to free memory resources. -	 */ -	spin_lock_irqsave(&cmd->t_state_lock, flags); -	if (atomic_read(&cmd->t_transport_timeout) > 1) { -		int tmp = (atomic_read(&cmd->t_transport_timeout) - 1); - -		atomic_sub(tmp, &cmd->t_se_count); -	} -	spin_unlock_irqrestore(&cmd->t_state_lock, flags); - -	transport_generic_remove(cmd, 0); +	cmd->t_state = TRANSPORT_COMPLETE_QF_OK; +	transport_handle_queue_full(cmd, cmd->se_dev);  }  static inline u32 transport_lba_21(unsigned char *cdb) @@ -2154,127 +1970,6 @@ static void transport_set_supported_SAM_opcode(struct se_cmd *se_cmd)  	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);  } -/* - * Called from interrupt context. - */ -static void transport_task_timeout_handler(unsigned long data) -{ -	struct se_task *task = (struct se_task *)data; -	struct se_cmd *cmd = task->task_se_cmd; -	unsigned long flags; - -	pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd); - -	spin_lock_irqsave(&cmd->t_state_lock, flags); -	if (task->task_flags & TF_STOP) { -		spin_unlock_irqrestore(&cmd->t_state_lock, flags); -		return; -	} -	task->task_flags &= ~TF_RUNNING; - -	/* -	 * Determine if transport_complete_task() has already been called. -	 */ -	if (!atomic_read(&task->task_active)) { -		pr_debug("transport task: %p cmd: %p timeout task_active" -				" == 0\n", task, cmd); -		spin_unlock_irqrestore(&cmd->t_state_lock, flags); -		return; -	} - -	atomic_inc(&cmd->t_se_count); -	atomic_inc(&cmd->t_transport_timeout); -	cmd->t_tasks_failed = 1; - -	atomic_set(&task->task_timeout, 1); -	task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT; -	task->task_scsi_status = 1; - -	if (atomic_read(&task->task_stop)) { -		pr_debug("transport task: %p cmd: %p timeout task_stop" -				" == 1\n", task, cmd); -		spin_unlock_irqrestore(&cmd->t_state_lock, flags); -		complete(&task->task_stop_comp); -		return; -	} - -	if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) { -		pr_debug("transport task: %p cmd: %p timeout non zero" -				" t_task_cdbs_left\n", task, cmd); -		spin_unlock_irqrestore(&cmd->t_state_lock, flags); -		return; -	} -	pr_debug("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n", -			task, cmd); - -	cmd->t_state = TRANSPORT_COMPLETE_FAILURE; -	spin_unlock_irqrestore(&cmd->t_state_lock, flags); - -	transport_add_cmd_to_queue(cmd, TRANSPORT_COMPLETE_FAILURE); -} - -/* - * Called with cmd->t_state_lock held. 
- */ -static void transport_start_task_timer(struct se_task *task) -{ -	struct se_device *dev = task->se_dev; -	int timeout; - -	if (task->task_flags & TF_RUNNING) -		return; -	/* -	 * If the task_timeout is disabled, exit now. -	 */ -	timeout = dev->se_sub_dev->se_dev_attrib.task_timeout; -	if (!timeout) -		return; - -	init_timer(&task->task_timer); -	task->task_timer.expires = (get_jiffies_64() + timeout * HZ); -	task->task_timer.data = (unsigned long) task; -	task->task_timer.function = transport_task_timeout_handler; - -	task->task_flags |= TF_RUNNING; -	add_timer(&task->task_timer); -#if 0 -	pr_debug("Starting task timer for cmd: %p task: %p seconds:" -		" %d\n", task->task_se_cmd, task, timeout); -#endif -} - -/* - * Called with spin_lock_irq(&cmd->t_state_lock) held. - */ -void __transport_stop_task_timer(struct se_task *task, unsigned long *flags) -{ -	struct se_cmd *cmd = task->task_se_cmd; - -	if (!task->task_flags & TF_RUNNING) -		return; - -	task->task_flags |= TF_STOP; -	spin_unlock_irqrestore(&cmd->t_state_lock, *flags); - -	del_timer_sync(&task->task_timer); - -	spin_lock_irqsave(&cmd->t_state_lock, *flags); -	task->task_flags &= ~TF_RUNNING; -	task->task_flags &= ~TF_STOP; -} - -static void transport_stop_all_task_timers(struct se_cmd *cmd) -{ -	struct se_task *task = NULL, *task_tmp; -	unsigned long flags; - -	spin_lock_irqsave(&cmd->t_state_lock, flags); -	list_for_each_entry_safe(task, task_tmp, -				&cmd->t_task_list, t_list) -		__transport_stop_task_timer(task, &flags); -	spin_unlock_irqrestore(&cmd->t_state_lock, flags); -} -  static inline int transport_tcq_window_closed(struct se_device *dev)  {  	if (dev->dev_tcq_window_closed++ < @@ -2379,7 +2074,7 @@ static int transport_execute_tasks(struct se_cmd *cmd)  	if (se_dev_check_online(cmd->se_orig_obj_ptr) != 0) {  		cmd->transport_error_status = PYX_TRANSPORT_LU_COMM_FAILURE; -		transport_generic_request_failure(cmd, NULL, 0, 1); +		transport_generic_request_failure(cmd, 0, 1);  		return 0;  	} @@ -2442,9 +2137,7 @@ check_depth:  	}  	task = list_first_entry(&dev->execute_task_list,  				struct se_task, t_execute_list); -	list_del(&task->t_execute_list); -	atomic_set(&task->task_execute_queue, 0); -	atomic_dec(&dev->execute_tasks); +	__transport_remove_task_from_execute_queue(task, dev);  	spin_unlock_irq(&dev->execute_task_lock);  	atomic_dec(&dev->depth_left); @@ -2452,15 +2145,13 @@ check_depth:  	cmd = task->task_se_cmd;  	spin_lock_irqsave(&cmd->t_state_lock, flags); -	atomic_set(&task->task_active, 1); -	atomic_set(&task->task_sent, 1); +	task->task_flags |= (TF_ACTIVE | TF_SENT);  	atomic_inc(&cmd->t_task_cdbs_sent);  	if (atomic_read(&cmd->t_task_cdbs_sent) ==  	    cmd->t_task_list_num) -		atomic_set(&cmd->transport_sent, 1); +		atomic_set(&cmd->t_transport_sent, 1); -	transport_start_task_timer(task);  	spin_unlock_irqrestore(&cmd->t_state_lock, flags);  	/*  	 * The struct se_cmd->transport_emulate_cdb() function pointer is used @@ -2471,10 +2162,13 @@ check_depth:  		error = cmd->transport_emulate_cdb(cmd);  		if (error != 0) {  			cmd->transport_error_status = error; -			atomic_set(&task->task_active, 0); -			atomic_set(&cmd->transport_sent, 0); +			spin_lock_irqsave(&cmd->t_state_lock, flags); +			task->task_flags &= ~TF_ACTIVE; +			spin_unlock_irqrestore(&cmd->t_state_lock, flags); +			atomic_set(&cmd->t_transport_sent, 0);  			transport_stop_tasks_for_cmd(cmd); -			transport_generic_request_failure(cmd, dev, 0, 1); +			atomic_inc(&dev->depth_left); +			transport_generic_request_failure(cmd, 0, 1);  			
goto check_depth;  		}  		/* @@ -2507,10 +2201,13 @@ check_depth:  		if (error != 0) {  			cmd->transport_error_status = error; -			atomic_set(&task->task_active, 0); -			atomic_set(&cmd->transport_sent, 0); +			spin_lock_irqsave(&cmd->t_state_lock, flags); +			task->task_flags &= ~TF_ACTIVE; +			spin_unlock_irqrestore(&cmd->t_state_lock, flags); +			atomic_set(&cmd->t_transport_sent, 0);  			transport_stop_tasks_for_cmd(cmd); -			transport_generic_request_failure(cmd, dev, 0, 1); +			atomic_inc(&dev->depth_left); +			transport_generic_request_failure(cmd, 0, 1);  		}  	} @@ -2532,8 +2229,6 @@ void transport_new_cmd_failure(struct se_cmd *se_cmd)  	spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);  } -static void transport_nop_wait_for_tasks(struct se_cmd *, int, int); -  static inline u32 transport_get_sectors_6(  	unsigned char *cdb,  	struct se_cmd *cmd, @@ -2746,13 +2441,16 @@ out:  static int transport_get_sense_data(struct se_cmd *cmd)  {  	unsigned char *buffer = cmd->sense_buffer, *sense_buffer = NULL; -	struct se_device *dev; +	struct se_device *dev = cmd->se_dev;  	struct se_task *task = NULL, *task_tmp;  	unsigned long flags;  	u32 offset = 0;  	WARN_ON(!cmd->se_lun); +	if (!dev) +		return 0; +  	spin_lock_irqsave(&cmd->t_state_lock, flags);  	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) {  		spin_unlock_irqrestore(&cmd->t_state_lock, flags); @@ -2761,14 +2459,9 @@ static int transport_get_sense_data(struct se_cmd *cmd)  	list_for_each_entry_safe(task, task_tmp,  				&cmd->t_task_list, t_list) { -  		if (!task->task_sense)  			continue; -		dev = task->se_dev; -		if (!dev) -			continue; -  		if (!dev->transport->get_sense_buffer) {  			pr_err("dev->transport->get_sense_buffer"  					" is NULL\n"); @@ -2777,9 +2470,9 @@ static int transport_get_sense_data(struct se_cmd *cmd)  		sense_buffer = dev->transport->get_sense_buffer(task);  		if (!sense_buffer) { -			pr_err("ITT[0x%08x]_TASK[%d]: Unable to locate" +			pr_err("ITT[0x%08x]_TASK[%p]: Unable to locate"  				" sense buffer for task with sense\n", -				cmd->se_tfo->get_task_tag(cmd), task->task_no); +				cmd->se_tfo->get_task_tag(cmd), task);  			continue;  		}  		spin_unlock_irqrestore(&cmd->t_state_lock, flags); @@ -2808,7 +2501,6 @@ static int transport_get_sense_data(struct se_cmd *cmd)  static int  transport_handle_reservation_conflict(struct se_cmd *cmd)  { -	cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;  	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;  	cmd->se_cmd_flags |= SCF_SCSI_RESERVATION_CONFLICT;  	cmd->scsi_status = SAM_STAT_RESERVATION_CONFLICT; @@ -2847,12 +2539,42 @@ static int transport_cmd_get_valid_sectors(struct se_cmd *cmd)  			" transport_dev_end_lba(): %llu\n",  			cmd->t_task_lba, sectors,  			transport_dev_end_lba(dev)); -		pr_err("  We should return CHECK_CONDITION" -		       " but we don't yet\n"); -		return 0; +		return -EINVAL;  	} -	return sectors; +	return 0; +} + +static int target_check_write_same_discard(unsigned char *flags, struct se_device *dev) +{ +	/* +	 * Determine if the received WRITE_SAME is used to for direct +	 * passthrough into Linux/SCSI with struct request via TCM/pSCSI +	 * or we are signaling the use of internal WRITE_SAME + UNMAP=1 +	 * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK code. 
+	 */ +	int passthrough = (dev->transport->transport_type == +				TRANSPORT_PLUGIN_PHBA_PDEV); + +	if (!passthrough) { +		if ((flags[0] & 0x04) || (flags[0] & 0x02)) { +			pr_err("WRITE_SAME PBDATA and LBDATA" +				" bits not supported for Block Discard" +				" Emulation\n"); +			return -ENOSYS; +		} +		/* +		 * Currently for the emulated case we only accept +		 * tpws with the UNMAP=1 bit set. +		 */ +		if (!(flags[0] & 0x08)) { +			pr_err("WRITE_SAME w/o UNMAP bit not" +				" supported for Block Discard Emulation\n"); +			return -ENOSYS; +		} +	} + +	return 0;  }  /*	transport_generic_cmd_sequencer(): @@ -2879,8 +2601,6 @@ static int transport_generic_cmd_sequencer(  	 * Check for an existing UNIT ATTENTION condition  	 */  	if (core_scsi3_ua_check(cmd, cdb) < 0) { -		cmd->transport_wait_for_tasks = -				&transport_nop_wait_for_tasks;  		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;  		cmd->scsi_sense_reason = TCM_CHECK_CONDITION_UNIT_ATTENTION;  		return -EINVAL; @@ -2890,7 +2610,6 @@ static int transport_generic_cmd_sequencer(  	 */  	ret = su_dev->t10_alua.alua_state_check(cmd, cdb, &alua_ascq);  	if (ret != 0) { -		cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;  		/*  		 * Set SCSI additional sense code (ASC) to 'LUN Not Accessible';  		 * The ALUA additional sense code qualifier (ASCQ) is determined @@ -2929,7 +2648,6 @@ static int transport_generic_cmd_sequencer(  		if (sector_ret)  			goto out_unsupported_cdb;  		size = transport_get_size(sectors, cdb, cmd); -		cmd->transport_split_cdb = &split_cdb_XX_6;  		cmd->t_task_lba = transport_lba_21(cdb);  		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;  		break; @@ -2938,7 +2656,6 @@ static int transport_generic_cmd_sequencer(  		if (sector_ret)  			goto out_unsupported_cdb;  		size = transport_get_size(sectors, cdb, cmd); -		cmd->transport_split_cdb = &split_cdb_XX_10;  		cmd->t_task_lba = transport_lba_32(cdb);  		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;  		break; @@ -2947,7 +2664,6 @@ static int transport_generic_cmd_sequencer(  		if (sector_ret)  			goto out_unsupported_cdb;  		size = transport_get_size(sectors, cdb, cmd); -		cmd->transport_split_cdb = &split_cdb_XX_12;  		cmd->t_task_lba = transport_lba_32(cdb);  		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;  		break; @@ -2956,7 +2672,6 @@ static int transport_generic_cmd_sequencer(  		if (sector_ret)  			goto out_unsupported_cdb;  		size = transport_get_size(sectors, cdb, cmd); -		cmd->transport_split_cdb = &split_cdb_XX_16;  		cmd->t_task_lba = transport_lba_64(cdb);  		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;  		break; @@ -2965,7 +2680,6 @@ static int transport_generic_cmd_sequencer(  		if (sector_ret)  			goto out_unsupported_cdb;  		size = transport_get_size(sectors, cdb, cmd); -		cmd->transport_split_cdb = &split_cdb_XX_6;  		cmd->t_task_lba = transport_lba_21(cdb);  		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB;  		break; @@ -2974,7 +2688,6 @@ static int transport_generic_cmd_sequencer(  		if (sector_ret)  			goto out_unsupported_cdb;  		size = transport_get_size(sectors, cdb, cmd); -		cmd->transport_split_cdb = &split_cdb_XX_10;  		cmd->t_task_lba = transport_lba_32(cdb);  		cmd->t_tasks_fua = (cdb[1] & 0x8);  		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; @@ -2984,7 +2697,6 @@ static int transport_generic_cmd_sequencer(  		if (sector_ret)  			goto out_unsupported_cdb;  		size = transport_get_size(sectors, cdb, cmd); -		cmd->transport_split_cdb = &split_cdb_XX_12;  		cmd->t_task_lba = transport_lba_32(cdb);  		cmd->t_tasks_fua = (cdb[1] & 0x8);  	
	cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; @@ -2994,7 +2706,6 @@ static int transport_generic_cmd_sequencer(  		if (sector_ret)  			goto out_unsupported_cdb;  		size = transport_get_size(sectors, cdb, cmd); -		cmd->transport_split_cdb = &split_cdb_XX_16;  		cmd->t_task_lba = transport_lba_64(cdb);  		cmd->t_tasks_fua = (cdb[1] & 0x8);  		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; @@ -3007,18 +2718,14 @@ static int transport_generic_cmd_sequencer(  		if (sector_ret)  			goto out_unsupported_cdb;  		size = transport_get_size(sectors, cdb, cmd); -		cmd->transport_split_cdb = &split_cdb_XX_10;  		cmd->t_task_lba = transport_lba_32(cdb);  		cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; -		passthrough = (dev->transport->transport_type == -				TRANSPORT_PLUGIN_PHBA_PDEV); -		/* -		 * Skip the remaining assignments for TCM/PSCSI passthrough -		 */ -		if (passthrough) -			break; + +		if (dev->transport->transport_type == +				TRANSPORT_PLUGIN_PHBA_PDEV) +			goto out_unsupported_cdb;  		/* -		 * Setup BIDI XOR callback to be run during transport_generic_complete_ok() +		 * Setup BIDI XOR callback to be run after I/O completion.  		 */  		cmd->transport_complete_callback = &transport_xor_callback;  		cmd->t_tasks_fua = (cdb[1] & 0x8); @@ -3042,19 +2749,14 @@ static int transport_generic_cmd_sequencer(  			 * Use WRITE_32 and READ_32 opcodes for the emulated  			 * XDWRITE_READ_32 logic.  			 */ -			cmd->transport_split_cdb = &split_cdb_XX_32;  			cmd->t_task_lba = transport_lba_64_ext(cdb);  			cmd->se_cmd_flags |= SCF_SCSI_DATA_SG_IO_CDB; -			/* -			 * Skip the remaining assignments for TCM/PSCSI passthrough -			 */  			if (passthrough) -				break; - +				goto out_unsupported_cdb;  			/* -			 * Setup BIDI XOR callback to be run during -			 * transport_generic_complete_ok() +			 * Setup BIDI XOR callback to be run during after I/O +			 * completion.  			 */  			cmd->transport_complete_callback = &transport_xor_callback;  			cmd->t_tasks_fua = (cdb[10] & 0x8); @@ -3065,7 +2767,7 @@ static int transport_generic_cmd_sequencer(  				goto out_unsupported_cdb;  			if (sectors) -				size = transport_get_size(sectors, cdb, cmd); +				size = transport_get_size(1, cdb, cmd);  			else {  				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"  				       " supported\n"); @@ -3075,27 +2777,9 @@ static int transport_generic_cmd_sequencer(  			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);  			cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; -			/* -			 * Skip the remaining assignments for TCM/PSCSI passthrough -			 */ -			if (passthrough) -				break; - -			if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) { -				pr_err("WRITE_SAME PBDATA and LBDATA" -					" bits not supported for Block Discard" -					" Emulation\n"); +			if (target_check_write_same_discard(&cdb[10], dev) < 0)  				goto out_invalid_cdb_field; -			} -			/* -			 * Currently for the emulated case we only accept -			 * tpws with the UNMAP=1 bit set. -			 */ -			if (!(cdb[10] & 0x08)) { -				pr_err("WRITE_SAME w/o UNMAP bit not" -					" supported for Block Discard Emulation\n"); -				goto out_invalid_cdb_field; -			} +  			break;  		default:  			pr_err("VARIABLE_LENGTH_CMD service action" @@ -3330,10 +3014,12 @@ static int transport_generic_cmd_sequencer(  		cmd->se_cmd_flags |= SCF_EMULATE_CDB_ASYNC;  		/*  		 * Check to ensure that LBA + Range does not exceed past end of -		 * device. 
+		 * device for IBLOCK and FILEIO ->do_sync_cache() backend calls  		 */ -		if (!transport_cmd_get_valid_sectors(cmd)) -			goto out_invalid_cdb_field; +		if ((cmd->t_task_lba != 0) || (sectors != 0)) { +			if (transport_cmd_get_valid_sectors(cmd) < 0) +				goto out_invalid_cdb_field; +		}  		break;  	case UNMAP:  		size = get_unaligned_be16(&cdb[7]); @@ -3345,40 +3031,38 @@ static int transport_generic_cmd_sequencer(  			goto out_unsupported_cdb;  		if (sectors) -			size = transport_get_size(sectors, cdb, cmd); +			size = transport_get_size(1, cdb, cmd);  		else {  			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");  			goto out_invalid_cdb_field;  		}  		cmd->t_task_lba = get_unaligned_be64(&cdb[2]); -		passthrough = (dev->transport->transport_type == -				TRANSPORT_PLUGIN_PHBA_PDEV); -		/* -		 * Determine if the received WRITE_SAME_16 is used to for direct -		 * passthrough into Linux/SCSI with struct request via TCM/pSCSI -		 * or we are signaling the use of internal WRITE_SAME + UNMAP=1 -		 * emulation for -> Linux/BLOCK disbard with TCM/IBLOCK and -		 * TCM/FILEIO subsystem plugin backstores. -		 */ -		if (!passthrough) { -			if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) { -				pr_err("WRITE_SAME PBDATA and LBDATA" -					" bits not supported for Block Discard" -					" Emulation\n"); -				goto out_invalid_cdb_field; -			} -			/* -			 * Currently for the emulated case we only accept -			 * tpws with the UNMAP=1 bit set. -			 */ -			if (!(cdb[1] & 0x08)) { -				pr_err("WRITE_SAME w/o UNMAP bit not " -					" supported for Block Discard Emulation\n"); -				goto out_invalid_cdb_field; -			} +		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; + +		if (target_check_write_same_discard(&cdb[1], dev) < 0) +			goto out_invalid_cdb_field; +		break; +	case WRITE_SAME: +		sectors = transport_get_sectors_10(cdb, cmd, §or_ret); +		if (sector_ret) +			goto out_unsupported_cdb; + +		if (sectors) +			size = transport_get_size(1, cdb, cmd); +		else { +			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n"); +			goto out_invalid_cdb_field;  		} + +		cmd->t_task_lba = get_unaligned_be32(&cdb[2]);  		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB; +		/* +		 * Follow sbcr26 with WRITE_SAME (10) and check for the existence +		 * of byte 1 bit 3 UNMAP instead of original reserved field +		 */ +		if (target_check_write_same_discard(&cdb[1], dev) < 0) +			goto out_invalid_cdb_field;  		break;  	case ALLOW_MEDIUM_REMOVAL:  	case GPCMD_CLOSE_TRACK: @@ -3412,7 +3096,6 @@ static int transport_generic_cmd_sequencer(  		pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"  			" 0x%02x, sending CHECK_CONDITION.\n",  			cmd->se_tfo->get_fabric_name(), cdb[0]); -		cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;  		goto out_unsupported_cdb;  	} @@ -3470,8 +3153,7 @@ out_invalid_cdb_field:  }  /* - * Called from transport_generic_complete_ok() and - * transport_generic_request_failure() to determine which dormant/delayed + * Called from I/O completion to determine which dormant/delayed   * and ordered cmds need to have their tasks added to the execution queue.   
*/  static void transport_complete_task_attr(struct se_cmd *cmd) @@ -3539,12 +3221,18 @@ static void transport_complete_task_attr(struct se_cmd *cmd)  		wake_up_interruptible(&dev->dev_queue_obj.thread_wq);  } -static int transport_complete_qf(struct se_cmd *cmd) +static void transport_complete_qf(struct se_cmd *cmd)  {  	int ret = 0; -	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) -		return cmd->se_tfo->queue_status(cmd); +	if (cmd->se_dev->dev_task_attr_type == SAM_TASK_ATTR_EMULATED) +		transport_complete_task_attr(cmd); + +	if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { +		ret = cmd->se_tfo->queue_status(cmd); +		if (ret) +			goto out; +	}  	switch (cmd->data_direction) {  	case DMA_FROM_DEVICE: @@ -3554,7 +3242,7 @@ static int transport_complete_qf(struct se_cmd *cmd)  		if (cmd->t_bidi_data_sg) {  			ret = cmd->se_tfo->queue_data_in(cmd);  			if (ret < 0) -				return ret; +				break;  		}  		/* Fall through for DMA_TO_DEVICE */  	case DMA_NONE: @@ -3564,17 +3252,20 @@ static int transport_complete_qf(struct se_cmd *cmd)  		break;  	} -	return ret; +out: +	if (ret < 0) { +		transport_handle_queue_full(cmd, cmd->se_dev); +		return; +	} +	transport_lun_remove_cmd(cmd); +	transport_cmd_check_stop_to_fabric(cmd);  }  static void transport_handle_queue_full(  	struct se_cmd *cmd, -	struct se_device *dev, -	int (*qf_callback)(struct se_cmd *)) +	struct se_device *dev)  {  	spin_lock_irq(&dev->qf_cmd_lock); -	cmd->se_cmd_flags |= SCF_EMULATE_QUEUE_FULL; -	cmd->transport_qf_callback = qf_callback;  	list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);  	atomic_inc(&dev->dev_qf_count);  	smp_mb__after_atomic_inc(); @@ -3583,9 +3274,11 @@ static void transport_handle_queue_full(  	schedule_work(&cmd->se_dev->qf_work_queue);  } -static void transport_generic_complete_ok(struct se_cmd *cmd) +static void target_complete_ok_work(struct work_struct *work)  { +	struct se_cmd *cmd = container_of(work, struct se_cmd, work);  	int reason = 0, ret; +  	/*  	 * Check if we need to move delayed/dormant tasks from cmds on the  	 * delayed execution list after a HEAD_OF_QUEUE or ORDERED Task @@ -3600,14 +3293,6 @@ static void transport_generic_complete_ok(struct se_cmd *cmd)  	if (atomic_read(&cmd->se_dev->dev_qf_count) != 0)  		schedule_work(&cmd->se_dev->qf_work_queue); -	if (cmd->transport_qf_callback) { -		ret = cmd->transport_qf_callback(cmd); -		if (ret < 0) -			goto queue_full; - -		cmd->transport_qf_callback = NULL; -		goto done; -	}  	/*  	 * Check if we need to retrieve a sense buffer from  	 * the struct se_cmd in question. 
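[Editorial note] The QUEUE_FULL handling above follows a park-and-retry pattern: when a fabric completion callback cannot be delivered, transport_handle_queue_full() appends the command to the device's qf_cmd_list and schedules the qf work item, and the work function later splices that list under the lock and re-drives each entry, re-parking it if the fabric is still backed up. The sketch below models only that pattern with made-up types (my_dev, my_cmd, resend); it is an illustration under those assumptions, not the driver's implementation.

/*
 * Illustrative sketch of the qf_cmd_list park-and-retry idiom.
 * Structures and names are hypothetical; module boilerplate omitted.
 */
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct my_dev {
	spinlock_t		qf_lock;
	struct list_head	qf_list;	/* parked commands */
	struct work_struct	qf_work;
};

struct my_cmd {
	struct list_head	qf_node;
	int			(*resend)(struct my_cmd *);
};

/* Park a command whose completion could not be delivered right now. */
static void my_handle_queue_full(struct my_dev *dev, struct my_cmd *cmd)
{
	spin_lock_irq(&dev->qf_lock);
	list_add_tail(&cmd->qf_node, &dev->qf_list);
	spin_unlock_irq(&dev->qf_lock);

	schedule_work(&dev->qf_work);
}

/* Work item: splice the parked list locally, then retry each command. */
static void my_qf_do_work(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, qf_work);
	struct my_cmd *cmd, *tmp;
	LIST_HEAD(local);

	spin_lock_irq(&dev->qf_lock);
	list_splice_init(&dev->qf_list, &local);
	spin_unlock_irq(&dev->qf_lock);

	list_for_each_entry_safe(cmd, tmp, &local, qf_node) {
		list_del(&cmd->qf_node);
		if (cmd->resend(cmd) < 0)
			/* still backed up: park it again for a later retry */
			my_handle_queue_full(dev, cmd);
	}
}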
@@ -3683,7 +3368,6 @@ static void transport_generic_complete_ok(struct se_cmd *cmd)  		break;  	} -done:  	transport_lun_remove_cmd(cmd);  	transport_cmd_check_stop_to_fabric(cmd);  	return; @@ -3691,34 +3375,35 @@ done:  queue_full:  	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"  		" data_direction: %d\n", cmd, cmd->data_direction); -	transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf); +	cmd->t_state = TRANSPORT_COMPLETE_QF_OK; +	transport_handle_queue_full(cmd, cmd->se_dev);  }  static void transport_free_dev_tasks(struct se_cmd *cmd)  {  	struct se_task *task, *task_tmp;  	unsigned long flags; +	LIST_HEAD(dispose_list);  	spin_lock_irqsave(&cmd->t_state_lock, flags);  	list_for_each_entry_safe(task, task_tmp,  				&cmd->t_task_list, t_list) { -		if (atomic_read(&task->task_active)) -			continue; +		if (!(task->task_flags & TF_ACTIVE)) +			list_move_tail(&task->t_list, &dispose_list); +	} +	spin_unlock_irqrestore(&cmd->t_state_lock, flags); + +	while (!list_empty(&dispose_list)) { +		task = list_first_entry(&dispose_list, struct se_task, t_list); -		kfree(task->task_sg_bidi); -		kfree(task->task_sg); +		if (task->task_sg != cmd->t_data_sg && +		    task->task_sg != cmd->t_bidi_data_sg) +			kfree(task->task_sg);  		list_del(&task->t_list); -		spin_unlock_irqrestore(&cmd->t_state_lock, flags); -		if (task->se_dev) -			task->se_dev->transport->free_task(task); -		else -			pr_err("task[%u] - task->se_dev is NULL\n", -				task->task_no); -		spin_lock_irqsave(&cmd->t_state_lock, flags); +		cmd->se_dev->transport->free_task(task);  	} -	spin_unlock_irqrestore(&cmd->t_state_lock, flags);  }  static inline void transport_free_sgl(struct scatterlist *sgl, int nents) @@ -3746,89 +3431,43 @@ static inline void transport_free_pages(struct se_cmd *cmd)  	cmd->t_bidi_data_nents = 0;  } -static inline void transport_release_tasks(struct se_cmd *cmd) -{ -	transport_free_dev_tasks(cmd); -} - -static inline int transport_dec_and_check(struct se_cmd *cmd) +/** + * transport_put_cmd - release a reference to a command + * @cmd:       command to release + * + * This routine releases our reference to the command and frees it if possible. 
+ */ +static void transport_put_cmd(struct se_cmd *cmd)  {  	unsigned long flags; +	int free_tasks = 0;  	spin_lock_irqsave(&cmd->t_state_lock, flags);  	if (atomic_read(&cmd->t_fe_count)) { -		if (!atomic_dec_and_test(&cmd->t_fe_count)) { -			spin_unlock_irqrestore(&cmd->t_state_lock, -					flags); -			return 1; -		} +		if (!atomic_dec_and_test(&cmd->t_fe_count)) +			goto out_busy;  	}  	if (atomic_read(&cmd->t_se_count)) { -		if (!atomic_dec_and_test(&cmd->t_se_count)) { -			spin_unlock_irqrestore(&cmd->t_state_lock, -					flags); -			return 1; -		} -	} -	spin_unlock_irqrestore(&cmd->t_state_lock, flags); - -	return 0; -} - -static void transport_release_fe_cmd(struct se_cmd *cmd) -{ -	unsigned long flags; - -	if (transport_dec_and_check(cmd)) -		return; - -	spin_lock_irqsave(&cmd->t_state_lock, flags); -	if (!atomic_read(&cmd->transport_dev_active)) { -		spin_unlock_irqrestore(&cmd->t_state_lock, flags); -		goto free_pages; +		if (!atomic_dec_and_test(&cmd->t_se_count)) +			goto out_busy;  	} -	atomic_set(&cmd->transport_dev_active, 0); -	transport_all_task_dev_remove_state(cmd); -	spin_unlock_irqrestore(&cmd->t_state_lock, flags); - -	transport_release_tasks(cmd); -free_pages: -	transport_free_pages(cmd); -	transport_free_se_cmd(cmd); -	cmd->se_tfo->release_cmd(cmd); -} - -static int -transport_generic_remove(struct se_cmd *cmd, int session_reinstatement) -{ -	unsigned long flags; -	if (transport_dec_and_check(cmd)) { -		if (session_reinstatement) { -			spin_lock_irqsave(&cmd->t_state_lock, flags); -			transport_all_task_dev_remove_state(cmd); -			spin_unlock_irqrestore(&cmd->t_state_lock, -					flags); -		} -		return 1; -	} - -	spin_lock_irqsave(&cmd->t_state_lock, flags); -	if (!atomic_read(&cmd->transport_dev_active)) { -		spin_unlock_irqrestore(&cmd->t_state_lock, flags); -		goto free_pages; +	if (atomic_read(&cmd->transport_dev_active)) { +		atomic_set(&cmd->transport_dev_active, 0); +		transport_all_task_dev_remove_state(cmd); +		free_tasks = 1;  	} -	atomic_set(&cmd->transport_dev_active, 0); -	transport_all_task_dev_remove_state(cmd);  	spin_unlock_irqrestore(&cmd->t_state_lock, flags); -	transport_release_tasks(cmd); +	if (free_tasks != 0) +		transport_free_dev_tasks(cmd); -free_pages:  	transport_free_pages(cmd);  	transport_release_cmd(cmd); -	return 0; +	return; +out_busy: +	spin_unlock_irqrestore(&cmd->t_state_lock, flags);  }  /* @@ -3870,64 +3509,6 @@ int transport_generic_map_mem_to_cmd(  }  EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); -static int transport_new_cmd_obj(struct se_cmd *cmd) -{ -	struct se_device *dev = cmd->se_dev; -	u32 task_cdbs; -	u32 rc; -	int set_counts = 1; - -	/* -	 * Setup any BIDI READ tasks and memory from -	 * cmd->t_mem_bidi_list so the READ struct se_tasks -	 * are queued first for the non pSCSI passthrough case. 
-	 */ -	if (cmd->t_bidi_data_sg && -	    (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV)) { -		rc = transport_allocate_tasks(cmd, -					      cmd->t_task_lba, -					      DMA_FROM_DEVICE, -					      cmd->t_bidi_data_sg, -					      cmd->t_bidi_data_nents); -		if (rc <= 0) { -			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; -			cmd->scsi_sense_reason = -				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; -			return PYX_TRANSPORT_LU_COMM_FAILURE; -		} -		atomic_inc(&cmd->t_fe_count); -		atomic_inc(&cmd->t_se_count); -		set_counts = 0; -	} -	/* -	 * Setup the tasks and memory from cmd->t_mem_list -	 * Note for BIDI transfers this will contain the WRITE payload -	 */ -	task_cdbs = transport_allocate_tasks(cmd, -					     cmd->t_task_lba, -					     cmd->data_direction, -					     cmd->t_data_sg, -					     cmd->t_data_nents); -	if (task_cdbs <= 0) { -		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; -		cmd->scsi_sense_reason = -			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; -		return PYX_TRANSPORT_LU_COMM_FAILURE; -	} - -	if (set_counts) { -		atomic_inc(&cmd->t_fe_count); -		atomic_inc(&cmd->t_se_count); -	} - -	cmd->t_task_list_num = task_cdbs; - -	atomic_set(&cmd->t_task_cdbs_left, task_cdbs); -	atomic_set(&cmd->t_task_cdbs_ex_left, task_cdbs); -	atomic_set(&cmd->t_task_cdbs_timeout_left, task_cdbs); -	return 0; -} -  void *transport_kmap_first_data_page(struct se_cmd *cmd)  {  	struct scatterlist *sg = cmd->t_data_sg; @@ -4028,8 +3609,6 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)  		if (!task->task_sg)  			continue; -		BUG_ON(!task->task_padded_sg); -  		if (!sg_first) {  			sg_first = task->task_sg;  			chained_nents = task->task_sg_nents; @@ -4037,9 +3616,17 @@ void transport_do_task_sg_chain(struct se_cmd *cmd)  			sg_chain(sg_prev, sg_prev_nents, task->task_sg);  			chained_nents += task->task_sg_nents;  		} - +		/* +		 * For the padded tasks, use the extra SGL vector allocated +		 * in transport_allocate_data_tasks() for the sg_prev_nents +		 * offset into sg_chain() above. +		 * +		 * We do not need the padding for the last task (or a single +		 * task), but in that case we will never use the sg_prev_nents +		 * value below which would be incorrect. 
+		 */ +		sg_prev_nents = (task->task_sg_nents + 1);  		sg_prev = task->task_sg; -		sg_prev_nents = task->task_sg_nents;  	}  	/*  	 * Setup the starting pointer and total t_tasks_sg_linked_no including @@ -4068,72 +3655,96 @@ EXPORT_SYMBOL(transport_do_task_sg_chain);  /*   * Break up cmd into chunks transport can handle   */ -static int transport_allocate_data_tasks( -	struct se_cmd *cmd, -	unsigned long long lba, +static int +transport_allocate_data_tasks(struct se_cmd *cmd,  	enum dma_data_direction data_direction, -	struct scatterlist *sgl, -	unsigned int sgl_nents) +	struct scatterlist *cmd_sg, unsigned int sgl_nents)  { -	unsigned char *cdb = NULL; -	struct se_task *task;  	struct se_device *dev = cmd->se_dev; -	unsigned long flags; -	int task_count, i, ret; -	sector_t sectors, dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; -	u32 sector_size = dev->se_sub_dev->se_dev_attrib.block_size; -	struct scatterlist *sg; -	struct scatterlist *cmd_sg; +	int task_count, i; +	unsigned long long lba; +	sector_t sectors, dev_max_sectors; +	u32 sector_size; + +	if (transport_cmd_get_valid_sectors(cmd) < 0) +		return -EINVAL; + +	dev_max_sectors = dev->se_sub_dev->se_dev_attrib.max_sectors; +	sector_size = dev->se_sub_dev->se_dev_attrib.block_size;  	WARN_ON(cmd->data_length % sector_size); + +	lba = cmd->t_task_lba;  	sectors = DIV_ROUND_UP(cmd->data_length, sector_size);  	task_count = DIV_ROUND_UP_SECTOR_T(sectors, dev_max_sectors); -	 -	cmd_sg = sgl; -	for (i = 0; i < task_count; i++) { -		unsigned int task_size; -		int count; + +	/* +	 * If we need just a single task reuse the SG list in the command +	 * and avoid a lot of work. +	 */ +	if (task_count == 1) { +		struct se_task *task; +		unsigned long flags;  		task = transport_generic_get_task(cmd, data_direction);  		if (!task)  			return -ENOMEM; +		task->task_sg = cmd_sg; +		task->task_sg_nents = sgl_nents; +  		task->task_lba = lba; -		task->task_sectors = min(sectors, dev_max_sectors); +		task->task_sectors = sectors;  		task->task_size = task->task_sectors * sector_size; -		cdb = dev->transport->get_cdb(task); -		BUG_ON(!cdb); +		spin_lock_irqsave(&cmd->t_state_lock, flags); +		list_add_tail(&task->t_list, &cmd->t_task_list); +		spin_unlock_irqrestore(&cmd->t_state_lock, flags); -		memcpy(cdb, cmd->t_task_cdb, -		       scsi_command_size(cmd->t_task_cdb)); +		return task_count; +	} -		/* Update new cdb with updated lba/sectors */ -		cmd->transport_split_cdb(task->task_lba, task->task_sectors, cdb); +	for (i = 0; i < task_count; i++) { +		struct se_task *task; +		unsigned int task_size, task_sg_nents_padded; +		struct scatterlist *sg; +		unsigned long flags; +		int count; + +		task = transport_generic_get_task(cmd, data_direction); +		if (!task) +			return -ENOMEM; +		task->task_lba = lba; +		task->task_sectors = min(sectors, dev_max_sectors); +		task->task_size = task->task_sectors * sector_size; + +		/* +		 * This now assumes that passed sg_ents are in PAGE_SIZE chunks +		 * in order to calculate the number per task SGL entries +		 */ +		task->task_sg_nents = DIV_ROUND_UP(task->task_size, PAGE_SIZE);  		/*  		 * Check if the fabric module driver is requesting that all  		 * struct se_task->task_sg[] be chained together..  If so,  		 * then allocate an extra padding SG entry for linking and -		 * marking the end of the chained SGL. -		 * Possibly over-allocate task sgl size by using cmd sgl size. -		 * It's so much easier and only a waste when task_count > 1. -		 * That is extremely rare. 
+		 * marking the end of the chained SGL for every task except +		 * the last one for (task_count > 1) operation, or skipping +		 * the extra padding for the (task_count == 1) case.  		 */ -		task->task_sg_nents = sgl_nents; -		if (cmd->se_tfo->task_sg_chaining) { -			task->task_sg_nents++; -			task->task_padded_sg = 1; -		} +		if (cmd->se_tfo->task_sg_chaining && (i < (task_count - 1))) { +			task_sg_nents_padded = (task->task_sg_nents + 1); +		} else +			task_sg_nents_padded = task->task_sg_nents;  		task->task_sg = kmalloc(sizeof(struct scatterlist) * -					task->task_sg_nents, GFP_KERNEL); +					task_sg_nents_padded, GFP_KERNEL);  		if (!task->task_sg) {  			cmd->se_dev->transport->free_task(task);  			return -ENOMEM;  		} -		sg_init_table(task->task_sg, task->task_sg_nents); +		sg_init_table(task->task_sg, task_sg_nents_padded);  		task_size = task->task_size; @@ -4154,20 +3765,6 @@ static int transport_allocate_data_tasks(  		list_add_tail(&task->t_list, &cmd->t_task_list);  		spin_unlock_irqrestore(&cmd->t_state_lock, flags);  	} -	/* -	 * Now perform the memory map of task->task_sg[] into backend -	 * subsystem memory.. -	 */ -	list_for_each_entry(task, &cmd->t_task_list, t_list) { -		if (atomic_read(&task->task_sent)) -			continue; -		if (!dev->transport->map_data_SG) -			continue; - -		ret = dev->transport->map_data_SG(task); -		if (ret < 0) -			return 0; -	}  	return task_count;  } @@ -4175,30 +3772,14 @@ static int transport_allocate_data_tasks(  static int  transport_allocate_control_task(struct se_cmd *cmd)  { -	struct se_device *dev = cmd->se_dev; -	unsigned char *cdb;  	struct se_task *task;  	unsigned long flags; -	int ret = 0;  	task = transport_generic_get_task(cmd, cmd->data_direction);  	if (!task)  		return -ENOMEM; -	cdb = dev->transport->get_cdb(task); -	BUG_ON(!cdb); -	memcpy(cdb, cmd->t_task_cdb, -	       scsi_command_size(cmd->t_task_cdb)); - -	task->task_sg = kmalloc(sizeof(struct scatterlist) * cmd->t_data_nents, -				GFP_KERNEL); -	if (!task->task_sg) { -		cmd->se_dev->transport->free_task(task); -		return -ENOMEM; -	} - -	memcpy(task->task_sg, cmd->t_data_sg, -	       sizeof(struct scatterlist) * cmd->t_data_nents); +	task->task_sg = cmd->t_data_sg;  	task->task_size = cmd->data_length;  	task->task_sg_nents = cmd->t_data_nents; @@ -4206,50 +3787,20 @@ transport_allocate_control_task(struct se_cmd *cmd)  	list_add_tail(&task->t_list, &cmd->t_task_list);  	spin_unlock_irqrestore(&cmd->t_state_lock, flags); -	if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) { -		if (dev->transport->map_control_SG) -			ret = dev->transport->map_control_SG(task); -	} else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) { -		if (dev->transport->cdb_none) -			ret = dev->transport->cdb_none(task); -	} else { -		pr_err("target: Unknown control cmd type!\n"); -		BUG(); -	} -  	/* Success! 
Return number of tasks allocated */ -	if (ret == 0) -		return 1; -	return ret; -} - -static u32 transport_allocate_tasks( -	struct se_cmd *cmd, -	unsigned long long lba, -	enum dma_data_direction data_direction, -	struct scatterlist *sgl, -	unsigned int sgl_nents) -{ -	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) -		return transport_allocate_data_tasks(cmd, lba, data_direction, -						     sgl, sgl_nents); -	else -		return transport_allocate_control_task(cmd); - +	return 1;  } - -/*	 transport_generic_new_cmd(): Called from transport_processing_thread() - * - *	 Allocate storage transport resources from a set of values predefined - *	 by transport_generic_cmd_sequencer() from the iSCSI Target RX process. - *	 Any non zero return here is treated as an "out of resource' op here. +/* + * Allocate any required ressources to execute the command, and either place + * it on the execution queue if possible.  For writes we might not have the + * payload yet, thus notify the fabric via a call to ->write_pending instead.   */ -	/* -	 * Generate struct se_task(s) and/or their payloads for this CDB. -	 */  int transport_generic_new_cmd(struct se_cmd *cmd)  { +	struct se_device *dev = cmd->se_dev; +	int task_cdbs, task_cdbs_bidi = 0; +	int set_counts = 1;  	int ret = 0;  	/* @@ -4263,16 +3814,45 @@ int transport_generic_new_cmd(struct se_cmd *cmd)  		if (ret < 0)  			return ret;  	} +  	/* -	 * Call transport_new_cmd_obj() to invoke transport_allocate_tasks() for -	 * control or data CDB types, and perform the map to backend subsystem -	 * code from SGL memory allocated here by transport_generic_get_mem(), or -	 * via pre-existing SGL memory setup explictly by fabric module code with -	 * transport_generic_map_mem_to_cmd(). +	 * For BIDI command set up the read tasks first.  	 */ -	ret = transport_new_cmd_obj(cmd); -	if (ret < 0) -		return ret; +	if (cmd->t_bidi_data_sg && +	    dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) { +		BUG_ON(!(cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)); + +		task_cdbs_bidi = transport_allocate_data_tasks(cmd, +				DMA_FROM_DEVICE, cmd->t_bidi_data_sg, +				cmd->t_bidi_data_nents); +		if (task_cdbs_bidi <= 0) +			goto out_fail; + +		atomic_inc(&cmd->t_fe_count); +		atomic_inc(&cmd->t_se_count); +		set_counts = 0; +	} + +	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) { +		task_cdbs = transport_allocate_data_tasks(cmd, +					cmd->data_direction, cmd->t_data_sg, +					cmd->t_data_nents); +	} else { +		task_cdbs = transport_allocate_control_task(cmd); +	} + +	if (task_cdbs <= 0) +		goto out_fail; + +	if (set_counts) { +		atomic_inc(&cmd->t_fe_count); +		atomic_inc(&cmd->t_se_count); +	} + +	cmd->t_task_list_num = (task_cdbs + task_cdbs_bidi); +	atomic_set(&cmd->t_task_cdbs_left, cmd->t_task_list_num); +	atomic_set(&cmd->t_task_cdbs_ex_left, cmd->t_task_list_num); +  	/*  	 * For WRITEs, let the fabric know its buffer is ready..  	 
* This WRITE struct se_cmd (and all of its associated struct se_task's) @@ -4290,6 +3870,11 @@ int transport_generic_new_cmd(struct se_cmd *cmd)  	 */  	transport_execute_tasks(cmd);  	return 0; + +out_fail: +	cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION; +	cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; +	return -EINVAL;  }  EXPORT_SYMBOL(transport_generic_new_cmd); @@ -4303,15 +3888,15 @@ void transport_generic_process_write(struct se_cmd *cmd)  }  EXPORT_SYMBOL(transport_generic_process_write); -static int transport_write_pending_qf(struct se_cmd *cmd) +static void transport_write_pending_qf(struct se_cmd *cmd)  { -	return cmd->se_tfo->write_pending(cmd); +	if (cmd->se_tfo->write_pending(cmd) == -EAGAIN) { +		pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", +			 cmd); +		transport_handle_queue_full(cmd, cmd->se_dev); +	}  } -/*	transport_generic_write_pending(): - * - * - */  static int transport_generic_write_pending(struct se_cmd *cmd)  {  	unsigned long flags; @@ -4321,17 +3906,6 @@ static int transport_generic_write_pending(struct se_cmd *cmd)  	cmd->t_state = TRANSPORT_WRITE_PENDING;  	spin_unlock_irqrestore(&cmd->t_state_lock, flags); -	if (cmd->transport_qf_callback) { -		ret = cmd->transport_qf_callback(cmd); -		if (ret == -EAGAIN) -			goto queue_full; -		else if (ret < 0) -			return ret; - -		cmd->transport_qf_callback = NULL; -		return 0; -	} -  	/*  	 * Clear the se_cmd for WRITE_PENDING status in order to set  	 * cmd->t_transport_active=0 so that transport_generic_handle_data @@ -4356,61 +3930,52 @@ static int transport_generic_write_pending(struct se_cmd *cmd)  queue_full:  	pr_debug("Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);  	cmd->t_state = TRANSPORT_COMPLETE_QF_WP; -	transport_handle_queue_full(cmd, cmd->se_dev, -			transport_write_pending_qf); +	transport_handle_queue_full(cmd, cmd->se_dev);  	return ret;  } +/** + * transport_release_cmd - free a command + * @cmd:       command to free + * + * This routine unconditionally frees a command, and reference counting + * or list removal must be done in the caller. 
+ */  void transport_release_cmd(struct se_cmd *cmd)  {  	BUG_ON(!cmd->se_tfo); -	transport_free_se_cmd(cmd); +	if (cmd->se_tmr_req) +		core_tmr_release_req(cmd->se_tmr_req); +	if (cmd->t_task_cdb != cmd->__t_task_cdb) +		kfree(cmd->t_task_cdb);  	cmd->se_tfo->release_cmd(cmd);  }  EXPORT_SYMBOL(transport_release_cmd); -/*	transport_generic_free_cmd(): - * - *	Called from processing frontend to release storage engine resources - */ -void transport_generic_free_cmd( -	struct se_cmd *cmd, -	int wait_for_tasks, -	int session_reinstatement) +void transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)  { -	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) +	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) { +		if (wait_for_tasks && cmd->se_tmr_req) +			 transport_wait_for_tasks(cmd); +  		transport_release_cmd(cmd); -	else { +	} else { +		if (wait_for_tasks) +			transport_wait_for_tasks(cmd); +  		core_dec_lacl_count(cmd->se_sess->se_node_acl, cmd); -		if (cmd->se_lun) { -#if 0 -			pr_debug("cmd: %p ITT: 0x%08x contains" -				" cmd->se_lun\n", cmd, -				cmd->se_tfo->get_task_tag(cmd)); -#endif +		if (cmd->se_lun)  			transport_lun_remove_cmd(cmd); -		} - -		if (wait_for_tasks && cmd->transport_wait_for_tasks) -			cmd->transport_wait_for_tasks(cmd, 0, 0);  		transport_free_dev_tasks(cmd); -		transport_generic_remove(cmd, session_reinstatement); +		transport_put_cmd(cmd);  	}  }  EXPORT_SYMBOL(transport_generic_free_cmd); -static void transport_nop_wait_for_tasks( -	struct se_cmd *cmd, -	int remove_cmd, -	int session_reinstatement) -{ -	return; -} -  /*	transport_lun_wait_for_tasks():   *   *	Called from ConfigFS context to stop the passed struct se_cmd to allow @@ -4449,7 +4014,7 @@ static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)  		pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",  				cmd->se_tfo->get_task_tag(cmd));  	} -	transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj); +	transport_remove_cmd_from_queue(cmd);  	return 0;  } @@ -4580,22 +4145,30 @@ int transport_clear_lun_from_sessions(struct se_lun *lun)  	return 0;  } -/*	transport_generic_wait_for_tasks(): +/** + * transport_wait_for_tasks - wait for completion to occur + * @cmd:	command to wait   * - *	Called from frontend or passthrough context to wait for storage engine - *	to pause and/or release frontend generated struct se_cmd. + * Called from frontend fabric context to wait for storage engine + * to pause and/or release frontend generated struct se_cmd.   */ -static void transport_generic_wait_for_tasks( -	struct se_cmd *cmd, -	int remove_cmd, -	int session_reinstatement) +void transport_wait_for_tasks(struct se_cmd *cmd)  {  	unsigned long flags; -	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) -		return; -  	spin_lock_irqsave(&cmd->t_state_lock, flags); +	if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) && !(cmd->se_tmr_req)) { +		spin_unlock_irqrestore(&cmd->t_state_lock, flags); +		return; +	} +	/* +	 * Only perform a possible wait_for_tasks if SCF_SUPPORTED_SAM_OPCODE +	 * has been set in transport_set_supported_SAM_opcode(). +	 */ +	if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) && !cmd->se_tmr_req) { +		spin_unlock_irqrestore(&cmd->t_state_lock, flags); +		return; +	}  	/*  	 * If we are already stopped due to an external event (ie: LUN shutdown)  	 * sleep until the connection can have the passed struct se_cmd back. 
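The transport_release_cmd() hunk above now frees cmd->t_task_cdb only when it does not point at the command's embedded __t_task_cdb[] buffer. Below is a minimal userspace sketch of that embedded-buffer-with-heap-fallback idiom; it is illustrative only, not target core code, and the demo_* names and DEMO_CDB_SIZE constant are hypothetical stand-ins.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DEMO_CDB_SIZE 16			/* stands in for the embedded CDB size */

struct demo_cmd {
	unsigned char *cdb;			/* plays the role of se_cmd->t_task_cdb */
	unsigned char __cdb[DEMO_CDB_SIZE];	/* plays the role of se_cmd->__t_task_cdb */
};

static int demo_setup_cdb(struct demo_cmd *cmd, const unsigned char *cdb, size_t len)
{
	/* Common case: the CDB fits the embedded buffer, no allocation needed. */
	if (len <= DEMO_CDB_SIZE) {
		cmd->cdb = cmd->__cdb;
	} else {
		cmd->cdb = malloc(len);
		if (!cmd->cdb)
			return -1;
	}
	memcpy(cmd->cdb, cdb, len);
	return 0;
}

static void demo_release_cmd(struct demo_cmd *cmd)
{
	/* Free only when the CDB did not fit the embedded buffer. */
	if (cmd->cdb != cmd->__cdb)
		free(cmd->cdb);
}

int main(void)
{
	unsigned char inquiry[6] = { 0x12, 0, 0, 0, 36, 0 };	/* 6-byte INQUIRY CDB */
	struct demo_cmd cmd = { 0 };

	if (demo_setup_cdb(&cmd, inquiry, sizeof(inquiry)) == 0)
		printf("embedded buffer used: %d\n", cmd.cdb == cmd.__cdb);
	demo_release_cmd(&cmd);
	return 0;
}

Comparing the pointer against the embedded buffer makes the release path self-describing: no separate "was this heap allocated" flag has to be carried in the descriptor, which is why the new transport_release_cmd() can do the cleanup unconditionally once the caller has dropped its references.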
@@ -4635,16 +4208,17 @@ static void transport_generic_wait_for_tasks(  		atomic_set(&cmd->transport_lun_stop, 0);  	}  	if (!atomic_read(&cmd->t_transport_active) || -	     atomic_read(&cmd->t_transport_aborted)) -		goto remove; +	     atomic_read(&cmd->t_transport_aborted)) { +		spin_unlock_irqrestore(&cmd->t_state_lock, flags); +		return; +	}  	atomic_set(&cmd->t_transport_stop, 1);  	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x" -		" i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop" -		" = TRUE\n", cmd, cmd->se_tfo->get_task_tag(cmd), -		cmd->se_tfo->get_cmd_state(cmd), cmd->t_state, -		cmd->deferred_t_state); +		" i_state: %d, t_state: %d, t_transport_stop = TRUE\n", +		cmd, cmd->se_tfo->get_task_tag(cmd), +		cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);  	spin_unlock_irqrestore(&cmd->t_state_lock, flags); @@ -4659,13 +4233,10 @@ static void transport_generic_wait_for_tasks(  	pr_debug("wait_for_tasks: Stopped wait_for_compltion("  		"&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",  		cmd->se_tfo->get_task_tag(cmd)); -remove: -	spin_unlock_irqrestore(&cmd->t_state_lock, flags); -	if (!remove_cmd) -		return; -	transport_generic_free_cmd(cmd, 0, session_reinstatement); +	spin_unlock_irqrestore(&cmd->t_state_lock, flags);  } +EXPORT_SYMBOL(transport_wait_for_tasks);  static int transport_get_sense_codes(  	struct se_cmd *cmd, @@ -4726,6 +4297,13 @@ int transport_send_check_condition_and_sense(  	 */  	switch (reason) {  	case TCM_NON_EXISTENT_LUN: +		/* CURRENT ERROR */ +		buffer[offset] = 0x70; +		/* ILLEGAL REQUEST */ +		buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST; +		/* LOGICAL UNIT NOT SUPPORTED */ +		buffer[offset+SPC_ASC_KEY_OFFSET] = 0x25; +		break;  	case TCM_UNSUPPORTED_SCSI_OPCODE:  	case TCM_SECTOR_COUNT_TOO_MANY:  		/* CURRENT ERROR */ @@ -4883,6 +4461,15 @@ EXPORT_SYMBOL(transport_check_aborted_status);  void transport_send_task_abort(struct se_cmd *cmd)  { +	unsigned long flags; + +	spin_lock_irqsave(&cmd->t_state_lock, flags); +	if (cmd->se_cmd_flags & SCF_SENT_CHECK_CONDITION) { +		spin_unlock_irqrestore(&cmd->t_state_lock, flags); +		return; +	} +	spin_unlock_irqrestore(&cmd->t_state_lock, flags); +  	/*  	 * If there are still expected incoming fabric WRITEs, we wait  	 * until until they have completed before sending a TASK_ABORTED @@ -4947,184 +4534,10 @@ int transport_generic_do_tmr(struct se_cmd *cmd)  	cmd->t_state = TRANSPORT_ISTATE_PROCESSING;  	cmd->se_tfo->queue_tm_rsp(cmd); -	transport_cmd_check_stop(cmd, 2, 0); +	transport_cmd_check_stop_to_fabric(cmd);  	return 0;  } -/* - *	Called with spin_lock_irq(&dev->execute_task_lock); held - * - */ -static struct se_task * -transport_get_task_from_state_list(struct se_device *dev) -{ -	struct se_task *task; - -	if (list_empty(&dev->state_task_list)) -		return NULL; - -	list_for_each_entry(task, &dev->state_task_list, t_state_list) -		break; - -	list_del(&task->t_state_list); -	atomic_set(&task->task_state_active, 0); - -	return task; -} - -static void transport_processing_shutdown(struct se_device *dev) -{ -	struct se_cmd *cmd; -	struct se_task *task; -	unsigned long flags; -	/* -	 * Empty the struct se_device's struct se_task state list. 
-	 */ -	spin_lock_irqsave(&dev->execute_task_lock, flags); -	while ((task = transport_get_task_from_state_list(dev))) { -		if (!task->task_se_cmd) { -			pr_err("task->task_se_cmd is NULL!\n"); -			continue; -		} -		cmd = task->task_se_cmd; - -		spin_unlock_irqrestore(&dev->execute_task_lock, flags); - -		spin_lock_irqsave(&cmd->t_state_lock, flags); - -		pr_debug("PT: cmd: %p task: %p ITT: 0x%08x," -			" i_state: %d, t_state/def_t_state:" -			" %d/%d cdb: 0x%02x\n", cmd, task, -			cmd->se_tfo->get_task_tag(cmd), -			cmd->se_tfo->get_cmd_state(cmd), -			cmd->t_state, cmd->deferred_t_state, -			cmd->t_task_cdb[0]); -		pr_debug("PT: ITT[0x%08x] - t_tasks: %d t_task_cdbs_left:" -			" %d t_task_cdbs_sent: %d -- t_transport_active: %d" -			" t_transport_stop: %d t_transport_sent: %d\n", -			cmd->se_tfo->get_task_tag(cmd), -			cmd->t_task_list_num, -			atomic_read(&cmd->t_task_cdbs_left), -			atomic_read(&cmd->t_task_cdbs_sent), -			atomic_read(&cmd->t_transport_active), -			atomic_read(&cmd->t_transport_stop), -			atomic_read(&cmd->t_transport_sent)); - -		if (atomic_read(&task->task_active)) { -			atomic_set(&task->task_stop, 1); -			spin_unlock_irqrestore( -				&cmd->t_state_lock, flags); - -			pr_debug("Waiting for task: %p to shutdown for dev:" -				" %p\n", task, dev); -			wait_for_completion(&task->task_stop_comp); -			pr_debug("Completed task: %p shutdown for dev: %p\n", -				task, dev); - -			spin_lock_irqsave(&cmd->t_state_lock, flags); -			atomic_dec(&cmd->t_task_cdbs_left); - -			atomic_set(&task->task_active, 0); -			atomic_set(&task->task_stop, 0); -		} else { -			if (atomic_read(&task->task_execute_queue) != 0) -				transport_remove_task_from_execute_queue(task, dev); -		} -		__transport_stop_task_timer(task, &flags); - -		if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) { -			spin_unlock_irqrestore( -					&cmd->t_state_lock, flags); - -			pr_debug("Skipping task: %p, dev: %p for" -				" t_task_cdbs_ex_left: %d\n", task, dev, -				atomic_read(&cmd->t_task_cdbs_ex_left)); - -			spin_lock_irqsave(&dev->execute_task_lock, flags); -			continue; -		} - -		if (atomic_read(&cmd->t_transport_active)) { -			pr_debug("got t_transport_active = 1 for task: %p, dev:" -					" %p\n", task, dev); - -			if (atomic_read(&cmd->t_fe_count)) { -				spin_unlock_irqrestore( -					&cmd->t_state_lock, flags); -				transport_send_check_condition_and_sense( -					cmd, TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, -					0); -				transport_remove_cmd_from_queue(cmd, -					&cmd->se_dev->dev_queue_obj); - -				transport_lun_remove_cmd(cmd); -				transport_cmd_check_stop(cmd, 1, 0); -			} else { -				spin_unlock_irqrestore( -					&cmd->t_state_lock, flags); - -				transport_remove_cmd_from_queue(cmd, -					&cmd->se_dev->dev_queue_obj); - -				transport_lun_remove_cmd(cmd); - -				if (transport_cmd_check_stop(cmd, 1, 0)) -					transport_generic_remove(cmd, 0); -			} - -			spin_lock_irqsave(&dev->execute_task_lock, flags); -			continue; -		} -		pr_debug("Got t_transport_active = 0 for task: %p, dev: %p\n", -				task, dev); - -		if (atomic_read(&cmd->t_fe_count)) { -			spin_unlock_irqrestore( -				&cmd->t_state_lock, flags); -			transport_send_check_condition_and_sense(cmd, -				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); -			transport_remove_cmd_from_queue(cmd, -				&cmd->se_dev->dev_queue_obj); - -			transport_lun_remove_cmd(cmd); -			transport_cmd_check_stop(cmd, 1, 0); -		} else { -			spin_unlock_irqrestore( -				&cmd->t_state_lock, flags); - -			transport_remove_cmd_from_queue(cmd, -				
&cmd->se_dev->dev_queue_obj); -			transport_lun_remove_cmd(cmd); - -			if (transport_cmd_check_stop(cmd, 1, 0)) -				transport_generic_remove(cmd, 0); -		} - -		spin_lock_irqsave(&dev->execute_task_lock, flags); -	} -	spin_unlock_irqrestore(&dev->execute_task_lock, flags); -	/* -	 * Empty the struct se_device's struct se_cmd list. -	 */ -	while ((cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj))) { - -		pr_debug("From Device Queue: cmd: %p t_state: %d\n", -				cmd, cmd->t_state); - -		if (atomic_read(&cmd->t_fe_count)) { -			transport_send_check_condition_and_sense(cmd, -				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); - -			transport_lun_remove_cmd(cmd); -			transport_cmd_check_stop(cmd, 1, 0); -		} else { -			transport_lun_remove_cmd(cmd); -			if (transport_cmd_check_stop(cmd, 1, 0)) -				transport_generic_remove(cmd, 0); -		} -	} -} -  /*	transport_processing_thread():   *   * @@ -5144,14 +4557,6 @@ static int transport_processing_thread(void *param)  		if (ret < 0)  			goto out; -		spin_lock_irq(&dev->dev_status_lock); -		if (dev->dev_status & TRANSPORT_DEVICE_SHUTDOWN) { -			spin_unlock_irq(&dev->dev_status_lock); -			transport_processing_shutdown(dev); -			continue; -		} -		spin_unlock_irq(&dev->dev_status_lock); -  get_cmd:  		__transport_execute_tasks(dev); @@ -5160,6 +4565,9 @@ get_cmd:  			continue;  		switch (cmd->t_state) { +		case TRANSPORT_NEW_CMD: +			BUG(); +			break;  		case TRANSPORT_NEW_CMD_MAP:  			if (!cmd->se_tfo->new_cmd_map) {  				pr_err("cmd->se_tfo->new_cmd_map is" @@ -5169,19 +4577,17 @@ get_cmd:  			ret = cmd->se_tfo->new_cmd_map(cmd);  			if (ret < 0) {  				cmd->transport_error_status = ret; -				transport_generic_request_failure(cmd, NULL, +				transport_generic_request_failure(cmd,  						0, (cmd->data_direction !=  						    DMA_TO_DEVICE));  				break;  			} -			/* Fall through */ -		case TRANSPORT_NEW_CMD:  			ret = transport_generic_new_cmd(cmd);  			if (ret == -EAGAIN)  				break;  			else if (ret < 0) {  				cmd->transport_error_status = ret; -				transport_generic_request_failure(cmd, NULL, +				transport_generic_request_failure(cmd,  					0, (cmd->data_direction !=  					 DMA_TO_DEVICE));  			} @@ -5189,33 +4595,22 @@ get_cmd:  		case TRANSPORT_PROCESS_WRITE:  			transport_generic_process_write(cmd);  			break; -		case TRANSPORT_COMPLETE_OK: -			transport_stop_all_task_timers(cmd); -			transport_generic_complete_ok(cmd); -			break; -		case TRANSPORT_REMOVE: -			transport_generic_remove(cmd, 0); -			break;  		case TRANSPORT_FREE_CMD_INTR: -			transport_generic_free_cmd(cmd, 0, 0); +			transport_generic_free_cmd(cmd, 0);  			break;  		case TRANSPORT_PROCESS_TMR:  			transport_generic_do_tmr(cmd);  			break; -		case TRANSPORT_COMPLETE_FAILURE: -			transport_generic_request_failure(cmd, NULL, 1, 1); -			break; -		case TRANSPORT_COMPLETE_TIMEOUT: -			transport_stop_all_task_timers(cmd); -			transport_generic_request_timeout(cmd); -			break;  		case TRANSPORT_COMPLETE_QF_WP: -			transport_generic_write_pending(cmd); +			transport_write_pending_qf(cmd); +			break; +		case TRANSPORT_COMPLETE_QF_OK: +			transport_complete_qf(cmd);  			break;  		default: -			pr_err("Unknown t_state: %d deferred_t_state:" -				" %d for ITT: 0x%08x i_state: %d on SE LUN:" -				" %u\n", cmd->t_state, cmd->deferred_t_state, +			pr_err("Unknown t_state: %d  for ITT: 0x%08x " +				"i_state: %d on SE LUN: %u\n", +				cmd->t_state,  				cmd->se_tfo->get_task_tag(cmd),  				cmd->se_tfo->get_cmd_state(cmd),  				cmd->se_lun->unpacked_lun); @@ -5226,7 +4621,8 @@ get_cmd:  	}  out: 
-	transport_release_all_cmds(dev); +	WARN_ON(!list_empty(&dev->state_task_list)); +	WARN_ON(!list_empty(&dev->dev_queue_obj.qobj_list));  	dev->process_thread = NULL;  	return 0;  } diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c index 31e3c652527..50a480db7a6 100644 --- a/drivers/target/target_core_ua.c +++ b/drivers/target/target_core_ua.c @@ -24,7 +24,6 @@   *   ******************************************************************************/ -#include <linux/version.h>  #include <linux/slab.h>  #include <linux/spinlock.h>  #include <scsi/scsi.h> diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h index bd4fe21a23b..3749d8b4b42 100644 --- a/drivers/target/tcm_fc/tcm_fc.h +++ b/drivers/target/tcm_fc/tcm_fc.h @@ -98,8 +98,7 @@ struct ft_tpg {  	struct list_head list;		/* linkage in ft_lport_acl tpg_list */  	struct list_head lun_list;	/* head of LUNs */  	struct se_portal_group se_tpg; -	struct task_struct *thread;	/* processing thread */ -	struct se_queue_obj qobj;	/* queue for processing thread */ +	struct workqueue_struct *workqueue;  };  struct ft_lport_acl { @@ -110,16 +109,10 @@ struct ft_lport_acl {  	struct se_wwn fc_lport_wwn;  }; -enum ft_cmd_state { -	FC_CMD_ST_NEW = 0, -	FC_CMD_ST_REJ -}; -  /*   * Commands   */  struct ft_cmd { -	enum ft_cmd_state state;  	u32 lun;                        /* LUN from request */  	struct ft_sess *sess;		/* session held for cmd */  	struct fc_seq *seq;		/* sequence in exchange mgr */ @@ -127,7 +120,7 @@ struct ft_cmd {  	struct fc_frame *req_frame;  	unsigned char *cdb;		/* pointer to CDB inside frame */  	u32 write_data_len;		/* data received on writes */ -	struct se_queue_req se_req; +	struct work_struct work;  	/* Local sense buffer */  	unsigned char ft_sense_buffer[TRANSPORT_SENSE_BUFFER];  	u32 was_ddp_setup:1;		/* Set only if ddp is setup */ @@ -177,7 +170,6 @@ int ft_is_state_remove(struct se_cmd *);  /*   * other internal functions.   
*/ -int ft_thread(void *);  void ft_recv_req(struct ft_sess *, struct fc_frame *);  struct ft_tpg *ft_lport_find_tpg(struct fc_lport *);  struct ft_node_acl *ft_acl_get(struct ft_tpg *, struct fc_rport_priv *); diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c index 5654dc22f7a..6195026cc7b 100644 --- a/drivers/target/tcm_fc/tfc_cmd.c +++ b/drivers/target/tcm_fc/tfc_cmd.c @@ -19,7 +19,6 @@  #include <linux/module.h>  #include <linux/moduleparam.h> -#include <linux/version.h>  #include <generated/utsrelease.h>  #include <linux/utsname.h>  #include <linux/init.h> @@ -62,8 +61,8 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)  	int count;  	se_cmd = &cmd->se_cmd; -	pr_debug("%s: cmd %p state %d sess %p seq %p se_cmd %p\n", -		caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd); +	pr_debug("%s: cmd %p sess %p seq %p se_cmd %p\n", +		caller, cmd, cmd->sess, cmd->seq, se_cmd);  	pr_debug("%s: cmd %p cdb %p\n",  		caller, cmd, cmd->cdb);  	pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun); @@ -90,38 +89,6 @@ void ft_dump_cmd(struct ft_cmd *cmd, const char *caller)  		16, 4, cmd->cdb, MAX_COMMAND_SIZE, 0);  } -static void ft_queue_cmd(struct ft_sess *sess, struct ft_cmd *cmd) -{ -	struct ft_tpg *tpg = sess->tport->tpg; -	struct se_queue_obj *qobj = &tpg->qobj; -	unsigned long flags; - -	qobj = &sess->tport->tpg->qobj; -	spin_lock_irqsave(&qobj->cmd_queue_lock, flags); -	list_add_tail(&cmd->se_req.qr_list, &qobj->qobj_list); -	atomic_inc(&qobj->queue_cnt); -	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); - -	wake_up_process(tpg->thread); -} - -static struct ft_cmd *ft_dequeue_cmd(struct se_queue_obj *qobj) -{ -	unsigned long flags; -	struct se_queue_req *qr; - -	spin_lock_irqsave(&qobj->cmd_queue_lock, flags); -	if (list_empty(&qobj->qobj_list)) { -		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); -		return NULL; -	} -	qr = list_first_entry(&qobj->qobj_list, struct se_queue_req, qr_list); -	list_del(&qr->qr_list); -	atomic_dec(&qobj->queue_cnt); -	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags); -	return container_of(qr, struct ft_cmd, se_req); -} -  static void ft_free_cmd(struct ft_cmd *cmd)  {  	struct fc_frame *fp; @@ -147,7 +114,7 @@ void ft_release_cmd(struct se_cmd *se_cmd)  void ft_check_stop_free(struct se_cmd *se_cmd)  { -	transport_generic_free_cmd(se_cmd, 0, 0); +	transport_generic_free_cmd(se_cmd, 0);  }  /* @@ -282,9 +249,7 @@ u32 ft_get_task_tag(struct se_cmd *se_cmd)  int ft_get_cmd_state(struct se_cmd *se_cmd)  { -	struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd); - -	return cmd->state; +	return 0;  }  int ft_is_state_remove(struct se_cmd *se_cmd) @@ -302,9 +267,8 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)  	if (IS_ERR(fp)) {  		/* XXX need to find cmd if queued */ -		cmd->se_cmd.t_state = TRANSPORT_REMOVE;  		cmd->seq = NULL; -		transport_generic_free_cmd(&cmd->se_cmd, 0, 0); +		transport_generic_free_cmd(&cmd->se_cmd, 0);  		return;  	} @@ -322,7 +286,7 @@ static void ft_recv_seq(struct fc_seq *sp, struct fc_frame *fp, void *arg)  		       __func__, fh->fh_r_ctl);  		ft_invl_hw_context(cmd);  		fc_frame_free(fp); -		transport_generic_free_cmd(&cmd->se_cmd, 0, 0); +		transport_generic_free_cmd(&cmd->se_cmd, 0);  		break;  	}  } @@ -431,7 +395,7 @@ static void ft_send_tm(struct ft_cmd *cmd)  	}  	pr_debug("alloc tm cmd fn %d\n", tm_func); -	tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func); +	tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func, GFP_KERNEL);  	if 
(!tmr) {  		pr_debug("alloc failed\n");  		ft_send_resp_code_and_free(cmd, FCP_TMF_FAILED); @@ -455,7 +419,7 @@ static void ft_send_tm(struct ft_cmd *cmd)  			sess = cmd->sess;  			transport_send_check_condition_and_sense(&cmd->se_cmd,  				cmd->se_cmd.scsi_sense_reason, 0); -			transport_generic_free_cmd(&cmd->se_cmd, 0, 0); +			transport_generic_free_cmd(&cmd->se_cmd, 0);  			ft_sess_put(sess);  			return;  		} @@ -505,6 +469,8 @@ int ft_queue_tm_resp(struct se_cmd *se_cmd)  	return 0;  } +static void ft_send_work(struct work_struct *work); +  /*   * Handle incoming FCP command.   */ @@ -523,7 +489,9 @@ static void ft_recv_cmd(struct ft_sess *sess, struct fc_frame *fp)  		goto busy;  	}  	cmd->req_frame = fp;		/* hold frame during cmd */ -	ft_queue_cmd(sess, cmd); + +	INIT_WORK(&cmd->work, ft_send_work); +	queue_work(sess->tport->tpg->workqueue, &cmd->work);  	return;  busy: @@ -563,12 +531,13 @@ void ft_recv_req(struct ft_sess *sess, struct fc_frame *fp)  /*   * Send new command to target.   */ -static void ft_send_cmd(struct ft_cmd *cmd) +static void ft_send_work(struct work_struct *work)  { +	struct ft_cmd *cmd = container_of(work, struct ft_cmd, work);  	struct fc_frame_header *fh = fc_frame_header_get(cmd->req_frame);  	struct se_cmd *se_cmd;  	struct fcp_cmnd *fcp; -	int data_dir; +	int data_dir = 0;  	u32 data_len;  	int task_attr;  	int ret; @@ -657,7 +626,7 @@ static void ft_send_cmd(struct ft_cmd *cmd)  	if (ret == -ENOMEM) {  		transport_send_check_condition_and_sense(se_cmd,  				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0); -		transport_generic_free_cmd(se_cmd, 0, 0); +		transport_generic_free_cmd(se_cmd, 0);  		return;  	}  	if (ret == -EINVAL) { @@ -666,51 +635,12 @@ static void ft_send_cmd(struct ft_cmd *cmd)  		else  			transport_send_check_condition_and_sense(se_cmd,  					se_cmd->scsi_sense_reason, 0); -		transport_generic_free_cmd(se_cmd, 0, 0); +		transport_generic_free_cmd(se_cmd, 0);  		return;  	} -	transport_generic_handle_cdb(se_cmd); +	transport_handle_cdb_direct(se_cmd);  	return;  err:  	ft_send_resp_code_and_free(cmd, FCP_CMND_FIELDS_INVALID);  } - -/* - * Handle request in the command thread. - */ -static void ft_exec_req(struct ft_cmd *cmd) -{ -	pr_debug("cmd state %x\n", cmd->state); -	switch (cmd->state) { -	case FC_CMD_ST_NEW: -		ft_send_cmd(cmd); -		break; -	default: -		break; -	} -} - -/* - * Processing thread. - * Currently one thread per tpg. 
- */ -int ft_thread(void *arg) -{ -	struct ft_tpg *tpg = arg; -	struct se_queue_obj *qobj = &tpg->qobj; -	struct ft_cmd *cmd; - -	while (!kthread_should_stop()) { -		schedule_timeout_interruptible(MAX_SCHEDULE_TIMEOUT); -		if (kthread_should_stop()) -			goto out; - -		cmd = ft_dequeue_cmd(qobj); -		if (cmd) -			ft_exec_req(cmd); -	} - -out: -	return 0; -} diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c index 8781d1e423d..5f770412ca4 100644 --- a/drivers/target/tcm_fc/tfc_conf.c +++ b/drivers/target/tcm_fc/tfc_conf.c @@ -23,7 +23,6 @@  #include <linux/module.h>  #include <linux/moduleparam.h> -#include <linux/version.h>  #include <generated/utsrelease.h>  #include <linux/utsname.h>  #include <linux/init.h> @@ -32,6 +31,7 @@  #include <linux/types.h>  #include <linux/string.h>  #include <linux/configfs.h> +#include <linux/kernel.h>  #include <linux/ctype.h>  #include <asm/unaligned.h>  #include <scsi/scsi.h> @@ -71,10 +71,10 @@ static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict)  {  	const char *cp;  	char c; -	u32 nibble;  	u32 byte = 0;  	u32 pos = 0;  	u32 err; +	int val;  	*wwn = 0;  	for (cp = name; cp < &name[FT_NAMELEN - 1]; cp++) { @@ -95,13 +95,10 @@ static ssize_t ft_parse_wwn(const char *name, u64 *wwn, int strict)  			return cp - name;  		}  		err = 3; -		if (isdigit(c)) -			nibble = c - '0'; -		else if (isxdigit(c) && (islower(c) || !strict)) -			nibble = tolower(c) - 'a' + 10; -		else +		val = hex_to_bin(c); +		if (val < 0 || (strict && isupper(c)))  			goto fail; -		*wwn = (*wwn << 4) | nibble; +		*wwn = (*wwn << 4) | val;  	}  	err = 4;  fail: @@ -256,7 +253,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)  	struct se_portal_group *se_tpg = &tpg->se_tpg;  	struct se_node_acl *se_acl; -	spin_lock_bh(&se_tpg->acl_node_lock); +	spin_lock_irq(&se_tpg->acl_node_lock);  	list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) {  		acl = container_of(se_acl, struct ft_node_acl, se_node_acl);  		pr_debug("acl %p port_name %llx\n", @@ -270,7 +267,7 @@ struct ft_node_acl *ft_acl_get(struct ft_tpg *tpg, struct fc_rport_priv *rdata)  			break;  		}  	} -	spin_unlock_bh(&se_tpg->acl_node_lock); +	spin_unlock_irq(&se_tpg->acl_node_lock);  	return found;  } @@ -327,7 +324,6 @@ static struct se_portal_group *ft_add_tpg(  	tpg->index = index;  	tpg->lport_acl = lacl;  	INIT_LIST_HEAD(&tpg->lun_list); -	transport_init_queue_obj(&tpg->qobj);  	ret = core_tpg_register(&ft_configfs->tf_ops, wwn, &tpg->se_tpg,  				tpg, TRANSPORT_TPG_TYPE_NORMAL); @@ -336,8 +332,8 @@ static struct se_portal_group *ft_add_tpg(  		return NULL;  	} -	tpg->thread = kthread_run(ft_thread, tpg, "ft_tpg%lu", index); -	if (IS_ERR(tpg->thread)) { +	tpg->workqueue = alloc_workqueue("tcm_fc", 0, 1); +	if (!tpg->workqueue) {  		kfree(tpg);  		return NULL;  	} @@ -356,7 +352,7 @@ static void ft_del_tpg(struct se_portal_group *se_tpg)  	pr_debug("del tpg %s\n",  		    config_item_name(&tpg->se_tpg.tpg_group.cg_item)); -	kthread_stop(tpg->thread); +	destroy_workqueue(tpg->workqueue);  	/* Wait for sessions to be freed thru RCU, for BUG_ON below */  	synchronize_rcu(); @@ -655,9 +651,7 @@ static void __exit ft_exit(void)  	synchronize_rcu();  } -#ifdef MODULE  MODULE_DESCRIPTION("FC TCM fabric driver " FT_VERSION);  MODULE_LICENSE("GPL");  module_init(ft_init);  module_exit(ft_exit); -#endif /* MODULE */ diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c index c37f4cd9645..1369b1cb103 100644 --- 
a/drivers/target/tcm_fc/tfc_io.c +++ b/drivers/target/tcm_fc/tfc_io.c @@ -28,7 +28,6 @@  #include <linux/module.h>  #include <linux/moduleparam.h> -#include <linux/version.h>  #include <generated/utsrelease.h>  #include <linux/utsname.h>  #include <linux/init.h> @@ -219,43 +218,41 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)  	if (cmd->was_ddp_setup) {  		BUG_ON(!ep);  		BUG_ON(!lport); -	} - -	/* -	 * Doesn't expect payload if DDP is setup. Payload -	 * is expected to be copied directly to user buffers -	 * due to DDP (Large Rx offload), -	 */ -	buf = fc_frame_payload_get(fp, 1); -	if (buf) -		pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, " +		/* +		 * Since DDP (Large Rx offload) was setup for this request, +		 * payload is expected to be copied directly to user buffers. +		 */ +		buf = fc_frame_payload_get(fp, 1); +		if (buf) +			pr_err("%s: xid 0x%x, f_ctl 0x%x, cmd->sg %p, "  				"cmd->sg_cnt 0x%x. DDP was setup"  				" hence not expected to receive frame with " -				"payload, Frame will be dropped if " -				"'Sequence Initiative' bit in f_ctl is " +				"payload, Frame will be dropped if" +				"'Sequence Initiative' bit in f_ctl is"  				"not set\n", __func__, ep->xid, f_ctl,  				cmd->sg, cmd->sg_cnt); -	/* - 	 * Invalidate HW DDP context if it was setup for respective - 	 * command. Invalidation of HW DDP context is requited in both - 	 * situation (success and error).  - 	 */ -	ft_invl_hw_context(cmd); +		/* +		 * Invalidate HW DDP context if it was setup for respective +		 * command. Invalidation of HW DDP context is requited in both +		 * situation (success and error). +		 */ +		ft_invl_hw_context(cmd); -	/* -	 * If "Sequence Initiative (TSI)" bit set in f_ctl, means last -	 * write data frame is received successfully where payload is -	 * posted directly to user buffer and only the last frame's -	 * header is posted in receive queue. -	 * -	 * If "Sequence Initiative (TSI)" bit is not set, means error -	 * condition w.r.t. DDP, hence drop the packet and let explict -	 * ABORTS from other end of exchange timer trigger the recovery. -	 */ -	if (f_ctl & FC_FC_SEQ_INIT) -		goto last_frame; -	else -		goto drop; +		/* +		 * If "Sequence Initiative (TSI)" bit set in f_ctl, means last +		 * write data frame is received successfully where payload is +		 * posted directly to user buffer and only the last frame's +		 * header is posted in receive queue. +		 * +		 * If "Sequence Initiative (TSI)" bit is not set, means error +		 * condition w.r.t. DDP, hence drop the packet and let explict +		 * ABORTS from other end of exchange timer trigger the recovery. +		 */ +		if (f_ctl & FC_FC_SEQ_INIT) +			goto last_frame; +		else +			goto drop; +	}  	rel_off = ntohl(fh->fh_parm_offset);  	frame_len = fr_len(fp); diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c index dbb5eaeee39..326921385af 100644 --- a/drivers/target/tcm_fc/tfc_sess.c +++ b/drivers/target/tcm_fc/tfc_sess.c @@ -19,7 +19,6 @@  #include <linux/module.h>  #include <linux/moduleparam.h> -#include <linux/version.h>  #include <generated/utsrelease.h>  #include <linux/utsname.h>  #include <linux/init.h>  |
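The tcm_fc hunks above replace the per-tpg ft_thread()/se_queue_obj command queue with a workqueue: each ft_cmd embeds a work_struct, ft_recv_cmd() queues it with queue_work(), and the tpg owns the workqueue created by alloc_workqueue("tcm_fc", 0, 1) and torn down by destroy_workqueue(). The sketch below shows the same pattern in isolation; it is not the tcm_fc code, the foo_* names are hypothetical, and it is only meant to be read against kernel headers rather than built standalone.

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct foo_cmd {
	struct work_struct work;
	/* per-command state would live here */
};

static struct workqueue_struct *foo_wq;

/* Runs in process context, like ft_send_work() above. */
static void foo_send_work(struct work_struct *work)
{
	struct foo_cmd *cmd = container_of(work, struct foo_cmd, work);

	/* submit the command to the core here, then drop it */
	kfree(cmd);
}

/* Called from the receive path, like ft_recv_cmd() above. */
static int foo_queue_cmd(void)
{
	struct foo_cmd *cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC);

	if (!cmd)
		return -ENOMEM;
	INIT_WORK(&cmd->work, foo_send_work);
	queue_work(foo_wq, &cmd->work);
	return 0;
}

static int foo_setup(void)
{
	/* max_active == 1 keeps per-queue ordering, as with alloc_workqueue("tcm_fc", 0, 1) */
	foo_wq = alloc_workqueue("foo_wq", 0, 1);
	return foo_wq ? 0 : -ENOMEM;
}

static void foo_teardown(void)
{
	/* flushes any queued work before freeing, replacing kthread_stop() */
	destroy_workqueue(foo_wq);
}

The design point is that the workqueue gives process context, per-tpg serialization (max_active = 1), and a flush-on-teardown for free, so the hand-rolled dequeue loop, the FC_CMD_ST_* state field, and the kthread lifecycle code in tfc_cmd.c/tfc_conf.c can all be deleted, which is exactly what the diff does.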