@@ -505,23 +505,6 @@ static int transport_cmd_check_stop(struct se_cmd *cmd, bool remove_from_lists,
         if (write_pending)
                 cmd->t_state = TRANSPORT_WRITE_PENDING;
 
-        /*
-         * Determine if IOCTL context caller in requesting the stopping of this
-         * command for LUN shutdown purposes.
-         */
-        if (cmd->transport_state & CMD_T_LUN_STOP) {
-                pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
-                        __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
-
-                cmd->transport_state &= ~CMD_T_ACTIVE;
-                if (remove_from_lists)
-                        target_remove_from_state_list(cmd);
-                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-                complete(&cmd->transport_lun_stop_comp);
-                return 1;
-        }
-
         if (remove_from_lists) {
                 target_remove_from_state_list(cmd);
 
@@ -1078,13 +1061,10 @@ void transport_init_se_cmd(
         int task_attr,
         unsigned char *sense_buffer)
 {
-        INIT_LIST_HEAD(&cmd->se_lun_node);
         INIT_LIST_HEAD(&cmd->se_delayed_node);
         INIT_LIST_HEAD(&cmd->se_qf_node);
         INIT_LIST_HEAD(&cmd->se_cmd_list);
         INIT_LIST_HEAD(&cmd->state_list);
-        init_completion(&cmd->transport_lun_fe_stop_comp);
-        init_completion(&cmd->transport_lun_stop_comp);
         init_completion(&cmd->t_transport_stop_comp);
         init_completion(&cmd->cmd_wait_comp);
         init_completion(&cmd->task_stop_comp);
@@ -1705,29 +1685,14 @@ void target_execute_cmd(struct se_cmd *cmd)
         /*
          * If the received CDB has aleady been aborted stop processing it here.
          */
-        if (transport_check_aborted_status(cmd, 1)) {
-                complete(&cmd->transport_lun_stop_comp);
+        if (transport_check_aborted_status(cmd, 1))
                 return;
-        }
 
-        /*
-         * Determine if IOCTL context caller in requesting the stopping of this
-         * command for LUN shutdown purposes.
-         */
-        spin_lock_irq(&cmd->t_state_lock);
-        if (cmd->transport_state & CMD_T_LUN_STOP) {
-                pr_debug("%s:%d CMD_T_LUN_STOP for ITT: 0x%08x\n",
-                        __func__, __LINE__, cmd->se_tfo->get_task_tag(cmd));
-
-                cmd->transport_state &= ~CMD_T_ACTIVE;
-                spin_unlock_irq(&cmd->t_state_lock);
-                complete(&cmd->transport_lun_stop_comp);
-                return;
-        }
         /*
          * Determine if frontend context caller is requesting the stopping of
          * this command for frontend exceptions.
          */
+        spin_lock_irq(&cmd->t_state_lock);
         if (cmd->transport_state & CMD_T_STOP) {
                 pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08x\n",
                         __func__, __LINE__,
@@ -2390,149 +2355,6 @@ void target_wait_for_sess_cmds(struct se_session *se_sess)
 }
 EXPORT_SYMBOL(target_wait_for_sess_cmds);
 
-/* transport_lun_wait_for_tasks():
- *
- * Called from ConfigFS context to stop the passed struct se_cmd to allow
- * an struct se_lun to be successfully shutdown.
- */
-static int transport_lun_wait_for_tasks(struct se_cmd *cmd, struct se_lun *lun)
-{
-        unsigned long flags;
-        int ret = 0;
-
-        /*
-         * If the frontend has already requested this struct se_cmd to
-         * be stopped, we can safely ignore this struct se_cmd.
-         */
-        spin_lock_irqsave(&cmd->t_state_lock, flags);
-        if (cmd->transport_state & CMD_T_STOP) {
-                cmd->transport_state &= ~CMD_T_LUN_STOP;
-
-                pr_debug("ConfigFS ITT[0x%08x] - CMD_T_STOP, skipping\n",
-                         cmd->se_tfo->get_task_tag(cmd));
-                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-                transport_cmd_check_stop(cmd, false, false);
-                return -EPERM;
-        }
-        cmd->transport_state |= CMD_T_LUN_FE_STOP;
-        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-        // XXX: audit task_flags checks.
-        spin_lock_irqsave(&cmd->t_state_lock, flags);
-        if ((cmd->transport_state & CMD_T_BUSY) &&
-            (cmd->transport_state & CMD_T_SENT)) {
-                if (!target_stop_cmd(cmd, &flags))
-                        ret++;
-        }
-        spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
-        pr_debug("ConfigFS: cmd: %p stop tasks ret:"
-                        " %d\n", cmd, ret);
-        if (!ret) {
-                pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
-                        cmd->se_tfo->get_task_tag(cmd));
-                wait_for_completion(&cmd->transport_lun_stop_comp);
-                pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
-                        cmd->se_tfo->get_task_tag(cmd));
-        }
-
-        return 0;
-}
-
-static void __transport_clear_lun_from_sessions(struct se_lun *lun)
-{
-        struct se_cmd *cmd = NULL;
-        unsigned long lun_flags, cmd_flags;
-        /*
-         * Do exception processing and return CHECK_CONDITION status to the
-         * Initiator Port.
-         */
-        spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
-        while (!list_empty(&lun->lun_cmd_list)) {
-                cmd = list_first_entry(&lun->lun_cmd_list,
-                                struct se_cmd, se_lun_node);
-                list_del_init(&cmd->se_lun_node);
-
-                spin_lock(&cmd->t_state_lock);
-                pr_debug("SE_LUN[%d] - Setting cmd->transport"
-                        "_lun_stop for ITT: 0x%08x\n",
-                        cmd->se_lun->unpacked_lun,
-                        cmd->se_tfo->get_task_tag(cmd));
-                cmd->transport_state |= CMD_T_LUN_STOP;
-                spin_unlock(&cmd->t_state_lock);
-
-                spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
-
-                if (!cmd->se_lun) {
-                        pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
-                                cmd->se_tfo->get_task_tag(cmd),
-                                cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
-                        BUG();
-                }
-                /*
-                 * If the Storage engine still owns the iscsi_cmd_t, determine
-                 * and/or stop its context.
-                 */
-                pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
-                        "_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
-                        cmd->se_tfo->get_task_tag(cmd));
-
-                if (transport_lun_wait_for_tasks(cmd, cmd->se_lun) < 0) {
-                        spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
-                        continue;
-                }
-
-                pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
-                        "_wait_for_tasks(): SUCCESS\n",
-                        cmd->se_lun->unpacked_lun,
-                        cmd->se_tfo->get_task_tag(cmd));
-
-                spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
-                if (!(cmd->transport_state & CMD_T_DEV_ACTIVE)) {
-                        spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
-                        goto check_cond;
-                }
-                cmd->transport_state &= ~CMD_T_DEV_ACTIVE;
-                target_remove_from_state_list(cmd);
-                spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
-
-                /*
-                 * The Storage engine stopped this struct se_cmd before it was
-                 * send to the fabric frontend for delivery back to the
-                 * Initiator Node. Return this SCSI CDB back with an
-                 * CHECK_CONDITION status.
-                 */
-check_cond:
-                transport_send_check_condition_and_sense(cmd,
-                                TCM_NON_EXISTENT_LUN, 0);
-                /*
-                 * If the fabric frontend is waiting for this iscsi_cmd_t to
-                 * be released, notify the waiting thread now that LU has
-                 * finished accessing it.
-                 */
-                spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
-                if (cmd->transport_state & CMD_T_LUN_FE_STOP) {
-                        pr_debug("SE_LUN[%d] - Detected FE stop for"
-                                " struct se_cmd: %p ITT: 0x%08x\n",
-                                lun->unpacked_lun,
-                                cmd, cmd->se_tfo->get_task_tag(cmd));
-
-                        spin_unlock_irqrestore(&cmd->t_state_lock,
-                                        cmd_flags);
-                        transport_cmd_check_stop(cmd, false, false);
-                        complete(&cmd->transport_lun_fe_stop_comp);
-                        spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
-                        continue;
-                }
-                pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
-                        lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
-
-                spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
-                spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
-        }
-        spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
-}
-
 static int transport_clear_lun_ref_thread(void *p)
 {
         struct se_lun *lun = p;
@@ -2583,43 +2405,6 @@ bool transport_wait_for_tasks(struct se_cmd *cmd)
                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);
                 return false;
         }
-        /*
-         * If we are already stopped due to an external event (ie: LUN shutdown)
-         * sleep until the connection can have the passed struct se_cmd back.
-         * The cmd->transport_lun_stopped_sem will be upped by
-         * transport_clear_lun_from_sessions() once the ConfigFS context caller
-         * has completed its operation on the struct se_cmd.
-         */
-        if (cmd->transport_state & CMD_T_LUN_STOP) {
-                pr_debug("wait_for_tasks: Stopping"
-                        " wait_for_completion(&cmd->t_tasktransport_lun_fe"
-                        "_stop_comp); for ITT: 0x%08x\n",
-                        cmd->se_tfo->get_task_tag(cmd));
-                /*
-                 * There is a special case for WRITES where a FE exception +
-                 * LUN shutdown means ConfigFS context is still sleeping on
-                 * transport_lun_stop_comp in transport_lun_wait_for_tasks().
-                 * We go ahead and up transport_lun_stop_comp just to be sure
-                 * here.
-                 */
-                spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-                complete(&cmd->transport_lun_stop_comp);
-                wait_for_completion(&cmd->transport_lun_fe_stop_comp);
-                spin_lock_irqsave(&cmd->t_state_lock, flags);
-
-                target_remove_from_state_list(cmd);
-                /*
-                 * At this point, the frontend who was the originator of this
-                 * struct se_cmd, now owns the structure and can be released through
-                 * normal means below.
-                 */
-                pr_debug("wait_for_tasks: Stopped"
-                        " wait_for_completion(&cmd->t_tasktransport_lun_fe_"
-                        "stop_comp); for ITT: 0x%08x\n",
-                        cmd->se_tfo->get_task_tag(cmd));
-
-                cmd->transport_state &= ~CMD_T_LUN_STOP;
-        }
 
         if (!(cmd->transport_state & CMD_T_ACTIVE)) {
                 spin_unlock_irqrestore(&cmd->t_state_lock, flags);