dect/linux-2.6

target: make more use of the task_flags field in se_task

Replace various atomic_t variables that were mostly under t_state_lock
with new flags in task_flags.  Note that the execution error path
didn't take t_state_lock before, so add it there.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Author: Christoph Hellwig, 2011-10-12 11:07:03 -04:00 (committed by Nicholas Bellinger)
parent 42bf829eee
commit 6c76bf951c
3 changed files with 43 additions and 44 deletions
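For orientation before the diffs: the conversion folds several per-task atomic_t booleans into bit flags in the existing task_flags word, relying on the rule that the flags are only read or modified while cmd->t_state_lock is held (the execution error path is the one place the lock has to be added). A minimal, hypothetical sketch of the pattern, using stand-in types rather than the real se_cmd/se_task definitions:

#include <linux/spinlock.h>

enum sketch_task_flags {		/* mirrors the new se_task_flags values */
	TF_ACTIVE	= (1 << 0),
	TF_SENT		= (1 << 1),
};

struct sketch_cmd {			/* stand-in for struct se_cmd */
	spinlock_t t_state_lock;
};

struct sketch_task {			/* stand-in for struct se_task */
	unsigned long task_flags;	/* replaces task_active, task_sent, ... */
};

/*
 * Before: atomic_set(&task->task_active, 1); atomic_set(&task->task_sent, 1);
 * After: flip bits in task_flags while holding the command's state lock.
 */
static void sketch_mark_task_sent(struct sketch_cmd *cmd, struct sketch_task *task)
{
	unsigned long flags;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	task->task_flags |= (TF_ACTIVE | TF_SENT);
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
}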

drivers/target/target_core_tmr.c

@@ -259,8 +259,8 @@ static void core_tmr_drain_task_list(
 			atomic_read(&cmd->t_transport_stop),
 			atomic_read(&cmd->t_transport_sent));
-		if (atomic_read(&task->task_active)) {
-			atomic_set(&task->task_stop, 1);
+		if (task->task_flags & TF_ACTIVE) {
+			task->task_flags |= TF_REQUEST_STOP;
 			spin_unlock_irqrestore(
 					&cmd->t_state_lock, flags);
@@ -269,11 +269,10 @@ static void core_tmr_drain_task_list(
 			wait_for_completion(&task->task_stop_comp);
 			pr_debug("LUN_RESET Completed task: %p shutdown for"
 				" dev: %p\n", task, dev);
 			spin_lock_irqsave(&cmd->t_state_lock, flags);
 			atomic_dec(&cmd->t_task_cdbs_left);
-			atomic_set(&task->task_active, 0);
-			atomic_set(&task->task_stop, 0);
+			task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP);
 		}
 		__transport_stop_task_timer(task, &flags);

drivers/target/target_core_transport.c

@@ -440,7 +440,7 @@ static void transport_all_task_dev_remove_state(struct se_cmd *cmd)
 		return;
 	list_for_each_entry(task, &cmd->t_task_list, t_list) {
-		if (atomic_read(&task->task_active))
+		if (task->task_flags & TF_ACTIVE)
 			continue;
 		if (!atomic_read(&task->task_state_active))
@@ -718,7 +718,7 @@ void transport_complete_task(struct se_task *task, int success)
 	atomic_inc(&dev->depth_left);
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	atomic_set(&task->task_active, 0);
+	task->task_flags &= ~TF_ACTIVE;
 	/*
 	 * See if any sense data exists, if so set the TASK_SENSE flag.
@@ -737,14 +737,14 @@ void transport_complete_task(struct se_task *task, int success)
 	 * See if we are waiting for outstanding struct se_task
 	 * to complete for an exception condition
 	 */
-	if (atomic_read(&task->task_stop)) {
+	if (task->task_flags & TF_REQUEST_STOP) {
 		/*
 		 * Decrement cmd->t_se_count if this task had
 		 * previously thrown its timeout exception handler.
 		 */
-		if (atomic_read(&task->task_timeout)) {
+		if (task->task_flags & TF_TIMEOUT) {
 			atomic_dec(&cmd->t_se_count);
-			atomic_set(&task->task_timeout, 0);
+			task->task_flags &= ~TF_TIMEOUT;
 		}
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -756,7 +756,7 @@ void transport_complete_task(struct se_task *task, int success)
 	 * left counter to determine when the struct se_cmd is ready to be queued to
 	 * the processing thread.
 	 */
-	if (atomic_read(&task->task_timeout)) {
+	if (task->task_flags & TF_TIMEOUT) {
 		if (!atomic_dec_and_test(
 				&cmd->t_task_cdbs_timeout_left)) {
 			spin_unlock_irqrestore(&cmd->t_state_lock,
@@ -1793,8 +1793,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
 		 * If the struct se_task has not been sent and is not active,
 		 * remove the struct se_task from the execution queue.
 		 */
-		if (!atomic_read(&task->task_sent) &&
-		    !atomic_read(&task->task_active)) {
+		if (!(task->task_flags & (TF_ACTIVE | TF_SENT))) {
 			spin_unlock_irqrestore(&cmd->t_state_lock,
 					flags);
 			transport_remove_task_from_execute_queue(task,
@@ -1810,8 +1809,8 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
 		 * If the struct se_task is active, sleep until it is returned
 		 * from the plugin.
 		 */
-		if (atomic_read(&task->task_active)) {
-			atomic_set(&task->task_stop, 1);
+		if (task->task_flags & TF_ACTIVE) {
+			task->task_flags |= TF_REQUEST_STOP;
 			spin_unlock_irqrestore(&cmd->t_state_lock,
 					flags);
@@ -1823,9 +1822,7 @@ static int transport_stop_tasks_for_cmd(struct se_cmd *cmd)
 			spin_lock_irqsave(&cmd->t_state_lock, flags);
 			atomic_dec(&cmd->t_task_cdbs_left);
-			atomic_set(&task->task_active, 0);
-			atomic_set(&task->task_stop, 0);
+			task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP);
 		} else {
 			pr_debug("task_no[%d] - Did nothing\n", task->task_no);
 			ret++;
@@ -2074,18 +2071,18 @@ static void transport_task_timeout_handler(unsigned long data)
 	pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (task->task_flags & TF_STOP) {
+	if (task->task_flags & TF_TIMER_STOP) {
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		return;
 	}
-	task->task_flags &= ~TF_RUNNING;
+	task->task_flags &= ~TF_TIMER_RUNNING;
 	/*
 	 * Determine if transport_complete_task() has already been called.
 	 */
-	if (!atomic_read(&task->task_active)) {
-		pr_debug("transport task: %p cmd: %p timeout task_active"
-			" == 0\n", task, cmd);
+	if (!(task->task_flags & TF_ACTIVE)) {
+		pr_debug("transport task: %p cmd: %p timeout !TF_ACTIVE\n",
+			task, cmd);
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		return;
 	}
@@ -2094,12 +2091,12 @@ static void transport_task_timeout_handler(unsigned long data)
 	atomic_inc(&cmd->t_transport_timeout);
 	cmd->t_tasks_failed = 1;
-	atomic_set(&task->task_timeout, 1);
+	task->task_flags |= TF_TIMEOUT;
 	task->task_error_status = PYX_TRANSPORT_TASK_TIMEOUT;
 	task->task_scsi_status = 1;
-	if (atomic_read(&task->task_stop)) {
-		pr_debug("transport task: %p cmd: %p timeout task_stop"
+	if (task->task_flags & TF_REQUEST_STOP) {
+		pr_debug("transport task: %p cmd: %p timeout TF_REQUEST_STOP"
 			" == 1\n", task, cmd);
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		complete(&task->task_stop_comp);
@@ -2129,7 +2126,7 @@ static void transport_start_task_timer(struct se_task *task)
 	struct se_device *dev = task->task_se_cmd->se_dev;
 	int timeout;
-	if (task->task_flags & TF_RUNNING)
+	if (task->task_flags & TF_TIMER_RUNNING)
 		return;
 	/*
 	 * If the task_timeout is disabled, exit now.
@@ -2143,7 +2140,7 @@ static void transport_start_task_timer(struct se_task *task)
 	task->task_timer.data = (unsigned long) task;
 	task->task_timer.function = transport_task_timeout_handler;
-	task->task_flags |= TF_RUNNING;
+	task->task_flags |= TF_TIMER_RUNNING;
 	add_timer(&task->task_timer);
 #if 0
 	pr_debug("Starting task timer for cmd: %p task: %p seconds:"
@@ -2158,17 +2155,17 @@ void __transport_stop_task_timer(struct se_task *task, unsigned long *flags)
 {
 	struct se_cmd *cmd = task->task_se_cmd;
-	if (!task->task_flags & TF_RUNNING)
+	if (!(task->task_flags & TF_TIMER_RUNNING))
 		return;
-	task->task_flags |= TF_STOP;
+	task->task_flags |= TF_TIMER_STOP;
 	spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
 	del_timer_sync(&task->task_timer);
 	spin_lock_irqsave(&cmd->t_state_lock, *flags);
-	task->task_flags &= ~TF_RUNNING;
-	task->task_flags &= ~TF_STOP;
+	task->task_flags &= ~TF_TIMER_RUNNING;
+	task->task_flags &= ~TF_TIMER_STOP;
 }
 static void transport_stop_all_task_timers(struct se_cmd *cmd)
@@ -2360,8 +2357,7 @@ check_depth:
 	cmd = task->task_se_cmd;
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	atomic_set(&task->task_active, 1);
-	atomic_set(&task->task_sent, 1);
+	task->task_flags |= (TF_ACTIVE | TF_SENT);
 	atomic_inc(&cmd->t_task_cdbs_sent);
 	if (atomic_read(&cmd->t_task_cdbs_sent) ==
@@ -2379,7 +2375,9 @@ check_depth:
 		error = cmd->transport_emulate_cdb(cmd);
 		if (error != 0) {
 			cmd->transport_error_status = error;
-			atomic_set(&task->task_active, 0);
+			spin_lock_irqsave(&cmd->t_state_lock, flags);
+			task->task_flags &= ~TF_ACTIVE;
+			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 			atomic_set(&cmd->transport_sent, 0);
 			transport_stop_tasks_for_cmd(cmd);
 			transport_generic_request_failure(cmd, dev, 0, 1);
@@ -2415,7 +2413,9 @@ check_depth:
 		if (error != 0) {
 			cmd->transport_error_status = error;
-			atomic_set(&task->task_active, 0);
+			spin_lock_irqsave(&cmd->t_state_lock, flags);
+			task->task_flags &= ~TF_ACTIVE;
+			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 			atomic_set(&cmd->transport_sent, 0);
 			transport_stop_tasks_for_cmd(cmd);
 			transport_generic_request_failure(cmd, dev, 0, 1);
@@ -3613,7 +3613,7 @@ static void transport_free_dev_tasks(struct se_cmd *cmd)
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	list_for_each_entry_safe(task, task_tmp,
 				&cmd->t_task_list, t_list) {
-		if (atomic_read(&task->task_active))
+		if (task->task_flags & TF_ACTIVE)
 			continue;
 		kfree(task->task_sg_bidi);

include/target/target_core_base.h

@@ -72,9 +72,13 @@ enum transport_tpg_type_table {
 };
 /* Used for generate timer flags */
-enum timer_flags_table {
-	TF_RUNNING = 0x01,
-	TF_STOP = 0x02,
+enum se_task_flags {
+	TF_ACTIVE = (1 << 0),
+	TF_SENT = (1 << 1),
+	TF_TIMEOUT = (1 << 2),
+	TF_REQUEST_STOP = (1 << 3),
+	TF_TIMER_RUNNING = (1 << 4),
+	TF_TIMER_STOP = (1 << 5),
 };
 /* Special transport agnostic struct se_cmd->t_states */
@@ -413,11 +417,7 @@ struct se_task {
 	enum dma_data_direction task_data_direction;
 	struct se_cmd *task_se_cmd;
 	struct completion task_stop_comp;
-	atomic_t task_active;
 	atomic_t task_execute_queue;
-	atomic_t task_timeout;
-	atomic_t task_sent;
-	atomic_t task_stop;
 	atomic_t task_state_active;
 	struct timer_list task_timer;
 	struct list_head t_list;
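Usage note (an illustrative sketch, not code from this commit): with the new se_task_flags, code that needs to stop an in-flight task follows the same sequence the stop paths above use, testing and clearing bits only under cmd->t_state_lock and dropping the lock just to sleep on task_stop_comp. The helper below is hypothetical and condenses that sequence:

#include <linux/spinlock.h>
#include <linux/completion.h>
#include <target/target_core_base.h>	/* struct se_cmd, struct se_task, TF_* flags */

/* Hypothetical helper condensing the stop sequence from the diffs above. */
static int sketch_stop_one_task(struct se_cmd *cmd, struct se_task *task)
{
	unsigned long flags;
	int stopped = 0;

	spin_lock_irqsave(&cmd->t_state_lock, flags);
	if (task->task_flags & TF_ACTIVE) {
		/* ask the backend to stop, then wait with the lock dropped */
		task->task_flags |= TF_REQUEST_STOP;
		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
		wait_for_completion(&task->task_stop_comp);

		spin_lock_irqsave(&cmd->t_state_lock, flags);
		task->task_flags &= ~(TF_ACTIVE | TF_REQUEST_STOP);
		stopped = 1;
	}
	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
	return stopped;
}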