ptrace: introduce signal_wake_up_state() and ptrace_signal_wake_up()

Cleanup and preparation for the next change.

signal_wake_up(resume => true) is overused. None of the ptrace/job-control
callers actually wants to wake up a TASK_WAKEKILL task, but they cannot
specify the necessary wake mask.

Turn signal_wake_up() into signal_wake_up_state(state), reintroduce
signal_wake_up() as a trivial helper, and add ptrace_signal_wake_up()
which adds __TASK_TRACED.

This way ptrace_signal_wake_up() can work "inside" ptrace_request()
even if the tracee doesn't have the TASK_WAKEKILL bit set.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
commit 910ffdb18a (parent 9a9284153d)
Oleg Nesterov authored on 2013-01-21 20:47:41 +01:00; committed by Linus Torvalds
3 changed files with 18 additions and 15 deletions
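
To make the commit message concrete, here is a stand-alone user-space sketch of the new helpers (a reading aid only, not kernel code: struct task, wake_up_state() and kick_process() below are toy stand-ins, and the flag values are illustrative). It shows why the __TASK_TRACED-based wrapper can wake a tracee whose state no longer contains TASK_WAKEKILL, while plain signal_wake_up() cannot, which is what the follow-up change relies on.

/*
 * Stand-alone model of the helpers introduced by this commit.  The flag
 * values mirror include/linux/sched.h of this era; wake_up_state() and
 * kick_process() are reduced to toy stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

#define TASK_INTERRUPTIBLE	0x0001
#define __TASK_TRACED		0x0008
#define TASK_WAKEKILL		0x0100

struct task { unsigned int state; };	/* 0 means TASK_RUNNING */

/* Model of try_to_wake_up(): succeeds only if the task's state matches the mask. */
static bool wake_up_state(struct task *t, unsigned int mask)
{
	if (!(t->state & mask))
		return false;
	t->state = 0;
	return true;
}

/* In the kernel this IPIs the task's CPU so a running task re-checks signals. */
static void kick_process(struct task *t) { (void)t; }

static void signal_wake_up_state(struct task *t, unsigned int state)
{
	/* set_tsk_thread_flag(t, TIF_SIGPENDING) is elided in this model. */
	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
}

static void signal_wake_up(struct task *t, bool resume)
{
	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
}

static void ptrace_signal_wake_up(struct task *t, bool resume)
{
	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}

int main(void)
{
	/* A tracee sleeping in __TASK_TRACED with the TASK_WAKEKILL bit cleared. */
	struct task tracee = { .state = __TASK_TRACED };

	signal_wake_up(&tracee, true);
	printf("after signal_wake_up(true):        state=0x%04x\n", tracee.state);

	ptrace_signal_wake_up(&tracee, true);
	printf("after ptrace_signal_wake_up(true): state=0x%04x\n", tracee.state);
	return 0;
}

In this model the first call leaves the task at 0x0008 (still traced) because TASK_WAKEKILL | TASK_INTERRUPTIBLE matches nothing, while the second clears the state to 0x0000; the real kernel relies on the same mask test inside try_to_wake_up().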

include/linux/sched.h

@@ -2714,7 +2714,16 @@ static inline void thread_group_cputime_init(struct signal_struct *sig)
 extern void recalc_sigpending_and_wake(struct task_struct *t);
 extern void recalc_sigpending(void);
 
-extern void signal_wake_up(struct task_struct *t, int resume_stopped);
+extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
+
+static inline void signal_wake_up(struct task_struct *t, bool resume)
+{
+	signal_wake_up_state(t, resume ? TASK_WAKEKILL : 0);
+}
+static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
+{
+	signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
+}
 
 /*
  * Wrappers for p->thread_info->cpu access. No-op on UP.

kernel/ptrace.c

@@ -117,7 +117,7 @@ void __ptrace_unlink(struct task_struct *child)
 	 * TASK_KILLABLE sleeps.
 	 */
 	if (child->jobctl & JOBCTL_STOP_PENDING || task_is_traced(child))
-		signal_wake_up(child, task_is_traced(child));
+		ptrace_signal_wake_up(child, true);
 
 	spin_unlock(&child->sighand->siglock);
 }
@@ -317,7 +317,7 @@ static int ptrace_attach(struct task_struct *task, long request,
 	 */
 	if (task_is_stopped(task) &&
 	    task_set_jobctl_pending(task, JOBCTL_TRAP_STOP | JOBCTL_TRAPPING))
-		signal_wake_up(task, 1);
+		signal_wake_up_state(task, __TASK_STOPPED);
 
 	spin_unlock(&task->sighand->siglock);
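
A side note on the ptrace_attach() hunk above (again a reading aid, not part of the patch): the task being attached has just been checked with task_is_stopped(), i.e. it is group-stopped rather than traced, so the precise state bit to pass is __TASK_STOPPED. A minimal stand-alone check, with flag values that mirror the kernel's definitions but are used here purely for illustration:

#include <assert.h>

#define TASK_INTERRUPTIBLE	0x0001
#define __TASK_STOPPED		0x0004
#define __TASK_TRACED		0x0008
#define TASK_WAKEKILL		0x0100
#define TASK_STOPPED		(TASK_WAKEKILL | __TASK_STOPPED)

int main(void)
{
	unsigned int state = TASK_STOPPED;	/* the task ptrace_attach() just found stopped */

	/* signal_wake_up_state(task, __TASK_STOPPED) matches via the __TASK_STOPPED bit... */
	assert(state & (__TASK_STOPPED | TASK_INTERRUPTIBLE));
	/* ...whereas a __TASK_TRACED based mask would not match this task yet. */
	assert(!(state & (__TASK_TRACED | TASK_INTERRUPTIBLE)));
	return 0;
}

Either way the wake-up no longer has to piggy-back on the TASK_WAKEKILL bit, which is the point of the new interface.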
@@ -737,7 +737,7 @@ int ptrace_request(struct task_struct *child, long request,
		 * tracee into STOP.
		 */
		if (likely(task_set_jobctl_pending(child, JOBCTL_TRAP_STOP)))
-			signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
+			ptrace_signal_wake_up(child, child->jobctl & JOBCTL_LISTENING);
 
		unlock_task_sighand(child, &flags);
		ret = 0;
@@ -763,7 +763,7 @@ int ptrace_request(struct task_struct *child, long request,
			 * start of this trap and now. Trigger re-trap.
			 */
			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
-				signal_wake_up(child, true);
+				ptrace_signal_wake_up(child, true);
			ret = 0;
		}
		unlock_task_sighand(child, &flags);

kernel/signal.c

@@ -680,23 +680,17 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
  * No need to set need_resched since signal event passing
  * goes through ->blocked
  */
-void signal_wake_up(struct task_struct *t, int resume)
+void signal_wake_up_state(struct task_struct *t, unsigned int state)
 {
-	unsigned int mask;
-
 	set_tsk_thread_flag(t, TIF_SIGPENDING);
-
 	/*
-	 * For SIGKILL, we want to wake it up in the stopped/traced/killable
+	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
	 * case. We don't check t->state here because there is a race with it
	 * executing another processor and just now entering stopped state.
	 * By using wake_up_state, we ensure the process will wake up and
	 * handle its death signal.
	 */
-	mask = TASK_INTERRUPTIBLE;
-	if (resume)
-		mask |= TASK_WAKEKILL;
-	if (!wake_up_state(t, mask))
+	if (!wake_up_state(t, state | TASK_INTERRUPTIBLE))
		kick_process(t);
 }
@@ -844,7 +838,7 @@ static void ptrace_trap_notify(struct task_struct *t)
	assert_spin_locked(&t->sighand->siglock);
 
	task_set_jobctl_pending(t, JOBCTL_TRAP_NOTIFY);
-	signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
+	ptrace_signal_wake_up(t, t->jobctl & JOBCTL_LISTENING);
 }
 
 /*