linux-2.6/kernel/trace/trace_sched_switch.c
Steven Rostedt 38697053fa ftrace: preempt disable over interrupt disable
With the new ring buffer infrastructure in ftrace, I'm trying to make
ftrace a little more lightweight.

This patch converts a lot of the local_irq_save/restore into
preempt_disable/enable.  The original preempt count in a lot of cases
has to be sent in as a parameter so that it can be recorded correctly.
Some places were recording it incorrectly before anyway.
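
For illustration only (a generic sketch, not an exact hunk from this patch),
a converted trace site now samples the preempt count before it disables
anything and hands the value down to be recorded:

  int pc;

  pc = preempt_count();     /* the caller's count, recorded unmodified */
  preempt_disable();
  /* ... write the event, passing pc through to the trace function ... */
  preempt_enable();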

This is also laying the groundwork to make ftrace a little bit
more reentrant and to remove all locking. The function tracers must
still protect against reentrancy.

Note: All the function tracers must be careful when using preempt_disable.
  They must do the following:

  resched = need_resched();
  preempt_disable_notrace();
  [...]
  if (resched)
	preempt_enable_no_resched_notrace();
  else
	preempt_enable_notrace();

The reason is that if this function traces schedule() itself, the
preempt_enable_notrace() will cause a schedule, which will lead
us into a recursive failure.

If we needed to reschedule before calling preempt_disable, we
should have already scheduled. Since we did not, it is most
likely that we should not, and we are probably inside a schedule
function.

If resched was not set, we still need to catch the need_resched
flag being set while preemption was off, and the if/else at the
end will catch that for us.
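
Put together, a function tracer callback applying this rule looks roughly
like the following (sketch only; the callback and the record helper are
illustrative names, not part of this patch):

  static void my_func_trace(unsigned long ip, unsigned long parent_ip)
  {
          int resched;

          resched = need_resched();       /* was a resched already pending? */
          preempt_disable_notrace();

          my_record_entry(ip, parent_ip); /* whatever the tracer logs */

          if (resched)
                  /* a resched was pending before we entered: don't trigger it */
                  preempt_enable_no_resched_notrace();
          else
                  /* safe to let the enable path check need_resched */
                  preempt_enable_notrace();
  }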

Signed-off-by: Steven Rostedt <srostedt@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
2008-10-14 10:39:09 +02:00


/*
 * trace context switch
 *
 * Copyright (C) 2007 Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/debugfs.h>
#include <linux/kallsyms.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <trace/sched.h>

#include "trace.h"

static struct trace_array *ctx_trace;
static int __read_mostly tracer_enabled;
static atomic_t sched_ref;
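
/*
 * sched_switch tracepoint probe: record the command lines of prev and
 * next and, while the tracer is enabled, write a context-switch entry
 * into this CPU's buffer unless tracing is already in progress on it.
 */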
static void
probe_sched_switch(struct rq *__rq, struct task_struct *prev,
                   struct task_struct *next)
{
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        if (!atomic_read(&sched_ref))
                return;

        tracing_record_cmdline(prev);
        tracing_record_cmdline(next);

        if (!tracer_enabled)
                return;

        pc = preempt_count();
        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = ctx_trace->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}
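
/*
 * sched_wakeup/sched_wakeup_new tracepoint probe: log a wakeup entry
 * for wakee, attributed to current, in this CPU's buffer.
 */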
static void
probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
{
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu, pc;

        if (!likely(tracer_enabled))
                return;

        pc = preempt_count();
        tracing_record_cmdline(current);

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = ctx_trace->data[cpu];
        disabled = atomic_inc_return(&data->disabled);

        if (likely(disabled == 1))
                tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
                                           flags, pc);

        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}
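
/* Take a fresh start timestamp and clear each online CPU's buffer. */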
static void sched_switch_reset(struct trace_array *tr)
{
        int cpu;

        tr->time_start = ftrace_now(tr->cpu);

        for_each_online_cpu(cpu)
                tracing_reset(tr, cpu);
}
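
/*
 * Attach the probes to the scheduler tracepoints; on failure, unwind
 * whatever was already registered.
 */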
static int tracing_sched_register(void)
{
        int ret;

        ret = register_trace_sched_wakeup(probe_sched_wakeup);
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_wakeup\n");
                return ret;
        }

        ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
        if (ret) {
                pr_info("wakeup trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_wakeup_new\n");
                goto fail_deprobe;
        }

        ret = register_trace_sched_switch(probe_sched_switch);
        if (ret) {
                pr_info("sched trace: Couldn't activate tracepoint"
                        " probe to kernel_sched_schedule\n");
                goto fail_deprobe_wake_new;
        }

        return ret;
fail_deprobe_wake_new:
        unregister_trace_sched_wakeup_new(probe_sched_wakeup);
fail_deprobe:
        unregister_trace_sched_wakeup(probe_sched_wakeup);
        return ret;
}

static void tracing_sched_unregister(void)
{
        unregister_trace_sched_switch(probe_sched_switch);
        unregister_trace_sched_wakeup_new(probe_sched_wakeup);
        unregister_trace_sched_wakeup(probe_sched_wakeup);
}
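
/*
 * Reference-counted start/stop: the tracepoint probes are registered
 * for the first user and unregistered when the last user goes away.
 */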
static void tracing_start_sched_switch(void)
{
        long ref;

        ref = atomic_inc_return(&sched_ref);
        if (ref == 1)
                tracing_sched_register();
}

static void tracing_stop_sched_switch(void)
{
        long ref;

        ref = atomic_dec_and_test(&sched_ref);
        if (ref)
                tracing_sched_unregister();
}

void tracing_start_cmdline_record(void)
{
        tracing_start_sched_switch();
}

void tracing_stop_cmdline_record(void)
{
        tracing_stop_sched_switch();
}

static void start_sched_trace(struct trace_array *tr)
{
        sched_switch_reset(tr);
        tracing_start_cmdline_record();
        tracer_enabled = 1;
}

static void stop_sched_trace(struct trace_array *tr)
{
        tracer_enabled = 0;
        tracing_stop_cmdline_record();
}

static void sched_switch_trace_init(struct trace_array *tr)
{
        ctx_trace = tr;

        if (tr->ctrl)
                start_sched_trace(tr);
}

static void sched_switch_trace_reset(struct trace_array *tr)
{
        if (tr->ctrl)
                stop_sched_trace(tr);
}

static void sched_switch_trace_ctrl_update(struct trace_array *tr)
{
        /* When starting a new trace, reset the buffers */
        if (tr->ctrl)
                start_sched_trace(tr);
        else
                stop_sched_trace(tr);
}

static struct tracer sched_switch_trace __read_mostly =
{
        .name = "sched_switch",
        .init = sched_switch_trace_init,
        .reset = sched_switch_trace_reset,
        .ctrl_update = sched_switch_trace_ctrl_update,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest = trace_selftest_startup_sched_switch,
#endif
};

__init static int init_sched_switch_trace(void)
{
        int ret = 0;

        if (atomic_read(&sched_ref))
                ret = tracing_sched_register();
        if (ret) {
                pr_info("error registering scheduler trace\n");
                return ret;
        }

        return register_tracer(&sched_switch_trace);
}
device_initcall(init_sched_switch_trace);