kprobes: Separate kprobe optimizing code from optimizer
Separate the kprobe optimizing code from the optimizer; this will make it easy to introduce unoptimizing code into the optimizer. Signed-off-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com> Cc: Rusty Russell <rusty@rustcorp.com.au> Cc: Frederic Weisbecker <fweisbec@gmail.com> Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com> Cc: Jason Baron <jbaron@redhat.com> Cc: Mathieu Desnoyers <mathieu.desnoyers@efficios.com> Cc: 2nddept-manager@sdl.hitachi.co.jp LKML-Reference: <20101203095403.2961.91201.stgit@ltc236.sdl.hitachi.co.jp> Signed-off-by: Ingo Molnar <mingo@elte.hu>
This commit is contained in:
parent
6f0f1dd719
commit
61f4e13ffd
|
@ -427,26 +427,14 @@ static void kprobe_optimizer(struct work_struct *work);
|
||||||
static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
|
static DECLARE_DELAYED_WORK(optimizing_work, kprobe_optimizer);
|
||||||
#define OPTIMIZE_DELAY 5
|
#define OPTIMIZE_DELAY 5
|
||||||
|
|
||||||
/* Kprobe jump optimizer */
|
/*
|
||||||
static __kprobes void kprobe_optimizer(struct work_struct *work)
|
* Optimize (replace a breakpoint with a jump) kprobes listed on
|
||||||
|
* optimizing_list.
|
||||||
|
*/
|
||||||
|
static __kprobes void do_optimize_kprobes(void)
|
||||||
{
|
{
|
||||||
struct optimized_kprobe *op, *tmp;
|
struct optimized_kprobe *op, *tmp;
|
||||||
|
|
||||||
/* Lock modules while optimizing kprobes */
|
|
||||||
mutex_lock(&module_mutex);
|
|
||||||
mutex_lock(&kprobe_mutex);
|
|
||||||
if (kprobes_all_disarmed || !kprobes_allow_optimization)
|
|
||||||
goto end;
|
|
||||||
|
|
||||||
/*
|
|
||||||
* Wait for quiesence period to ensure all running interrupts
|
|
||||||
* are done. Because optprobe may modify multiple instructions
|
|
||||||
* there is a chance that Nth instruction is interrupted. In that
|
|
||||||
* case, running interrupt can return to 2nd-Nth byte of jump
|
|
||||||
* instruction. This wait is for avoiding it.
|
|
||||||
*/
|
|
||||||
synchronize_sched();
|
|
||||||
|
|
||||||
/*
|
/*
|
||||||
* The optimization/unoptimization refers online_cpus via
|
* The optimization/unoptimization refers online_cpus via
|
||||||
* stop_machine() and cpu-hotplug modifies online_cpus.
|
* stop_machine() and cpu-hotplug modifies online_cpus.
|
||||||
|
@ -467,6 +455,27 @@ static __kprobes void kprobe_optimizer(struct work_struct *work)
|
||||||
}
|
}
|
||||||
mutex_unlock(&text_mutex);
|
mutex_unlock(&text_mutex);
|
||||||
put_online_cpus();
|
put_online_cpus();
|
||||||
|
}
|
||||||
|
|
||||||
|
/* Kprobe jump optimizer */
|
||||||
|
static __kprobes void kprobe_optimizer(struct work_struct *work)
|
||||||
|
{
|
||||||
|
/* Lock modules while optimizing kprobes */
|
||||||
|
mutex_lock(&module_mutex);
|
||||||
|
mutex_lock(&kprobe_mutex);
|
||||||
|
if (kprobes_all_disarmed || !kprobes_allow_optimization)
|
||||||
|
goto end;
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Wait for quiesence period to ensure all running interrupts
|
||||||
|
* are done. Because optprobe may modify multiple instructions
|
||||||
|
* there is a chance that Nth instruction is interrupted. In that
|
||||||
|
* case, running interrupt can return to 2nd-Nth byte of jump
|
||||||
|
* instruction. This wait is for avoiding it.
|
||||||
|
*/
|
||||||
|
synchronize_sched();
|
||||||
|
|
||||||
|
do_optimize_kprobes();
|
||||||
end:
|
end:
|
||||||
mutex_unlock(&kprobe_mutex);
|
mutex_unlock(&kprobe_mutex);
|
||||||
mutex_unlock(&module_mutex);
|
mutex_unlock(&module_mutex);
|
||||||
|
|
Reference in New Issue