perf_counter, x86: rework pmc_amd_save_disable_all() and pmc_amd_restore_all()

MSR reads and writes are expensive. This patch adds checks to avoid
them where possible.

[ Impact: micro-optimization on AMD CPUs ]
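
For illustration, a minimal userspace sketch of the pattern the patch
introduces (the mock_rdmsr()/mock_wrmsr() helpers, the counter layout, and
the name save_disable_all() are hypothetical stand-ins, not the kernel code
itself; the bit position matches ARCH_PERFMON_EVENTSEL0_ENABLE, bit 22):
skipping counters absent from active_mask avoids the MSR access entirely,
and checking the enable bit after the read skips the write when the
register is already in the desired state.

/*
 * Sketch of the read-then-conditionally-write pattern, with counters
 * tracking how many simulated MSR accesses actually happen.
 */
#include <stdio.h>
#include <stdint.h>

#define NR_COUNTERS	4
#define EVENTSEL_ENABLE	(1ULL << 22)	/* ARCH_PERFMON_EVENTSEL0_ENABLE */

static uint64_t mock_msr[NR_COUNTERS];	/* stands in for MSR_K7_EVNTSEL0 + idx */
static unsigned long active_mask = 0x5;	/* counters 0 and 2 are in use */
static unsigned int nr_reads, nr_writes;

static uint64_t mock_rdmsr(int idx)
{
	nr_reads++;
	return mock_msr[idx];
}

static void mock_wrmsr(int idx, uint64_t val)
{
	nr_writes++;
	mock_msr[idx] = val;
}

static void save_disable_all(void)
{
	int idx;

	for (idx = 0; idx < NR_COUNTERS; idx++) {
		uint64_t val;

		if (!(active_mask & (1UL << idx)))	/* test_bit() equivalent */
			continue;
		val = mock_rdmsr(idx);
		if (!(val & EVENTSEL_ENABLE))		/* already off: skip the write */
			continue;
		mock_wrmsr(idx, val & ~EVENTSEL_ENABLE);
	}
}

int main(void)
{
	mock_msr[0] = EVENTSEL_ENABLE;	/* only counter 0 is currently enabled */
	save_disable_all();
	printf("MSR reads: %u, MSR writes: %u\n", nr_reads, nr_writes);
	return 0;	/* prints: MSR reads: 2, MSR writes: 1 */
}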

Signed-off-by: Robert Richter <robert.richter@amd.com>
Cc: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <1241002046-8832-5-git-send-email-robert.richter@amd.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Robert Richter 2009-04-29 12:47:01 +02:00 committed by Ingo Molnar
parent 4138960a92
commit 4295ee6266
1 changed file with 14 additions and 10 deletions


@@ -334,11 +334,13 @@ static u64 pmc_amd_save_disable_all(void)
 	for (idx = 0; idx < nr_counters_generic; idx++) {
 		u64 val;
 
+		if (!test_bit(idx, cpuc->active_mask))
+			continue;
 		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
-		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE) {
-			val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
-			wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
-		}
+		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
+			continue;
+		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
+		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
 	}
 
 	return enabled;
@@ -372,13 +374,15 @@ static void pmc_amd_restore_all(u64 ctrl)
 		return;
 
 	for (idx = 0; idx < nr_counters_generic; idx++) {
-		if (test_bit(idx, cpuc->active_mask)) {
-			u64 val;
+		u64 val;
 
-			rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
-			val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
-			wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
-		}
+		if (!test_bit(idx, cpuc->active_mask))
+			continue;
+		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
+		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
+			continue;
+		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
+		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
 	}
 }
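
A note on the design: both loops now use early continues instead of nested
conditionals, which keeps the indentation flat (the usual kernel style) and
makes the new guards cheap to add. The guard trades an MSR read for a
possibly skipped MSR write, which is a net win since WRMSR is typically a
serializing instruction and considerably more expensive than RDMSR; the
read is needed anyway, because the other event-select bits must be
preserved when toggling the enable bit.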