
mm-tracepoint: rename page-free events

Rename mm_page_free_direct into mm_page_free and mm_pagevec_free into
mm_page_free_batched

Since v2.6.33-5426-gc475dab the kernel triggers mm_page_free_direct for
all freed pages, not only for directly freed ones, so let's name it properly.
For pages freed via a page list we also trigger the mm_page_free_batched event.

Signed-off-by: Konstantin Khlebnikov <khlebnikov@openvz.org>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Cc: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Author: Konstantin Khlebnikov, 2012-01-10 15:07:09 -08:00 (committed by Linus Torvalds)
Commit: b413d48aa7 (parent da066ad357)
4 changed files with 20 additions and 20 deletions
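To see the renamed events in practice, they can be switched on through the kmem
event directory in tracefs before a trace is collected. The sketch below is
illustrative and not part of this commit; it assumes debugfs is mounted at
/sys/kernel/debug, root privileges, and a kernel that already carries this
rename (older kernels expose mm_page_free_direct and mm_pagevec_free instead).

#!/usr/bin/perl
# Illustrative sketch, not part of the commit: enable the renamed kmem
# page-free tracepoints via tracefs. Assumes debugfs is mounted at
# /sys/kernel/debug and that this runs as root.
use strict;
use warnings;

my $kmem = "/sys/kernel/debug/tracing/events/kmem";

for my $event ("mm_page_free", "mm_page_free_batched") {
	my $path = "$kmem/$event/enable";
	open(my $fh, '>', $path)
		or die "cannot enable $event via $path: $!\n";
	print $fh "1\n";	# writing 1 enables the tracepoint, 0 disables it
	close($fh);
}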


@@ -40,8 +40,8 @@ but the call_site can usually be used to extrapolate that information.
 ==================
 mm_page_alloc             page=%p pfn=%lu order=%d migratetype=%d gfp_flags=%s
 mm_page_alloc_zone_locked page=%p pfn=%lu order=%u migratetype=%d cpu=%d percpu_refill=%d
-mm_page_free_direct       page=%p pfn=%lu order=%d
-mm_pagevec_free           page=%p pfn=%lu order=%d cold=%d
+mm_page_free              page=%p pfn=%lu order=%d
+mm_page_free_batched      page=%p pfn=%lu order=%d cold=%d

 These four events deal with page allocation and freeing. mm_page_alloc is
 a simple indicator of page allocator activity. Pages may be allocated from

@@ -53,13 +53,13 @@ amounts of activity imply high activity on the zone->lock. Taking this lock
 impairs performance by disabling interrupts, dirtying cache lines between
 CPUs and serialising many CPUs.

-When a page is freed directly by the caller, the mm_page_free_direct event
+When a page is freed directly by the caller, the only mm_page_free event
 is triggered. Significant amounts of activity here could indicate that the
 callers should be batching their activities.

-When pages are freed using a pagevec, the mm_pagevec_free is
-triggered. Broadly speaking, pages are taken off the LRU lock in bulk and
-freed in batch with a pagevec. Significant amounts of activity here could
+When pages are freed in batch, the also mm_page_free_batched is triggered.
+Broadly speaking, pages are taken off the LRU lock in bulk and
+freed in batch with a page list. Significant amounts of activity here could
 indicate that the system is under memory pressure and can also indicate
 contention on the zone->lru_lock.
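
To complement the documentation above, a small consumer can tally the two
renamed events straight from raw ftrace output. This sketch is illustrative and
not part of the commit; it assumes both kmem events are enabled and that trace
output (for example from trace_pipe) is piped to standard input.

#!/usr/bin/perl
# Illustrative sketch, not part of the commit: count mm_page_free and
# mm_page_free_batched occurrences in raw ftrace output read from stdin.
use strict;
use warnings;

my %count = (mm_page_free => 0, mm_page_free_batched => 0);

while (my $line = <STDIN>) {
	# Trace lines contain "<event name>: page=... pfn=... order=..."
	$count{$1}++ if $line =~ /\b(mm_page_free(?:_batched)?):/;
}

printf "%-22s %d\n", $_, $count{$_} for sort keys %count;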


@ -17,8 +17,8 @@ use Getopt::Long;
# Tracepoint events # Tracepoint events
use constant MM_PAGE_ALLOC => 1; use constant MM_PAGE_ALLOC => 1;
use constant MM_PAGE_FREE_DIRECT => 2; use constant MM_PAGE_FREE => 2;
use constant MM_PAGEVEC_FREE => 3; use constant MM_PAGE_FREE_BATCHED => 3;
use constant MM_PAGE_PCPU_DRAIN => 4; use constant MM_PAGE_PCPU_DRAIN => 4;
use constant MM_PAGE_ALLOC_ZONE_LOCKED => 5; use constant MM_PAGE_ALLOC_ZONE_LOCKED => 5;
use constant MM_PAGE_ALLOC_EXTFRAG => 6; use constant MM_PAGE_ALLOC_EXTFRAG => 6;
@ -223,10 +223,10 @@ EVENT_PROCESS:
# Perl Switch() sucks majorly # Perl Switch() sucks majorly
if ($tracepoint eq "mm_page_alloc") { if ($tracepoint eq "mm_page_alloc") {
$perprocesspid{$process_pid}->{MM_PAGE_ALLOC}++; $perprocesspid{$process_pid}->{MM_PAGE_ALLOC}++;
} elsif ($tracepoint eq "mm_page_free_direct") { } elsif ($tracepoint eq "mm_page_free") {
$perprocesspid{$process_pid}->{MM_PAGE_FREE_DIRECT}++; $perprocesspid{$process_pid}->{MM_PAGE_FREE}++
} elsif ($tracepoint eq "mm_pagevec_free") { } elsif ($tracepoint eq "mm_page_free_batched") {
$perprocesspid{$process_pid}->{MM_PAGEVEC_FREE}++; $perprocesspid{$process_pid}->{MM_PAGE_FREE_BATCHED}++;
} elsif ($tracepoint eq "mm_page_pcpu_drain") { } elsif ($tracepoint eq "mm_page_pcpu_drain") {
$perprocesspid{$process_pid}->{MM_PAGE_PCPU_DRAIN}++; $perprocesspid{$process_pid}->{MM_PAGE_PCPU_DRAIN}++;
$perprocesspid{$process_pid}->{STATE_PCPU_PAGES_DRAINED}++; $perprocesspid{$process_pid}->{STATE_PCPU_PAGES_DRAINED}++;
@ -336,8 +336,8 @@ sub dump_stats {
$process_pid, $process_pid,
$stats{$process_pid}->{MM_PAGE_ALLOC}, $stats{$process_pid}->{MM_PAGE_ALLOC},
$stats{$process_pid}->{MM_PAGE_ALLOC_ZONE_LOCKED}, $stats{$process_pid}->{MM_PAGE_ALLOC_ZONE_LOCKED},
$stats{$process_pid}->{MM_PAGE_FREE_DIRECT}, $stats{$process_pid}->{MM_PAGE_FREE},
$stats{$process_pid}->{MM_PAGEVEC_FREE}, $stats{$process_pid}->{MM_PAGE_FREE_BATCHED},
$stats{$process_pid}->{MM_PAGE_PCPU_DRAIN}, $stats{$process_pid}->{MM_PAGE_PCPU_DRAIN},
$stats{$process_pid}->{HIGH_PCPU_DRAINS}, $stats{$process_pid}->{HIGH_PCPU_DRAINS},
$stats{$process_pid}->{HIGH_PCPU_REFILLS}, $stats{$process_pid}->{HIGH_PCPU_REFILLS},
@ -364,8 +364,8 @@ sub aggregate_perprocesspid() {
$perprocess{$process}->{MM_PAGE_ALLOC} += $perprocesspid{$process_pid}->{MM_PAGE_ALLOC}; $perprocess{$process}->{MM_PAGE_ALLOC} += $perprocesspid{$process_pid}->{MM_PAGE_ALLOC};
$perprocess{$process}->{MM_PAGE_ALLOC_ZONE_LOCKED} += $perprocesspid{$process_pid}->{MM_PAGE_ALLOC_ZONE_LOCKED}; $perprocess{$process}->{MM_PAGE_ALLOC_ZONE_LOCKED} += $perprocesspid{$process_pid}->{MM_PAGE_ALLOC_ZONE_LOCKED};
$perprocess{$process}->{MM_PAGE_FREE_DIRECT} += $perprocesspid{$process_pid}->{MM_PAGE_FREE_DIRECT}; $perprocess{$process}->{MM_PAGE_FREE} += $perprocesspid{$process_pid}->{MM_PAGE_FREE};
$perprocess{$process}->{MM_PAGEVEC_FREE} += $perprocesspid{$process_pid}->{MM_PAGEVEC_FREE}; $perprocess{$process}->{MM_PAGE_FREE_BATCHED} += $perprocesspid{$process_pid}->{MM_PAGE_FREE_BATCHED};
$perprocess{$process}->{MM_PAGE_PCPU_DRAIN} += $perprocesspid{$process_pid}->{MM_PAGE_PCPU_DRAIN}; $perprocess{$process}->{MM_PAGE_PCPU_DRAIN} += $perprocesspid{$process_pid}->{MM_PAGE_PCPU_DRAIN};
$perprocess{$process}->{HIGH_PCPU_DRAINS} += $perprocesspid{$process_pid}->{HIGH_PCPU_DRAINS}; $perprocess{$process}->{HIGH_PCPU_DRAINS} += $perprocesspid{$process_pid}->{HIGH_PCPU_DRAINS};
$perprocess{$process}->{HIGH_PCPU_REFILLS} += $perprocesspid{$process_pid}->{HIGH_PCPU_REFILLS}; $perprocess{$process}->{HIGH_PCPU_REFILLS} += $perprocesspid{$process_pid}->{HIGH_PCPU_REFILLS};


@@ -147,7 +147,7 @@ DEFINE_EVENT(kmem_free, kmem_cache_free,
 	TP_ARGS(call_site, ptr)
 );

-TRACE_EVENT(mm_page_free_direct,
+TRACE_EVENT(mm_page_free,

 	TP_PROTO(struct page *page, unsigned int order),

@@ -169,7 +169,7 @@ TRACE_EVENT(mm_page_free_direct,
 	__entry->order)
 );

-TRACE_EVENT(mm_pagevec_free,
+TRACE_EVENT(mm_page_free_batched,

 	TP_PROTO(struct page *page, int cold),


@@ -632,7 +632,7 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
 	int i;
 	int bad = 0;

-	trace_mm_page_free_direct(page, order);
+	trace_mm_page_free(page, order);
 	kmemcheck_free_shadow(page, order);

 	if (PageAnon(page))

@@ -1196,7 +1196,7 @@ void free_hot_cold_page_list(struct list_head *list, int cold)
 	struct page *page, *next;

 	list_for_each_entry_safe(page, next, list, lru) {
-		trace_mm_pagevec_free(page, cold);
+		trace_mm_page_free_batched(page, cold);
 		free_hot_cold_page(page, cold);
 	}
 }