From 3c7b4e6b8be4c16f1e6e5c558e33b7ff0db2dfaf Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Thu, 11 Jun 2009 13:22:39 +0100 Subject: [PATCH 01/12] kmemleak: Add the base support This patch adds the base support for the kernel memory leak detector. It traces the memory allocation/freeing in a way similar to the Boehm's conservative garbage collector, the difference being that the unreferenced objects are not freed but only shown in /sys/kernel/debug/kmemleak. Enabling this feature introduces an overhead to memory allocations. Signed-off-by: Catalin Marinas Cc: Ingo Molnar Acked-by: Pekka Enberg Cc: Andrew Morton Reviewed-by: Paul E. McKenney --- include/linux/kmemleak.h | 96 +++ init/main.c | 4 +- mm/kmemleak.c | 1498 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 1597 insertions(+), 1 deletion(-) create mode 100644 include/linux/kmemleak.h create mode 100644 mm/kmemleak.c diff --git a/include/linux/kmemleak.h b/include/linux/kmemleak.h new file mode 100644 index 00000000000..7796aed6cdd --- /dev/null +++ b/include/linux/kmemleak.h @@ -0,0 +1,96 @@ +/* + * include/linux/kmemleak.h + * + * Copyright (C) 2008 ARM Limited + * Written by Catalin Marinas + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#ifndef __KMEMLEAK_H +#define __KMEMLEAK_H + +#ifdef CONFIG_DEBUG_KMEMLEAK + +extern void kmemleak_init(void); +extern void kmemleak_alloc(const void *ptr, size_t size, int min_count, + gfp_t gfp); +extern void kmemleak_free(const void *ptr); +extern void kmemleak_padding(const void *ptr, unsigned long offset, + size_t size); +extern void kmemleak_not_leak(const void *ptr); +extern void kmemleak_ignore(const void *ptr); +extern void kmemleak_scan_area(const void *ptr, unsigned long offset, + size_t length, gfp_t gfp); +extern void kmemleak_no_scan(const void *ptr); + +static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, + int min_count, unsigned long flags, + gfp_t gfp) +{ + if (!(flags & SLAB_NOLEAKTRACE)) + kmemleak_alloc(ptr, size, min_count, gfp); +} + +static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags) +{ + if (!(flags & SLAB_NOLEAKTRACE)) + kmemleak_free(ptr); +} + +static inline void kmemleak_erase(void **ptr) +{ + *ptr = NULL; +} + +#else + +static inline void kmemleak_init(void) +{ +} +static inline void kmemleak_alloc(const void *ptr, size_t size, int min_count, + gfp_t gfp) +{ +} +static inline void kmemleak_alloc_recursive(const void *ptr, size_t size, + int min_count, unsigned long flags, + gfp_t gfp) +{ +} +static inline void kmemleak_free(const void *ptr) +{ +} +static inline void kmemleak_free_recursive(const void *ptr, unsigned long flags) +{ +} +static inline void kmemleak_not_leak(const void *ptr) +{ +} +static inline void kmemleak_ignore(const void *ptr) +{ +} +static inline void kmemleak_scan_area(const void *ptr, unsigned long offset, + size_t length, gfp_t gfp) +{ +} +static inline void 
kmemleak_erase(void **ptr) +{ +} +static inline void kmemleak_no_scan(const void *ptr) +{ +} + +#endif /* CONFIG_DEBUG_KMEMLEAK */ + +#endif /* __KMEMLEAK_H */ diff --git a/init/main.c b/init/main.c index bb7dc57eee3..9d759d68d7a 100644 --- a/init/main.c +++ b/init/main.c @@ -56,6 +56,7 @@ #include #include #include +#include #include #include #include @@ -603,6 +604,7 @@ asmlinkage void __init start_kernel(void) /* init some links before init_ISA_irqs() */ early_irq_init(); init_IRQ(); + prio_tree_init(); pidhash_init(); init_timers(); hrtimers_init(); @@ -654,6 +656,7 @@ asmlinkage void __init start_kernel(void) cpu_hotplug_init(); kmem_cache_init(); kmemtrace_init(); + kmemleak_init(); debug_objects_mem_init(); idr_init_cache(); setup_per_cpu_pageset(); @@ -663,7 +666,6 @@ asmlinkage void __init start_kernel(void) calibrate_delay(); pidmap_init(); pgtable_cache_init(); - prio_tree_init(); anon_vma_init(); #ifdef CONFIG_X86 if (efi_enabled) diff --git a/mm/kmemleak.c b/mm/kmemleak.c new file mode 100644 index 00000000000..58ec86c9e58 --- /dev/null +++ b/mm/kmemleak.c @@ -0,0 +1,1498 @@ +/* + * mm/kmemleak.c + * + * Copyright (C) 2008 ARM Limited + * Written by Catalin Marinas + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + * + * For more information on the algorithm and kmemleak usage, please see + * Documentation/kmemleak.txt. + * + * Notes on locking + * ---------------- + * + * The following locks and mutexes are used by kmemleak: + * + * - kmemleak_lock (rwlock): protects the object_list modifications and + * accesses to the object_tree_root. The object_list is the main list + * holding the metadata (struct kmemleak_object) for the allocated memory + * blocks. The object_tree_root is a priority search tree used to look-up + * metadata based on a pointer to the corresponding memory block. The + * kmemleak_object structures are added to the object_list and + * object_tree_root in the create_object() function called from the + * kmemleak_alloc() callback and removed in delete_object() called from the + * kmemleak_free() callback + * - kmemleak_object.lock (spinlock): protects a kmemleak_object. Accesses to + * the metadata (e.g. count) are protected by this lock. Note that some + * members of this structure may be protected by other means (atomic or + * kmemleak_lock). This lock is also held when scanning the corresponding + * memory block to avoid the kernel freeing it via the kmemleak_free() + * callback. This is less heavyweight than holding a global lock like + * kmemleak_lock during scanning + * - scan_mutex (mutex): ensures that only one thread may scan the memory for + * unreferenced objects at a time. The gray_list contains the objects which + * are already referenced or marked as false positives and need to be + * scanned. This list is only modified during a scanning episode when the + * scan_mutex is held. At the end of a scan, the gray_list is always empty. 
+ * Note that the kmemleak_object.use_count is incremented when an object is + * added to the gray_list and therefore cannot be freed + * - kmemleak_mutex (mutex): prevents multiple users of the "kmemleak" debugfs + * file together with modifications to the memory scanning parameters + * including the scan_thread pointer + * + * The kmemleak_object structures have a use_count incremented or decremented + * using the get_object()/put_object() functions. When the use_count becomes + * 0, this count can no longer be incremented and put_object() schedules the + * kmemleak_object freeing via an RCU callback. All calls to the get_object() + * function must be protected by rcu_read_lock() to avoid accessing a freed + * structure. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include + +/* + * Kmemleak configuration and common defines. + */ +#define MAX_TRACE 16 /* stack trace length */ +#define REPORTS_NR 50 /* maximum number of reported leaks */ +#define MSECS_MIN_AGE 5000 /* minimum object age for reporting */ +#define MSECS_SCAN_YIELD 10 /* CPU yielding period */ +#define SECS_FIRST_SCAN 60 /* delay before the first scan */ +#define SECS_SCAN_WAIT 600 /* subsequent auto scanning delay */ + +#define BYTES_PER_POINTER sizeof(void *) + +/* scanning area inside a memory block */ +struct kmemleak_scan_area { + struct hlist_node node; + unsigned long offset; + size_t length; +}; + +/* + * Structure holding the metadata for each allocated memory block. + * Modifications to such objects should be made while holding the + * object->lock. Insertions or deletions from object_list, gray_list or + * tree_node are already protected by the corresponding locks or mutex (see + * the notes on locking above). These objects are reference-counted + * (use_count) and freed using the RCU mechanism. 
+ */ +struct kmemleak_object { + spinlock_t lock; + unsigned long flags; /* object status flags */ + struct list_head object_list; + struct list_head gray_list; + struct prio_tree_node tree_node; + struct rcu_head rcu; /* object_list lockless traversal */ + /* object usage count; object freed when use_count == 0 */ + atomic_t use_count; + unsigned long pointer; + size_t size; + /* minimum number of pointers found before it is considered a leak */ + int min_count; + /* the total number of pointers found pointing to this object */ + int count; + /* memory ranges to be scanned inside an object (empty for all) */ + struct hlist_head area_list; + unsigned long trace[MAX_TRACE]; + unsigned int trace_len; + unsigned long jiffies; /* creation timestamp */ + pid_t pid; /* pid of the current task */ + char comm[TASK_COMM_LEN]; /* executable name */ +}; + +/* flag representing the memory block allocation status */ +#define OBJECT_ALLOCATED (1 << 0) +/* flag set after the first reporting of an unreferenced object */ +#define OBJECT_REPORTED (1 << 1) +/* flag set to not scan the object */ +#define OBJECT_NO_SCAN (1 << 2) + +/* the list of all allocated objects */ +static LIST_HEAD(object_list); +/* the list of gray-colored objects (see color_gray comment below) */ +static LIST_HEAD(gray_list); +/* prio search tree for object boundaries */ +static struct prio_tree_root object_tree_root; +/* rw_lock protecting the access to object_list and prio_tree_root */ +static DEFINE_RWLOCK(kmemleak_lock); + +/* allocation caches for kmemleak internal data */ +static struct kmem_cache *object_cache; +static struct kmem_cache *scan_area_cache; + +/* set if tracing memory operations is enabled */ +static atomic_t kmemleak_enabled = ATOMIC_INIT(0); +/* set in the late_initcall if there were no errors */ +static atomic_t kmemleak_initialized = ATOMIC_INIT(0); +/* enables or disables early logging of the memory operations */ +static atomic_t kmemleak_early_log = ATOMIC_INIT(1); +/* set if a fatal kmemleak error has occurred */ +static atomic_t kmemleak_error = ATOMIC_INIT(0); + +/* minimum and maximum addresses that may be valid pointers */ +static unsigned long min_addr = ULONG_MAX; +static unsigned long max_addr; + +/* used for yielding the CPU to other tasks during scanning */ +static unsigned long next_scan_yield; +static struct task_struct *scan_thread; +static unsigned long jiffies_scan_yield; +static unsigned long jiffies_min_age; +/* delay between automatic memory scannings */ +static signed long jiffies_scan_wait; +/* enables or disables the task stacks scanning */ +static int kmemleak_stack_scan; +/* mutex protecting the memory scanning */ +static DEFINE_MUTEX(scan_mutex); +/* mutex protecting the access to the /sys/kernel/debug/kmemleak file */ +static DEFINE_MUTEX(kmemleak_mutex); + +/* number of leaks reported (for limitation purposes) */ +static int reported_leaks; + +/* + * Early object allocation/freeing logging. Kmemleak is initialized after the + * kernel allocator. However, both the kernel allocator and kmemleak may + * allocate memory blocks which need to be tracked. Kmemleak defines an + * arbitrary buffer to hold the allocation/freeing information before it is + * fully initialized. + */ + +/* kmemleak operation type for early logging */ +enum { + KMEMLEAK_ALLOC, + KMEMLEAK_FREE, + KMEMLEAK_NOT_LEAK, + KMEMLEAK_IGNORE, + KMEMLEAK_SCAN_AREA, + KMEMLEAK_NO_SCAN +}; + +/* + * Structure holding the information passed to kmemleak callbacks during the + * early logging.
+ */ +struct early_log { + int op_type; /* kmemleak operation type */ + const void *ptr; /* allocated/freed memory block */ + size_t size; /* memory block size */ + int min_count; /* minimum reference count */ + unsigned long offset; /* scan area offset */ + size_t length; /* scan area length */ +}; + +/* early logging buffer and current position */ +static struct early_log early_log[200]; +static int crt_early_log; + +static void kmemleak_disable(void); + +/* + * Print a warning and dump the stack trace. + */ +#define kmemleak_warn(x...) do { \ + pr_warning(x); \ + dump_stack(); \ +} while (0) + +/* + * Macro invoked when a serious kmemleak condition occurred and cannot be + * recovered from. Kmemleak will be disabled and further allocation/freeing + * tracing will no longer be available. + */ +#define kmemleak_panic(x...) do { \ + kmemleak_warn(x); \ + kmemleak_disable(); \ +} while (0) + +/* + * Object colors, encoded with count and min_count: + * - white - orphan object, not enough references to it (count < min_count) + * - gray - not orphan, not marked as false positive (min_count == 0) or + * sufficient references to it (count >= min_count) + * - black - ignore, it doesn't contain references (e.g. text section) + * (min_count == -1). No function defined for this color. + * Newly created objects don't have any color assigned (object->count == -1) + * before the next memory scan when they become white. + */ +static int color_white(const struct kmemleak_object *object) +{ + return object->count != -1 && object->count < object->min_count; +} + +static int color_gray(const struct kmemleak_object *object) +{ + return object->min_count != -1 && object->count >= object->min_count; +} + +/* + * Objects are considered referenced if their color is gray and they have not + * been deleted. + */ +static int referenced_object(struct kmemleak_object *object) +{ + return (object->flags & OBJECT_ALLOCATED) && color_gray(object); +} + +/* + * Objects are considered unreferenced only if their color is white, they have + * not been deleted and have a minimum age to avoid false positives caused by + * pointers temporarily stored in CPU registers. + */ +static int unreferenced_object(struct kmemleak_object *object) +{ + return (object->flags & OBJECT_ALLOCATED) && color_white(object) && + time_is_before_eq_jiffies(object->jiffies + jiffies_min_age); +} + +/* + * Printing of the (un)referenced objects information, either to the seq file + * or to the kernel log. The print_referenced/print_unreferenced functions + * must be called with the object->lock held. + */ +#define print_helper(seq, x...) do { \ + struct seq_file *s = (seq); \ + if (s) \ + seq_printf(s, x); \ + else \ + pr_info(x); \ +} while (0) + +static void print_referenced(struct kmemleak_object *object) +{ + pr_info("kmemleak: referenced object 0x%08lx (size %zu)\n", + object->pointer, object->size); +} + +static void print_unreferenced(struct seq_file *seq, + struct kmemleak_object *object) +{ + int i; + + print_helper(seq, "kmemleak: unreferenced object 0x%08lx (size %zu):\n", + object->pointer, object->size); + print_helper(seq, " comm \"%s\", pid %d, jiffies %lu\n", + object->comm, object->pid, object->jiffies); + print_helper(seq, " backtrace:\n"); + + for (i = 0; i < object->trace_len; i++) { + void *ptr = (void *)object->trace[i]; + print_helper(seq, " [<%p>] %pS\n", ptr, ptr); + } +} + +/* + * Print the kmemleak_object information. This function is used mainly for + * debugging special cases during kmemleak operations.
It must be called with + * the object->lock held. + */ +static void dump_object_info(struct kmemleak_object *object) +{ + struct stack_trace trace; + + trace.nr_entries = object->trace_len; + trace.entries = object->trace; + + pr_notice("kmemleak: Object 0x%08lx (size %zu):\n", + object->tree_node.start, object->size); + pr_notice(" comm \"%s\", pid %d, jiffies %lu\n", + object->comm, object->pid, object->jiffies); + pr_notice(" min_count = %d\n", object->min_count); + pr_notice(" count = %d\n", object->count); + pr_notice(" backtrace:\n"); + print_stack_trace(&trace, 4); +} + +/* + * Look-up a memory block metadata (kmemleak_object) in the priority search + * tree based on a pointer value. If alias is 0, only values pointing to the + * beginning of the memory block are allowed. The kmemleak_lock must be held + * when calling this function. + */ +static struct kmemleak_object *lookup_object(unsigned long ptr, int alias) +{ + struct prio_tree_node *node; + struct prio_tree_iter iter; + struct kmemleak_object *object; + + prio_tree_iter_init(&iter, &object_tree_root, ptr, ptr); + node = prio_tree_next(&iter); + if (node) { + object = prio_tree_entry(node, struct kmemleak_object, + tree_node); + if (!alias && object->pointer != ptr) { + kmemleak_warn("kmemleak: Found object by alias"); + object = NULL; + } + } else + object = NULL; + + return object; +} + +/* + * Increment the object use_count. Return 1 if successful or 0 otherwise. Note + * that once an object's use_count reached 0, the RCU freeing was already + * registered and the object should no longer be used. This function must be + * called under the protection of rcu_read_lock(). + */ +static int get_object(struct kmemleak_object *object) +{ + return atomic_inc_not_zero(&object->use_count); +} + +/* + * RCU callback to free a kmemleak_object. + */ +static void free_object_rcu(struct rcu_head *rcu) +{ + struct hlist_node *elem, *tmp; + struct kmemleak_scan_area *area; + struct kmemleak_object *object = + container_of(rcu, struct kmemleak_object, rcu); + + /* + * Once use_count is 0 (guaranteed by put_object), there is no other + * code accessing this object, hence no need for locking. + */ + hlist_for_each_entry_safe(area, elem, tmp, &object->area_list, node) { + hlist_del(elem); + kmem_cache_free(scan_area_cache, area); + } + kmem_cache_free(object_cache, object); +} + +/* + * Decrement the object use_count. Once the count is 0, free the object using + * an RCU callback. Since put_object() may be called via the kmemleak_free() -> + * delete_object() path, the delayed RCU freeing ensures that there is no + * recursive call to the kernel allocator. Lock-less RCU object_list traversal + * is also possible. + */ +static void put_object(struct kmemleak_object *object) +{ + if (!atomic_dec_and_test(&object->use_count)) + return; + + /* should only get here after delete_object was called */ + WARN_ON(object->flags & OBJECT_ALLOCATED); + + call_rcu(&object->rcu, free_object_rcu); +} + +/* + * Look up an object in the prio search tree and increase its use_count. 
+ */ +static struct kmemleak_object *find_and_get_object(unsigned long ptr, int alias) +{ + unsigned long flags; + struct kmemleak_object *object = NULL; + + rcu_read_lock(); + read_lock_irqsave(&kmemleak_lock, flags); + if (ptr >= min_addr && ptr < max_addr) + object = lookup_object(ptr, alias); + read_unlock_irqrestore(&kmemleak_lock, flags); + + /* check whether the object is still available */ + if (object && !get_object(object)) + object = NULL; + rcu_read_unlock(); + + return object; +} + +/* + * Create the metadata (struct kmemleak_object) corresponding to an allocated + * memory block and add it to the object_list and object_tree_root. + */ +static void create_object(unsigned long ptr, size_t size, int min_count, + gfp_t gfp) +{ + unsigned long flags; + struct kmemleak_object *object; + struct prio_tree_node *node; + struct stack_trace trace; + + object = kmem_cache_alloc(object_cache, gfp & ~GFP_SLAB_BUG_MASK); + if (!object) { + kmemleak_panic("kmemleak: Cannot allocate a kmemleak_object " + "structure\n"); + return; + } + + INIT_LIST_HEAD(&object->object_list); + INIT_LIST_HEAD(&object->gray_list); + INIT_HLIST_HEAD(&object->area_list); + spin_lock_init(&object->lock); + atomic_set(&object->use_count, 1); + object->flags = OBJECT_ALLOCATED; + object->pointer = ptr; + object->size = size; + object->min_count = min_count; + object->count = -1; /* no color initially */ + object->jiffies = jiffies; + + /* task information */ + if (in_irq()) { + object->pid = 0; + strncpy(object->comm, "hardirq", sizeof(object->comm)); + } else if (in_softirq()) { + object->pid = 0; + strncpy(object->comm, "softirq", sizeof(object->comm)); + } else { + object->pid = current->pid; + /* + * There is a small chance of a race with set_task_comm(), + * however using get_task_comm() here may cause locking + * dependency issues with current->alloc_lock. In the worst + * case, the command line is not correct. + */ + strncpy(object->comm, current->comm, sizeof(object->comm)); + } + + /* kernel backtrace */ + trace.max_entries = MAX_TRACE; + trace.nr_entries = 0; + trace.entries = object->trace; + trace.skip = 1; + save_stack_trace(&trace); + object->trace_len = trace.nr_entries; + + INIT_PRIO_TREE_NODE(&object->tree_node); + object->tree_node.start = ptr; + object->tree_node.last = ptr + size - 1; + + write_lock_irqsave(&kmemleak_lock, flags); + min_addr = min(min_addr, ptr); + max_addr = max(max_addr, ptr + size); + node = prio_tree_insert(&object_tree_root, &object->tree_node); + /* + * The code calling the kernel does not yet have the pointer to the + * memory block to be able to free it. However, we still hold the + * kmemleak_lock here in case parts of the kernel started freeing + * random memory blocks. + */ + if (node != &object->tree_node) { + unsigned long flags; + + kmemleak_panic("kmemleak: Cannot insert 0x%lx into the object " + "search tree (already existing)\n", ptr); + object = lookup_object(ptr, 1); + spin_lock_irqsave(&object->lock, flags); + dump_object_info(object); + spin_unlock_irqrestore(&object->lock, flags); + + goto out; + } + list_add_tail_rcu(&object->object_list, &object_list); +out: + write_unlock_irqrestore(&kmemleak_lock, flags); +} + +/* + * Remove the metadata (struct kmemleak_object) for a memory block from the + * object_list and object_tree_root and decrement its use_count. 
+ */ +static void delete_object(unsigned long ptr) +{ + unsigned long flags; + struct kmemleak_object *object; + + write_lock_irqsave(&kmemleak_lock, flags); + object = lookup_object(ptr, 0); + if (!object) { + kmemleak_warn("kmemleak: Freeing unknown object at 0x%08lx\n", + ptr); + write_unlock_irqrestore(&kmemleak_lock, flags); + return; + } + prio_tree_remove(&object_tree_root, &object->tree_node); + list_del_rcu(&object->object_list); + write_unlock_irqrestore(&kmemleak_lock, flags); + + WARN_ON(!(object->flags & OBJECT_ALLOCATED)); + WARN_ON(atomic_read(&object->use_count) < 1); + + /* + * Locking here also ensures that the corresponding memory block + * cannot be freed when it is being scanned. + */ + spin_lock_irqsave(&object->lock, flags); + if (object->flags & OBJECT_REPORTED) + print_referenced(object); + object->flags &= ~OBJECT_ALLOCATED; + spin_unlock_irqrestore(&object->lock, flags); + put_object(object); +} + +/* + * Mark an object permanently as gray-colored so that it can no longer be + * reported as a leak. This is used in general to mark a false positive. + */ +static void make_gray_object(unsigned long ptr) +{ + unsigned long flags; + struct kmemleak_object *object; + + object = find_and_get_object(ptr, 0); + if (!object) { + kmemleak_warn("kmemleak: Graying unknown object at 0x%08lx\n", + ptr); + return; + } + + spin_lock_irqsave(&object->lock, flags); + object->min_count = 0; + spin_unlock_irqrestore(&object->lock, flags); + put_object(object); +} + +/* + * Mark the object as black-colored so that it is ignored during scans and + * reporting. + */ +static void make_black_object(unsigned long ptr) +{ + unsigned long flags; + struct kmemleak_object *object; + + object = find_and_get_object(ptr, 0); + if (!object) { + kmemleak_warn("kmemleak: Blacking unknown object at 0x%08lx\n", + ptr); + return; + } + + spin_lock_irqsave(&object->lock, flags); + object->min_count = -1; + spin_unlock_irqrestore(&object->lock, flags); + put_object(object); +} + +/* + * Add a scanning area to the object. If at least one such area is added, + * kmemleak will only scan these ranges rather than the whole memory block. + */ +static void add_scan_area(unsigned long ptr, unsigned long offset, + size_t length, gfp_t gfp) +{ + unsigned long flags; + struct kmemleak_object *object; + struct kmemleak_scan_area *area; + + object = find_and_get_object(ptr, 0); + if (!object) { + kmemleak_warn("kmemleak: Adding scan area to unknown " + "object at 0x%08lx\n", ptr); + return; + } + + area = kmem_cache_alloc(scan_area_cache, gfp & ~GFP_SLAB_BUG_MASK); + if (!area) { + kmemleak_warn("kmemleak: Cannot allocate a scan area\n"); + goto out; + } + + spin_lock_irqsave(&object->lock, flags); + if (offset + length > object->size) { + kmemleak_warn("kmemleak: Scan area larger than object " + "0x%08lx\n", ptr); + dump_object_info(object); + kmem_cache_free(scan_area_cache, area); + goto out_unlock; + } + + INIT_HLIST_NODE(&area->node); + area->offset = offset; + area->length = length; + + hlist_add_head(&area->node, &object->area_list); +out_unlock: + spin_unlock_irqrestore(&object->lock, flags); +out: + put_object(object); +} + +/* + * Set the OBJECT_NO_SCAN flag for the object corresponding to the given + * pointer. Such an object will not be scanned by kmemleak but references to it + * are searched.
+ */ +static void object_no_scan(unsigned long ptr) +{ + unsigned long flags; + struct kmemleak_object *object; + + object = find_and_get_object(ptr, 0); + if (!object) { + kmemleak_warn("kmemleak: Not scanning unknown object at " + "0x%08lx\n", ptr); + return; + } + + spin_lock_irqsave(&object->lock, flags); + object->flags |= OBJECT_NO_SCAN; + spin_unlock_irqrestore(&object->lock, flags); + put_object(object); +} + +/* + * Log an early kmemleak_* call to the early_log buffer. These calls will be + * processed later once kmemleak is fully initialized. + */ +static void log_early(int op_type, const void *ptr, size_t size, + int min_count, unsigned long offset, size_t length) +{ + unsigned long flags; + struct early_log *log; + + if (crt_early_log >= ARRAY_SIZE(early_log)) { + kmemleak_panic("kmemleak: Early log buffer exceeded\n"); + return; + } + + /* + * There is no need for locking since the kernel is still in UP mode + * at this stage. Disabling the IRQs is enough. + */ + local_irq_save(flags); + log = &early_log[crt_early_log]; + log->op_type = op_type; + log->ptr = ptr; + log->size = size; + log->min_count = min_count; + log->offset = offset; + log->length = length; + crt_early_log++; + local_irq_restore(flags); +} + +/* + * Memory allocation function callback. This function is called from the + * kernel allocators when a new block is allocated (kmem_cache_alloc, kmalloc, + * vmalloc etc.). + */ +void kmemleak_alloc(const void *ptr, size_t size, int min_count, gfp_t gfp) +{ + pr_debug("%s(0x%p, %zu, %d)\n", __func__, ptr, size, min_count); + + if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) + create_object((unsigned long)ptr, size, min_count, gfp); + else if (atomic_read(&kmemleak_early_log)) + log_early(KMEMLEAK_ALLOC, ptr, size, min_count, 0, 0); +} +EXPORT_SYMBOL_GPL(kmemleak_alloc); + +/* + * Memory freeing function callback. This function is called from the kernel + * allocators when a block is freed (kmem_cache_free, kfree, vfree etc.). + */ +void kmemleak_free(const void *ptr) +{ + pr_debug("%s(0x%p)\n", __func__, ptr); + + if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) + delete_object((unsigned long)ptr); + else if (atomic_read(&kmemleak_early_log)) + log_early(KMEMLEAK_FREE, ptr, 0, 0, 0, 0); +} +EXPORT_SYMBOL_GPL(kmemleak_free); + +/* + * Mark an already allocated memory block as a false positive. This will cause + * the block to no longer be reported as leak and always be scanned. + */ +void kmemleak_not_leak(const void *ptr) +{ + pr_debug("%s(0x%p)\n", __func__, ptr); + + if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) + make_gray_object((unsigned long)ptr); + else if (atomic_read(&kmemleak_early_log)) + log_early(KMEMLEAK_NOT_LEAK, ptr, 0, 0, 0, 0); +} +EXPORT_SYMBOL(kmemleak_not_leak); + +/* + * Ignore a memory block. This is usually done when it is known that the + * corresponding block is not a leak and does not contain any references to + * other allocated memory blocks. + */ +void kmemleak_ignore(const void *ptr) +{ + pr_debug("%s(0x%p)\n", __func__, ptr); + + if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) + make_black_object((unsigned long)ptr); + else if (atomic_read(&kmemleak_early_log)) + log_early(KMEMLEAK_IGNORE, ptr, 0, 0, 0, 0); +} +EXPORT_SYMBOL(kmemleak_ignore); + +/* + * Limit the range to be scanned in an allocated memory block. 
+ */ +void kmemleak_scan_area(const void *ptr, unsigned long offset, size_t length, + gfp_t gfp) +{ + pr_debug("%s(0x%p)\n", __func__, ptr); + + if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) + add_scan_area((unsigned long)ptr, offset, length, gfp); + else if (atomic_read(&kmemleak_early_log)) + log_early(KMEMLEAK_SCAN_AREA, ptr, 0, 0, offset, length); +} +EXPORT_SYMBOL(kmemleak_scan_area); + +/* + * Inform kmemleak not to scan the given memory block. + */ +void kmemleak_no_scan(const void *ptr) +{ + pr_debug("%s(0x%p)\n", __func__, ptr); + + if (atomic_read(&kmemleak_enabled) && ptr && !IS_ERR(ptr)) + object_no_scan((unsigned long)ptr); + else if (atomic_read(&kmemleak_early_log)) + log_early(KMEMLEAK_NO_SCAN, ptr, 0, 0, 0, 0); +} +EXPORT_SYMBOL(kmemleak_no_scan); + +/* + * Yield the CPU so that other tasks get a chance to run. The yielding is + * rate-limited to avoid an excessive number of calls to the schedule() function + * during memory scanning. + */ +static void scan_yield(void) +{ + might_sleep(); + + if (time_is_before_eq_jiffies(next_scan_yield)) { + schedule(); + next_scan_yield = jiffies + jiffies_scan_yield; + } +} + +/* + * Memory scanning is a long process and it needs to be interruptible. This + * function checks whether such an interrupt condition has occurred. + */ +static int scan_should_stop(void) +{ + if (!atomic_read(&kmemleak_enabled)) + return 1; + + /* + * This function may be called from either process or kthread context, + * hence the need to check for both stop conditions. + */ + if (current->mm) + return signal_pending(current); + else + return kthread_should_stop(); + + return 0; +} + +/* + * Scan a memory block (exclusive range) for valid pointers and add those + * found to the gray list. + */ +static void scan_block(void *_start, void *_end, + struct kmemleak_object *scanned) +{ + unsigned long *ptr; + unsigned long *start = PTR_ALIGN(_start, BYTES_PER_POINTER); + unsigned long *end = _end - (BYTES_PER_POINTER - 1); + + for (ptr = start; ptr < end; ptr++) { + unsigned long flags; + unsigned long pointer = *ptr; + struct kmemleak_object *object; + + if (scan_should_stop()) + break; + + /* + * When scanning a memory block with a corresponding + * kmemleak_object, the CPU yielding is handled in the calling + * code since it holds the object->lock to avoid the block + * freeing. + */ + if (!scanned) + scan_yield(); + + object = find_and_get_object(pointer, 1); + if (!object) + continue; + if (object == scanned) { + /* self referenced, ignore */ + put_object(object); + continue; + } + + /* + * Avoid the lockdep recursive warning on object->lock being + * previously acquired in scan_object(). These locks are + * enclosed by scan_mutex. + */ + spin_lock_irqsave_nested(&object->lock, flags, + SINGLE_DEPTH_NESTING); + if (!color_white(object)) { + /* non-orphan, ignored or new */ + spin_unlock_irqrestore(&object->lock, flags); + put_object(object); + continue; + } + + /* + * Increase the object's reference count (number of pointers + * to the memory block). If this count reaches the required + * minimum, the object's color will become gray and it will be + * added to the gray_list. + */ + object->count++; + if (color_gray(object)) + list_add_tail(&object->gray_list, &gray_list); + else + put_object(object); + spin_unlock_irqrestore(&object->lock, flags); + } +} + +/* + * Scan a memory block corresponding to a kmemleak_object. A precondition is + * that object->use_count >= 1.
+ */ +static void scan_object(struct kmemleak_object *object) +{ + struct kmemleak_scan_area *area; + struct hlist_node *elem; + unsigned long flags; + + /* + * Once the object->lock is acquired, the corresponding memory block + * cannot be freed (the same lock is acquired in delete_object). + */ + spin_lock_irqsave(&object->lock, flags); + if (object->flags & OBJECT_NO_SCAN) + goto out; + if (!(object->flags & OBJECT_ALLOCATED)) + /* already freed object */ + goto out; + if (hlist_empty(&object->area_list)) + scan_block((void *)object->pointer, + (void *)(object->pointer + object->size), object); + else + hlist_for_each_entry(area, elem, &object->area_list, node) + scan_block((void *)(object->pointer + area->offset), + (void *)(object->pointer + area->offset + + area->length), object); +out: + spin_unlock_irqrestore(&object->lock, flags); +} + +/* + * Scan data sections and all the referenced memory blocks allocated via the + * kernel's standard allocators. This function must be called with the + * scan_mutex held. + */ +static void kmemleak_scan(void) +{ + unsigned long flags; + struct kmemleak_object *object, *tmp; + struct task_struct *task; + int i; + + /* prepare the kmemleak_object structures */ + rcu_read_lock(); + list_for_each_entry_rcu(object, &object_list, object_list) { + spin_lock_irqsave(&object->lock, flags); +#ifdef DEBUG + /* + * With a few exceptions there should be a maximum of + * 1 reference to any object at this point. + */ + if (atomic_read(&object->use_count) > 1) { + pr_debug("kmemleak: object->use_count = %d\n", + atomic_read(&object->use_count)); + dump_object_info(object); + } +#endif + /* reset the reference count (whiten the object) */ + object->count = 0; + if (color_gray(object) && get_object(object)) + list_add_tail(&object->gray_list, &gray_list); + + spin_unlock_irqrestore(&object->lock, flags); + } + rcu_read_unlock(); + + /* data/bss scanning */ + scan_block(_sdata, _edata, NULL); + scan_block(__bss_start, __bss_stop, NULL); + +#ifdef CONFIG_SMP + /* per-cpu sections scanning */ + for_each_possible_cpu(i) + scan_block(__per_cpu_start + per_cpu_offset(i), + __per_cpu_end + per_cpu_offset(i), NULL); +#endif + + /* + * Struct page scanning for each node. The code below is not yet safe + * with MEMORY_HOTPLUG. + */ + for_each_online_node(i) { + pg_data_t *pgdat = NODE_DATA(i); + unsigned long start_pfn = pgdat->node_start_pfn; + unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages; + unsigned long pfn; + + for (pfn = start_pfn; pfn < end_pfn; pfn++) { + struct page *page; + + if (!pfn_valid(pfn)) + continue; + page = pfn_to_page(pfn); + /* only scan if page is in use */ + if (page_count(page) == 0) + continue; + scan_block(page, page + 1, NULL); + } + } + + /* + * Scanning the task stacks may introduce false negatives and it is + * not enabled by default. + */ + if (kmemleak_stack_scan) { + read_lock(&tasklist_lock); + for_each_process(task) + scan_block(task_stack_page(task), + task_stack_page(task) + THREAD_SIZE, NULL); + read_unlock(&tasklist_lock); + } + + /* + * Scan the objects already referenced from the sections scanned + * above. More objects will be referenced and, if there are no memory + * leaks, all the objects will be scanned. The list traversal is safe + * for both tail additions and removals from inside the loop. The + * kmemleak objects cannot be freed from outside the loop because their + * use_count was increased.
+ */ + object = list_entry(gray_list.next, typeof(*object), gray_list); + while (&object->gray_list != &gray_list) { + scan_yield(); + + /* may add new objects to the list */ + if (!scan_should_stop()) + scan_object(object); + + tmp = list_entry(object->gray_list.next, typeof(*object), + gray_list); + + /* remove the object from the list and release it */ + list_del(&object->gray_list); + put_object(object); + + object = tmp; + } + WARN_ON(!list_empty(&gray_list)); +} + +/* + * Thread function performing automatic memory scanning. Unreferenced objects + * at the end of a memory scan are reported but only the first time. + */ +static int kmemleak_scan_thread(void *arg) +{ + static int first_run = 1; + + pr_info("kmemleak: Automatic memory scanning thread started\n"); + + /* + * Wait before the first scan to allow the system to fully initialize. + */ + if (first_run) { + first_run = 0; + ssleep(SECS_FIRST_SCAN); + } + + while (!kthread_should_stop()) { + struct kmemleak_object *object; + signed long timeout = jiffies_scan_wait; + + mutex_lock(&scan_mutex); + + kmemleak_scan(); + reported_leaks = 0; + + rcu_read_lock(); + list_for_each_entry_rcu(object, &object_list, object_list) { + unsigned long flags; + + if (reported_leaks >= REPORTS_NR) + break; + spin_lock_irqsave(&object->lock, flags); + if (!(object->flags & OBJECT_REPORTED) && + unreferenced_object(object)) { + print_unreferenced(NULL, object); + object->flags |= OBJECT_REPORTED; + reported_leaks++; + } else if ((object->flags & OBJECT_REPORTED) && + referenced_object(object)) { + print_referenced(object); + object->flags &= ~OBJECT_REPORTED; + } + spin_unlock_irqrestore(&object->lock, flags); + } + rcu_read_unlock(); + + mutex_unlock(&scan_mutex); + /* wait before the next scan */ + while (timeout && !kthread_should_stop()) + timeout = schedule_timeout_interruptible(timeout); + } + + pr_info("kmemleak: Automatic memory scanning thread ended\n"); + + return 0; +} + +/* + * Start the automatic memory scanning thread. This function must be called + * with the kmemleak_mutex held. + */ +void start_scan_thread(void) +{ + if (scan_thread) + return; + scan_thread = kthread_run(kmemleak_scan_thread, NULL, "kmemleak"); + if (IS_ERR(scan_thread)) { + pr_warning("kmemleak: Failed to create the scan thread\n"); + scan_thread = NULL; + } +} + +/* + * Stop the automatic memory scanning thread. This function must be called + * with the kmemleak_mutex held. + */ +void stop_scan_thread(void) +{ + if (scan_thread) { + kthread_stop(scan_thread); + scan_thread = NULL; + } +} + +/* + * Iterate over the object_list and return the first valid object at or after + * the required position with its use_count incremented. The function triggers + * a memory scanning when the pos argument points to the first position. + */ +static void *kmemleak_seq_start(struct seq_file *seq, loff_t *pos) +{ + struct kmemleak_object *object; + loff_t n = *pos; + + if (!n) { + kmemleak_scan(); + reported_leaks = 0; + } + if (reported_leaks >= REPORTS_NR) + return NULL; + + rcu_read_lock(); + list_for_each_entry_rcu(object, &object_list, object_list) { + if (n-- > 0) + continue; + if (get_object(object)) + goto out; + } + object = NULL; +out: + rcu_read_unlock(); + return object; +} + +/* + * Return the next object in the object_list. The function decrements the + * use_count of the previous object and increases that of the next one. 
+ */ +static void *kmemleak_seq_next(struct seq_file *seq, void *v, loff_t *pos) +{ + struct kmemleak_object *prev_obj = v; + struct kmemleak_object *next_obj = NULL; + struct list_head *n = &prev_obj->object_list; + + ++(*pos); + if (reported_leaks >= REPORTS_NR) + goto out; + + rcu_read_lock(); + list_for_each_continue_rcu(n, &object_list) { + next_obj = list_entry(n, struct kmemleak_object, object_list); + if (get_object(next_obj)) + break; + } + rcu_read_unlock(); +out: + put_object(prev_obj); + return next_obj; +} + +/* + * Decrement the use_count of the last object required, if any. + */ +static void kmemleak_seq_stop(struct seq_file *seq, void *v) +{ + if (v) + put_object(v); +} + +/* + * Print the information for an unreferenced object to the seq file. + */ +static int kmemleak_seq_show(struct seq_file *seq, void *v) +{ + struct kmemleak_object *object = v; + unsigned long flags; + + spin_lock_irqsave(&object->lock, flags); + if (!unreferenced_object(object)) + goto out; + print_unreferenced(seq, object); + reported_leaks++; +out: + spin_unlock_irqrestore(&object->lock, flags); + return 0; +} + +static const struct seq_operations kmemleak_seq_ops = { + .start = kmemleak_seq_start, + .next = kmemleak_seq_next, + .stop = kmemleak_seq_stop, + .show = kmemleak_seq_show, +}; + +static int kmemleak_open(struct inode *inode, struct file *file) +{ + int ret = 0; + + if (!atomic_read(&kmemleak_enabled)) + return -EBUSY; + + ret = mutex_lock_interruptible(&kmemleak_mutex); + if (ret < 0) + goto out; + if (file->f_mode & FMODE_READ) { + ret = mutex_lock_interruptible(&scan_mutex); + if (ret < 0) + goto kmemleak_unlock; + ret = seq_open(file, &kmemleak_seq_ops); + if (ret < 0) + goto scan_unlock; + } + return ret; + +scan_unlock: + mutex_unlock(&scan_mutex); +kmemleak_unlock: + mutex_unlock(&kmemleak_mutex); +out: + return ret; +} + +static int kmemleak_release(struct inode *inode, struct file *file) +{ + int ret = 0; + + if (file->f_mode & FMODE_READ) { + seq_release(inode, file); + mutex_unlock(&scan_mutex); + } + mutex_unlock(&kmemleak_mutex); + + return ret; +} + +/* + * File write operation to configure kmemleak at run-time. The following + * commands can be written to the /sys/kernel/debug/kmemleak file: + * off - disable kmemleak (irreversible) + * stack=on - enable the task stacks scanning + * stack=off - disable the tasks stacks scanning + * scan=on - start the automatic memory scanning thread + * scan=off - stop the automatic memory scanning thread + * scan=... 
- set the automatic memory scanning period in seconds (0 to + disable it) + */ +static ssize_t kmemleak_write(struct file *file, const char __user *user_buf, + size_t size, loff_t *ppos) +{ + char buf[64]; + int buf_size; + + if (!atomic_read(&kmemleak_enabled)) + return -EBUSY; + + buf_size = min(size, (sizeof(buf) - 1)); + if (strncpy_from_user(buf, user_buf, buf_size) < 0) + return -EFAULT; + buf[buf_size] = 0; + + if (strncmp(buf, "off", 3) == 0) + kmemleak_disable(); + else if (strncmp(buf, "stack=on", 8) == 0) + kmemleak_stack_scan = 1; + else if (strncmp(buf, "stack=off", 9) == 0) + kmemleak_stack_scan = 0; + else if (strncmp(buf, "scan=on", 7) == 0) + start_scan_thread(); + else if (strncmp(buf, "scan=off", 8) == 0) + stop_scan_thread(); + else if (strncmp(buf, "scan=", 5) == 0) { + unsigned long secs; + int err; + + err = strict_strtoul(buf + 5, 0, &secs); + if (err < 0) + return err; + stop_scan_thread(); + if (secs) { + jiffies_scan_wait = msecs_to_jiffies(secs * 1000); + start_scan_thread(); + } + } else + return -EINVAL; + + /* ignore the rest of the buffer, only one command at a time */ + *ppos += size; + return size; +} + +static const struct file_operations kmemleak_fops = { + .owner = THIS_MODULE, + .open = kmemleak_open, + .read = seq_read, + .write = kmemleak_write, + .llseek = seq_lseek, + .release = kmemleak_release, +}; + +/* + * Perform the freeing of the kmemleak internal objects after waiting for any + * current memory scan to complete. + */ +static int kmemleak_cleanup_thread(void *arg) +{ + struct kmemleak_object *object; + + mutex_lock(&kmemleak_mutex); + stop_scan_thread(); + mutex_unlock(&kmemleak_mutex); + + mutex_lock(&scan_mutex); + rcu_read_lock(); + list_for_each_entry_rcu(object, &object_list, object_list) + delete_object(object->pointer); + rcu_read_unlock(); + mutex_unlock(&scan_mutex); + + return 0; +} + +/* + * Start the clean-up thread. + */ +static void kmemleak_cleanup(void) +{ + struct task_struct *cleanup_thread; + + cleanup_thread = kthread_run(kmemleak_cleanup_thread, NULL, + "kmemleak-clean"); + if (IS_ERR(cleanup_thread)) + pr_warning("kmemleak: Failed to create the clean-up thread\n"); +} + +/* + * Disable kmemleak. No memory allocation/freeing will be traced once this + * function is called. Disabling kmemleak is an irreversible operation. + */ +static void kmemleak_disable(void) +{ + /* atomically check whether it was already invoked */ + if (atomic_cmpxchg(&kmemleak_error, 0, 1)) + return; + + /* stop any memory operation tracing */ + atomic_set(&kmemleak_early_log, 0); + atomic_set(&kmemleak_enabled, 0); + + /* check whether it is too early for a kernel thread */ + if (atomic_read(&kmemleak_initialized)) + kmemleak_cleanup(); + + pr_info("Kernel memory leak detector disabled\n"); +} + +/* + * Allow boot-time kmemleak disabling (enabled by default). + */ +static int kmemleak_boot_config(char *str) +{ + if (!str) + return -EINVAL; + if (strcmp(str, "off") == 0) + kmemleak_disable(); + else if (strcmp(str, "on") != 0) + return -EINVAL; + return 0; +} +early_param("kmemleak", kmemleak_boot_config); + +/* + * Kmemleak initialization.
+ */ +void __init kmemleak_init(void) +{ + int i; + unsigned long flags; + + jiffies_scan_yield = msecs_to_jiffies(MSECS_SCAN_YIELD); + jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE); + jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000); + + object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE); + scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE); + INIT_PRIO_TREE_ROOT(&object_tree_root); + + /* the kernel is still in UP mode, so disabling the IRQs is enough */ + local_irq_save(flags); + if (!atomic_read(&kmemleak_error)) { + atomic_set(&kmemleak_enabled, 1); + atomic_set(&kmemleak_early_log, 0); + } + local_irq_restore(flags); + + /* + * This is the point where tracking allocations is safe. Automatic + * scanning is started during the late initcall. Add the early logged + * callbacks to the kmemleak infrastructure. + */ + for (i = 0; i < crt_early_log; i++) { + struct early_log *log = &early_log[i]; + + switch (log->op_type) { + case KMEMLEAK_ALLOC: + kmemleak_alloc(log->ptr, log->size, log->min_count, + GFP_KERNEL); + break; + case KMEMLEAK_FREE: + kmemleak_free(log->ptr); + break; + case KMEMLEAK_NOT_LEAK: + kmemleak_not_leak(log->ptr); + break; + case KMEMLEAK_IGNORE: + kmemleak_ignore(log->ptr); + break; + case KMEMLEAK_SCAN_AREA: + kmemleak_scan_area(log->ptr, log->offset, log->length, + GFP_KERNEL); + break; + case KMEMLEAK_NO_SCAN: + kmemleak_no_scan(log->ptr); + break; + default: + WARN_ON(1); + } + } +} + +/* + * Late initialization function. + */ +static int __init kmemleak_late_init(void) +{ + struct dentry *dentry; + + atomic_set(&kmemleak_initialized, 1); + + if (atomic_read(&kmemleak_error)) { + /* + * Some error occurred and kmemleak was disabled. There is a + * small chance that kmemleak_disable() was called immediately + * after setting kmemleak_initialized and we may end up with + * two clean-up threads but serialized by scan_mutex. + */ + kmemleak_cleanup(); + return -ENOMEM; + } + + dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL, + &kmemleak_fops); + if (!dentry) + pr_warning("kmemleak: Failed to create the debugfs kmemleak " + "file\n"); + mutex_lock(&kmemleak_mutex); + start_scan_thread(); + mutex_unlock(&kmemleak_mutex); + + pr_info("Kernel memory leak detector initialized\n"); + + return 0; +} +late_initcall(kmemleak_late_init); From 04f70336c80c43a15e617b36c2043dfa0ad6ed0f Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Thu, 11 Jun 2009 13:22:39 +0100 Subject: [PATCH 02/12] kmemleak: Add documentation on the memory leak detector This patch adds the Documentation/kmemleak.txt file with some information about how kmemleak works. Signed-off-by: Catalin Marinas --- Documentation/kernel-parameters.txt | 4 + Documentation/kmemleak.txt | 142 ++++++++++++++++++++++++++++ 2 files changed, 146 insertions(+) create mode 100644 Documentation/kmemleak.txt diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index 4a3c2209a12..04a44cc5048 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -1077,6 +1077,10 @@ and is between 256 and 4096 characters. It is defined in the file Configure the RouterBoard 532 series on-chip Ethernet adapter MAC address. + kmemleak= [KNL] Boot-time kmemleak enable/disable + Valid arguments: on, off + Default: on + kstack=N [X86] Print N words from the kernel stack in oops dumps.
diff --git a/Documentation/kmemleak.txt b/Documentation/kmemleak.txt new file mode 100644 index 00000000000..0112da3b9ab --- /dev/null +++ b/Documentation/kmemleak.txt @@ -0,0 +1,142 @@ +Kernel Memory Leak Detector +=========================== + +Introduction +------------ + +Kmemleak provides a way of detecting possible kernel memory leaks in a +way similar to a tracing garbage collector +(http://en.wikipedia.org/wiki/Garbage_collection_%28computer_science%29#Tracing_garbage_collectors), +with the difference that the orphan objects are not freed but only +reported via /sys/kernel/debug/kmemleak. A similar method is used by the +Valgrind tool (memcheck --leak-check) to detect the memory leaks in +user-space applications. + +Usage +----- + +CONFIG_DEBUG_KMEMLEAK in "Kernel hacking" has to be enabled. A kernel +thread scans the memory every 10 minutes (by default) and prints any new +unreferenced objects found. To trigger an intermediate scan and display +all the possible memory leaks: + + # mount -t debugfs nodev /sys/kernel/debug/ + # cat /sys/kernel/debug/kmemleak + +Note that the orphan objects are listed in the order they were allocated +and one object at the beginning of the list may cause other subsequent +objects to be reported as orphan. + +Memory scanning parameters can be modified at run-time by writing to the +/sys/kernel/debug/kmemleak file. The following parameters are supported: + + off - disable kmemleak (irreversible) + stack=on - enable the task stacks scanning + stack=off - disable the tasks stacks scanning + scan=on - start the automatic memory scanning thread + scan=off - stop the automatic memory scanning thread + scan= - set the automatic memory scanning period in seconds (0 + to disable it) + +Kmemleak can also be disabled at boot-time by passing "kmemleak=off" on +the kernel command line. + +Basic Algorithm +--------------- + +The memory allocations via kmalloc, vmalloc, kmem_cache_alloc and +friends are traced and the pointers, together with additional +information like size and stack trace, are stored in a prio search tree. +The corresponding freeing function calls are tracked and the pointers +removed from the kmemleak data structures. + +An allocated block of memory is considered orphan if no pointer to its +start address or to any location inside the block can be found by +scanning the memory (including saved registers). This means that there +might be no way for the kernel to pass the address of the allocated +block to a freeing function and therefore the block is considered a +memory leak. + +The scanning algorithm steps: + + 1. mark all objects as white (remaining white objects will later be + considered orphan) + 2. scan the memory starting with the data section and stacks, checking + the values against the addresses stored in the prio search tree. If + a pointer to a white object is found, the object is added to the + gray list + 3. scan the gray objects for matching addresses (some white objects + can become gray and added at the end of the gray list) until the + gray set is finished + 4. the remaining white objects are considered orphan and reported via + /sys/kernel/debug/kmemleak + +Some allocated memory blocks have pointers stored in the kernel's +internal data structures and they cannot be detected as orphans. To +avoid this, kmemleak can also store the number of values pointing to an +address inside the block address range that need to be found so that the +block is not considered a leak. One example is __vmalloc(). 
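+
+As an illustration of the API described below (the pool allocator names
+are hypothetical, not from the kernel tree), a block that kmemleak cannot
+track by itself can be announced explicitly. Passing 0 as min_count marks
+the object gray so that it is never reported:
+
+  /* sketch: announce a block from a private pool to kmemleak */
+  void *buf = my_pool_alloc(4096);          /* hypothetical allocator */
+  kmemleak_alloc(buf, 4096, 0, GFP_KERNEL); /* min_count 0: never a leak */
+  ...
+  kmemleak_free(buf);                       /* before releasing the block */
+  my_pool_free(buf);
+
+A positive min_count is instead the minimum number of pointers that must
+be found during scanning for the block not to be reported as a leak.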
+ +Kmemleak API +------------ + +See the include/linux/kmemleak.h header for the function prototypes. + +kmemleak_init - initialize kmemleak +kmemleak_alloc - notify of a memory block allocation +kmemleak_free - notify of a memory block freeing +kmemleak_not_leak - mark an object as not a leak +kmemleak_ignore - do not scan or report an object as a leak +kmemleak_scan_area - add scan areas inside a memory block +kmemleak_no_scan - do not scan a memory block +kmemleak_erase - erase an old value in a pointer variable +kmemleak_alloc_recursive - as kmemleak_alloc but checks the recursiveness +kmemleak_free_recursive - as kmemleak_free but checks the recursiveness + +Dealing with false positives/negatives +-------------------------------------- + +The false negatives are real memory leaks (orphan objects) but not +reported by kmemleak because values found during the memory scanning +point to such objects. To reduce the number of false negatives, kmemleak +provides the kmemleak_ignore, kmemleak_scan_area, kmemleak_no_scan and +kmemleak_erase functions (see above). The task stacks also increase the +amount of false negatives and their scanning is not enabled by default. + +The false positives are objects wrongly reported as being memory leaks +(orphan). For objects known not to be leaks, kmemleak provides the +kmemleak_not_leak function. The kmemleak_ignore function could also be used +if the memory block is known not to contain other pointers and it will no +longer be scanned. + +Some of the reported leaks are only transient, especially on SMP +systems, because of pointers temporarily stored in CPU registers or +stacks. Kmemleak defines MSECS_MIN_AGE (defaulting to 5000) representing +the minimum age of an object to be reported as a memory leak. + +Limitations and Drawbacks +------------------------- + +The main drawback is the reduced performance of memory allocation and +freeing. To avoid other penalties, the memory scanning is only performed +when the /sys/kernel/debug/kmemleak file is read. Anyway, this tool is +intended for debugging purposes where the performance might not be the +most important requirement. + +To keep the algorithm simple, kmemleak scans for values pointing to any +address inside a block's address range. This may lead to an increased +number of false negatives. However, it is likely that a real memory leak +will eventually become visible. + +Another source of false negatives is the data stored in non-pointer +values. In a future version, kmemleak could only scan the pointer +members in the allocated structures. This feature would solve many of +the false negative cases described above. + +The tool can report false positives. These are cases where an allocated +block doesn't need to be freed (some cases in the init_call functions), +the pointer is calculated by methods other than the usual container_of +macro or the pointer is stored in a location not scanned by kmemleak. + +Page allocations and ioremap are not tracked. Only the ARM and x86 +architectures are currently supported. From d5cff635290aec9ad7e6ee546aa4fae895361cbb Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Thu, 11 Jun 2009 13:22:40 +0100 Subject: [PATCH 03/12] kmemleak: Add the slab memory allocation/freeing hooks This patch adds the callbacks to kmemleak_(alloc|free) functions from the slab allocator. The patch also adds the SLAB_NOLEAKTRACE flag to avoid recursive calls to kmemleak when it allocates its own data structures.
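As a sketch of how the flag is meant to be used (the cache and structure
below are hypothetical, not part of this patch), any cache whose
allocations must stay invisible to the tracer passes SLAB_NOLEAKTRACE at
creation time; the kmemleak_alloc_recursive()/kmemleak_free_recursive()
wrappers from patch 01 test for the flag and skip the kmemleak callbacks:

	/* hypothetical cache; its objects bypass the kmemleak hooks */
	static struct kmem_cache *meta_cache;

	meta_cache = kmem_cache_create("meta_cache", sizeof(struct meta),
				       0, SLAB_NOLEAKTRACE, NULL);

kmemleak itself creates object_cache and scan_area_cache this way in
kmemleak_init() in patch 01.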
Signed-off-by: Catalin Marinas Reviewed-by: Pekka Enberg --- include/linux/slab.h | 2 ++ mm/slab.c | 32 ++++++++++++++++++++++++++++++-- 2 files changed, 32 insertions(+), 2 deletions(-) diff --git a/include/linux/slab.h b/include/linux/slab.h index 24c5602bee9..48803064ced 100644 --- a/include/linux/slab.h +++ b/include/linux/slab.h @@ -62,6 +62,8 @@ # define SLAB_DEBUG_OBJECTS 0x00000000UL #endif +#define SLAB_NOLEAKTRACE 0x00800000UL /* Avoid kmemleak tracing */ + /* The following flags affect the page allocator grouping pages by mobility */ #define SLAB_RECLAIM_ACCOUNT 0x00020000UL /* Objects are reclaimable */ #define SLAB_TEMPORARY SLAB_RECLAIM_ACCOUNT /* Objects are short-lived */ diff --git a/mm/slab.c b/mm/slab.c index f85831da908..859067f8e4f 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -107,6 +107,7 @@ #include #include #include +#include #include #include #include @@ -178,13 +179,13 @@ SLAB_STORE_USER | \ SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \ - SLAB_DEBUG_OBJECTS) + SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE) #else # define CREATE_MASK (SLAB_HWCACHE_ALIGN | \ SLAB_CACHE_DMA | \ SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \ SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \ - SLAB_DEBUG_OBJECTS) + SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE) #endif /* @@ -964,6 +965,14 @@ static struct array_cache *alloc_arraycache(int node, int entries, struct array_cache *nc = NULL; nc = kmalloc_node(memsize, GFP_KERNEL, node); + /* + * The array_cache structures contain pointers to free objects. + * However, when such objects are allocated or transferred to another + * cache the pointers are not cleared and they could be counted as + * valid references during a kmemleak scan. Therefore, kmemleak must + * not scan such objects. + */ + kmemleak_no_scan(nc); if (nc) { + nc->avail = 0; + nc->limit = entries; @@ -2621,6 +2630,14 @@ static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp, /* Slab management obj is off-slab. */ slabp = kmem_cache_alloc_node(cachep->slabp_cache, local_flags, nodeid); + /* + * If the first object in the slab is leaked (it's allocated + * but no one has a reference to it), we want to make sure + * kmemleak does not treat the ->s_mem pointer as a reference + * to the object. Otherwise we will not report the leak. + */ + kmemleak_scan_area(slabp, offsetof(struct slab, list), + sizeof(struct list_head), local_flags); if (!slabp) return NULL; } else { @@ -3141,6 +3158,12 @@ static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags) STATS_INC_ALLOCMISS(cachep); objp = cache_alloc_refill(cachep, flags); } + /* + * To avoid a false negative, if an object that is in one of the + * per-CPU caches is leaked, we need to make sure kmemleak doesn't + * treat the array pointers as a reference to the object.
+ */ + kmemleak_erase(&ac->entry[ac->avail]); return objp; } @@ -3360,6 +3383,8 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid, out: local_irq_restore(save_flags); ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller); + kmemleak_alloc_recursive(ptr, obj_size(cachep), 1, cachep->flags, + flags); if (unlikely((flags & __GFP_ZERO) && ptr)) memset(ptr, 0, obj_size(cachep)); @@ -3415,6 +3440,8 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller) objp = __do_cache_alloc(cachep, flags); local_irq_restore(save_flags); objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller); + kmemleak_alloc_recursive(objp, obj_size(cachep), 1, cachep->flags, + flags); prefetchw(objp); if (unlikely((flags & __GFP_ZERO) && objp)) @@ -3530,6 +3557,7 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp) struct array_cache *ac = cpu_cache_get(cachep); check_irq_off(); + kmemleak_free_recursive(objp, cachep->flags); objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0)); /* From 4374e616d28e65265a5b433ceece275449f3d2e3 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Thu, 11 Jun 2009 13:23:17 +0100 Subject: [PATCH 04/12] kmemleak: Add the slob memory allocation/freeing hooks This patch adds the callbacks to kmemleak_(alloc|free) functions from the slob allocator. Signed-off-by: Catalin Marinas Acked-by: Matt Mackall Acked-by: Pekka Enberg --- mm/slob.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/mm/slob.c b/mm/slob.c index 9b1737b0787..12f26149992 100644 --- a/mm/slob.c +++ b/mm/slob.c @@ -67,6 +67,7 @@ #include #include #include +#include #include /* @@ -509,6 +510,7 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node) size, PAGE_SIZE << order, gfp, node); } + kmemleak_alloc(ret, size, 1, gfp); return ret; } EXPORT_SYMBOL(__kmalloc_node); @@ -521,6 +523,7 @@ void kfree(const void *block) if (unlikely(ZERO_OR_NULL_PTR(block))) return; + kmemleak_free(block); sp = slob_page(block); if (is_slob_page(sp)) { @@ -584,12 +587,14 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size, } else if (flags & SLAB_PANIC) panic("Cannot create slab cache %s\n", name); + kmemleak_alloc(c, sizeof(struct kmem_cache), 1, GFP_KERNEL); return c; } EXPORT_SYMBOL(kmem_cache_create); void kmem_cache_destroy(struct kmem_cache *c) { + kmemleak_free(c); slob_free(c, sizeof(struct kmem_cache)); } EXPORT_SYMBOL(kmem_cache_destroy); @@ -613,6 +618,7 @@ void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node) if (c->ctor) c->ctor(b); + kmemleak_alloc_recursive(b, c->size, 1, c->flags, flags); return b; } EXPORT_SYMBOL(kmem_cache_alloc_node); @@ -635,6 +641,7 @@ static void kmem_rcu_free(struct rcu_head *head) void kmem_cache_free(struct kmem_cache *c, void *b) { + kmemleak_free_recursive(b, c->flags); if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) { struct slob_rcu *slob_rcu; slob_rcu = b + (c->size - sizeof(struct slob_rcu)); From 06f22f13f3cc2eff00db09f053218e5d4b757bc8 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Thu, 11 Jun 2009 13:23:18 +0100 Subject: [PATCH 05/12] kmemleak: Add the slub memory allocation/freeing hooks This patch adds the callbacks to kmemleak_(alloc|free) functions from the slub allocator. 
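Note that SLAB_NOLEAKTRACE is also added to SLUB_NEVER_MERGE below: a
cache created with this flag must never be merged with a traced cache,
otherwise objects meant to stay invisible to kmemleak could end up in a
traced cache and the recursive calls the flag exists to prevent would
reappear.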
Signed-off-by: Catalin Marinas Cc: Christoph Lameter Reviewed-by: Pekka Enberg --- mm/slub.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/mm/slub.c b/mm/slub.c index 5e805a6fe36..6674a7907b0 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -143,7 +144,7 @@ * Set of flags that will prevent slab merging */ #define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \ - SLAB_TRACE | SLAB_DESTROY_BY_RCU) + SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE) #define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \ SLAB_CACHE_DMA) @@ -1617,6 +1618,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s, if (unlikely((gfpflags & __GFP_ZERO) && object)) memset(object, 0, objsize); + kmemleak_alloc_recursive(object, objsize, 1, s->flags, gfpflags); return object; } @@ -1746,6 +1748,7 @@ static __always_inline void slab_free(struct kmem_cache *s, struct kmem_cache_cpu *c; unsigned long flags; + kmemleak_free_recursive(x, s->flags); local_irq_save(flags); c = get_cpu_slab(s, smp_processor_id()); debug_check_no_locks_freed(object, c->objsize); From 89219d37a2377c44fde7bff0bf0623453c05329a Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Thu, 11 Jun 2009 13:23:19 +0100 Subject: [PATCH 06/12] kmemleak: Add the vmalloc memory allocation/freeing hooks This patch adds the callbacks to kmemleak_(alloc|free) functions from vmalloc/vfree. Signed-off-by: Catalin Marinas --- mm/vmalloc.c | 30 +++++++++++++++++++++++++++--- 1 file changed, 27 insertions(+), 3 deletions(-) diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 083716ea38c..b7db9357279 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -1327,6 +1328,9 @@ static void __vunmap(const void *addr, int deallocate_pages) void vfree(const void *addr) { BUG_ON(in_interrupt()); + + kmemleak_free(addr); + __vunmap(addr, 1); } EXPORT_SYMBOL(vfree); @@ -1439,8 +1443,17 @@ fail: void *__vmalloc_area(struct vm_struct *area, gfp_t gfp_mask, pgprot_t prot) { - return __vmalloc_area_node(area, gfp_mask, prot, -1, - __builtin_return_address(0)); + void *addr = __vmalloc_area_node(area, gfp_mask, prot, -1, + __builtin_return_address(0)); + + /* + * A ref_count = 3 is needed because the vm_struct and vmap_area + * structures allocated in the __get_vm_area_node() function contain + * references to the virtual address of the vmalloc'ed block. + */ + kmemleak_alloc(addr, area->size - PAGE_SIZE, 3, gfp_mask); + + return addr; } /** @@ -1459,6 +1472,8 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot, int node, void *caller) { struct vm_struct *area; + void *addr; + unsigned long real_size = size; size = PAGE_ALIGN(size); if (!size || (size >> PAGE_SHIFT) > num_physpages) @@ -1470,7 +1485,16 @@ static void *__vmalloc_node(unsigned long size, gfp_t gfp_mask, pgprot_t prot, if (!area) return NULL; - return __vmalloc_area_node(area, gfp_mask, prot, node, caller); + addr = __vmalloc_area_node(area, gfp_mask, prot, node, caller); + + /* + * A ref_count = 3 is needed because the vm_struct and vmap_area + * structures allocated in the __get_vm_area_node() function contain + * references to the virtual address of the vmalloc'ed block. 
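+ * The pointers held in those management structures are thus expected and do not, by themselves, satisfy the minimum reference count; at least one further reference must be found for the block to be treated as in use.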
+ */ + kmemleak_alloc(addr, real_size, 3, gfp_mask); + + return addr; } void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) From dbb1f81ca67a56c6cfce4c94d07c76378fd4af9e Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Thu, 11 Jun 2009 13:23:19 +0100 Subject: [PATCH 07/12] kmemleak: Add kmemleak_alloc callback from alloc_large_system_hash The alloc_large_system_hash function is called from various places in the kernel and it contains pointers to other allocated structures. It therefore needs to be traced by kmemleak. Signed-off-by: Catalin Marinas --- mm/page_alloc.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 474c7e9dd51..17d5f539a9a 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -46,6 +46,7 @@ #include #include #include +#include #include #include @@ -4546,6 +4547,16 @@ void *__init alloc_large_system_hash(const char *tablename, if (_hash_mask) *_hash_mask = (1 << log2qty) - 1; + /* + * If hashdist is set, the table allocation is done with __vmalloc() + * which invokes the kmemleak_alloc() callback. This function may also + * be called before the slab and kmemleak are initialised when + * kmemleak simply buffers the request to be executed later + * (GFP_ATOMIC flag ignored in this case). + */ + if (!hashdist) + kmemleak_alloc(table, size, 1, GFP_ATOMIC); + return table; } From 4f2294b6dc88d99295230d97fef2c9863cec44c3 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Thu, 11 Jun 2009 13:23:20 +0100 Subject: [PATCH 08/12] kmemleak: Add modules support This patch handles the kmemleak operations needed for modules loading so that memory allocations from inside a module are properly tracked. Signed-off-by: Catalin Marinas --- kernel/module.c | 56 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/kernel/module.c b/kernel/module.c index 2383e60fcf3..5cd55ab15da 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -53,6 +53,7 @@ #include #include #include +#include #if 0 #define DEBUGP printk @@ -430,6 +431,7 @@ static void *percpu_modalloc(unsigned long size, unsigned long align, unsigned long extra; unsigned int i; void *ptr; + int cpu; if (align > PAGE_SIZE) { printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n", @@ -459,6 +461,11 @@ static void *percpu_modalloc(unsigned long size, unsigned long align, if (!split_block(i, size)) return NULL; + /* add the per-cpu scanning areas */ + for_each_possible_cpu(cpu) + kmemleak_alloc(ptr + per_cpu_offset(cpu), size, 0, + GFP_KERNEL); + /* Mark allocated */ pcpu_size[i] = -pcpu_size[i]; return ptr; @@ -473,6 +480,7 @@ static void percpu_modfree(void *freeme) { unsigned int i; void *ptr = __per_cpu_start + block_size(pcpu_size[0]); + int cpu; /* First entry is core kernel percpu data. */ for (i = 1; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) { @@ -484,6 +492,10 @@ static void percpu_modfree(void *freeme) BUG(); free: + /* remove the per-cpu scanning areas */ + for_each_possible_cpu(cpu) + kmemleak_free(freeme + per_cpu_offset(cpu)); + /* Merge with previous? 
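 * A non-negative pcpu_size[] entry denotes a free block, so the block being freed here can be coalesced with its predecessor.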
*/ if (pcpu_size[i-1] >= 0) { pcpu_size[i-1] += pcpu_size[i]; @@ -1876,6 +1888,36 @@ static void *module_alloc_update_bounds(unsigned long size) return ret; } +#ifdef CONFIG_DEBUG_KMEMLEAK +static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr, + Elf_Shdr *sechdrs, char *secstrings) +{ + unsigned int i; + + /* only scan the sections containing data */ + kmemleak_scan_area(mod->module_core, (unsigned long)mod - + (unsigned long)mod->module_core, + sizeof(struct module), GFP_KERNEL); + + for (i = 1; i < hdr->e_shnum; i++) { + if (!(sechdrs[i].sh_flags & SHF_ALLOC)) + continue; + if (strncmp(secstrings + sechdrs[i].sh_name, ".data", 5) != 0 + && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0) + continue; + + kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr - + (unsigned long)mod->module_core, + sechdrs[i].sh_size, GFP_KERNEL); + } +} +#else +static inline void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr, + Elf_Shdr *sechdrs, char *secstrings) +{ +} +#endif + /* Allocate and load the module: note that size of section 0 is always zero, and we rely on this for optional sections. */ static noinline struct module *load_module(void __user *umod, @@ -2046,6 +2088,12 @@ static noinline struct module *load_module(void __user *umod, /* Do the allocs. */ ptr = module_alloc_update_bounds(mod->core_size); + /* + * The pointer to this block is stored in the module structure + * which is inside the block. Just mark it as not being a + * leak. + */ + kmemleak_not_leak(ptr); if (!ptr) { err = -ENOMEM; goto free_percpu; @@ -2054,6 +2102,13 @@ static noinline struct module *load_module(void __user *umod, mod->module_core = ptr; ptr = module_alloc_update_bounds(mod->init_size); + /* + * The pointer to this block is stored in the module structure + * which is inside the block. This block doesn't need to be + * scanned as it contains data and code that will be freed + * after the module is initialized. + */ + kmemleak_ignore(ptr); if (!ptr && mod->init_size) { err = -ENOMEM; goto free_core; @@ -2084,6 +2139,7 @@ static noinline struct module *load_module(void __user *umod, } /* Module has been moved. */ mod = (void *)sechdrs[modindex].sh_addr; + kmemleak_load_module(mod, hdr, sechdrs, secstrings); #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP) mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t), From 2e1483c995bbd0fa6cbd055ad76088a520799ba4 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Thu, 11 Jun 2009 13:24:13 +0100 Subject: [PATCH 09/12] kmemleak: Remove some of the kmemleak false positives There are allocations for which the main pointer cannot be found but they are not memory leaks. This patch fixes some of them. For more information on false positives, see Documentation/kmemleak.txt. Signed-off-by: Catalin Marinas --- drivers/char/vt.c | 7 +++++++ fs/block_dev.c | 6 ++++++ include/linux/percpu.h | 5 +++++ 3 files changed, 18 insertions(+) diff --git a/drivers/char/vt.c b/drivers/char/vt.c index 08151d4de48..961c1a788c6 100644 --- a/drivers/char/vt.c +++ b/drivers/char/vt.c @@ -104,6 +104,7 @@ #include #include #include +#include #define MAX_NR_CON_DRIVER 16 @@ -2880,6 +2881,12 @@ static int __init con_init(void) */ for (currcons = 0; currcons < MIN_NR_CONSOLES; currcons++) { vc_cons[currcons].d = vc = alloc_bootmem(sizeof(struct vc_data)); + /* + * Kmemleak does not track the memory allocated via + * alloc_bootmem() but this block contains pointers to + * other blocks allocated via kmalloc. 
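+ * Registering the block with kmemleak_alloc() below gets it scanned, so those kmalloc'ed blocks are not wrongly reported as leaks.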
+ */ + kmemleak_alloc(vc, sizeof(struct vc_data), 1, GFP_ATOMIC); INIT_WORK(&vc_cons[currcons].SAK_work, vc_SAK); visual_init(vc, currcons, 1); vc->vc_screenbuf = (unsigned short *)alloc_bootmem(vc->vc_screenbuf_size); diff --git a/fs/block_dev.c b/fs/block_dev.c index f45dbc18dd1..d250f807fd8 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include "internal.h" @@ -492,6 +493,11 @@ void __init bdev_cache_init(void) bd_mnt = kern_mount(&bd_type); if (IS_ERR(bd_mnt)) panic("Cannot create bdev pseudo-fs"); + /* + * This vfsmount structure is only used to obtain the + * blockdev_superblock, so tell kmemleak not to report it. + */ + kmemleak_not_leak(bd_mnt); blockdev_superblock = bd_mnt->mnt_sb; /* For writeback */ } diff --git a/include/linux/percpu.h b/include/linux/percpu.h index 1581ff235c7..26fd9d12f05 100644 --- a/include/linux/percpu.h +++ b/include/linux/percpu.h @@ -86,7 +86,12 @@ struct percpu_data { void *ptrs[1]; }; +/* pointer disguising messes up the kmemleak objects tracking */ +#ifndef CONFIG_DEBUG_KMEMLEAK #define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata) +#else +#define __percpu_disguise(pdata) (struct percpu_data *)(pdata) +#endif #define per_cpu_ptr(ptr, cpu) \ ({ \ From 3bba00d7bdd57cb7aa739b751fa0a1fbbb04dc18 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Thu, 11 Jun 2009 13:24:13 +0100 Subject: [PATCH 10/12] kmemleak: Enable the building of the memory leak detector This patch adds the Kconfig.debug and Makefile entries needed for building kmemleak into the kernel. Signed-off-by: Catalin Marinas --- lib/Kconfig.debug | 22 ++++++++++++++++++++++ mm/Makefile | 1 + 2 files changed, 23 insertions(+) diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 6cdcf38f2da..5cc3506574e 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -336,6 +336,28 @@ config SLUB_STATS out which slabs are relevant to a particular load. Try running: slabinfo -DA +config DEBUG_KMEMLEAK + bool "Kernel memory leak detector" + depends on DEBUG_KERNEL && EXPERIMENTAL && (X86 || ARM) && \ + !MEMORY_HOTPLUG + select DEBUG_SLAB if SLAB + select SLUB_DEBUG if SLUB + select DEBUG_FS if SYSFS + select STACKTRACE if STACKTRACE_SUPPORT + select KALLSYMS + help + Say Y here if you want to enable the memory leak + detector. The memory allocation/freeing is traced in a way + similar to the Boehm's conservative garbage collector, the + difference being that the orphan objects are not freed but + only shown in /sys/kernel/debug/kmemleak. Enabling this + feature will introduce an overhead to memory + allocations. See Documentation/kmemleak.txt for more + details. + + In order to access the kmemleak file, debugfs needs to be + mounted (usually at /sys/kernel/debug). + config DEBUG_PREEMPT bool "Debug preemptible kernel" depends on DEBUG_KERNEL && PREEMPT && (TRACE_IRQFLAGS_SUPPORT || PPC64) diff --git a/mm/Makefile b/mm/Makefile index ec73c68b601..cb84a025fdb 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -38,3 +38,4 @@ obj-$(CONFIG_SMP) += allocpercpu.o endif obj-$(CONFIG_QUICKLIST) += quicklist.o obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o +obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o From 0822ee4ac1ae6af5a953f97f75553738834b10b9 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Thu, 11 Jun 2009 13:24:14 +0100 Subject: [PATCH 11/12] kmemleak: Simple testing module for kmemleak This patch adds a loadable module that deliberately leaks memory. 
It is used for testing various memory leaking scenarios. Signed-off-by: Catalin Marinas --- lib/Kconfig.debug | 10 ++++ mm/Makefile | 1 + mm/kmemleak-test.c | 111 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 122 insertions(+) create mode 100644 mm/kmemleak-test.c diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug index 5cc3506574e..116a35051be 100644 --- a/lib/Kconfig.debug +++ b/lib/Kconfig.debug @@ -358,6 +358,16 @@ config DEBUG_KMEMLEAK In order to access the kmemleak file, debugfs needs to be mounted (usually at /sys/kernel/debug). +config DEBUG_KMEMLEAK_TEST + tristate "Simple test for the kernel memory leak detector" + depends on DEBUG_KMEMLEAK + help + Say Y or M here to build a test for the kernel memory leak + detector. This option enables a module that explicitly leaks + memory. + + If unsure, say N. + config DEBUG_PREEMPT bool "Debug preemptible kernel" depends on DEBUG_KERNEL && PREEMPT && (TRACE_IRQFLAGS_SUPPORT || PPC64) diff --git a/mm/Makefile b/mm/Makefile index cb84a025fdb..e89acb090b4 100644 --- a/mm/Makefile +++ b/mm/Makefile @@ -39,3 +39,4 @@ endif obj-$(CONFIG_QUICKLIST) += quicklist.o obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o +obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o diff --git a/mm/kmemleak-test.c b/mm/kmemleak-test.c new file mode 100644 index 00000000000..d5292fc6f52 --- /dev/null +++ b/mm/kmemleak-test.c @@ -0,0 +1,111 @@ +/* + * mm/kmemleak-test.c + * + * Copyright (C) 2008 ARM Limited + * Written by Catalin Marinas + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +struct test_node { + long header[25]; + struct list_head list; + long footer[25]; +}; + +static LIST_HEAD(test_list); +static DEFINE_PER_CPU(void *, test_pointer); + +/* + * Some very simple testing. This function needs to be extended for + * proper testing. 
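+ * Each allocation below is printed together with the returned pointer so that the addresses in the kmemleak report can be matched against the console log.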
+ */ +static int __init kmemleak_test_init(void) +{ + struct test_node *elem; + int i; + + printk(KERN_INFO "Kmemleak testing\n"); + + /* make some orphan objects */ + pr_info("kmemleak: kmalloc(32) = %p\n", kmalloc(32, GFP_KERNEL)); + pr_info("kmemleak: kmalloc(32) = %p\n", kmalloc(32, GFP_KERNEL)); + pr_info("kmemleak: kmalloc(1024) = %p\n", kmalloc(1024, GFP_KERNEL)); + pr_info("kmemleak: kmalloc(1024) = %p\n", kmalloc(1024, GFP_KERNEL)); + pr_info("kmemleak: kmalloc(2048) = %p\n", kmalloc(2048, GFP_KERNEL)); + pr_info("kmemleak: kmalloc(2048) = %p\n", kmalloc(2048, GFP_KERNEL)); + pr_info("kmemleak: kmalloc(4096) = %p\n", kmalloc(4096, GFP_KERNEL)); + pr_info("kmemleak: kmalloc(4096) = %p\n", kmalloc(4096, GFP_KERNEL)); +#ifndef CONFIG_MODULES + pr_info("kmemleak: kmem_cache_alloc(files_cachep) = %p\n", + kmem_cache_alloc(files_cachep, GFP_KERNEL)); + pr_info("kmemleak: kmem_cache_alloc(files_cachep) = %p\n", + kmem_cache_alloc(files_cachep, GFP_KERNEL)); +#endif + pr_info("kmemleak: vmalloc(64) = %p\n", vmalloc(64)); + pr_info("kmemleak: vmalloc(64) = %p\n", vmalloc(64)); + pr_info("kmemleak: vmalloc(64) = %p\n", vmalloc(64)); + pr_info("kmemleak: vmalloc(64) = %p\n", vmalloc(64)); + pr_info("kmemleak: vmalloc(64) = %p\n", vmalloc(64)); + + /* + * Add elements to a list. They should only appear as orphan + * after the module is removed. + */ + for (i = 0; i < 10; i++) { + elem = kmalloc(sizeof(*elem), GFP_KERNEL); + pr_info("kmemleak: kmalloc(sizeof(*elem)) = %p\n", elem); + if (!elem) + return -ENOMEM; + memset(elem, 0, sizeof(*elem)); + INIT_LIST_HEAD(&elem->list); + + list_add_tail(&elem->list, &test_list); + } + + for_each_possible_cpu(i) { + per_cpu(test_pointer, i) = kmalloc(129, GFP_KERNEL); + pr_info("kmemleak: kmalloc(129) = %p\n", + per_cpu(test_pointer, i)); + } + + return 0; +} +module_init(kmemleak_test_init); + +static void __exit kmemleak_test_exit(void) +{ + struct test_node *elem, *tmp; + + /* + * Remove the list elements without actually freeing the + * memory. + */ + list_for_each_entry_safe(elem, tmp, &test_list, list) + list_del(&elem->list); +} +module_exit(kmemleak_test_exit); + +MODULE_LICENSE("GPL"); From 3aa27bbe7a6536d1ec859d3a97caf3319b5081b7 Mon Sep 17 00:00:00 2001 From: Catalin Marinas Date: Thu, 11 Jun 2009 13:24:14 +0100 Subject: [PATCH 12/12] kmemleak: Add the corresponding MAINTAINERS entry Signed-off-by: Catalin Marinas --- MAINTAINERS | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index cf4abddfc8a..e5af3063e8b 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3359,6 +3359,12 @@ F: Documentation/trace/kmemtrace.txt F: include/trace/kmemtrace.h F: kernel/trace/kmemtrace.c +KMEMLEAK +P: Catalin Marinas +M: catalin.marinas@arm.com +L: linux-kernel@vger.kernel.org +S: Maintained + KPROBES P: Ananth N Mavinakayanahalli M: ananth@in.ibm.com