
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/penberg/slab-2.6: (27 commits)
  SLUB: Fix memory hotplug with !NUMA
  slub: Move functions to reduce #ifdefs
  slub: Enable sysfs support for !CONFIG_SLUB_DEBUG
  SLUB: Optimize slab_free() debug check
  slub: Move NUMA-related functions under CONFIG_NUMA
  slub: Add lock release annotation
  slub: Fix signedness warnings
  slub: extract common code to remove objects from partial list without locking
  SLUB: Pass active and inactive redzone flags instead of boolean to debug functions
  slub: reduce differences between SMP and NUMA
  Revert "Slub: UP bandaid"
  percpu: clear memory allocated with the km allocator
  percpu: use percpu allocator on UP too
  percpu: reduce PCPU_MIN_UNIT_SIZE to 32k
  vmalloc: pcpu_get/free_vm_areas() aren't needed on UP
  SLUB: Fix merged slab cache names
  Slub: UP bandaid
  slub: fix SLUB_RESILIENCY_TEST for dynamic kmalloc caches
  slub: Fix up missing kmalloc_cache -> kmem_cache_node case for memoryhotplug
  slub: Add dummy functions for the !SLUB_DEBUG case
  ...
Linus Torvalds committed 2010-10-24 12:47:55 -07:00
commit 76c39e4fef
4 changed files with 424 additions and 384 deletions

include/linux/slub_def.h

@@ -87,7 +87,7 @@ struct kmem_cache {
 	unsigned long min_partial;
 	const char *name;	/* Name (only for display!) */
 	struct list_head list;	/* List of slab caches */
-#ifdef CONFIG_SLUB_DEBUG
+#ifdef CONFIG_SYSFS
 	struct kobject kobj;	/* For sysfs */
 #endif
@@ -96,11 +96,8 @@ struct kmem_cache {
 	 * Defragmentation by allocating from a remote node.
 	 */
 	int remote_node_defrag_ratio;
-	struct kmem_cache_node *node[MAX_NUMNODES];
-#else
-	/* Avoid an extra cache line for UP */
-	struct kmem_cache_node local_node;
 #endif
+	struct kmem_cache_node *node[MAX_NUMNODES];
 };
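
Review note: the deleted lines remove the UP-only local_node embedding, so
node[MAX_NUMNODES] is now declared for every configuration and a UP kernel
simply uses index 0 (see "slub: reduce differences between SMP and NUMA" in
the commit list above). A minimal compilable sketch of the resulting single
lookup path, mirroring SLUB's get_node() helper; the struct contents below
are illustrative, not the kernel's:

#define MAX_NUMNODES 1			/* 1 on UP builds, larger on NUMA */

struct kmem_cache_node { int nr_partial; };	/* illustrative field */

struct kmem_cache {
	struct kmem_cache_node *node[MAX_NUMNODES];
};

static struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];		/* no #ifdef CONFIG_NUMA branch needed */
}
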
@@ -139,19 +136,16 @@ struct kmem_cache {
 #ifdef CONFIG_ZONE_DMA
 #define SLUB_DMA __GFP_DMA
-/* Reserve extra caches for potential DMA use */
-#define KMALLOC_CACHES (2 * SLUB_PAGE_SHIFT)
 #else
 /* Disable DMA functionality */
 #define SLUB_DMA (__force gfp_t)0
-#define KMALLOC_CACHES SLUB_PAGE_SHIFT
 #endif

 /*
  * We keep the general caches in an array of slab caches that are used for
  * 2^x bytes of allocations.
  */
-extern struct kmem_cache kmalloc_caches[KMALLOC_CACHES];
+extern struct kmem_cache *kmalloc_caches[SLUB_PAGE_SHIFT];

 /*
  * Sorry that the following has to be that ugly but some versions of GCC
@@ -216,7 +210,7 @@ static __always_inline struct kmem_cache *kmalloc_slab(size_t size)
 	if (index == 0)
 		return NULL;

-	return &kmalloc_caches[index];
+	return kmalloc_caches[index];
 }

 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
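
Review note: the earlier hunk turns kmalloc_caches from an array of
struct kmem_cache into an array of pointers sized by SLUB_PAGE_SHIFT, so
kmalloc_slab() must return the element itself rather than its address.
A standalone sketch of that mechanical change; the size 16 and the
object_size field are placeholders, not the kernel's values:

struct kmem_cache { unsigned int object_size; };	/* illustrative */

/* before: struct kmem_cache  kmalloc_caches[N];  return &kmalloc_caches[i]; */
/* after:  struct kmem_cache *kmalloc_caches[N];  return  kmalloc_caches[i]; */
static struct kmem_cache *kmalloc_caches[16];

static struct kmem_cache *kmalloc_slab_sketch(unsigned int index)
{
	return kmalloc_caches[index];
}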

lib/Kconfig.debug

@@ -353,7 +353,7 @@ config SLUB_DEBUG_ON
 config SLUB_STATS
 	default n
 	bool "Enable SLUB performance statistics"
-	depends on SLUB && SLUB_DEBUG && SYSFS
+	depends on SLUB && SYSFS
 	help
 	  SLUB statistics are useful to debug SLUBs allocation behavior in
 	  order find ways to optimize the allocator. This should never be

mm/slob.c

@@ -500,7 +500,9 @@ void *__kmalloc_node(size_t size, gfp_t gfp, int node)
 	} else {
 		unsigned int order = get_order(size);

-		ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
+		if (likely(order))
+			gfp |= __GFP_COMP;
+		ret = slob_new_pages(gfp, order, node);
 		if (ret) {
 			struct page *page;
 			page = virt_to_page(ret);
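
Review note: __GFP_COMP tells the page allocator to treat a multi-page
allocation as a single compound page; for an order-0 (single-page)
allocation the flag has no meaning, which is what the added likely(order)
test avoids passing. A user-space sketch of the pattern with stand-in
constants, not the kernel's gfp machinery:

typedef unsigned int gfp_t;

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define DEMO_GFP_COMP	0x4000u		/* stand-in for __GFP_COMP */

/* Smallest order such that (PAGE_SIZE << order) covers size. */
static unsigned int demo_get_order(unsigned long size)
{
	unsigned int order = 0;

	while ((PAGE_SIZE << order) < size)
		order++;
	return order;
}

static gfp_t demo_gfp_for_order(gfp_t gfp, unsigned int order)
{
	if (order)			/* allocation spans multiple pages */
		gfp |= DEMO_GFP_COMP;	/* so request one compound page */
	return gfp;
}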

mm/slub.c (788 changes)

File diff suppressed because it is too large.