From dcce284a259373f9e5570f2e33f79eca84fcf565 Mon Sep 17 00:00:00 2001
From: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Date: Thu, 18 Jun 2009 13:24:12 +1000
Subject: [PATCH] mm: Extend gfp masking to the page allocator

The page allocator also needs the masking of gfp flags during boot,
so this moves it out of slab/slub and uses it with the page allocator
as well.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
---
 include/linux/gfp.h |    9 ++++++++-
 init/main.c         |    4 ++++
 mm/page_alloc.c     |    3 +++
 mm/slab.c           |   15 ++-------------
 mm/slub.c           |   12 +-----------
 5 files changed, 18 insertions(+), 25 deletions(-)

diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index cfdb35d71bc..7c777a0da17 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -99,7 +99,7 @@ struct vm_area_struct;
 			__GFP_NORETRY|__GFP_NOMEMALLOC)
 
 /* Control slab gfp mask during early boot */
-#define SLAB_GFP_BOOT_MASK __GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS)
+#define GFP_BOOT_MASK __GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS)
 
 /* Control allocation constraints */
 #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE)
@@ -348,4 +348,11 @@ static inline void oom_killer_enable(void)
 	oom_killer_disabled = false;
 }
 
+extern gfp_t gfp_allowed_mask;
+
+static inline void set_gfp_allowed_mask(gfp_t mask)
+{
+	gfp_allowed_mask = mask;
+}
+
 #endif /* __LINUX_GFP_H */
diff --git a/init/main.c b/init/main.c
index 1a65fdd0631..09131ec090c 100644
--- a/init/main.c
+++ b/init/main.c
@@ -642,6 +642,10 @@ asmlinkage void __init start_kernel(void)
 				 "enabled early\n");
 	early_boot_irqs_on();
 	local_irq_enable();
+
+	/* Interrupts are enabled now so all GFP allocations are safe. */
+	set_gfp_allowed_mask(__GFP_BITS_MASK);
+
 	kmem_cache_init_late();
 
 	/*
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a5f3c278c57..6f0753fe694 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -73,6 +73,7 @@ unsigned long totalram_pages __read_mostly;
 unsigned long totalreserve_pages __read_mostly;
 unsigned long highest_memmap_pfn __read_mostly;
 int percpu_pagelist_fraction;
+gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
 
 #ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
 int pageblock_order __read_mostly;
@@ -1863,6 +1864,8 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
 	struct page *page;
 	int migratetype = allocflags_to_migratetype(gfp_mask);
 
+	gfp_mask &= gfp_allowed_mask;
+
 	lockdep_trace_alloc(gfp_mask);
 
 	might_sleep_if(gfp_mask & __GFP_WAIT);
diff --git a/mm/slab.c b/mm/slab.c
index d08692303f6..e74a16e4ced 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -304,12 +304,6 @@ struct kmem_list3 {
 	int free_touched;		/* updated without locking */
 };
 
-/*
- * The slab allocator is initialized with interrupts disabled. Therefore, make
- * sure early boot allocations don't accidentally enable interrupts.
- */
-static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
-
 /*
  * Need this for bootstrapping a per node allocator.
  */
@@ -1559,11 +1553,6 @@ void __init kmem_cache_init_late(void)
 {
 	struct kmem_cache *cachep;
 
-	/*
-	 * Interrupts are enabled now so all GFP allocations are safe.
-	 */
-	slab_gfp_mask = __GFP_BITS_MASK;
-
 	/* 6) resize the head arrays to their final sizes */
 	mutex_lock(&cache_chain_mutex);
 	list_for_each_entry(cachep, &cache_chain, next)
@@ -3307,7 +3296,7 @@ __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
 	unsigned long save_flags;
 	void *ptr;
 
-	flags &= slab_gfp_mask;
+	flags &= gfp_allowed_mask;
 
 	lockdep_trace_alloc(flags);
 
@@ -3392,7 +3381,7 @@ __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
 	unsigned long save_flags;
 	void *objp;
 
-	flags &= slab_gfp_mask;
+	flags &= gfp_allowed_mask;
 
 	lockdep_trace_alloc(flags);
 
diff --git a/mm/slub.c b/mm/slub.c
index 4c6449310a0..ce62b770e2f 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -179,12 +179,6 @@ static enum {
 	SYSFS		/* Sysfs up */
 } slab_state = DOWN;
 
-/*
- * The slab allocator is initialized with interrupts disabled. Therefore, make
- * sure early boot allocations don't accidentally enable interrupts.
- */
-static gfp_t slab_gfp_mask __read_mostly = SLAB_GFP_BOOT_MASK;
-
 /* A list of all slab caches on the system */
 static DECLARE_RWSEM(slub_lock);
 static LIST_HEAD(slab_caches);
@@ -1692,7 +1686,7 @@ static __always_inline void *slab_alloc(struct kmem_cache *s,
 	unsigned long flags;
 	unsigned int objsize;
 
-	gfpflags &= slab_gfp_mask;
+	gfpflags &= gfp_allowed_mask;
 
 	lockdep_trace_alloc(gfpflags);
 	might_sleep_if(gfpflags & __GFP_WAIT);
@@ -3220,10 +3214,6 @@ void __init kmem_cache_init(void)
 
 void __init kmem_cache_init_late(void)
 {
-	/*
-	 * Interrupts are enabled now so all GFP allocations are safe.
-	 */
-	slab_gfp_mask = __GFP_BITS_MASK;
 }
 
 /*
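
For illustration, the masking pattern this patch centralizes can be sketched
as a stand-alone userspace C program. This is a minimal sketch, not kernel
code: the bit values, alloc_pages_demo() and main() are invented for the
demo, and the kernel's leading "__" is dropped from the flag names so the
program stays strictly conforming C. Only the mask-then-allocate logic
mirrors __alloc_pages_nodemask() and slab_alloc() above.

/*
 * Userspace sketch of boot-time GFP masking; bit assignments are made up.
 */
#include <stdio.h>

typedef unsigned int gfp_t;

#define GFP_WAIT	0x01u	/* allocation may sleep */
#define GFP_IO		0x02u	/* may start disk I/O */
#define GFP_FS		0x04u	/* may recurse into the filesystem */
#define GFP_HIGH	0x08u	/* high-priority allocation */
#define GFP_BITS_MASK	0x0fu	/* every flag bit used in this demo */

/* Everything except the flags that could sleep or enter I/O or FS code. */
#define GFP_BOOT_MASK	(GFP_BITS_MASK & ~(GFP_WAIT | GFP_IO | GFP_FS))

/* Starts out restricted, like gfp_allowed_mask in mm/page_alloc.c above. */
static gfp_t gfp_allowed_mask = GFP_BOOT_MASK;

static void set_gfp_allowed_mask(gfp_t mask)
{
	gfp_allowed_mask = mask;
}

/* Stand-in for __alloc_pages_nodemask(): mask first, then act on flags. */
static void alloc_pages_demo(gfp_t gfp_mask)
{
	gfp_mask &= gfp_allowed_mask;
	printf("effective mask 0x%02x -> %s\n", gfp_mask,
	       (gfp_mask & GFP_WAIT) ? "may sleep" : "treated as atomic");
}

int main(void)
{
	/* A GFP_KERNEL-style request that would normally be allowed to sleep. */
	gfp_t gfp_kernel = GFP_WAIT | GFP_IO | GFP_FS;

	/* Early boot: the sleeping/I/O/FS bits are silently stripped. */
	alloc_pages_demo(gfp_kernel);

	/* Mirrors start_kernel() after local_irq_enable(): widen the mask. */
	set_gfp_allowed_mask(GFP_BITS_MASK);
	alloc_pages_demo(gfp_kernel);
	return 0;
}

Note that the masking is applied before lockdep_trace_alloc() and
might_sleep_if() in the kernel paths above, so the debug checks see the
effective flags rather than the caller's request; with the mask widened in
one place at start_kernel() time, the page allocator, slab and slub all
inherit the same early-boot restriction instead of each carrying its own
slab_gfp_mask copy.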