percpu: add __percpu sparse annotations to core kernel subsystems

Add __percpu sparse annotations to core subsystems.

These annotations are to make sparse consider percpu variables to be
in a different address space and warn if accessed without going
through percpu accessors.  This patch doesn't affect normal builds.
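
For reference, __percpu is a no-op in normal builds and only expands to an address-space attribute when sparse runs with __CHECKER__ defined, which is why the patch does not change generated code. A rough sketch of the definition (include/linux/compiler.h of that era; exact form may differ):

    #ifdef __CHECKER__
    /* noderef: warn on plain dereference; address_space(3): segregate percpu pointers */
    # define __percpu	__attribute__((noderef, address_space(3)))
    #else
    # define __percpu
    #endif

The noderef attribute is what makes sparse complain about a bare dereference, and the address-space number keeps __percpu pointers from silently mixing with ordinary kernel pointers.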

Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Christoph Lameter <cl@linux-foundation.org>
Acked-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: linux-mm@kvack.org
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: Dipankar Sarma <dipankar@in.ibm.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Eric Biederman <ebiederm@xmission.com>
Tejun Heo 2010-02-02 14:38:57 +09:00
parent ab386128f2
commit 43cf38eb5c
11 changed files with 22 additions and 20 deletions
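
The changes below all follow the same pattern: pointers that receive values from the percpu allocator are annotated __percpu, so sparse can tell them apart from ordinary kernel pointers. A minimal, hypothetical usage sketch of how such an annotated pointer is meant to be used (foo_stats and foo_init are made-up names, not from this patch):

    #include <linux/types.h>
    #include <linux/errno.h>
    #include <linux/percpu.h>

    struct foo_stats {
    	u64 hits;
    };

    static struct foo_stats __percpu *stats;	/* annotated per-cpu pointer */

    static int foo_init(void)
    {
    	int cpu;

    	stats = alloc_percpu(struct foo_stats);	/* allocator hands back a __percpu pointer */
    	if (!stats)
    		return -ENOMEM;

    	for_each_possible_cpu(cpu)
    		per_cpu_ptr(stats, cpu)->hits = 0;	/* accessor yields a normal pointer: sparse-clean */

    	/* stats->hits = 0; */			/* bare dereference: sparse would warn */
    	return 0;
    }

Conversions that intentionally cross the boundary, such as the __addr_to_pcpu_ptr()/__pcpu_ptr_to_addr() macros changed further down, need explicit __percpu/__force casts to stay sparse-clean.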


@@ -150,8 +150,8 @@ struct blk_user_trace_setup {
 struct blk_trace {
 	int trace_state;
 	struct rchan *rchan;
-	unsigned long *sequence;
-	unsigned char *msg_data;
+	unsigned long __percpu *sequence;
+	unsigned char __percpu *msg_data;
 	u16 act_mask;
 	u64 start_lba;
 	u64 end_lba;


@@ -101,7 +101,7 @@ struct hd_struct {
 	unsigned long stamp;
 	int in_flight[2];
 #ifdef CONFIG_SMP
-	struct disk_stats *dkstats;
+	struct disk_stats __percpu *dkstats;
 #else
 	struct disk_stats dkstats;
 #endif


@@ -199,7 +199,7 @@ extern struct kimage *kexec_crash_image;
  */
 extern struct resource crashk_res;
 typedef u32 note_buf_t[KEXEC_NOTE_BYTES/4];
-extern note_buf_t *crash_notes;
+extern note_buf_t __percpu *crash_notes;
 extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
 extern size_t vmcoreinfo_size;
 extern size_t vmcoreinfo_max_size;


@@ -301,7 +301,7 @@ struct zone {
 	unsigned long min_unmapped_pages;
 	unsigned long min_slab_pages;
 #endif
-	struct per_cpu_pageset *pageset;
+	struct per_cpu_pageset __percpu *pageset;
 	/*
 	 * free areas of different sizes
 	 */


@@ -365,7 +365,7 @@ struct module
 	struct module_ref {
 		int count;
-	} *refptr;
+	} __percpu *refptr;
 #endif
 #ifdef CONFIG_CONSTRUCTORS


@@ -21,7 +21,7 @@ struct percpu_counter {
 #ifdef CONFIG_HOTPLUG_CPU
 	struct list_head list;	/* All percpu_counters are on a list */
 #endif
-	s32 *counters;
+	s32 __percpu *counters;
 };
 extern int percpu_counter_batch;


@@ -33,7 +33,7 @@ struct srcu_struct_array {
 struct srcu_struct {
 	int completed;
-	struct srcu_struct_array *per_cpu_ref;
+	struct srcu_struct_array __percpu *per_cpu_ref;
 	struct mutex mutex;
 };


@@ -41,7 +41,7 @@
 #include <asm/sections.h>
 /* Per cpu memory for storing cpu states in case of system crash. */
-note_buf_t* crash_notes;
+note_buf_t __percpu *crash_notes;
 /* vmcoreinfo stuff */
 static unsigned char vmcoreinfo_data[VMCOREINFO_BYTES];


@@ -1566,7 +1566,7 @@ static unsigned long cpu_avg_load_per_task(int cpu)
 #ifdef CONFIG_FAIR_GROUP_SCHED
-static __read_mostly unsigned long *update_shares_data;
+static __read_mostly unsigned long __percpu *update_shares_data;
 static void __set_se_shares(struct sched_entity *se, unsigned long shares);
@@ -10683,7 +10683,7 @@ struct cgroup_subsys cpu_cgroup_subsys = {
 struct cpuacct {
 	struct cgroup_subsys_state css;
 	/* cpuusage holds pointer to a u64-type object on every cpu */
-	u64 *cpuusage;
+	u64 __percpu *cpuusage;
 	struct percpu_counter cpustat[CPUACCT_STAT_NSTATS];
 	struct cpuacct *parent;
 };


@@ -45,7 +45,7 @@ static int refcount;
 static struct workqueue_struct *stop_machine_wq;
 static struct stop_machine_data active, idle;
 static const struct cpumask *active_cpus;
-static void *stop_machine_work;
+static void __percpu *stop_machine_work;
 static void set_state(enum stopmachine_state newstate)
 {


@@ -80,13 +80,15 @@
 /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */
 #ifndef __addr_to_pcpu_ptr
 #define __addr_to_pcpu_ptr(addr) \
-	(void *)((unsigned long)(addr) - (unsigned long)pcpu_base_addr \
-	+ (unsigned long)__per_cpu_start)
+	(void __percpu *)((unsigned long)(addr) - \
+			  (unsigned long)pcpu_base_addr + \
+			  (unsigned long)__per_cpu_start)
 #endif
 #ifndef __pcpu_ptr_to_addr
 #define __pcpu_ptr_to_addr(ptr) \
-	(void *)((unsigned long)(ptr) + (unsigned long)pcpu_base_addr \
-	- (unsigned long)__per_cpu_start)
+	(void __force *)((unsigned long)(ptr) + \
+			 (unsigned long)pcpu_base_addr - \
+			 (unsigned long)__per_cpu_start)
 #endif
 struct pcpu_chunk {
@@ -1065,7 +1067,7 @@ static struct pcpu_chunk *alloc_pcpu_chunk(void)
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
  */
-static void *pcpu_alloc(size_t size, size_t align, bool reserved)
+static void __percpu *pcpu_alloc(size_t size, size_t align, bool reserved)
 {
 	static int warn_limit = 10;
 	struct pcpu_chunk *chunk;
@@ -1194,7 +1196,7 @@ fail_unlock_mutex:
  * RETURNS:
  * Percpu pointer to the allocated area on success, NULL on failure.
  */
-void *__alloc_percpu(size_t size, size_t align)
+void __percpu *__alloc_percpu(size_t size, size_t align)
 {
 	return pcpu_alloc(size, align, false);
 }
@@ -1215,7 +1217,7 @@ EXPORT_SYMBOL_GPL(__alloc_percpu);
 * RETURNS:
 * Percpu pointer to the allocated area on success, NULL on failure.
 */
-void *__alloc_reserved_percpu(size_t size, size_t align)
+void __percpu *__alloc_reserved_percpu(size_t size, size_t align)
 {
 	return pcpu_alloc(size, align, true);
 }
@@ -1267,7 +1269,7 @@ static void pcpu_reclaim(struct work_struct *work)
 * CONTEXT:
 * Can be called from atomic context.
 */
-void free_percpu(void *ptr)
+void free_percpu(void __percpu *ptr)
 {
 	void *addr;
 	struct pcpu_chunk *chunk;