memcg: Fix race condition in memcg_check_events() with this_cpu usage
Various code in memcontrol.c calls this_cpu_read() on the calculations to be done from two different percpu variables, or does an open-coded read-modify-write on a single percpu variable. Disable preemption throughout these operations so that the writes go to the correct places. [hannes@cmpxchg.org: added this_cpu to __this_cpu conversion] Signed-off-by: Johannes Weiner <hannes@cmpxchg.org> Signed-off-by: Steven Rostedt <rostedt@goodmis.org> Cc: Greg Thelen <gthelen@google.com> Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Cc: Balbir Singh <balbir@linux.vnet.ibm.com> Cc: Daisuke Nishimura <nishimura@mxp.nes.nec.co.jp> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Christoph Lameter <cl@linux.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
This commit is contained in:
parent
a61ed3cec5
commit
4799401fef
|
@ -686,8 +686,8 @@ static bool __memcg_event_check(struct mem_cgroup *memcg, int target)
|
||||||
{
|
{
|
||||||
unsigned long val, next;
|
unsigned long val, next;
|
||||||
|
|
||||||
val = this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
|
val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
|
||||||
next = this_cpu_read(memcg->stat->targets[target]);
|
next = __this_cpu_read(memcg->stat->targets[target]);
|
||||||
/* from time_after() in jiffies.h */
|
/* from time_after() in jiffies.h */
|
||||||
return ((long)next - (long)val < 0);
|
return ((long)next - (long)val < 0);
|
||||||
}
|
}
|
||||||
|
@ -696,7 +696,7 @@ static void __mem_cgroup_target_update(struct mem_cgroup *memcg, int target)
|
||||||
{
|
{
|
||||||
unsigned long val, next;
|
unsigned long val, next;
|
||||||
|
|
||||||
val = this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
|
val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]);
|
||||||
|
|
||||||
switch (target) {
|
switch (target) {
|
||||||
case MEM_CGROUP_TARGET_THRESH:
|
case MEM_CGROUP_TARGET_THRESH:
|
||||||
|
@ -712,7 +712,7 @@ static void __mem_cgroup_target_update(struct mem_cgroup *memcg, int target)
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
this_cpu_write(memcg->stat->targets[target], next);
|
__this_cpu_write(memcg->stat->targets[target], next);
|
||||||
}
|
}
|
||||||
|
|
||||||
/*
|
/*
|
||||||
|
@ -721,6 +721,7 @@ static void __mem_cgroup_target_update(struct mem_cgroup *memcg, int target)
|
||||||
*/
|
*/
|
||||||
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
|
static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
|
||||||
{
|
{
|
||||||
|
preempt_disable();
|
||||||
/* threshold event is triggered in finer grain than soft limit */
|
/* threshold event is triggered in finer grain than soft limit */
|
||||||
if (unlikely(__memcg_event_check(memcg, MEM_CGROUP_TARGET_THRESH))) {
|
if (unlikely(__memcg_event_check(memcg, MEM_CGROUP_TARGET_THRESH))) {
|
||||||
mem_cgroup_threshold(memcg);
|
mem_cgroup_threshold(memcg);
|
||||||
|
@ -740,6 +741,7 @@ static void memcg_check_events(struct mem_cgroup *memcg, struct page *page)
|
||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
}
|
}
|
||||||
|
preempt_enable();
|
||||||
}
|
}
|
||||||
|
|
||||||
static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
|
static struct mem_cgroup *mem_cgroup_from_cont(struct cgroup *cont)
|
||||||
|
|
Reference in New Issue