From e2b245f89ee3f5b03fb42d843a79a58cf4773181 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20H=2E=20Sch=C3=B6nherr?= Date: Mon, 1 Aug 2011 11:03:28 +0200 Subject: [PATCH 01/46] sched: Remove rq->avg_load_per_task MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since commit a2d47777 ("sched: fix stale value in average load per task") the variable rq->avg_load_per_task is no longer required. Remove it. Signed-off-by: Jan H. Schönherr Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1312189408-17172-1-git-send-email-schnhrr@cs.tu-berlin.de Signed-off-by: Ingo Molnar --- kernel/sched.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/kernel/sched.c b/kernel/sched.c index ccacdbdecf4..b1e8f0eca1f 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -520,8 +520,6 @@ struct rq { int cpu; int online; - unsigned long avg_load_per_task; - u64 rt_avg; u64 age_stamp; u64 idle_stamp; @@ -1569,11 +1567,9 @@ static unsigned long cpu_avg_load_per_task(int cpu) unsigned long nr_running = ACCESS_ONCE(rq->nr_running); if (nr_running) - rq->avg_load_per_task = rq->load.weight / nr_running; - else - rq->avg_load_per_task = 0; + return rq->load.weight / nr_running; - return rq->avg_load_per_task; + return 0; } #ifdef CONFIG_PREEMPT From 2c2efaed9bc973e3aeab1385c618017b56c8f6d7 Mon Sep 17 00:00:00 2001 From: Yong Zhang Date: Fri, 29 Jul 2011 16:20:33 +0800 Subject: [PATCH 02/46] sched: Kill WAKEUP_PREEMPT Remove the WAKEUP_PREEMPT feature, disabling it doesn't make any sense and its outlived its use by a long long while. Signed-off-by: Yong Zhang Acked-by: Mike Galbraith Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/20110729082033.GB12106@zhy Signed-off-by: Ingo Molnar --- kernel/sched_fair.c | 9 +-------- kernel/sched_features.h | 5 ----- 2 files changed, 1 insertion(+), 13 deletions(-) diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index bc8ee999381..241fc86bc61 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -1095,9 +1095,6 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) * narrow margin doesn't have to wait for a full slice. * This also mitigates buddy induced latencies under load. */ - if (!sched_feat(WAKEUP_PREEMPT)) - return; - if (delta_exec < sysctl_sched_min_granularity) return; @@ -1233,7 +1230,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) return; #endif - if (cfs_rq->nr_running > 1 || !sched_feat(WAKEUP_PREEMPT)) + if (cfs_rq->nr_running > 1) check_preempt_tick(cfs_rq, curr); } @@ -1899,10 +1896,6 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ if (unlikely(p->policy != SCHED_NORMAL)) return; - - if (!sched_feat(WAKEUP_PREEMPT)) - return; - find_matching_se(&se, &pse); update_curr(cfs_rq_of(se)); BUG_ON(!pse); diff --git a/kernel/sched_features.h b/kernel/sched_features.h index 2e74677cb04..efa0a7b75dd 100644 --- a/kernel/sched_features.h +++ b/kernel/sched_features.h @@ -11,11 +11,6 @@ SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 1) */ SCHED_FEAT(START_DEBIT, 1) -/* - * Should wakeups try to preempt running tasks. 
- */ -SCHED_FEAT(WAKEUP_PREEMPT, 1) - /* * Based on load and program behaviour, see if it makes sense to place * a newly woken task on the same cpu as the task that woke it -- From c350a04efd1c89cd256b2abc8f07a21d0d53ff24 Mon Sep 17 00:00:00 2001 From: Mike Galbraith Date: Wed, 27 Jul 2011 17:14:55 +0200 Subject: [PATCH 03/46] sched: fix broken SCHED_RESET_ON_FORK handling Setting child->prio = current->normal_prio _after_ SCHED_RESET_ON_FORK has been handled for an RT parent gives birth to a deranged mutant child with non-RT policy, but RT prio and sched_class. Move PI leakage protection up, always set priorities and weight, and if the child is leaving RT class, reset rt_priority to the proper value. Signed-off-by: Mike Galbraith Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1311779695.8691.2.camel@marge.simson.net Signed-off-by: Ingo Molnar --- kernel/sched.c | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/kernel/sched.c b/kernel/sched.c index b1e8f0eca1f..cf427bb2b65 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -2843,20 +2843,24 @@ void sched_fork(struct task_struct *p) */ p->state = TASK_RUNNING; + /* + * Make sure we do not leak PI boosting priority to the child. + */ + p->prio = current->normal_prio; + /* * Revert to default priority/policy on fork if requested. */ if (unlikely(p->sched_reset_on_fork)) { - if (p->policy == SCHED_FIFO || p->policy == SCHED_RR) { + if (task_has_rt_policy(p)) { p->policy = SCHED_NORMAL; - p->normal_prio = p->static_prio; - } - - if (PRIO_TO_NICE(p->static_prio) < 0) { p->static_prio = NICE_TO_PRIO(0); - p->normal_prio = p->static_prio; - set_load_weight(p); - } + p->rt_priority = 0; + } else if (PRIO_TO_NICE(p->static_prio) < 0) + p->static_prio = NICE_TO_PRIO(0); + + p->prio = p->normal_prio = __normal_prio(p); + set_load_weight(p); /* * We don't need the reset flag anymore after the fork. It has @@ -2865,11 +2869,6 @@ void sched_fork(struct task_struct *p) p->sched_reset_on_fork = 0; } - /* - * Make sure we do not leak PI boosting priority to the child. - */ - p->prio = current->normal_prio; - if (!rt_prio(p->prio)) p->sched_class = &fair_sched_class; From 67d955383ab2ef8866c494c14156a4f3d29e441c Mon Sep 17 00:00:00 2001 From: Hillf Danton Date: Thu, 16 Jun 2011 21:55:18 -0400 Subject: [PATCH 04/46] sched: Remove noop in next_prio() When computing the next priority for a given run-queue, the check for RT priority of the task determined by the pick_next_highest_task_rt() function could be removed, since only RT tasks are returned by the function. Reviewed-by: Yong Zhang Signed-off-by: Hillf Danton Signed-off-by: Steven Rostedt Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/BANLkTimxmWiof9s5AvS3v_0X+sMiE=0x5g@mail.gmail.com Signed-off-by: Ingo Molnar --- kernel/sched_rt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 97540f0c9e4..e2698c0fc69 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -704,7 +704,7 @@ static inline int next_prio(struct rq *rq) { struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu); - if (next && rt_prio(next->prio)) + if (next) return next->prio; else return MAX_RT_PRIO; From 0835471697255b415edcefd6b1e25b6c034439f2 Mon Sep 17 00:00:00 2001 From: Hillf Danton Date: Thu, 16 Jun 2011 21:55:19 -0400 Subject: [PATCH 05/46] sched: Remove noop in lowest_flag_domain() Checking for the validity of sd is removed, since it is already checked by the for_each_domain macro. 
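For readers not familiar with the iterator: for_each_domain() walks the sched-domain chain and its loop condition already bails out on a NULL domain, so the extra "sd &&" test inside the body can never fire. Below is a tiny stand-alone C model of that shape; the struct name, macro name and flag values are illustrative only, not the kernel's.

    #include <stdio.h>

    /* Simplified stand-in for struct sched_domain: just the parent link and flags. */
    struct sd_model {
            struct sd_model *parent;
            int flags;
    };

    /*
     * Modeled after the shape of for_each_domain(): the loop condition itself
     * tests the cursor for NULL, so the loop body never sees a NULL domain.
     */
    #define for_each_domain_model(sd, lowest) \
            for ((sd) = (lowest); (sd); (sd) = (sd)->parent)

    int main(void)
    {
            struct sd_model top = { .parent = NULL, .flags = 0x2 };
            struct sd_model low = { .parent = &top, .flags = 0x1 };
            struct sd_model *sd;

            for_each_domain_model(sd, &low)
                    if (sd->flags & 0x2)    /* no "sd &&" needed: sd != NULL inside the body */
                            break;

            printf("found: %s\n", sd ? "yes" : "no (sd is NULL only once the loop has ended)");
            return 0;
    }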
Signed-off-by: Hillf Danton Signed-off-by: Steven Rostedt Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/BANLkTimT+Tut-3TshCDm-NiLLXrOznibNA@mail.gmail.com Signed-off-by: Ingo Molnar --- kernel/sched_fair.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 241fc86bc61..f4b732a3552 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -3660,7 +3660,7 @@ static inline struct sched_domain *lowest_flag_domain(int cpu, int flag) struct sched_domain *sd; for_each_domain(cpu, sd) - if (sd && (sd->flags & flag)) + if (sd->flags & flag) break; return sd; From 311e800e16f63d909136a64ed17ca353a160be59 Mon Sep 17 00:00:00 2001 From: Hillf Danton Date: Thu, 16 Jun 2011 21:55:20 -0400 Subject: [PATCH 06/46] sched, rt: Fix rq->rt.pushable_tasks bug in push_rt_task() Do not call dequeue_pushable_task() when failing to push an eligible task, as it remains pushable, merely not at this particular moment. Signed-off-by: Hillf Danton Signed-off-by: Mike Galbraith Signed-off-by: Steven Rostedt Signed-off-by: Peter Zijlstra Cc: Yong Zhang Link: http://lkml.kernel.org/r/1306895385.4791.26.camel@marge.simson.net Signed-off-by: Ingo Molnar --- kernel/sched_rt.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index e2698c0fc69..8e189455ed1 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -1394,6 +1394,7 @@ static int push_rt_task(struct rq *rq) { struct task_struct *next_task; struct rq *lowest_rq; + int ret = 0; if (!rq->rt.overloaded) return 0; @@ -1426,7 +1427,7 @@ retry: if (!lowest_rq) { struct task_struct *task; /* - * find lock_lowest_rq releases rq->lock + * find_lock_lowest_rq releases rq->lock * so it is possible that next_task has migrated. * * We need to make sure that the task is still on the same @@ -1436,12 +1437,11 @@ retry: task = pick_next_pushable_task(rq); if (task_cpu(next_task) == rq->cpu && task == next_task) { /* - * If we get here, the task hasn't moved at all, but - * it has failed to push. We will not try again, - * since the other cpus will pull from us when they - * are ready. + * The task hasn't migrated, and is still the next + * eligible task, but we failed to find a run-queue + * to push it to. Do not retry in this case, since + * other cpus will pull from us when ready. */ - dequeue_pushable_task(rq, next_task); goto out; } @@ -1460,6 +1460,7 @@ retry: deactivate_task(rq, next_task, 0); set_task_cpu(next_task, lowest_rq->cpu); activate_task(lowest_rq, next_task, 0); + ret = 1; resched_task(lowest_rq->curr); @@ -1468,7 +1469,7 @@ retry: out: put_task_struct(next_task); - return 1; + return ret; } static void push_rt_tasks(struct rq *rq) From 1812a643ccbfeb61a00a7f0d7219606e63d8815b Mon Sep 17 00:00:00 2001 From: Hillf Danton Date: Thu, 16 Jun 2011 21:55:21 -0400 Subject: [PATCH 07/46] sched: Remove resetting exec_start in put_prev_task_rt() There's no reason to clean the exec_start in put_prev_task_rt() as it is reset when the task gets back to the run queue. This saves us doing a store() in the fast path. 
Signed-off-by: Hillf Danton Signed-off-by: Steven Rostedt Signed-off-by: Peter Zijlstra Cc: Mike Galbraith Cc: Yong Zhang Link: http://lkml.kernel.org/r/BANLkTimqWD=q6YnSDi-v9y=LMWecgEzEWg@mail.gmail.com Signed-off-by: Ingo Molnar --- kernel/sched_rt.c | 1 - 1 file changed, 1 deletion(-) diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 8e189455ed1..70107a39b1b 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -1178,7 +1178,6 @@ static struct task_struct *pick_next_task_rt(struct rq *rq) static void put_prev_task_rt(struct rq *rq, struct task_struct *p) { update_curr_rt(rq); - p->se.exec_start = 0; /* * The previous task needs to be made eligible for pushing From c37495fd0f64fc139b5a07d242bcb485174d1206 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 16 Jun 2011 21:55:22 -0400 Subject: [PATCH 08/46] sched: Balance RT tasks when forked as well When a new task is woken, the code to balance the RT task is currently skipped in the select_task_rq() call. But it will be pushed if the rq is currently overloaded with RT tasks anyway. The issue is that we already queued the task, and if it does get pushed, it will have to be dequeued and requeued on the new run queue. The advantage with pushing it first is that we avoid this requeuing as we are pushing it off before the task is ever queued. See commit 318e0893ce3f524 ("sched: pre-route RT tasks on wakeup") for more details. The return of select_task_rq() when it is not a wake up has also been changed to return task_cpu() instead of smp_processor_id(). This is more of a sanity because the current only other user of select_task_rq() besides wake ups, is an exec, where task_cpu() should also be the same as smp_processor_id(). But if it is used for other purposes, lets keep the task on the same CPU. Why would we mant to migrate it to the current CPU? Signed-off-by: Steven Rostedt Signed-off-by: Peter Zijlstra Cc: Hillf Danton Link: http://lkml.kernel.org/r/20110617015919.832743148@goodmis.org Signed-off-by: Ingo Molnar --- kernel/sched_rt.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 70107a39b1b..2153a87e615 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -1017,10 +1017,12 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags) struct rq *rq; int cpu; - if (sd_flag != SD_BALANCE_WAKE) - return smp_processor_id(); - cpu = task_cpu(p); + + /* For anything but wake ups, just return the task_cpu */ + if (sd_flag != SD_BALANCE_WAKE && sd_flag != SD_BALANCE_FORK) + goto out; + rq = cpu_rq(cpu); rcu_read_lock(); @@ -1059,6 +1061,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags) } rcu_read_unlock(); +out: return cpu; } From 5181f4a46afd99e5e85c639b189e43e0a42b53df Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Thu, 16 Jun 2011 21:55:23 -0400 Subject: [PATCH 09/46] sched: Use pushable_tasks to determine next highest prio Hillf Danton proposed a patch (see link) that cleaned up the sched_rt code that calculates the priority of the next highest priority task to be used in finding run queues to pull from. His patch removed the calculating of the next prio to just use the current prio when deteriming if we should examine a run queue to pull from. The problem with his patch was that it caused more false checks. Because we check a run queue for pushable tasks if the current priority of that run queue is higher in priority than the task about to run on our run queue. 
But after grabbing the locks and doing the real check, we find that there may not be a task that has a higher prio task to pull. Thus the locks were taken with nothing to do. I added some trace_printks() to record when and how many times the run queue locks were taken to check for pullable tasks, compared to how many times we pulled a task. With the current method, it was: 3806 locks taken vs 2812 pulled tasks With Hillf's patch: 6728 locks taken vs 2804 pulled tasks The number of times locks were taken to pull a task went up almost double with no more success rate. But his patch did get me thinking. When we look at the priority of the highest task to consider taking the locks to do a pull, a failure to pull can be one of the following: (in order of most likely) o RT task was pushed off already between the check and taking the lock o Waiting RT task can not be migrated o RT task's CPU affinity does not include the target run queue's CPU o RT task's priority changed between the check and taking the lock And with Hillf's patch, the thing that caused most of the failures, is the RT task to pull was not at the right priority to pull (not greater than the current RT task priority on the target run queue). Most of the above cases we can't help. But the current method does not check if the next highest prio RT task can be migrated or not, and if it can not, we still grab the locks to do the test (we don't find out about this fact until after we have the locks). I thought about this case, and realized that the pushable task plist that is maintained only holds RT tasks that can migrate. If we move the calculating of the next highest prio task from the inc/dec_rt_task() functions into the queuing of the pushable tasks, then we only measure the priorities of those tasks that we push, and we get this basically for free. Not only does this patch make the code a little more efficient, it cleans it up and makes it a little simpler. Thanks to Hillf Danton for inspiring me on this patch. 
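To make the idea concrete: because the pushable list only ever contains migratable RT tasks and is kept sorted by priority, the next-highest priority is simply the priority of the list head, and it can be refreshed in O(1) on every enqueue/dequeue. Here is a stand-alone C sketch of that bookkeeping, simplified to a sorted singly linked list instead of the kernel's plist, with hypothetical names throughout.

    #include <stdio.h>

    #define MAX_RT_PRIO 100

    struct pushable {               /* stand-in for one pushable (migratable) RT task */
            int prio;               /* lower value == higher priority, as for RT */
            struct pushable *next;
    };

    static struct pushable *head;                   /* kept sorted by prio */
    static int highest_prio_next = MAX_RT_PRIO;     /* mirrors rt_rq->highest_prio.next */

    static void enqueue_pushable(struct pushable *p)
    {
            struct pushable **pp = &head;

            while (*pp && (*pp)->prio <= p->prio)
                    pp = &(*pp)->next;
            p->next = *pp;
            *pp = p;

            /* the best pushable task is the list head, so the update is O(1) */
            if (p->prio < highest_prio_next)
                    highest_prio_next = p->prio;
    }

    static void dequeue_pushable(struct pushable *p)
    {
            struct pushable **pp = &head;

            while (*pp && *pp != p)
                    pp = &(*pp)->next;
            if (*pp)
                    *pp = p->next;

            highest_prio_next = head ? head->prio : MAX_RT_PRIO;
    }

    int main(void)
    {
            struct pushable a = { .prio = 40 }, b = { .prio = 10 };

            enqueue_pushable(&a);
            enqueue_pushable(&b);
            printf("next-highest pushable prio: %d\n", highest_prio_next);  /* 10 */
            dequeue_pushable(&b);
            printf("next-highest pushable prio: %d\n", highest_prio_next);  /* 40 */
            return 0;
    }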
Signed-off-by: Steven Rostedt Signed-off-by: Peter Zijlstra Cc: Hillf Danton Cc: Gregory Haskins Link: http://lkml.kernel.org/r/BANLkTimQ67180HxCx5vgMqumqw1EkFh3qg@mail.gmail.com Signed-off-by: Ingo Molnar --- kernel/sched_rt.c | 61 ++++++++++++++--------------------------------- 1 file changed, 18 insertions(+), 43 deletions(-) diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 2153a87e615..a8c207ff349 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -124,21 +124,33 @@ static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) update_rt_migration(rt_rq); } +static inline int has_pushable_tasks(struct rq *rq) +{ + return !plist_head_empty(&rq->rt.pushable_tasks); +} + static void enqueue_pushable_task(struct rq *rq, struct task_struct *p) { plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); plist_node_init(&p->pushable_tasks, p->prio); plist_add(&p->pushable_tasks, &rq->rt.pushable_tasks); + + /* Update the highest prio pushable task */ + if (p->prio < rq->rt.highest_prio.next) + rq->rt.highest_prio.next = p->prio; } static void dequeue_pushable_task(struct rq *rq, struct task_struct *p) { plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks); -} -static inline int has_pushable_tasks(struct rq *rq) -{ - return !plist_head_empty(&rq->rt.pushable_tasks); + /* Update the new highest prio pushable task */ + if (has_pushable_tasks(rq)) { + p = plist_first_entry(&rq->rt.pushable_tasks, + struct task_struct, pushable_tasks); + rq->rt.highest_prio.next = p->prio; + } else + rq->rt.highest_prio.next = MAX_RT_PRIO; } #else @@ -698,47 +710,13 @@ static void update_curr_rt(struct rq *rq) #if defined CONFIG_SMP -static struct task_struct *pick_next_highest_task_rt(struct rq *rq, int cpu); - -static inline int next_prio(struct rq *rq) -{ - struct task_struct *next = pick_next_highest_task_rt(rq, rq->cpu); - - if (next) - return next->prio; - else - return MAX_RT_PRIO; -} - static void inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) { struct rq *rq = rq_of_rt_rq(rt_rq); - if (prio < prev_prio) { - - /* - * If the new task is higher in priority than anything on the - * run-queue, we know that the previous high becomes our - * next-highest. 
- */ - rt_rq->highest_prio.next = prev_prio; - - if (rq->online) - cpupri_set(&rq->rd->cpupri, rq->cpu, prio); - - } else if (prio == rt_rq->highest_prio.curr) - /* - * If the next task is equal in priority to the highest on - * the run-queue, then we implicitly know that the next highest - * task cannot be any lower than current - */ - rt_rq->highest_prio.next = prio; - else if (prio < rt_rq->highest_prio.next) - /* - * Otherwise, we need to recompute next-highest - */ - rt_rq->highest_prio.next = next_prio(rq); + if (rq->online && prio < prev_prio) + cpupri_set(&rq->rd->cpupri, rq->cpu, prio); } static void @@ -746,9 +724,6 @@ dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) { struct rq *rq = rq_of_rt_rq(rt_rq); - if (rt_rq->rt_nr_running && (prio <= rt_rq->highest_prio.next)) - rt_rq->highest_prio.next = next_prio(rq); - if (rq->online && rt_rq->highest_prio.curr != prev_prio) cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr); } From c92211d9b772792a9dea530c042efb4ab5562f50 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Tue, 2 Aug 2011 16:36:12 -0400 Subject: [PATCH 10/46] sched/cpupri: Remove the vec->lock MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit sched/cpupri: Remove the vec->lock The cpupri vec->lock has been showing up as a top contention lately. This is because of the RT push/pull logic takes an agressive approach for migrating RT tasks. The cpupri logic is in place to improve the performance of the push/pull when dealing with large number CPU machines. The problem though is a vec->lock is required, where a vec is a global per RT priority structure. That is, if there are lots of RT tasks at the same priority, every time they are added or removed from the RT queue, this global vec->lock is taken. Now that more kernel threads are becoming RT (RCU boost and threaded interrupts) this is becoming much more of an issue. There are two variables that are being synced by the vec->lock. The cpupri bitmask, and the vec->counter. The cpupri bitmask is one bit per priority. If a RT priority vec has a process queued, then the vec->count is > 0 and the cpupri bitmask is set for that RT priority. If the cpupri bitmask gets out of sync with the vec->counter, we could end up pushing a low proirity RT task to a high priority queue. That RT task that could have run immediately could be queued on a run queue with a higher priority task indefinitely. The solution is not to use the cpupri bitmask and just look at the vec->count directly when doing a pull. The cpupri bitmask is just a fast way to scan the RT priorities when a pull is made. Instead of using the bitmask, and just examine all RT priorities, and look at the vec->counts, we could eliminate the vec->lock. The scan of RT tasks is to find a run queue that we can push an RT task to, and we do not push to a high priority queue, thus the scan only needs to go from 1 to RT task->prio, and not all 100 RT priorities. The push algorithm, which does the scan of RT priorities (and scan of the bitmask) only happens when we have an overloaded RT run queue (more than one RT task queued). The grabbing of the vec->lock happens every time any RT task is queued or dequeued on the run queue for that priority. The slowing down of the scan by not using a bitmask is negligible by the speed up of removing the vec->lock contention, and replacing it with an atomic counter and memory barrier. To prove this, I wrote a patch that times both the loop and the code that grabs the vec->locks. 
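Before the numbers, here is a compressed user-space model of the counter-plus-mask protocol described above. C11 atomics and fences stand in for the kernel's atomic_t and smp_wmb()/smp_rmb(); the names, the single-word mask and the missing affinity check are simplifications, not the kernel code.

    #include <stdatomic.h>
    #include <stdio.h>

    struct vec_model {
            atomic_int count;       /* how many CPUs currently sit at this priority */
            atomic_ulong mask;      /* one bit per CPU, stand-in for the cpumask   */
    };

    static struct vec_model vec[100];

    /* writer: CPU 'cpu' starts running at priority 'pri' */
    static void vec_set(int cpu, int pri)
    {
            atomic_fetch_or(&vec[pri].mask, 1UL << cpu);
            atomic_thread_fence(memory_order_release);   /* mask visible before count */
            atomic_fetch_add(&vec[pri].count, 1);
    }

    /* writer: CPU 'cpu' stops running at priority 'pri' */
    static void vec_clear(int cpu, int pri)
    {
            atomic_fetch_sub(&vec[pri].count, 1);
            atomic_thread_fence(memory_order_release);   /* count visible before mask */
            atomic_fetch_and(&vec[pri].mask, ~(1UL << cpu));
    }

    /* reader: scan priorities below 'task_pri' for a CPU to push to */
    static int vec_find(int task_pri)
    {
            for (int idx = 0; idx < task_pri; idx++) {
                    if (!atomic_load(&vec[idx].count))
                            continue;
                    atomic_thread_fence(memory_order_acquire);  /* count first, then mask */
                    unsigned long m = atomic_load(&vec[idx].mask);
                    if (m)
                            return __builtin_ctzl(m);   /* any CPU in the mask (gcc/clang builtin) */
            }
            return -1;
    }

    int main(void)
    {
            vec_set(3, 2);                          /* CPU 3 runs at priority 2 */
            printf("push target: %d\n", vec_find(50));
            vec_clear(3, 2);
            printf("push target: %d\n", vec_find(50));
            return 0;
    }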
I passed the patches to various people (and companies) to test and show the results. I let everyone choose their own load to test, giving different loads on the system, for various different setups. Here's some of the results: (snipping to a few CPUs to not make this change log huge, but the results were consistent across the entire system). System 1 (24 CPUs) Before patch: CPU: Name Count Max Min Average Total ---- ---- ----- --- --- ------- ----- [...] cpu 20: loop 3057 1.766 0.061 0.642 1963.170 vec 6782949 90.469 0.089 0.414 2811760.503 cpu 21: loop 2617 1.723 0.062 0.641 1679.074 vec 6782810 90.499 0.089 0.291 1978499.900 cpu 22: loop 2212 1.863 0.063 0.699 1547.160 vec 6767244 85.685 0.089 0.435 2949676.898 cpu 23: loop 2320 2.013 0.062 0.594 1380.265 vec 6781694 87.923 0.088 0.431 2928538.224 After patch: cpu 20: loop 2078 1.579 0.061 0.533 1108.006 vec 6164555 5.704 0.060 0.143 885185.809 cpu 21: loop 2268 1.712 0.065 0.575 1305.248 vec 6153376 5.558 0.060 0.187 1154960.469 cpu 22: loop 1542 1.639 0.095 0.533 823.249 vec 6156510 5.720 0.060 0.190 1172727.232 cpu 23: loop 1650 1.733 0.068 0.545 900.781 vec 6170784 5.533 0.060 0.167 1034287.953 All times are in microseconds. The 'loop' is the amount of time spent doing the loop across the priorities (before patch uses bitmask). the 'vec' is the amount of time in the code that requires grabbing the vec->lock. The second patch just does not have the vec lock, but encompasses the same code. Amazingly the loop code even went down on average. The vec code went from .5 down to .18, that's more than half the time spent! Note, more than one test was run, but they all had the same results. System 2 (64 CPUs) Before patch: CPU: Name Count Max Min Average Total ---- ---- ----- --- --- ------- ----- cpu 60: loop 0 0 0 0 0 vec 5410840 277.954 0.084 0.782 4232895.727 cpu 61: loop 0 0 0 0 0 vec 4915648 188.399 0.084 0.570 2803220.301 cpu 62: loop 0 0 0 0 0 vec 5356076 276.417 0.085 0.786 4214544.548 cpu 63: loop 0 0 0 0 0 vec 4891837 170.531 0.085 0.799 3910948.833 After patch: cpu 60: loop 0 0 0 0 0 vec 5365118 5.080 0.021 0.063 340490.267 cpu 61: loop 0 0 0 0 0 vec 4898590 1.757 0.019 0.071 347903.615 cpu 62: loop 0 0 0 0 0 vec 5737130 3.067 0.021 0.119 687108.734 cpu 63: loop 0 0 0 0 0 vec 4903228 1.822 0.021 0.071 348506.477 The test run during the measurement did not have any (very few, from other CPUs) RT tasks pushing. But this shows that it helped out tremendously with the contention, as the contention happens because the vec->lock is taken only on queuing at an RT priority, and different CPUs that queue tasks at the same priority will have contention. 
I tested on my own 4 CPU machine with the following results: Before patch: CPU: Name Count Max Min Average Total ---- ---- ----- --- --- ------- ----- cpu 0: loop 2377 1.489 0.158 0.588 1398.395 vec 4484 770.146 2.301 4.396 19711.755 cpu 1: loop 2169 1.962 0.160 0.576 1250.110 vec 4425 152.769 2.297 4.030 17834.228 cpu 2: loop 2324 1.749 0.155 0.559 1299.799 vec 4368 779.632 2.325 4.665 20379.268 cpu 3: loop 2325 1.629 0.157 0.561 1306.113 vec 4650 408.782 2.394 4.348 20222.577 After patch: CPU: Name Count Max Min Average Total ---- ---- ----- --- --- ------- ----- cpu 0: loop 2121 1.616 0.113 0.636 1349.189 vec 4303 1.151 0.225 0.421 1811.966 cpu 1: loop 2130 1.638 0.178 0.644 1372.927 vec 4627 1.379 0.235 0.428 1983.648 cpu 2: loop 2056 1.464 0.165 0.637 1310.141 vec 4471 1.311 0.217 0.433 1937.927 cpu 3: loop 2154 1.481 0.162 0.601 1295.083 vec 4236 1.253 0.230 0.425 1803.008 This was running my migrate.c code that can be found at: http://lwn.net/Articles/425763/ The migrate code does stress the RT tasks a bit. This shows that the loop did increase a little after the patch, but not by much. The vec code dropped dramatically. From 4.3us down to .42us. That's a 10x improvement! Tested-by: Mike Galbraith Tested-by: Luis Claudio R. Gonçalves Tested-by: Matthew Hank Sabins Signed-off-by: Steven Rostedt Reviewed-by: Gregory Haskins Acked-by: Hillf Danton Signed-off-by: Peter Zijlstra Cc: Chris Mason Link: http://lkml.kernel.org/r/1312317372.18583.101.camel@gandalf.stny.rr.com Signed-off-by: Ingo Molnar --- kernel/sched_cpupri.c | 62 +++++++++++++++++++++++++++---------------- kernel/sched_cpupri.h | 5 ++-- 2 files changed, 41 insertions(+), 26 deletions(-) diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c index 2722dc1b413..7761a2669ff 100644 --- a/kernel/sched_cpupri.c +++ b/kernel/sched_cpupri.c @@ -47,9 +47,6 @@ static int convert_prio(int prio) return cpupri; } -#define for_each_cpupri_active(array, idx) \ - for_each_set_bit(idx, array, CPUPRI_NR_PRIORITIES) - /** * cpupri_find - find the best (lowest-pri) CPU in the system * @cp: The cpupri context @@ -71,11 +68,33 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p, int idx = 0; int task_pri = convert_prio(p->prio); - for_each_cpupri_active(cp->pri_active, idx) { + if (task_pri >= MAX_RT_PRIO) + return 0; + + for (idx = 0; idx < task_pri; idx++) { struct cpupri_vec *vec = &cp->pri_to_cpu[idx]; - if (idx >= task_pri) - break; + if (!atomic_read(&(vec)->count)) + continue; + /* + * When looking at the vector, we need to read the counter, + * do a memory barrier, then read the mask. + * + * Note: This is still all racey, but we can deal with it. + * Ideally, we only want to look at masks that are set. + * + * If a mask is not set, then the only thing wrong is that we + * did a little more work than necessary. + * + * If we read a zero count but the mask is set, because of the + * memory barriers, that can only happen when the highest prio + * task for a run queue has left the run queue, in which case, + * it will be followed by a pull. If the task we are processing + * fails to find a proper place to go, that pull request will + * pull this task if the run queue is running at a lower + * priority. 
+ */ + smp_rmb(); if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) continue; @@ -115,7 +134,6 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri) { int *currpri = &cp->cpu_to_pri[cpu]; int oldpri = *currpri; - unsigned long flags; newpri = convert_prio(newpri); @@ -134,26 +152,25 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri) if (likely(newpri != CPUPRI_INVALID)) { struct cpupri_vec *vec = &cp->pri_to_cpu[newpri]; - raw_spin_lock_irqsave(&vec->lock, flags); - cpumask_set_cpu(cpu, vec->mask); - vec->count++; - if (vec->count == 1) - set_bit(newpri, cp->pri_active); - - raw_spin_unlock_irqrestore(&vec->lock, flags); + /* + * When adding a new vector, we update the mask first, + * do a write memory barrier, and then update the count, to + * make sure the vector is visible when count is set. + */ + smp_wmb(); + atomic_inc(&(vec)->count); } if (likely(oldpri != CPUPRI_INVALID)) { struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri]; - raw_spin_lock_irqsave(&vec->lock, flags); - - vec->count--; - if (!vec->count) - clear_bit(oldpri, cp->pri_active); + /* + * When removing from the vector, we decrement the counter first + * do a memory barrier and then clear the mask. + */ + atomic_dec(&(vec)->count); + smp_wmb(); cpumask_clear_cpu(cpu, vec->mask); - - raw_spin_unlock_irqrestore(&vec->lock, flags); } *currpri = newpri; @@ -175,8 +192,7 @@ int cpupri_init(struct cpupri *cp) for (i = 0; i < CPUPRI_NR_PRIORITIES; i++) { struct cpupri_vec *vec = &cp->pri_to_cpu[i]; - raw_spin_lock_init(&vec->lock); - vec->count = 0; + atomic_set(&vec->count, 0); if (!zalloc_cpumask_var(&vec->mask, GFP_KERNEL)) goto cleanup; } diff --git a/kernel/sched_cpupri.h b/kernel/sched_cpupri.h index 9fc7d386fea..6b4cd17dead 100644 --- a/kernel/sched_cpupri.h +++ b/kernel/sched_cpupri.h @@ -12,9 +12,8 @@ /* values 2-101 are RT priorities 0-99 */ struct cpupri_vec { - raw_spinlock_t lock; - int count; - cpumask_var_t mask; + atomic_t count; + cpumask_var_t mask; }; struct cpupri { From d473750b4073f16f23f46f30dc1bd3de45c35754 Mon Sep 17 00:00:00 2001 From: Steven Rostedt Date: Fri, 5 Aug 2011 08:27:49 -0400 Subject: [PATCH 11/46] sched/cpupri: Fix memory barriers for vec updates to always be in order [ This patch actually compiles. Thanks to Mike Galbraith for pointing that out. I compiled and booted this patch with no issues. ] Re-examining the cpupri patch, I see there's a possible race because the update of the two priorities vec->counts are not protected by a memory barrier. When a RT runqueue is overloaded and wants to push an RT task to another runqueue, it scans the RT priority vectors in a loop from lowest priority to highest. When we queue or dequeue an RT task that changes a runqueue's highest priority task, we update the vectors to show that a runqueue is rated at a different priority. To do this, we first set the new priority mask, and increment the vec->count, and then set the old priority mask by decrementing the vec->count. If we are lowering the runqueue's RT priority rating, it will trigger a RT pull, and we do not care if we miss pushing to this runqueue or not. But if we raise the priority, but the priority is still lower than an RT task that is looking to be pushed, we must make sure that this runqueue is still seen by the push algorithm (the loop). Because the loop reads from lowest to highest, and the new priority is set before the old one is cleared, we will either see the new or old priority set and the vector will be checked. But! 
Since there's no memory barrier between the updates of the two, the old count may be decremented first before the new count is incremented. This means the loop may see the old count of zero and skip it, and also the new count of zero before it was updated. A possible runqueue that the RT task could move to could be missed. A conditional memory barrier is placed between the vec->count updates and is only called when both updates are done. The smp_wmb() has also been changed to smp_mb__before_atomic_inc/dec(), as they are not needed by archs that already synchronize atomic_inc/dec(). The smp_rmb() has been moved to be called at every iteration of the loop so that the race between seeing the two updates is visible by each iteration of the loop, as an arch is free to optimize the reading of memory of the counters in the loop. Signed-off-by: Steven Rostedt Signed-off-by: Peter Zijlstra Cc: Nick Piggin Cc: Linus Torvalds Link: http://lkml.kernel.org/r/1312547269.18583.194.camel@gandalf.stny.rr.com Signed-off-by: Ingo Molnar --- kernel/sched_cpupri.c | 28 +++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c index 7761a2669ff..90faffdbdf9 100644 --- a/kernel/sched_cpupri.c +++ b/kernel/sched_cpupri.c @@ -73,9 +73,10 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p, for (idx = 0; idx < task_pri; idx++) { struct cpupri_vec *vec = &cp->pri_to_cpu[idx]; + int skip = 0; if (!atomic_read(&(vec)->count)) - continue; + skip = 1; /* * When looking at the vector, we need to read the counter, * do a memory barrier, then read the mask. @@ -96,6 +97,10 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p, */ smp_rmb(); + /* Need to do the rmb for every iteration */ + if (skip) + continue; + if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids) continue; @@ -134,6 +139,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri) { int *currpri = &cp->cpu_to_pri[cpu]; int oldpri = *currpri; + int do_mb = 0; newpri = convert_prio(newpri); @@ -158,18 +164,34 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri) * do a write memory barrier, and then update the count, to * make sure the vector is visible when count is set. */ - smp_wmb(); + smp_mb__before_atomic_inc(); atomic_inc(&(vec)->count); + do_mb = 1; } if (likely(oldpri != CPUPRI_INVALID)) { struct cpupri_vec *vec = &cp->pri_to_cpu[oldpri]; + /* + * Because the order of modification of the vec->count + * is important, we must make sure that the update + * of the new prio is seen before we decrement the + * old prio. This makes sure that the loop sees + * one or the other when we raise the priority of + * the run queue. We don't care about when we lower the + * priority, as that will trigger an rt pull anyway. + * + * We only need to do a memory barrier if we updated + * the new priority vec. + */ + if (do_mb) + smp_mb__after_atomic_inc(); + /* * When removing from the vector, we decrement the counter first * do a memory barrier and then clear the mask. */ atomic_dec(&(vec)->count); - smp_wmb(); + smp_mb__after_atomic_inc(); cpumask_clear_cpu(cpu, vec->mask); } From 5710f15b52664ae0bfa60a66d75464769d297b2b Mon Sep 17 00:00:00 2001 From: Yong Zhang Date: Sat, 6 Aug 2011 08:10:04 +0800 Subject: [PATCH 12/46] sched/cpupri: Remove cpupri->pri_active Since [sched/cpupri: Remove the vec->lock], member pri_active of struct cpupri is not needed any more, just remove it. Also clean stuff related to it. 
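As a footnote to the two cpupri patches above: the reason the new count must become visible before the old one drops is easy to see by enumerating the intermediate states another CPU could observe while a run queue's priority is being raised. The toy, single-threaded C illustration below is hypothetical, not kernel code; the barrier exists precisely so that other CPUs can never observe the "dec-then-inc" sequence of states.

    #include <stdio.h>

    /* counts for the old and the new priority vector of one CPU being re-rated */
    struct state { int old_cnt, new_cnt; };

    static int cpu_visible(struct state s)
    {
            /* the push loop scans every priority; the CPU is found iff some count != 0 */
            return s.old_cnt || s.new_cnt;
    }

    int main(void)
    {
            /* raise priority: inc the new vec first (as cpupri_set() does), then dec the old */
            struct state inc_then_dec[] = { {1, 0}, {1, 1}, {0, 1} };
            /* reversed order: dec the old first, then inc the new */
            struct state dec_then_inc[] = { {1, 0}, {0, 0}, {0, 1} };

            for (int i = 0; i < 3; i++)
                    printf("inc-then-dec step %d: visible = %d\n", i, cpu_visible(inc_then_dec[i]));
            for (int i = 0; i < 3; i++)
                    printf("dec-then-inc step %d: visible = %d\n", i, cpu_visible(dec_then_inc[i]));
            return 0;
    }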
Signed-off-by: Yong Zhang Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/20110806001004.GA2207@zhy Signed-off-by: Ingo Molnar --- kernel/sched_cpupri.c | 3 +-- kernel/sched_cpupri.h | 2 -- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c index 90faffdbdf9..a86cf9d9eb1 100644 --- a/kernel/sched_cpupri.c +++ b/kernel/sched_cpupri.c @@ -152,8 +152,7 @@ void cpupri_set(struct cpupri *cp, int cpu, int newpri) * If the cpu was currently mapped to a different value, we * need to map it to the new value then remove the old value. * Note, we must add the new value first, otherwise we risk the - * cpu being cleared from pri_active, and this cpu could be - * missed for a push or pull. + * cpu being missed by the priority loop in cpupri_find. */ if (likely(newpri != CPUPRI_INVALID)) { struct cpupri_vec *vec = &cp->pri_to_cpu[newpri]; diff --git a/kernel/sched_cpupri.h b/kernel/sched_cpupri.h index 6b4cd17dead..f6d75617349 100644 --- a/kernel/sched_cpupri.h +++ b/kernel/sched_cpupri.h @@ -4,7 +4,6 @@ #include #define CPUPRI_NR_PRIORITIES (MAX_RT_PRIO + 2) -#define CPUPRI_NR_PRI_WORDS BITS_TO_LONGS(CPUPRI_NR_PRIORITIES) #define CPUPRI_INVALID -1 #define CPUPRI_IDLE 0 @@ -18,7 +17,6 @@ struct cpupri_vec { struct cpupri { struct cpupri_vec pri_to_cpu[CPUPRI_NR_PRIORITIES]; - long pri_active[CPUPRI_NR_PRI_WORDS]; int cpu_to_pri[NR_CPUS]; }; From 953bfcd10e6f3697233e8e5128c611d275da39c1 Mon Sep 17 00:00:00 2001 From: Paul Turner Date: Thu, 21 Jul 2011 09:43:27 -0700 Subject: [PATCH 13/46] sched: Implement hierarchical task accounting for SCHED_OTHER Introduce hierarchical task accounting for the group scheduling case in CFS, as well as promoting the responsibility for maintaining rq->nr_running to the scheduling classes. The primary motivation for this is that with scheduling classes supporting bandwidth throttling it is possible for entities participating in throttled sub-trees to not have root visible changes in rq->nr_running across activate and de-activate operations. This in turn leads to incorrect idle and weight-per-task load balance decisions. This also allows us to make a small fixlet to the fastpath in pick_next_task() under group scheduling. Note: this issue also exists with the existing sched_rt throttling mechanism. This patch does not address that. 
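The fastpath change is easiest to see with a small model: each cfs_rq counts every task anywhere below it in h_nr_running, each scheduling class bumps rq->nr_running itself, and pick_next_task() may take the fair-only shortcut exactly when the two counters at the root agree. The stand-alone sketch below uses hypothetical names and a two-level hierarchy only.

    #include <stdio.h>

    struct cfs_rq_model {
            struct cfs_rq_model *parent;    /* NULL for the root cfs_rq */
            unsigned long h_nr_running;     /* tasks anywhere in this subtree */
    };

    struct rq_model {
            unsigned long nr_running;       /* all classes: CFS + RT + stop ... */
            struct cfs_rq_model cfs;        /* root cfs_rq */
    };

    /* roughly the accounting done by enqueue_task_fair() after this patch */
    static void enqueue_fair(struct rq_model *rq, struct cfs_rq_model *leaf)
    {
            for (struct cfs_rq_model *cfs = leaf; cfs; cfs = cfs->parent)
                    cfs->h_nr_running++;
            rq->nr_running++;               /* inc_nr_running() moved into the class */
    }

    static void enqueue_rt(struct rq_model *rq)
    {
            rq->nr_running++;               /* the RT class maintains rq->nr_running too */
    }

    int main(void)
    {
            struct rq_model rq = { 0, { NULL, 0 } };
            struct cfs_rq_model group = { .parent = &rq.cfs, .h_nr_running = 0 };

            enqueue_fair(&rq, &group);      /* one task inside a child group */
            enqueue_fair(&rq, &rq.cfs);     /* one task directly in the root group */
            printf("fair-only fastpath ok: %d\n", rq.nr_running == rq.cfs.h_nr_running);

            enqueue_rt(&rq);                /* an RT task breaks the equality */
            printf("fair-only fastpath ok: %d\n", rq.nr_running == rq.cfs.h_nr_running);
            return 0;
    }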
Signed-off-by: Paul Turner Reviewed-by: Hidetoshi Seto Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/20110721184756.878333391@google.com Signed-off-by: Ingo Molnar --- kernel/sched.c | 6 ++---- kernel/sched_fair.c | 6 ++++++ kernel/sched_rt.c | 5 ++++- kernel/sched_stoptask.c | 2 ++ 4 files changed, 14 insertions(+), 5 deletions(-) diff --git a/kernel/sched.c b/kernel/sched.c index cf427bb2b65..cd1a531ca8f 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -311,7 +311,7 @@ struct task_group root_task_group; /* CFS-related fields in a runqueue */ struct cfs_rq { struct load_weight load; - unsigned long nr_running; + unsigned long nr_running, h_nr_running; u64 exec_clock; u64 min_vruntime; @@ -1802,7 +1802,6 @@ static void activate_task(struct rq *rq, struct task_struct *p, int flags) rq->nr_uninterruptible--; enqueue_task(rq, p, flags); - inc_nr_running(rq); } /* @@ -1814,7 +1813,6 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int flags) rq->nr_uninterruptible++; dequeue_task(rq, p, flags); - dec_nr_running(rq); } #ifdef CONFIG_IRQ_TIME_ACCOUNTING @@ -4258,7 +4256,7 @@ pick_next_task(struct rq *rq) * Optimization: we know that if all tasks are in * the fair class we can call that function directly: */ - if (likely(rq->nr_running == rq->cfs.nr_running)) { + if (likely(rq->nr_running == rq->cfs.h_nr_running)) { p = fair_sched_class.pick_next_task(rq); if (likely(p)) return p; diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index f4b732a3552..f86b0cb5eb2 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -1310,16 +1310,19 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) break; cfs_rq = cfs_rq_of(se); enqueue_entity(cfs_rq, se, flags); + cfs_rq->h_nr_running++; flags = ENQUEUE_WAKEUP; } for_each_sched_entity(se) { cfs_rq = cfs_rq_of(se); + cfs_rq->h_nr_running++; update_cfs_load(cfs_rq, 0); update_cfs_shares(cfs_rq); } + inc_nr_running(rq); hrtick_update(rq); } @@ -1339,6 +1342,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) for_each_sched_entity(se) { cfs_rq = cfs_rq_of(se); dequeue_entity(cfs_rq, se, flags); + cfs_rq->h_nr_running--; /* Don't dequeue parent if it has other entities besides us */ if (cfs_rq->load.weight) { @@ -1358,11 +1362,13 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) for_each_sched_entity(se) { cfs_rq = cfs_rq_of(se); + cfs_rq->h_nr_running--; update_cfs_load(cfs_rq, 0); update_cfs_shares(cfs_rq); } + dec_nr_running(rq); hrtick_update(rq); } diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index a8c207ff349..a9d3c6bc684 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -936,6 +936,8 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags) if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1) enqueue_pushable_task(rq, p); + + inc_nr_running(rq); } static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags) @@ -946,6 +948,8 @@ static void dequeue_task_rt(struct rq *rq, struct task_struct *p, int flags) dequeue_rt_entity(rt_se); dequeue_pushable_task(rq, p); + + dec_nr_running(rq); } /* @@ -1841,4 +1845,3 @@ static void print_rt_stats(struct seq_file *m, int cpu) rcu_read_unlock(); } #endif /* CONFIG_SCHED_DEBUG */ - diff --git a/kernel/sched_stoptask.c b/kernel/sched_stoptask.c index 6f437632afa..8b44e7fa7fb 100644 --- a/kernel/sched_stoptask.c +++ b/kernel/sched_stoptask.c @@ -34,11 +34,13 @@ static struct task_struct *pick_next_task_stop(struct rq *rq) static void 
enqueue_task_stop(struct rq *rq, struct task_struct *p, int flags) { + inc_nr_running(rq); } static void dequeue_task_stop(struct rq *rq, struct task_struct *p, int flags) { + dec_nr_running(rq); } static void yield_task_stop(struct rq *rq) From ab84d31e15502fb626169ba2663381e34bf965b2 Mon Sep 17 00:00:00 2001 From: Paul Turner Date: Thu, 21 Jul 2011 09:43:28 -0700 Subject: [PATCH 14/46] sched: Introduce primitives to account for CFS bandwidth tracking In this patch we introduce the notion of CFS bandwidth, partitioned into globally unassigned bandwidth, and locally claimed bandwidth. - The global bandwidth is per task_group, it represents a pool of unclaimed bandwidth that cfs_rqs can allocate from. - The local bandwidth is tracked per-cfs_rq, this represents allotments from the global pool bandwidth assigned to a specific cpu. Bandwidth is managed via cgroupfs, adding two new interfaces to the cpu subsystem: - cpu.cfs_period_us : the bandwidth period in usecs - cpu.cfs_quota_us : the cpu bandwidth (in usecs) that this tg will be allowed to consume over period above. Signed-off-by: Paul Turner Signed-off-by: Nikhil Rao Signed-off-by: Bharata B Rao Reviewed-by: Hidetoshi Seto Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/20110721184756.972636699@google.com Signed-off-by: Ingo Molnar --- init/Kconfig | 12 +++ kernel/sched.c | 196 +++++++++++++++++++++++++++++++++++++++++++- kernel/sched_fair.c | 16 ++++ 3 files changed, 220 insertions(+), 4 deletions(-) diff --git a/init/Kconfig b/init/Kconfig index d62778390e5..d19b3a77ab4 100644 --- a/init/Kconfig +++ b/init/Kconfig @@ -715,6 +715,18 @@ config FAIR_GROUP_SCHED depends on CGROUP_SCHED default CGROUP_SCHED +config CFS_BANDWIDTH + bool "CPU bandwidth provisioning for FAIR_GROUP_SCHED" + depends on EXPERIMENTAL + depends on FAIR_GROUP_SCHED + default n + help + This option allows users to define CPU bandwidth rates (limits) for + tasks running within the fair group scheduler. Groups with no limit + set are considered to be unconstrained and will run with no + restriction. + See tip/Documentation/scheduler/sched-bwc.txt for more information. 
+ config RT_GROUP_SCHED bool "Group scheduling for SCHED_RR/FIFO" depends on EXPERIMENTAL diff --git a/kernel/sched.c b/kernel/sched.c index cd1a531ca8f..f08cb23be96 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -247,6 +247,14 @@ struct cfs_rq; static LIST_HEAD(task_groups); +struct cfs_bandwidth { +#ifdef CONFIG_CFS_BANDWIDTH + raw_spinlock_t lock; + ktime_t period; + u64 quota; +#endif +}; + /* task group related information */ struct task_group { struct cgroup_subsys_state css; @@ -278,6 +286,8 @@ struct task_group { #ifdef CONFIG_SCHED_AUTOGROUP struct autogroup *autogroup; #endif + + struct cfs_bandwidth cfs_bandwidth; }; /* task_group_lock serializes the addition/removal of task groups */ @@ -377,9 +387,48 @@ struct cfs_rq { unsigned long load_contribution; #endif +#ifdef CONFIG_CFS_BANDWIDTH + int runtime_enabled; + s64 runtime_remaining; +#endif #endif }; +#ifdef CONFIG_FAIR_GROUP_SCHED +#ifdef CONFIG_CFS_BANDWIDTH +static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) +{ + return &tg->cfs_bandwidth; +} + +static inline u64 default_cfs_period(void); + +static void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) +{ + raw_spin_lock_init(&cfs_b->lock); + cfs_b->quota = RUNTIME_INF; + cfs_b->period = ns_to_ktime(default_cfs_period()); +} + +static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) +{ + cfs_rq->runtime_enabled = 0; +} + +static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) +{} +#else +static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} +static void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} +static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} + +static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) +{ + return NULL; +} +#endif /* CONFIG_CFS_BANDWIDTH */ +#endif /* CONFIG_FAIR_GROUP_SCHED */ + /* Real-Time classes' related field in a runqueue: */ struct rt_rq { struct rt_prio_array active; @@ -7971,6 +8020,7 @@ static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq, /* allow initial update_cfs_load() to truncate */ cfs_rq->load_stamp = 1; #endif + init_cfs_rq_runtime(cfs_rq); tg->cfs_rq[cpu] = cfs_rq; tg->se[cpu] = se; @@ -8110,6 +8160,7 @@ void __init sched_init(void) * We achieve this by letting root_task_group's tasks sit * directly in rq->cfs (i.e root_task_group->se[] = NULL). 
*/ + init_cfs_bandwidth(&root_task_group.cfs_bandwidth); init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL); #endif /* CONFIG_FAIR_GROUP_SCHED */ @@ -8351,6 +8402,8 @@ static void free_fair_sched_group(struct task_group *tg) { int i; + destroy_cfs_bandwidth(tg_cfs_bandwidth(tg)); + for_each_possible_cpu(i) { if (tg->cfs_rq) kfree(tg->cfs_rq[i]); @@ -8378,6 +8431,8 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent) tg->shares = NICE_0_LOAD; + init_cfs_bandwidth(tg_cfs_bandwidth(tg)); + for_each_possible_cpu(i) { cfs_rq = kzalloc_node(sizeof(struct cfs_rq), GFP_KERNEL, cpu_to_node(i)); @@ -8753,7 +8808,7 @@ static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) return walk_tg_tree(tg_schedulable, tg_nop, &data); } -static int tg_set_bandwidth(struct task_group *tg, +static int tg_set_rt_bandwidth(struct task_group *tg, u64 rt_period, u64 rt_runtime) { int i, err = 0; @@ -8792,7 +8847,7 @@ int sched_group_set_rt_runtime(struct task_group *tg, long rt_runtime_us) if (rt_runtime_us < 0) rt_runtime = RUNTIME_INF; - return tg_set_bandwidth(tg, rt_period, rt_runtime); + return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); } long sched_group_rt_runtime(struct task_group *tg) @@ -8817,7 +8872,7 @@ int sched_group_set_rt_period(struct task_group *tg, long rt_period_us) if (rt_period == 0) return -EINVAL; - return tg_set_bandwidth(tg, rt_period, rt_runtime); + return tg_set_rt_bandwidth(tg, rt_period, rt_runtime); } long sched_group_rt_period(struct task_group *tg) @@ -9007,6 +9062,128 @@ static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft) return (u64) scale_load_down(tg->shares); } + +#ifdef CONFIG_CFS_BANDWIDTH +const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */ +const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */ + +static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) +{ + int i; + struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); + static DEFINE_MUTEX(mutex); + + if (tg == &root_task_group) + return -EINVAL; + + /* + * Ensure we have at some amount of bandwidth every period. This is + * to prevent reaching a state of large arrears when throttled via + * entity_tick() resulting in prolonged exit starvation. + */ + if (quota < min_cfs_quota_period || period < min_cfs_quota_period) + return -EINVAL; + + /* + * Likewise, bound things on the otherside by preventing insane quota + * periods. This also allows us to normalize in computing quota + * feasibility. 
+ */ + if (period > max_cfs_quota_period) + return -EINVAL; + + mutex_lock(&mutex); + raw_spin_lock_irq(&cfs_b->lock); + cfs_b->period = ns_to_ktime(period); + cfs_b->quota = quota; + raw_spin_unlock_irq(&cfs_b->lock); + + for_each_possible_cpu(i) { + struct cfs_rq *cfs_rq = tg->cfs_rq[i]; + struct rq *rq = rq_of(cfs_rq); + + raw_spin_lock_irq(&rq->lock); + cfs_rq->runtime_enabled = quota != RUNTIME_INF; + cfs_rq->runtime_remaining = 0; + raw_spin_unlock_irq(&rq->lock); + } + mutex_unlock(&mutex); + + return 0; +} + +int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) +{ + u64 quota, period; + + period = ktime_to_ns(tg_cfs_bandwidth(tg)->period); + if (cfs_quota_us < 0) + quota = RUNTIME_INF; + else + quota = (u64)cfs_quota_us * NSEC_PER_USEC; + + return tg_set_cfs_bandwidth(tg, period, quota); +} + +long tg_get_cfs_quota(struct task_group *tg) +{ + u64 quota_us; + + if (tg_cfs_bandwidth(tg)->quota == RUNTIME_INF) + return -1; + + quota_us = tg_cfs_bandwidth(tg)->quota; + do_div(quota_us, NSEC_PER_USEC); + + return quota_us; +} + +int tg_set_cfs_period(struct task_group *tg, long cfs_period_us) +{ + u64 quota, period; + + period = (u64)cfs_period_us * NSEC_PER_USEC; + quota = tg_cfs_bandwidth(tg)->quota; + + if (period <= 0) + return -EINVAL; + + return tg_set_cfs_bandwidth(tg, period, quota); +} + +long tg_get_cfs_period(struct task_group *tg) +{ + u64 cfs_period_us; + + cfs_period_us = ktime_to_ns(tg_cfs_bandwidth(tg)->period); + do_div(cfs_period_us, NSEC_PER_USEC); + + return cfs_period_us; +} + +static s64 cpu_cfs_quota_read_s64(struct cgroup *cgrp, struct cftype *cft) +{ + return tg_get_cfs_quota(cgroup_tg(cgrp)); +} + +static int cpu_cfs_quota_write_s64(struct cgroup *cgrp, struct cftype *cftype, + s64 cfs_quota_us) +{ + return tg_set_cfs_quota(cgroup_tg(cgrp), cfs_quota_us); +} + +static u64 cpu_cfs_period_read_u64(struct cgroup *cgrp, struct cftype *cft) +{ + return tg_get_cfs_period(cgroup_tg(cgrp)); +} + +static int cpu_cfs_period_write_u64(struct cgroup *cgrp, struct cftype *cftype, + u64 cfs_period_us) +{ + return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us); +} + +#endif /* CONFIG_CFS_BANDWIDTH */ #endif /* CONFIG_FAIR_GROUP_SCHED */ #ifdef CONFIG_RT_GROUP_SCHED @@ -9041,6 +9218,18 @@ static struct cftype cpu_files[] = { .write_u64 = cpu_shares_write_u64, }, #endif +#ifdef CONFIG_CFS_BANDWIDTH + { + .name = "cfs_quota_us", + .read_s64 = cpu_cfs_quota_read_s64, + .write_s64 = cpu_cfs_quota_write_s64, + }, + { + .name = "cfs_period_us", + .read_u64 = cpu_cfs_period_read_u64, + .write_u64 = cpu_cfs_period_write_u64, + }, +#endif #ifdef CONFIG_RT_GROUP_SCHED { .name = "rt_runtime_us", @@ -9350,4 +9539,3 @@ struct cgroup_subsys cpuacct_subsys = { .subsys_id = cpuacct_subsys_id, }; #endif /* CONFIG_CGROUP_CPUACCT */ - diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index f86b0cb5eb2..f24f4171019 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -1234,6 +1234,22 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) check_preempt_tick(cfs_rq, curr); } + +/************************************************** + * CFS bandwidth control machinery + */ + +#ifdef CONFIG_CFS_BANDWIDTH +/* + * default period for cfs group bandwidth. 
+ * default: 0.1s, units: nanoseconds + */ +static inline u64 default_cfs_period(void) +{ + return 100000000ULL; +} +#endif + /************************************************** * CFS operations on tasks: */ From a790de99599a29ad3f18667530cf4b9f4b7e3234 Mon Sep 17 00:00:00 2001 From: Paul Turner Date: Thu, 21 Jul 2011 09:43:29 -0700 Subject: [PATCH 15/46] sched: Validate CFS quota hierarchies Add constraints validation for CFS bandwidth hierarchies. Validate that: max(child bandwidth) <= parent_bandwidth In a quota limited hierarchy, an unconstrained entity (e.g. bandwidth==RUNTIME_INF) inherits the bandwidth of its parent. This constraint is chosen over sum(child_bandwidth) as notion of over-commit is valuable within SCHED_OTHER. Some basic code from the RT case is re-factored for reuse. Signed-off-by: Paul Turner Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/20110721184757.083774572@google.com Signed-off-by: Ingo Molnar --- kernel/sched.c | 112 ++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 98 insertions(+), 14 deletions(-) diff --git a/kernel/sched.c b/kernel/sched.c index f08cb23be96..ea6850d93b2 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -252,6 +252,7 @@ struct cfs_bandwidth { raw_spinlock_t lock; ktime_t period; u64 quota; + s64 hierarchal_quota; #endif }; @@ -1518,7 +1519,8 @@ static inline void dec_cpu_load(struct rq *rq, unsigned long load) update_load_sub(&rq->load, load); } -#if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)) || defined(CONFIG_RT_GROUP_SCHED) +#if defined(CONFIG_RT_GROUP_SCHED) || (defined(CONFIG_FAIR_GROUP_SCHED) && \ + (defined(CONFIG_SMP) || defined(CONFIG_CFS_BANDWIDTH))) typedef int (*tg_visitor)(struct task_group *, void *); /* @@ -8708,12 +8710,7 @@ unsigned long sched_group_shares(struct task_group *tg) } #endif -#ifdef CONFIG_RT_GROUP_SCHED -/* - * Ensure that the real time constraints are schedulable. - */ -static DEFINE_MUTEX(rt_constraints_mutex); - +#if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_CFS_BANDWIDTH) static unsigned long to_ratio(u64 period, u64 runtime) { if (runtime == RUNTIME_INF) @@ -8721,6 +8718,13 @@ static unsigned long to_ratio(u64 period, u64 runtime) return div64_u64(runtime << 20, period); } +#endif + +#ifdef CONFIG_RT_GROUP_SCHED +/* + * Ensure that the real time constraints are schedulable. 
+ */ +static DEFINE_MUTEX(rt_constraints_mutex); /* Must be called with tasklist_lock held */ static inline int tg_has_rt_tasks(struct task_group *tg) @@ -8741,7 +8745,7 @@ struct rt_schedulable_data { u64 rt_runtime; }; -static int tg_schedulable(struct task_group *tg, void *data) +static int tg_rt_schedulable(struct task_group *tg, void *data) { struct rt_schedulable_data *d = data; struct task_group *child; @@ -8805,7 +8809,7 @@ static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) .rt_runtime = runtime, }; - return walk_tg_tree(tg_schedulable, tg_nop, &data); + return walk_tg_tree(tg_rt_schedulable, tg_nop, &data); } static int tg_set_rt_bandwidth(struct task_group *tg, @@ -9064,14 +9068,17 @@ static u64 cpu_shares_read_u64(struct cgroup *cgrp, struct cftype *cft) } #ifdef CONFIG_CFS_BANDWIDTH +static DEFINE_MUTEX(cfs_constraints_mutex); + const u64 max_cfs_quota_period = 1 * NSEC_PER_SEC; /* 1s */ const u64 min_cfs_quota_period = 1 * NSEC_PER_MSEC; /* 1ms */ +static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); + static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) { - int i; + int i, ret = 0; struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); - static DEFINE_MUTEX(mutex); if (tg == &root_task_group) return -EINVAL; @@ -9092,7 +9099,11 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) if (period > max_cfs_quota_period) return -EINVAL; - mutex_lock(&mutex); + mutex_lock(&cfs_constraints_mutex); + ret = __cfs_schedulable(tg, period, quota); + if (ret) + goto out_unlock; + raw_spin_lock_irq(&cfs_b->lock); cfs_b->period = ns_to_ktime(period); cfs_b->quota = quota; @@ -9107,9 +9118,10 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) cfs_rq->runtime_remaining = 0; raw_spin_unlock_irq(&rq->lock); } - mutex_unlock(&mutex); +out_unlock: + mutex_unlock(&cfs_constraints_mutex); - return 0; + return ret; } int tg_set_cfs_quota(struct task_group *tg, long cfs_quota_us) @@ -9183,6 +9195,78 @@ static int cpu_cfs_period_write_u64(struct cgroup *cgrp, struct cftype *cftype, return tg_set_cfs_period(cgroup_tg(cgrp), cfs_period_us); } +struct cfs_schedulable_data { + struct task_group *tg; + u64 period, quota; +}; + +/* + * normalize group quota/period to be quota/max_period + * note: units are usecs + */ +static u64 normalize_cfs_quota(struct task_group *tg, + struct cfs_schedulable_data *d) +{ + u64 quota, period; + + if (tg == d->tg) { + period = d->period; + quota = d->quota; + } else { + period = tg_get_cfs_period(tg); + quota = tg_get_cfs_quota(tg); + } + + /* note: these should typically be equivalent */ + if (quota == RUNTIME_INF || quota == -1) + return RUNTIME_INF; + + return to_ratio(period, quota); +} + +static int tg_cfs_schedulable_down(struct task_group *tg, void *data) +{ + struct cfs_schedulable_data *d = data; + struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); + s64 quota = 0, parent_quota = -1; + + if (!tg->parent) { + quota = RUNTIME_INF; + } else { + struct cfs_bandwidth *parent_b = tg_cfs_bandwidth(tg->parent); + + quota = normalize_cfs_quota(tg, d); + parent_quota = parent_b->hierarchal_quota; + + /* + * ensure max(child_quota) <= parent_quota, inherit when no + * limit is set + */ + if (quota == RUNTIME_INF) + quota = parent_quota; + else if (parent_quota != RUNTIME_INF && quota > parent_quota) + return -EINVAL; + } + cfs_b->hierarchal_quota = quota; + + return 0; +} + +static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota) 
+{ + struct cfs_schedulable_data data = { + .tg = tg, + .period = period, + .quota = quota, + }; + + if (quota != RUNTIME_INF) { + do_div(data.period, NSEC_PER_USEC); + do_div(data.quota, NSEC_PER_USEC); + } + + return walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data); +} #endif /* CONFIG_CFS_BANDWIDTH */ #endif /* CONFIG_FAIR_GROUP_SCHED */ From ec12cb7f31e28854efae7dd6f9544e0a66379040 Mon Sep 17 00:00:00 2001 From: Paul Turner Date: Thu, 21 Jul 2011 09:43:30 -0700 Subject: [PATCH 16/46] sched: Accumulate per-cfs_rq cpu usage and charge against bandwidth Account bandwidth usage on the cfs_rq level versus the task_groups to which they belong. Whether we are tracking bandwidth on a given cfs_rq is maintained under cfs_rq->runtime_enabled. cfs_rq's which belong to a bandwidth constrained task_group have their runtime accounted via the update_curr() path, which withdraws bandwidth from the global pool as desired. Updates involving the global pool are currently protected under cfs_bandwidth->lock, local runtime is protected by rq->lock. This patch only assigns and tracks quota, no action is taken in the case that cfs_rq->runtime_used exceeds cfs_rq->runtime_assigned. Signed-off-by: Paul Turner Signed-off-by: Nikhil Rao Signed-off-by: Bharata B Rao Reviewed-by: Hidetoshi Seto Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/20110721184757.179386821@google.com Signed-off-by: Ingo Molnar --- include/linux/sched.h | 4 +++ kernel/sched.c | 4 ++- kernel/sched_fair.c | 79 +++++++++++++++++++++++++++++++++++++++++-- kernel/sysctl.c | 10 ++++++ 4 files changed, 94 insertions(+), 3 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index 4ac2c0578e0..bc6f5f2e24f 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -2040,6 +2040,10 @@ static inline void sched_autogroup_fork(struct signal_struct *sig) { } static inline void sched_autogroup_exit(struct signal_struct *sig) { } #endif +#ifdef CONFIG_CFS_BANDWIDTH +extern unsigned int sysctl_sched_cfs_bandwidth_slice; +#endif + #ifdef CONFIG_RT_MUTEXES extern int rt_mutex_getprio(struct task_struct *p); extern void rt_mutex_setprio(struct task_struct *p, int prio); diff --git a/kernel/sched.c b/kernel/sched.c index ea6850d93b2..35561c63a49 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -251,7 +251,7 @@ struct cfs_bandwidth { #ifdef CONFIG_CFS_BANDWIDTH raw_spinlock_t lock; ktime_t period; - u64 quota; + u64 quota, runtime; s64 hierarchal_quota; #endif }; @@ -407,6 +407,7 @@ static inline u64 default_cfs_period(void); static void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) { raw_spin_lock_init(&cfs_b->lock); + cfs_b->runtime = 0; cfs_b->quota = RUNTIME_INF; cfs_b->period = ns_to_ktime(default_cfs_period()); } @@ -9107,6 +9108,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) raw_spin_lock_irq(&cfs_b->lock); cfs_b->period = ns_to_ktime(period); cfs_b->quota = quota; + cfs_b->runtime = quota; raw_spin_unlock_irq(&cfs_b->lock); for_each_possible_cpu(i) { diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index f24f4171019..9502aa899f7 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -89,6 +89,20 @@ const_debug unsigned int sysctl_sched_migration_cost = 500000UL; */ unsigned int __read_mostly sysctl_sched_shares_window = 10000000UL; +#ifdef CONFIG_CFS_BANDWIDTH +/* + * Amount of runtime to allocate from global (tg) to local (per-cfs_rq) pool + * each time a cfs_rq requests quota. 
+ * + * Note: in the case that the slice exceeds the runtime remaining (either due + * to consumption or the quota being specified to be smaller than the slice) + * we will always only issue the remaining available time. + * + * default: 5 msec, units: microseconds + */ +unsigned int sysctl_sched_cfs_bandwidth_slice = 5000UL; +#endif + static const struct sched_class fair_sched_class; /************************************************************** @@ -292,6 +306,8 @@ find_matching_se(struct sched_entity **se, struct sched_entity **pse) #endif /* CONFIG_FAIR_GROUP_SCHED */ +static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, + unsigned long delta_exec); /************************************************************** * Scheduling class tree data structure manipulation methods: @@ -583,6 +599,8 @@ static void update_curr(struct cfs_rq *cfs_rq) cpuacct_charge(curtask, delta_exec); account_group_exec_runtime(curtask, delta_exec); } + + account_cfs_rq_runtime(cfs_rq, delta_exec); } static inline void @@ -1248,6 +1266,58 @@ static inline u64 default_cfs_period(void) { return 100000000ULL; } + +static inline u64 sched_cfs_bandwidth_slice(void) +{ + return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC; +} + +static void assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) +{ + struct task_group *tg = cfs_rq->tg; + struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); + u64 amount = 0, min_amount; + + /* note: this is a positive sum as runtime_remaining <= 0 */ + min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining; + + raw_spin_lock(&cfs_b->lock); + if (cfs_b->quota == RUNTIME_INF) + amount = min_amount; + else if (cfs_b->runtime > 0) { + amount = min(cfs_b->runtime, min_amount); + cfs_b->runtime -= amount; + } + raw_spin_unlock(&cfs_b->lock); + + cfs_rq->runtime_remaining += amount; +} + +static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, + unsigned long delta_exec) +{ + if (!cfs_rq->runtime_enabled) + return; + + cfs_rq->runtime_remaining -= delta_exec; + if (cfs_rq->runtime_remaining > 0) + return; + + assign_cfs_rq_runtime(cfs_rq); +} + +static __always_inline void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, + unsigned long delta_exec) +{ + if (!cfs_rq->runtime_enabled) + return; + + __account_cfs_rq_runtime(cfs_rq, delta_exec); +} + +#else +static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, + unsigned long delta_exec) {} #endif /************************************************** @@ -4266,8 +4336,13 @@ static void set_curr_task_fair(struct rq *rq) { struct sched_entity *se = &rq->curr->se; - for_each_sched_entity(se) - set_next_entity(cfs_rq_of(se), se); + for_each_sched_entity(se) { + struct cfs_rq *cfs_rq = cfs_rq_of(se); + + set_next_entity(cfs_rq, se); + /* ensure bandwidth has been allocated on our new cfs_rq */ + account_cfs_rq_runtime(cfs_rq, 0); + } } #ifdef CONFIG_FAIR_GROUP_SCHED diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 11d65b531e5..2d2ecdcc8cd 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c @@ -379,6 +379,16 @@ static struct ctl_table kern_table[] = { .extra2 = &one, }, #endif +#ifdef CONFIG_CFS_BANDWIDTH + { + .procname = "sched_cfs_bandwidth_slice_us", + .data = &sysctl_sched_cfs_bandwidth_slice, + .maxlen = sizeof(unsigned int), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = &one, + }, +#endif #ifdef CONFIG_PROVE_LOCKING { .procname = "prove_locking", From 58088ad0152ba4b7997388c93d0ca208ec1ece75 Mon Sep 17 00:00:00 2001 From: Paul Turner Date: Thu, 21 Jul 2011 09:43:31 -0700 Subject: [PATCH 17/46] sched: 
Add a timer to handle CFS bandwidth refresh This patch adds a per-task_group timer which handles the refresh of the global CFS bandwidth pool. Since the RT pool is using a similar timer there's some small refactoring to share this support. Signed-off-by: Paul Turner Reviewed-by: Hidetoshi Seto Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/20110721184757.277271273@google.com Signed-off-by: Ingo Molnar --- kernel/sched.c | 107 +++++++++++++++++++++++++++++++++++--------- kernel/sched_fair.c | 40 +++++++++++++++-- 2 files changed, 123 insertions(+), 24 deletions(-) diff --git a/kernel/sched.c b/kernel/sched.c index 35561c63a49..34bf8e6db9a 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -196,10 +196,28 @@ static inline int rt_bandwidth_enabled(void) return sysctl_sched_rt_runtime >= 0; } +static void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period) +{ + unsigned long delta; + ktime_t soft, hard, now; + + for (;;) { + if (hrtimer_active(period_timer)) + break; + + now = hrtimer_cb_get_time(period_timer); + hrtimer_forward(period_timer, now, period); + + soft = hrtimer_get_softexpires(period_timer); + hard = hrtimer_get_expires(period_timer); + delta = ktime_to_ns(ktime_sub(hard, soft)); + __hrtimer_start_range_ns(period_timer, soft, delta, + HRTIMER_MODE_ABS_PINNED, 0); + } +} + static void start_rt_bandwidth(struct rt_bandwidth *rt_b) { - ktime_t now; - if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF) return; @@ -207,22 +225,7 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b) return; raw_spin_lock(&rt_b->rt_runtime_lock); - for (;;) { - unsigned long delta; - ktime_t soft, hard; - - if (hrtimer_active(&rt_b->rt_period_timer)) - break; - - now = hrtimer_cb_get_time(&rt_b->rt_period_timer); - hrtimer_forward(&rt_b->rt_period_timer, now, rt_b->rt_period); - - soft = hrtimer_get_softexpires(&rt_b->rt_period_timer); - hard = hrtimer_get_expires(&rt_b->rt_period_timer); - delta = ktime_to_ns(ktime_sub(hard, soft)); - __hrtimer_start_range_ns(&rt_b->rt_period_timer, soft, delta, - HRTIMER_MODE_ABS_PINNED, 0); - } + start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period); raw_spin_unlock(&rt_b->rt_runtime_lock); } @@ -253,6 +256,9 @@ struct cfs_bandwidth { ktime_t period; u64 quota, runtime; s64 hierarchal_quota; + + int idle, timer_active; + struct hrtimer period_timer; #endif }; @@ -403,6 +409,28 @@ static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) } static inline u64 default_cfs_period(void); +static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun); + +static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) +{ + struct cfs_bandwidth *cfs_b = + container_of(timer, struct cfs_bandwidth, period_timer); + ktime_t now; + int overrun; + int idle = 0; + + for (;;) { + now = hrtimer_cb_get_time(timer); + overrun = hrtimer_forward(timer, now, cfs_b->period); + + if (!overrun) + break; + + idle = do_sched_cfs_period_timer(cfs_b, overrun); + } + + return idle ? 
HRTIMER_NORESTART : HRTIMER_RESTART; +} static void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) { @@ -410,6 +438,9 @@ static void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) cfs_b->runtime = 0; cfs_b->quota = RUNTIME_INF; cfs_b->period = ns_to_ktime(default_cfs_period()); + + hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + cfs_b->period_timer.function = sched_cfs_period_timer; } static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) @@ -417,8 +448,34 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) cfs_rq->runtime_enabled = 0; } +/* requires cfs_b->lock, may release to reprogram timer */ +static void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) +{ + /* + * The timer may be active because we're trying to set a new bandwidth + * period or because we're racing with the tear-down path + * (timer_active==0 becomes visible before the hrtimer call-back + * terminates). In either case we ensure that it's re-programmed + */ + while (unlikely(hrtimer_active(&cfs_b->period_timer))) { + raw_spin_unlock(&cfs_b->lock); + /* ensure cfs_b->lock is available while we wait */ + hrtimer_cancel(&cfs_b->period_timer); + + raw_spin_lock(&cfs_b->lock); + /* if someone else restarted the timer then we're done */ + if (cfs_b->timer_active) + return; + } + + cfs_b->timer_active = 1; + start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period); +} + static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) -{} +{ + hrtimer_cancel(&cfs_b->period_timer); +} #else static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} static void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) {} @@ -9078,7 +9135,7 @@ static int __cfs_schedulable(struct task_group *tg, u64 period, u64 runtime); static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) { - int i, ret = 0; + int i, ret = 0, runtime_enabled; struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); if (tg == &root_task_group) @@ -9105,10 +9162,18 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) if (ret) goto out_unlock; + runtime_enabled = quota != RUNTIME_INF; raw_spin_lock_irq(&cfs_b->lock); cfs_b->period = ns_to_ktime(period); cfs_b->quota = quota; cfs_b->runtime = quota; + + /* restart the period timer (if active) to handle new period expiry */ + if (runtime_enabled && cfs_b->timer_active) { + /* force a reprogram */ + cfs_b->timer_active = 0; + __start_cfs_bandwidth(cfs_b); + } raw_spin_unlock_irq(&cfs_b->lock); for_each_possible_cpu(i) { @@ -9116,7 +9181,7 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) struct rq *rq = rq_of(cfs_rq); raw_spin_lock_irq(&rq->lock); - cfs_rq->runtime_enabled = quota != RUNTIME_INF; + cfs_rq->runtime_enabled = runtime_enabled; cfs_rq->runtime_remaining = 0; raw_spin_unlock_irq(&rq->lock); } diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 9502aa899f7..af73a8a85ee 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -1284,9 +1284,16 @@ static void assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) raw_spin_lock(&cfs_b->lock); if (cfs_b->quota == RUNTIME_INF) amount = min_amount; - else if (cfs_b->runtime > 0) { - amount = min(cfs_b->runtime, min_amount); - cfs_b->runtime -= amount; + else { + /* ensure bandwidth timer remains active under consumption */ + if (!cfs_b->timer_active) + __start_cfs_bandwidth(cfs_b); + + if (cfs_b->runtime > 0) { + amount = min(cfs_b->runtime, min_amount); + cfs_b->runtime -= amount; + cfs_b->idle = 0; + } } raw_spin_unlock(&cfs_b->lock); @@ -1315,6 +1322,33 @@ 
static __always_inline void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, __account_cfs_rq_runtime(cfs_rq, delta_exec); } +/* + * Responsible for refilling a task_group's bandwidth and unthrottling its + * cfs_rqs as appropriate. If there has been no activity within the last + * period the timer is deactivated until scheduling resumes; cfs_b->idle is + * used to track this state. + */ +static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun) +{ + int idle = 1; + + raw_spin_lock(&cfs_b->lock); + /* no need to continue the timer with no bandwidth constraint */ + if (cfs_b->quota == RUNTIME_INF) + goto out_unlock; + + idle = cfs_b->idle; + cfs_b->runtime = cfs_b->quota; + + /* mark as potentially idle for the upcoming period */ + cfs_b->idle = 1; +out_unlock: + if (idle) + cfs_b->timer_active = 0; + raw_spin_unlock(&cfs_b->lock); + + return idle; +} #else static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec) {} From a9cf55b2861057a213e610da2fec52125439a11d Mon Sep 17 00:00:00 2001 From: Paul Turner Date: Thu, 21 Jul 2011 09:43:32 -0700 Subject: [PATCH 18/46] sched: Expire invalid runtime Since quota is managed using a global state but consumed on a per-cpu basis we need to ensure that our per-cpu state is appropriately synchronized. Most importantly, runtime that is state (from a previous period) should not be locally consumable. We take advantage of existing sched_clock synchronization about the jiffy to efficiently detect whether we have (globally) crossed a quota boundary above. One catch is that the direction of spread on sched_clock is undefined, specifically, we don't know whether our local clock is behind or ahead of the one responsible for the current expiration time. Fortunately we can differentiate these by considering whether the global deadline has advanced. If it has not, then we assume our clock to be "fast" and advance our local expiration; otherwise, we know the deadline has truly passed and we expire our local runtime. 
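For illustration, the expiry decision described above can be sketched in isolation as stand-alone user-space C. The flat structure, the 1ms tick value and the function names below are simplifications assumed for the sketch, not the kernel's own code:

  /* illustrative sketch of the expiry decision, not kernel code */
  #include <stdint.h>
  #include <stdio.h>

  #define TICK_NSEC 1000000ULL            /* assume a 1ms tick for this sketch */

  struct cfs_rq_sketch {
          int64_t  runtime_remaining;     /* nanoseconds of local runtime left */
          uint64_t runtime_expires;       /* local view of the period deadline */
  };

  /* decide whether local runtime is stale, given this cpu's clock and the
   * deadline published by the global bandwidth pool */
  static void expire_runtime(struct cfs_rq_sketch *cfs_rq, uint64_t rq_clock,
                             uint64_t global_expires)
  {
          /* local deadline still ahead of our clock: nothing to do */
          if ((int64_t)(rq_clock - cfs_rq->runtime_expires) < 0)
                  return;

          if ((int64_t)(cfs_rq->runtime_expires - global_expires) >= 0) {
                  /* global deadline has not advanced: our clock is merely
                   * "fast", so extend the local deadline (drift is bounded) */
                  cfs_rq->runtime_expires += TICK_NSEC;
          } else {
                  /* global deadline moved on: the period truly expired */
                  cfs_rq->runtime_remaining = 0;
          }
  }

  int main(void)
  {
          struct cfs_rq_sketch rq = { .runtime_remaining = 500000,
                                      .runtime_expires   = 1000000 };

          /* local clock already past the deadline, global deadline unchanged */
          expire_runtime(&rq, 1200000, 1000000);
          printf("remaining=%lld expires=%llu\n",
                 (long long)rq.runtime_remaining,
                 (unsigned long long)rq.runtime_expires);
          return 0;
  }

In the "fast clock" branch the local deadline is simply pushed out by one tick rather than the remaining runtime being discarded.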
Signed-off-by: Paul Turner Reviewed-by: Hidetoshi Seto Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/20110721184757.379275352@google.com Signed-off-by: Ingo Molnar --- kernel/sched.c | 4 +- kernel/sched_fair.c | 90 ++++++++++++++++++++++++++++++++++++++++----- 2 files changed, 84 insertions(+), 10 deletions(-) diff --git a/kernel/sched.c b/kernel/sched.c index 34bf8e6db9a..a2d55144bd9 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -256,6 +256,7 @@ struct cfs_bandwidth { ktime_t period; u64 quota, runtime; s64 hierarchal_quota; + u64 runtime_expires; int idle, timer_active; struct hrtimer period_timer; @@ -396,6 +397,7 @@ struct cfs_rq { #endif #ifdef CONFIG_CFS_BANDWIDTH int runtime_enabled; + u64 runtime_expires; s64 runtime_remaining; #endif #endif @@ -9166,8 +9168,8 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) raw_spin_lock_irq(&cfs_b->lock); cfs_b->period = ns_to_ktime(period); cfs_b->quota = quota; - cfs_b->runtime = quota; + __refill_cfs_bandwidth_runtime(cfs_b); /* restart the period timer (if active) to handle new period expiry */ if (runtime_enabled && cfs_b->timer_active) { /* force a reprogram */ diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index af73a8a85ee..9d1adbd0b61 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -1272,11 +1272,30 @@ static inline u64 sched_cfs_bandwidth_slice(void) return (u64)sysctl_sched_cfs_bandwidth_slice * NSEC_PER_USEC; } +/* + * Replenish runtime according to assigned quota and update expiration time. + * We use sched_clock_cpu directly instead of rq->clock to avoid adding + * additional synchronization around rq->lock. + * + * requires cfs_b->lock + */ +static void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b) +{ + u64 now; + + if (cfs_b->quota == RUNTIME_INF) + return; + + now = sched_clock_cpu(smp_processor_id()); + cfs_b->runtime = cfs_b->quota; + cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period); +} + static void assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) { struct task_group *tg = cfs_rq->tg; struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); - u64 amount = 0, min_amount; + u64 amount = 0, min_amount, expires; /* note: this is a positive sum as runtime_remaining <= 0 */ min_amount = sched_cfs_bandwidth_slice() - cfs_rq->runtime_remaining; @@ -1285,9 +1304,16 @@ static void assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) if (cfs_b->quota == RUNTIME_INF) amount = min_amount; else { - /* ensure bandwidth timer remains active under consumption */ - if (!cfs_b->timer_active) + /* + * If the bandwidth pool has become inactive, then at least one + * period must have elapsed since the last consumption. + * Refresh the global state and ensure bandwidth timer becomes + * active. + */ + if (!cfs_b->timer_active) { + __refill_cfs_bandwidth_runtime(cfs_b); __start_cfs_bandwidth(cfs_b); + } if (cfs_b->runtime > 0) { amount = min(cfs_b->runtime, min_amount); @@ -1295,19 +1321,61 @@ static void assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) cfs_b->idle = 0; } } + expires = cfs_b->runtime_expires; raw_spin_unlock(&cfs_b->lock); cfs_rq->runtime_remaining += amount; + /* + * we may have advanced our local expiration to account for allowed + * spread between our sched_clock and the one on which runtime was + * issued. + */ + if ((s64)(expires - cfs_rq->runtime_expires) > 0) + cfs_rq->runtime_expires = expires; +} + +/* + * Note: This depends on the synchronization provided by sched_clock and the + * fact that rq->clock snapshots this value. 
+ */ +static void expire_cfs_rq_runtime(struct cfs_rq *cfs_rq) +{ + struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); + struct rq *rq = rq_of(cfs_rq); + + /* if the deadline is ahead of our clock, nothing to do */ + if (likely((s64)(rq->clock - cfs_rq->runtime_expires) < 0)) + return; + + if (cfs_rq->runtime_remaining < 0) + return; + + /* + * If the local deadline has passed we have to consider the + * possibility that our sched_clock is 'fast' and the global deadline + * has not truly expired. + * + * Fortunately we can check determine whether this the case by checking + * whether the global deadline has advanced. + */ + + if ((s64)(cfs_rq->runtime_expires - cfs_b->runtime_expires) >= 0) { + /* extend local deadline, drift is bounded above by 2 ticks */ + cfs_rq->runtime_expires += TICK_NSEC; + } else { + /* global deadline is ahead, expiration has passed */ + cfs_rq->runtime_remaining = 0; + } } static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec) { - if (!cfs_rq->runtime_enabled) - return; - + /* dock delta_exec before expiring quota (as it could span periods) */ cfs_rq->runtime_remaining -= delta_exec; - if (cfs_rq->runtime_remaining > 0) + expire_cfs_rq_runtime(cfs_rq); + + if (likely(cfs_rq->runtime_remaining > 0)) return; assign_cfs_rq_runtime(cfs_rq); @@ -1338,7 +1406,12 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun) goto out_unlock; idle = cfs_b->idle; - cfs_b->runtime = cfs_b->quota; + /* if we're going inactive then everything else can be deferred */ + if (idle) + goto out_unlock; + + __refill_cfs_bandwidth_runtime(cfs_b); + /* mark as potentially idle for the upcoming period */ cfs_b->idle = 1; @@ -1557,7 +1630,6 @@ static long effective_load(struct task_group *tg, int cpu, long wl, long wg) return wl; } - #else static inline unsigned long effective_load(struct task_group *tg, int cpu, From 85dac906bec3bb41bfaa7ccaa65c4706de5cfdf8 Mon Sep 17 00:00:00 2001 From: Paul Turner Date: Thu, 21 Jul 2011 09:43:33 -0700 Subject: [PATCH 19/46] sched: Add support for throttling group entities Now that consumption is tracked (via update_curr()) we add support to throttle group entities (and their corresponding cfs_rqs) in the case where this is no run-time remaining. Throttled entities are dequeued to prevent scheduling, additionally we mark them as throttled (using cfs_rq->throttled) to prevent them from becoming re-enqueued until they are unthrottled. A list of a task_group's throttled entities are maintained on the cfs_bandwidth structure. Note: While the machinery for throttling is added in this patch the act of throttling an entity exceeding its bandwidth is deferred until later within the series. 
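The hierarchical part of the dequeue can be pictured with a stand-alone sketch that walks up the parent entities and stops dequeuing at the first level which still has other runnable weight. The two-level structures and names below are illustrative assumptions, not the kernel's types:

  /* illustrative sketch of the hierarchical dequeue, not kernel code */
  #include <stdio.h>

  struct rq_sketch {
          unsigned long weight;           /* total runnable weight queued here */
  };

  struct entity {
          struct entity *parent;          /* group entity one level up */
          struct rq_sketch *queue;        /* the queue this entity sits on */
          int on_rq;
          unsigned long weight;
  };

  /* dequeue a throttled group from its ancestors, stopping once a level
   * still has other runnable entities besides this one */
  static void throttle_walk(struct entity *se)
  {
          int dequeue = 1;

          for (; se; se = se->parent) {
                  if (!se->on_rq)         /* an ancestor is already throttled */
                          break;
                  if (dequeue) {
                          se->on_rq = 0;
                          se->queue->weight -= se->weight;
                  }
                  if (se->queue->weight)  /* siblings remain runnable: stop */
                          dequeue = 0;
          }
  }

  int main(void)
  {
          struct rq_sketch cpu_q   = { .weight = 2048 }; /* cpu runqueue */
          struct rq_sketch group_q = { .weight = 1024 }; /* group's own queue */
          struct entity parent = { .parent = NULL, .queue = &cpu_q,
                                   .on_rq = 1, .weight = 1024 };
          struct entity child  = { .parent = &parent, .queue = &group_q,
                                   .on_rq = 1, .weight = 1024 };

          throttle_walk(&child);
          printf("child on_rq=%d parent on_rq=%d cpu weight=%lu\n",
                 child.on_rq, parent.on_rq, cpu_q.weight);
          return 0;
  }

Here the child group's queue becomes empty, so its entity is removed from the cpu queue as well, while the cpu queue keeps the weight contributed by unrelated tasks.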
Signed-off-by: Paul Turner Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/20110721184757.480608533@google.com Signed-off-by: Ingo Molnar --- kernel/sched.c | 7 ++++ kernel/sched_fair.c | 89 +++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 92 insertions(+), 4 deletions(-) diff --git a/kernel/sched.c b/kernel/sched.c index a2d55144bd9..044260a9418 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -260,6 +260,8 @@ struct cfs_bandwidth { int idle, timer_active; struct hrtimer period_timer; + struct list_head throttled_cfs_rq; + #endif }; @@ -399,6 +401,9 @@ struct cfs_rq { int runtime_enabled; u64 runtime_expires; s64 runtime_remaining; + + int throttled; + struct list_head throttled_list; #endif #endif }; @@ -441,6 +446,7 @@ static void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) cfs_b->quota = RUNTIME_INF; cfs_b->period = ns_to_ktime(default_cfs_period()); + INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); cfs_b->period_timer.function = sched_cfs_period_timer; } @@ -448,6 +454,7 @@ static void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) { cfs_rq->runtime_enabled = 0; + INIT_LIST_HEAD(&cfs_rq->throttled_list); } /* requires cfs_b->lock, may release to reprogram timer */ diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 9d1adbd0b61..72c9d4ed599 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -1291,7 +1291,8 @@ static void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b) cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period); } -static void assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) +/* returns 0 on failure to allocate runtime */ +static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) { struct task_group *tg = cfs_rq->tg; struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); @@ -1332,6 +1333,8 @@ static void assign_cfs_rq_runtime(struct cfs_rq *cfs_rq) */ if ((s64)(expires - cfs_rq->runtime_expires) > 0) cfs_rq->runtime_expires = expires; + + return cfs_rq->runtime_remaining > 0; } /* @@ -1378,7 +1381,12 @@ static void __account_cfs_rq_runtime(struct cfs_rq *cfs_rq, if (likely(cfs_rq->runtime_remaining > 0)) return; - assign_cfs_rq_runtime(cfs_rq); + /* + * if we're unable to extend our runtime we resched so that the active + * hierarchy can be throttled + */ + if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) + resched_task(rq_of(cfs_rq)->curr); } static __always_inline void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, @@ -1390,6 +1398,47 @@ static __always_inline void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, __account_cfs_rq_runtime(cfs_rq, delta_exec); } +static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) +{ + return cfs_rq->throttled; +} + +static __used void throttle_cfs_rq(struct cfs_rq *cfs_rq) +{ + struct rq *rq = rq_of(cfs_rq); + struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); + struct sched_entity *se; + long task_delta, dequeue = 1; + + se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; + + /* account load preceding throttle */ + update_cfs_load(cfs_rq, 0); + + task_delta = cfs_rq->h_nr_running; + for_each_sched_entity(se) { + struct cfs_rq *qcfs_rq = cfs_rq_of(se); + /* throttled entity or throttle-on-deactivate */ + if (!se->on_rq) + break; + + if (dequeue) + dequeue_entity(qcfs_rq, se, DEQUEUE_SLEEP); + qcfs_rq->h_nr_running -= task_delta; + + if (qcfs_rq->load.weight) + dequeue = 0; + } + + if (!se) + rq->nr_running -= task_delta; + + cfs_rq->throttled = 1; + 
raw_spin_lock(&cfs_b->lock); + list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); + raw_spin_unlock(&cfs_b->lock); +} + /* * Responsible for refilling a task_group's bandwidth and unthrottling its * cfs_rqs as appropriate. If there has been no activity within the last @@ -1425,6 +1474,11 @@ out_unlock: #else static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec) {} + +static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) +{ + return 0; +} #endif /************************************************** @@ -1503,7 +1557,17 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) break; cfs_rq = cfs_rq_of(se); enqueue_entity(cfs_rq, se, flags); + + /* + * end evaluation on encountering a throttled cfs_rq + * + * note: in the case of encountering a throttled cfs_rq we will + * post the final h_nr_running increment below. + */ + if (cfs_rq_throttled(cfs_rq)) + break; cfs_rq->h_nr_running++; + flags = ENQUEUE_WAKEUP; } @@ -1511,11 +1575,15 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags) cfs_rq = cfs_rq_of(se); cfs_rq->h_nr_running++; + if (cfs_rq_throttled(cfs_rq)) + break; + update_cfs_load(cfs_rq, 0); update_cfs_shares(cfs_rq); } - inc_nr_running(rq); + if (!se) + inc_nr_running(rq); hrtick_update(rq); } @@ -1535,6 +1603,15 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) for_each_sched_entity(se) { cfs_rq = cfs_rq_of(se); dequeue_entity(cfs_rq, se, flags); + + /* + * end evaluation on encountering a throttled cfs_rq + * + * note: in the case of encountering a throttled cfs_rq we will + * post the final h_nr_running decrement below. + */ + if (cfs_rq_throttled(cfs_rq)) + break; cfs_rq->h_nr_running--; /* Don't dequeue parent if it has other entities besides us */ @@ -1557,11 +1634,15 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags) cfs_rq = cfs_rq_of(se); cfs_rq->h_nr_running--; + if (cfs_rq_throttled(cfs_rq)) + break; + update_cfs_load(cfs_rq, 0); update_cfs_shares(cfs_rq); } - dec_nr_running(rq); + if (!se) + dec_nr_running(rq); hrtick_update(rq); } From 671fd9dabe5239ad218c7eb48b2b9edee50250e6 Mon Sep 17 00:00:00 2001 From: Paul Turner Date: Thu, 21 Jul 2011 09:43:34 -0700 Subject: [PATCH 20/46] sched: Add support for unthrottling group entities At the start of each period we refresh the global bandwidth pool. At this time we must also unthrottle any cfs_rq entities who are now within bandwidth once more (as quota permits). Unthrottled entities have their corresponding cfs_rq->throttled flag cleared and their entities re-enqueued. 
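The distribution step amounts to topping each throttled queue up to just past zero until the refreshed pool runs dry. A simplified, array-based sketch with illustrative names and no locking:

  /* illustrative sketch of runtime distribution, not kernel code */
  #include <stdint.h>
  #include <stdio.h>

  struct throttled_rq {
          int64_t runtime_remaining;      /* <= 0 while throttled */
          int     throttled;
  };

  /* hand out just enough runtime to bring each queue back above zero,
   * returning whatever is left of the refreshed pool */
  static uint64_t distribute_runtime(struct throttled_rq *rqs, int nr,
                                     uint64_t remaining)
  {
          for (int i = 0; i < nr && remaining; i++) {
                  uint64_t want = -rqs[i].runtime_remaining + 1;

                  if (want > remaining)
                          want = remaining;
                  remaining -= want;

                  rqs[i].runtime_remaining += want;
                  if (rqs[i].runtime_remaining > 0)
                          rqs[i].throttled = 0;   /* unthrottle */
          }
          return remaining;
  }

  int main(void)
  {
          struct throttled_rq rqs[] = {
                  { .runtime_remaining = -30000, .throttled = 1 },
                  { .runtime_remaining = -50000, .throttled = 1 },
          };
          uint64_t left = distribute_runtime(rqs, 2, 100000);

          printf("left=%llu rq0 throttled=%d rq1 throttled=%d\n",
                 (unsigned long long)left, rqs[0].throttled, rqs[1].throttled);
          return 0;
  }

A queue that only receives a partial top-up stays throttled and is revisited when more runtime becomes available.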
Signed-off-by: Paul Turner Reviewed-by: Hidetoshi Seto Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/20110721184757.574628950@google.com Signed-off-by: Ingo Molnar --- kernel/sched.c | 3 ++ kernel/sched_fair.c | 127 ++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 126 insertions(+), 4 deletions(-) diff --git a/kernel/sched.c b/kernel/sched.c index 044260a9418..4bbabc2c4a7 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -9192,6 +9192,9 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota) raw_spin_lock_irq(&rq->lock); cfs_rq->runtime_enabled = runtime_enabled; cfs_rq->runtime_remaining = 0; + + if (cfs_rq_throttled(cfs_rq)) + unthrottle_cfs_rq(cfs_rq); raw_spin_unlock_irq(&rq->lock); } out_unlock: diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 72c9d4ed599..76411950ff3 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -1439,6 +1439,84 @@ static __used void throttle_cfs_rq(struct cfs_rq *cfs_rq) raw_spin_unlock(&cfs_b->lock); } +static void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) +{ + struct rq *rq = rq_of(cfs_rq); + struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); + struct sched_entity *se; + int enqueue = 1; + long task_delta; + + se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; + + cfs_rq->throttled = 0; + raw_spin_lock(&cfs_b->lock); + list_del_rcu(&cfs_rq->throttled_list); + raw_spin_unlock(&cfs_b->lock); + + if (!cfs_rq->load.weight) + return; + + task_delta = cfs_rq->h_nr_running; + for_each_sched_entity(se) { + if (se->on_rq) + enqueue = 0; + + cfs_rq = cfs_rq_of(se); + if (enqueue) + enqueue_entity(cfs_rq, se, ENQUEUE_WAKEUP); + cfs_rq->h_nr_running += task_delta; + + if (cfs_rq_throttled(cfs_rq)) + break; + } + + if (!se) + rq->nr_running += task_delta; + + /* determine whether we need to wake up potentially idle cpu */ + if (rq->curr == rq->idle && rq->cfs.nr_running) + resched_task(rq->curr); +} + +static u64 distribute_cfs_runtime(struct cfs_bandwidth *cfs_b, + u64 remaining, u64 expires) +{ + struct cfs_rq *cfs_rq; + u64 runtime = remaining; + + rcu_read_lock(); + list_for_each_entry_rcu(cfs_rq, &cfs_b->throttled_cfs_rq, + throttled_list) { + struct rq *rq = rq_of(cfs_rq); + + raw_spin_lock(&rq->lock); + if (!cfs_rq_throttled(cfs_rq)) + goto next; + + runtime = -cfs_rq->runtime_remaining + 1; + if (runtime > remaining) + runtime = remaining; + remaining -= runtime; + + cfs_rq->runtime_remaining += runtime; + cfs_rq->runtime_expires = expires; + + /* we check whether we're throttled above */ + if (cfs_rq->runtime_remaining > 0) + unthrottle_cfs_rq(cfs_rq); + +next: + raw_spin_unlock(&rq->lock); + + if (!remaining) + break; + } + rcu_read_unlock(); + + return remaining; +} + /* * Responsible for refilling a task_group's bandwidth and unthrottling its * cfs_rqs as appropriate. 
If there has been no activity within the last @@ -1447,23 +1525,64 @@ static __used void throttle_cfs_rq(struct cfs_rq *cfs_rq) */ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun) { - int idle = 1; + u64 runtime, runtime_expires; + int idle = 1, throttled; raw_spin_lock(&cfs_b->lock); /* no need to continue the timer with no bandwidth constraint */ if (cfs_b->quota == RUNTIME_INF) goto out_unlock; - idle = cfs_b->idle; + throttled = !list_empty(&cfs_b->throttled_cfs_rq); + /* idle depends on !throttled (for the case of a large deficit) */ + idle = cfs_b->idle && !throttled; + /* if we're going inactive then everything else can be deferred */ if (idle) goto out_unlock; __refill_cfs_bandwidth_runtime(cfs_b); + if (!throttled) { + /* mark as potentially idle for the upcoming period */ + cfs_b->idle = 1; + goto out_unlock; + } - /* mark as potentially idle for the upcoming period */ - cfs_b->idle = 1; + /* + * There are throttled entities so we must first use the new bandwidth + * to unthrottle them before making it generally available. This + * ensures that all existing debts will be paid before a new cfs_rq is + * allowed to run. + */ + runtime = cfs_b->runtime; + runtime_expires = cfs_b->runtime_expires; + cfs_b->runtime = 0; + + /* + * This check is repeated as we are holding onto the new bandwidth + * while we unthrottle. This can potentially race with an unthrottled + * group trying to acquire new bandwidth from the global pool. + */ + while (throttled && runtime > 0) { + raw_spin_unlock(&cfs_b->lock); + /* we can't nest cfs_b->lock while distributing bandwidth */ + runtime = distribute_cfs_runtime(cfs_b, runtime, + runtime_expires); + raw_spin_lock(&cfs_b->lock); + + throttled = !list_empty(&cfs_b->throttled_cfs_rq); + } + + /* return (any) remaining runtime */ + cfs_b->runtime = runtime; + /* + * While we are ensured activity in the period following an + * unthrottle, this also covers the case in which the new bandwidth is + * insufficient to cover the existing bandwidth deficit. (Forcing the + * timer to remain active while there are any throttled entities.) + */ + cfs_b->idle = 0; out_unlock: if (idle) cfs_b->timer_active = 0; From 8277434ef1202ce30315f8edb3fc760aa6e74493 Mon Sep 17 00:00:00 2001 From: Paul Turner Date: Thu, 21 Jul 2011 09:43:35 -0700 Subject: [PATCH 21/46] sched: Allow for positional tg_tree walks Extend walk_tg_tree to accept a positional argument static int walk_tg_tree_from(struct task_group *from, tg_visitor down, tg_visitor up, void *data) Existing semantics are preserved, caller must hold rcu_lock() or sufficient analogue. Signed-off-by: Paul Turner Reviewed-by: Hidetoshi Seto Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/20110721184757.677889157@google.com Signed-off-by: Ingo Molnar --- kernel/sched.c | 50 +++++++++++++++++++++++++++++++++++++------------- 1 file changed, 37 insertions(+), 13 deletions(-) diff --git a/kernel/sched.c b/kernel/sched.c index 4bbabc2c4a7..8ec1e7ac289 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -1591,20 +1591,23 @@ static inline void dec_cpu_load(struct rq *rq, unsigned long load) typedef int (*tg_visitor)(struct task_group *, void *); /* - * Iterate the full tree, calling @down when first entering a node and @up when - * leaving it for the final time. + * Iterate task_group tree rooted at *from, calling @down when first entering a + * node and @up when leaving it for the final time. + * + * Caller must hold rcu_lock or sufficient equivalent. 
*/ -static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data) +static int walk_tg_tree_from(struct task_group *from, + tg_visitor down, tg_visitor up, void *data) { struct task_group *parent, *child; int ret; - rcu_read_lock(); - parent = &root_task_group; + parent = from; + down: ret = (*down)(parent, data); if (ret) - goto out_unlock; + goto out; list_for_each_entry_rcu(child, &parent->children, siblings) { parent = child; goto down; @@ -1613,19 +1616,29 @@ up: continue; } ret = (*up)(parent, data); - if (ret) - goto out_unlock; + if (ret || parent == from) + goto out; child = parent; parent = parent->parent; if (parent) goto up; -out_unlock: - rcu_read_unlock(); - +out: return ret; } +/* + * Iterate the full tree, calling @down when first entering a node and @up when + * leaving it for the final time. + * + * Caller must hold rcu_lock or sufficient equivalent. + */ + +static inline int walk_tg_tree(tg_visitor down, tg_visitor up, void *data) +{ + return walk_tg_tree_from(&root_task_group, down, up, data); +} + static int tg_nop(struct task_group *tg, void *data) { return 0; @@ -8870,13 +8883,19 @@ static int tg_rt_schedulable(struct task_group *tg, void *data) static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) { + int ret; + struct rt_schedulable_data data = { .tg = tg, .rt_period = period, .rt_runtime = runtime, }; - return walk_tg_tree(tg_rt_schedulable, tg_nop, &data); + rcu_read_lock(); + ret = walk_tg_tree(tg_rt_schedulable, tg_nop, &data); + rcu_read_unlock(); + + return ret; } static int tg_set_rt_bandwidth(struct task_group *tg, @@ -9333,6 +9352,7 @@ static int tg_cfs_schedulable_down(struct task_group *tg, void *data) static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota) { + int ret; struct cfs_schedulable_data data = { .tg = tg, .period = period, @@ -9344,7 +9364,11 @@ static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota) do_div(data.quota, NSEC_PER_USEC); } - return walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data); + rcu_read_lock(); + ret = walk_tg_tree(tg_cfs_schedulable_down, tg_nop, &data); + rcu_read_unlock(); + + return ret; } #endif /* CONFIG_CFS_BANDWIDTH */ #endif /* CONFIG_FAIR_GROUP_SCHED */ From 64660c864f46202b932b911a69deb09805bdbaf8 Mon Sep 17 00:00:00 2001 From: Paul Turner Date: Thu, 21 Jul 2011 09:43:36 -0700 Subject: [PATCH 22/46] sched: Prevent interactions with throttled entities From the perspective of load-balance and shares distribution, throttled entities should be invisible. However, both of these operations work on 'active' lists and are not inherently aware of what group hierarchies may be present. In some cases this may be side-stepped (e.g. we could sideload via tg_load_down in load balance) while in others (e.g. update_shares()) it is more difficult to compute without incurring some O(n^2) costs. Instead, track hierarchicaal throttled state at time of transition. This allows us to easily identify whether an entity belongs to a throttled hierarchy and avoid incorrect interactions with it. Also, when an entity leaves a throttled hierarchy we need to advance its time averaging for shares averaging so that the elapsed throttled time is not considered as part of the cfs_rq's operation. We also use this information to prevent buddy interactions in the wakeup and yield_to() paths. 
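The hierarchical state is cheap to keep and to query. A minimal sketch of the idea, a per-group counter adjusted over the whole subtree when throttling or unthrottling so that any descendant can answer in O(1), could look like this (illustrative names, fixed fan-out and plain recursion instead of the tg-tree walker):

  /* illustrative sketch of hierarchical throttle state, not kernel code */
  #include <stdio.h>

  struct group {
          struct group *children[4];      /* small fixed fan-out for the sketch */
          int nr_children;
          int throttle_count;             /* throttled ancestors, incl. self */
  };

  /* walk the subtree rooted at g, adjusting the hierarchical count */
  static void adjust_subtree(struct group *g, int delta)
  {
          g->throttle_count += delta;
          for (int i = 0; i < g->nr_children; i++)
                  adjust_subtree(g->children[i], delta);
  }

  static void throttle(struct group *g)   { adjust_subtree(g, +1); }
  static void unthrottle(struct group *g) { adjust_subtree(g, -1); }

  /* O(1): a group is inside a throttled hierarchy iff its count is non-zero */
  static int throttled_hierarchy(struct group *g)
  {
          return g->throttle_count != 0;
  }

  int main(void)
  {
          struct group leaf = { .nr_children = 0 };
          struct group mid  = { .children = { &leaf }, .nr_children = 1 };

          throttle(&mid);
          printf("leaf in throttled hierarchy? %d\n", throttled_hierarchy(&leaf));
          unthrottle(&mid);
          printf("leaf in throttled hierarchy? %d\n", throttled_hierarchy(&leaf));
          return 0;
  }

Load balance and shares updates then only need this O(1) check instead of a walk to the root.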
Signed-off-by: Paul Turner Reviewed-by: Hidetoshi Seto Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/20110721184757.777916795@google.com Signed-off-by: Ingo Molnar --- kernel/sched.c | 2 +- kernel/sched_fair.c | 99 ++++++++++++++++++++++++++++++++++++++++++--- 2 files changed, 94 insertions(+), 7 deletions(-) diff --git a/kernel/sched.c b/kernel/sched.c index 8ec1e7ac289..5db05f6fb47 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -402,7 +402,7 @@ struct cfs_rq { u64 runtime_expires; s64 runtime_remaining; - int throttled; + int throttled, throttle_count; struct list_head throttled_list; #endif #endif diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 76411950ff3..5a2089492a9 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -706,6 +706,8 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se) } #ifdef CONFIG_FAIR_GROUP_SCHED +/* we need this in update_cfs_load and load-balance functions below */ +static inline int throttled_hierarchy(struct cfs_rq *cfs_rq); # ifdef CONFIG_SMP static void update_cfs_rq_load_contribution(struct cfs_rq *cfs_rq, int global_update) @@ -728,7 +730,7 @@ static void update_cfs_load(struct cfs_rq *cfs_rq, int global_update) u64 now, delta; unsigned long load = cfs_rq->load.weight; - if (cfs_rq->tg == &root_task_group) + if (cfs_rq->tg == &root_task_group || throttled_hierarchy(cfs_rq)) return; now = rq_of(cfs_rq)->clock_task; @@ -837,7 +839,7 @@ static void update_cfs_shares(struct cfs_rq *cfs_rq) tg = cfs_rq->tg; se = tg->se[cpu_of(rq_of(cfs_rq))]; - if (!se) + if (!se || throttled_hierarchy(cfs_rq)) return; #ifndef CONFIG_SMP if (likely(se->load.weight == tg->shares)) @@ -1403,6 +1405,65 @@ static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) return cfs_rq->throttled; } +/* check whether cfs_rq, or any parent, is throttled */ +static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) +{ + return cfs_rq->throttle_count; +} + +/* + * Ensure that neither of the group entities corresponding to src_cpu or + * dest_cpu are members of a throttled hierarchy when performing group + * load-balance operations. 
+ */ +static inline int throttled_lb_pair(struct task_group *tg, + int src_cpu, int dest_cpu) +{ + struct cfs_rq *src_cfs_rq, *dest_cfs_rq; + + src_cfs_rq = tg->cfs_rq[src_cpu]; + dest_cfs_rq = tg->cfs_rq[dest_cpu]; + + return throttled_hierarchy(src_cfs_rq) || + throttled_hierarchy(dest_cfs_rq); +} + +/* updated child weight may affect parent so we have to do this bottom up */ +static int tg_unthrottle_up(struct task_group *tg, void *data) +{ + struct rq *rq = data; + struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; + + cfs_rq->throttle_count--; +#ifdef CONFIG_SMP + if (!cfs_rq->throttle_count) { + u64 delta = rq->clock_task - cfs_rq->load_stamp; + + /* leaving throttled state, advance shares averaging windows */ + cfs_rq->load_stamp += delta; + cfs_rq->load_last += delta; + + /* update entity weight now that we are on_rq again */ + update_cfs_shares(cfs_rq); + } +#endif + + return 0; +} + +static int tg_throttle_down(struct task_group *tg, void *data) +{ + struct rq *rq = data; + struct cfs_rq *cfs_rq = tg->cfs_rq[cpu_of(rq)]; + + /* group is entering throttled state, record last load */ + if (!cfs_rq->throttle_count) + update_cfs_load(cfs_rq, 0); + cfs_rq->throttle_count++; + + return 0; +} + static __used void throttle_cfs_rq(struct cfs_rq *cfs_rq) { struct rq *rq = rq_of(cfs_rq); @@ -1413,7 +1474,9 @@ static __used void throttle_cfs_rq(struct cfs_rq *cfs_rq) se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))]; /* account load preceding throttle */ - update_cfs_load(cfs_rq, 0); + rcu_read_lock(); + walk_tg_tree_from(cfs_rq->tg, tg_throttle_down, tg_nop, (void *)rq); + rcu_read_unlock(); task_delta = cfs_rq->h_nr_running; for_each_sched_entity(se) { @@ -1454,6 +1517,10 @@ static void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) list_del_rcu(&cfs_rq->throttled_list); raw_spin_unlock(&cfs_b->lock); + update_rq_clock(rq); + /* update hierarchical throttle state */ + walk_tg_tree_from(cfs_rq->tg, tg_nop, tg_unthrottle_up, (void *)rq); + if (!cfs_rq->load.weight) return; @@ -1598,6 +1665,17 @@ static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) { return 0; } + +static inline int throttled_hierarchy(struct cfs_rq *cfs_rq) +{ + return 0; +} + +static inline int throttled_lb_pair(struct task_group *tg, + int src_cpu, int dest_cpu) +{ + return 0; +} #endif /************************************************** @@ -2493,6 +2571,9 @@ move_one_task(struct rq *this_rq, int this_cpu, struct rq *busiest, for_each_leaf_cfs_rq(busiest, cfs_rq) { list_for_each_entry_safe(p, n, &cfs_rq->tasks, se.group_node) { + if (throttled_lb_pair(task_group(p), + busiest->cpu, this_cpu)) + break; if (!can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned)) @@ -2608,8 +2689,13 @@ static void update_shares(int cpu) * Iterates the task_group tree in a bottom up fashion, see * list_add_leaf_cfs_rq() for details. 
*/ - for_each_leaf_cfs_rq(rq, cfs_rq) + for_each_leaf_cfs_rq(rq, cfs_rq) { + /* throttled entities do not contribute to load */ + if (throttled_hierarchy(cfs_rq)) + continue; + update_shares_cpu(cfs_rq->tg, cpu); + } rcu_read_unlock(); } @@ -2659,9 +2745,10 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest, u64 rem_load, moved_load; /* - * empty group + * empty group or part of a throttled hierarchy */ - if (!busiest_cfs_rq->task_weight) + if (!busiest_cfs_rq->task_weight || + throttled_lb_pair(busiest_cfs_rq->tg, cpu_of(busiest), this_cpu)) continue; rem_load = (u64)rem_load_move * busiest_weight; From 5238cdd3873e67a98b28c1161d65d2a615c320a3 Mon Sep 17 00:00:00 2001 From: Paul Turner Date: Thu, 21 Jul 2011 09:43:37 -0700 Subject: [PATCH 23/46] sched: Prevent buddy interactions with throttled entities Buddies allow us to select "on-rq" entities without actually selecting them from a cfs_rq's rb_tree. As a result we must ensure that throttled entities are not falsely nominated as buddies. The fact that entities are dequeued within throttle_entity is not sufficient for clearing buddy status as the nomination may occur after throttling. Signed-off-by: Paul Turner Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/20110721184757.886850167@google.com Signed-off-by: Ingo Molnar --- kernel/sched_fair.c | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 5a2089492a9..1d4acbea9e6 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -2348,6 +2348,15 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ if (unlikely(se == pse)) return; + /* + * This is possible from callers such as pull_task(), in which we + * unconditionally check_prempt_curr() after an enqueue (which may have + * lead to a throttle). This both saves work and prevents false + * next-buddy nomination below. + */ + if (unlikely(throttled_hierarchy(cfs_rq_of(pse)))) + return; + if (sched_feat(NEXT_BUDDY) && scale && !(wake_flags & WF_FORK)) { set_next_buddy(pse); next_buddy_marked = 1; @@ -2356,6 +2365,12 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_ /* * We can come here with TIF_NEED_RESCHED already set from new task * wake up path. + * + * Note: this also catches the edge-case of curr being in a throttled + * group (e.g. via set_curr_task), since update_curr() (in the + * enqueue of curr) will have resulted in resched being set. This + * prevents us from potentially nominating it as a false LAST_BUDDY + * below. */ if (test_tsk_need_resched(curr)) return; @@ -2474,7 +2489,8 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preemp { struct sched_entity *se = &p->se; - if (!se->on_rq) + /* throttled hierarchies are not runnable */ + if (!se->on_rq || throttled_hierarchy(cfs_rq_of(se))) return false; /* Tell the scheduler that we'd really like pse to run next. */ From 8cb120d3e41a0464a559d639d519cef563717a4e Mon Sep 17 00:00:00 2001 From: Paul Turner Date: Thu, 21 Jul 2011 09:43:38 -0700 Subject: [PATCH 24/46] sched: Migrate throttled tasks on HOTPLUG Throttled tasks are invisisble to cpu-offline since they are not eligible for selection by pick_next_task(). The regular 'escape' path for a thread that is blocked at offline is via ttwu->select_task_rq, however this will not handle a throttled group since there are no individual thread wakeups on an unthrottle. 
Resolve this by unthrottling offline cpus so that threads can be migrated. Signed-off-by: Paul Turner Reviewed-by: Hidetoshi Seto Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/20110721184757.989000590@google.com Signed-off-by: Ingo Molnar --- kernel/sched.c | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/kernel/sched.c b/kernel/sched.c index 5db05f6fb47..397317248dd 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -6335,6 +6335,30 @@ static void calc_global_load_remove(struct rq *rq) rq->calc_load_active = 0; } +#ifdef CONFIG_CFS_BANDWIDTH +static void unthrottle_offline_cfs_rqs(struct rq *rq) +{ + struct cfs_rq *cfs_rq; + + for_each_leaf_cfs_rq(rq, cfs_rq) { + struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); + + if (!cfs_rq->runtime_enabled) + continue; + + /* + * clock_task is not advancing so we just need to make sure + * there's some valid quota amount + */ + cfs_rq->runtime_remaining = cfs_b->quota; + if (cfs_rq_throttled(cfs_rq)) + unthrottle_cfs_rq(cfs_rq); + } +} +#else +static void unthrottle_offline_cfs_rqs(struct rq *rq) {} +#endif + /* * Migrate all tasks from the rq, sleeping tasks will be migrated by * try_to_wake_up()->select_task_rq(). @@ -6360,6 +6384,9 @@ static void migrate_tasks(unsigned int dead_cpu) */ rq->stop = NULL; + /* Ensure any throttled groups are reachable by pick_next_task */ + unthrottle_offline_cfs_rqs(rq); + for ( ; ; ) { /* * There's this thread running, bail when that's the only From d3d9dc3302368269acf94b7381663b93000fe2fe Mon Sep 17 00:00:00 2001 From: Paul Turner Date: Thu, 21 Jul 2011 09:43:39 -0700 Subject: [PATCH 25/46] sched: Throttle entities exceeding their allowed bandwidth With the machinery in place to throttle and unthrottle entities, as well as handle their participation (or lack there of) we can now enable throttling. There are 2 points that we must check whether it's time to set throttled state: put_prev_entity() and enqueue_entity(). - put_prev_entity() is the typical throttle path, we reach it by exceeding our allocated run-time within update_curr()->account_cfs_rq_runtime() and going through a reschedule. - enqueue_entity() covers the case of a wake-up into an already throttled group. In this case we know the group cannot be on_rq and can throttle immediately. 
Checks are added at time of put_prev_entity() and enqueue_entity() Signed-off-by: Paul Turner Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/20110721184758.091415417@google.com Signed-off-by: Ingo Molnar --- kernel/sched_fair.c | 52 +++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 50 insertions(+), 2 deletions(-) diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 1d4acbea9e6..f9f671a7d0a 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -970,6 +970,8 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial) se->vruntime = vruntime; } +static void check_enqueue_throttle(struct cfs_rq *cfs_rq); + static void enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) { @@ -999,8 +1001,10 @@ enqueue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) __enqueue_entity(cfs_rq, se); se->on_rq = 1; - if (cfs_rq->nr_running == 1) + if (cfs_rq->nr_running == 1) { list_add_leaf_cfs_rq(cfs_rq); + check_enqueue_throttle(cfs_rq); + } } static void __clear_buddies_last(struct sched_entity *se) @@ -1202,6 +1206,8 @@ static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq) return se; } +static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq); + static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) { /* @@ -1211,6 +1217,9 @@ static void put_prev_entity(struct cfs_rq *cfs_rq, struct sched_entity *prev) if (prev->on_rq) update_curr(cfs_rq); + /* throttle cfs_rqs exceeding runtime */ + check_cfs_rq_runtime(cfs_rq); + check_spread(cfs_rq, prev); if (prev->on_rq) { update_stats_wait_start(cfs_rq, prev); @@ -1464,7 +1473,7 @@ static int tg_throttle_down(struct task_group *tg, void *data) return 0; } -static __used void throttle_cfs_rq(struct cfs_rq *cfs_rq) +static void throttle_cfs_rq(struct cfs_rq *cfs_rq) { struct rq *rq = rq_of(cfs_rq); struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); @@ -1657,9 +1666,48 @@ out_unlock: return idle; } + +/* + * When a group wakes up we want to make sure that its quota is not already + * expired/exceeded, otherwise it may be allowed to steal additional ticks of + * runtime as update_curr() throttling can not not trigger until it's on-rq. + */ +static void check_enqueue_throttle(struct cfs_rq *cfs_rq) +{ + /* an active group must be handled by the update_curr()->put() path */ + if (!cfs_rq->runtime_enabled || cfs_rq->curr) + return; + + /* ensure the group is not already throttled */ + if (cfs_rq_throttled(cfs_rq)) + return; + + /* update runtime allocation */ + account_cfs_rq_runtime(cfs_rq, 0); + if (cfs_rq->runtime_remaining <= 0) + throttle_cfs_rq(cfs_rq); +} + +/* conditionally throttle active cfs_rq's from put_prev_entity() */ +static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) +{ + if (likely(!cfs_rq->runtime_enabled || cfs_rq->runtime_remaining > 0)) + return; + + /* + * it's possible for a throttled entity to be forced into a running + * state (e.g. set_curr_task), in this case we're finished. 
+ */ + if (cfs_rq_throttled(cfs_rq)) + return; + + throttle_cfs_rq(cfs_rq); +} #else static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec) {} +static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} +static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) { From e8da1b18b32064c43881bceef0f051c2110c9ab9 Mon Sep 17 00:00:00 2001 From: Nikhil Rao Date: Thu, 21 Jul 2011 09:43:40 -0700 Subject: [PATCH 26/46] sched: Add exports tracking cfs bandwidth control statistics This change introduces statistics exports for the cpu sub-system, these are added through the use of a stat file similar to that exported by other subsystems. The following exports are included: nr_periods: number of periods in which execution occurred nr_throttled: the number of periods above in which execution was throttle throttled_time: cumulative wall-time that any cpus have been throttled for this group Signed-off-by: Paul Turner Signed-off-by: Nikhil Rao Signed-off-by: Bharata B Rao Reviewed-by: Hidetoshi Seto Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/20110721184758.198901931@google.com Signed-off-by: Ingo Molnar --- kernel/sched.c | 21 +++++++++++++++++++++ kernel/sched_fair.c | 7 +++++++ 2 files changed, 28 insertions(+) diff --git a/kernel/sched.c b/kernel/sched.c index 397317248dd..35c91859f8a 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -262,6 +262,9 @@ struct cfs_bandwidth { struct hrtimer period_timer; struct list_head throttled_cfs_rq; + /* statistics */ + int nr_periods, nr_throttled; + u64 throttled_time; #endif }; @@ -402,6 +405,7 @@ struct cfs_rq { u64 runtime_expires; s64 runtime_remaining; + u64 throttled_timestamp; int throttled, throttle_count; struct list_head throttled_list; #endif @@ -9397,6 +9401,19 @@ static int __cfs_schedulable(struct task_group *tg, u64 period, u64 quota) return ret; } + +static int cpu_stats_show(struct cgroup *cgrp, struct cftype *cft, + struct cgroup_map_cb *cb) +{ + struct task_group *tg = cgroup_tg(cgrp); + struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(tg); + + cb->fill(cb, "nr_periods", cfs_b->nr_periods); + cb->fill(cb, "nr_throttled", cfs_b->nr_throttled); + cb->fill(cb, "throttled_time", cfs_b->throttled_time); + + return 0; +} #endif /* CONFIG_CFS_BANDWIDTH */ #endif /* CONFIG_FAIR_GROUP_SCHED */ @@ -9443,6 +9460,10 @@ static struct cftype cpu_files[] = { .read_u64 = cpu_cfs_period_read_u64, .write_u64 = cpu_cfs_period_write_u64, }, + { + .name = "stat", + .read_map = cpu_stats_show, + }, #endif #ifdef CONFIG_RT_GROUP_SCHED { diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index f9f671a7d0a..d201f28c1de 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -1506,6 +1506,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq) rq->nr_running -= task_delta; cfs_rq->throttled = 1; + cfs_rq->throttled_timestamp = rq->clock; raw_spin_lock(&cfs_b->lock); list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq); raw_spin_unlock(&cfs_b->lock); @@ -1523,8 +1524,10 @@ static void unthrottle_cfs_rq(struct cfs_rq *cfs_rq) cfs_rq->throttled = 0; raw_spin_lock(&cfs_b->lock); + cfs_b->throttled_time += rq->clock - cfs_rq->throttled_timestamp; list_del_rcu(&cfs_rq->throttled_list); raw_spin_unlock(&cfs_b->lock); + cfs_rq->throttled_timestamp = 0; update_rq_clock(rq); /* update hierarchical throttle state */ @@ -1612,6 +1615,7 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun) throttled = 
!list_empty(&cfs_b->throttled_cfs_rq); /* idle depends on !throttled (for the case of a large deficit) */ idle = cfs_b->idle && !throttled; + cfs_b->nr_periods += overrun; /* if we're going inactive then everything else can be deferred */ if (idle) @@ -1625,6 +1629,9 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun) goto out_unlock; } + /* account preceding periods in which throttling occurred */ + cfs_b->nr_throttled += overrun; + /* * There are throttled entities so we must first use the new bandwidth * to unthrottle them before making it generally available. This From d8b4986d3dbc4fabc2054d63f1d31d6ed2fb1ca8 Mon Sep 17 00:00:00 2001 From: Paul Turner Date: Thu, 21 Jul 2011 09:43:41 -0700 Subject: [PATCH 27/46] sched: Return unused runtime on group dequeue When a local cfs_rq blocks we return the majority of its remaining quota to the global bandwidth pool for use by other runqueues. We do this only when the quota is current and there is more than min_cfs_rq_quota [1ms by default] of runtime remaining on the rq. In the case where there are throttled runqueues and we have sufficient bandwidth to meter out a slice, a second timer is kicked off to handle this delivery, unthrottling where appropriate. Using a 'worst case' antagonist which executes on each cpu for 1ms before moving onto the next on a fairly large machine: no quota generations: 197.47 ms /cgroup/a/cpuacct.usage 199.46 ms /cgroup/a/cpuacct.usage 205.46 ms /cgroup/a/cpuacct.usage 198.46 ms /cgroup/a/cpuacct.usage 208.39 ms /cgroup/a/cpuacct.usage Since we are allowed to use "stale" quota our usage is effectively bounded by the rate of input into the global pool and performance is relatively stable. with quota generations [1s increments]: 119.58 ms /cgroup/a/cpuacct.usage 119.65 ms /cgroup/a/cpuacct.usage 119.64 ms /cgroup/a/cpuacct.usage 119.63 ms /cgroup/a/cpuacct.usage 119.60 ms /cgroup/a/cpuacct.usage The large deficit here is due to quota generations (/intentionally/) preventing us from now using previously stranded slack quota. The cost is that this quota becomes unavailable. with quota generations and quota return: 200.09 ms /cgroup/a/cpuacct.usage 200.09 ms /cgroup/a/cpuacct.usage 198.09 ms /cgroup/a/cpuacct.usage 200.09 ms /cgroup/a/cpuacct.usage 200.06 ms /cgroup/a/cpuacct.usage By returning unused quota we're able to both stably consume our desired quota and prevent unintentional overages due to the abuse of slack quota from previous quota periods (especially on a large machine). 
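The dequeue-side return can be sketched as: keep a small local reserve, hand the rest back only if it still belongs to the current quota generation, and ask for a slack-timer kick when enough has accumulated while queues remain throttled. A stand-alone sketch with illustrative names and constants:

  /* illustrative sketch of slack-runtime return, not kernel code */
  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  #define MIN_LOCAL_RUNTIME 1000000LL     /* keep roughly 1ms locally */

  struct pool_sketch {
          uint64_t runtime;               /* global unassigned runtime */
          uint64_t expires;               /* current generation deadline */
          int      nr_throttled;          /* throttled queues waiting */
  };

  struct cfs_rq_sketch {
          int64_t  runtime_remaining;
          uint64_t expires;               /* generation the local runtime is from */
  };

  /* returns true when the slack timer should be armed */
  static bool return_runtime(struct cfs_rq_sketch *rq, struct pool_sketch *pool,
                             uint64_t slice)
  {
          int64_t slack = rq->runtime_remaining - MIN_LOCAL_RUNTIME;
          bool kick_timer = false;

          if (slack <= 0)
                  return false;

          /* only return runtime that belongs to the current quota generation */
          if (rq->expires == pool->expires) {
                  pool->runtime += slack;
                  if (pool->runtime > slice && pool->nr_throttled)
                          kick_timer = true;
          }
          /* either way, don't try to return it again */
          rq->runtime_remaining -= slack;
          return kick_timer;
  }

  int main(void)
  {
          struct pool_sketch pool = { .runtime = 0, .expires = 42,
                                      .nr_throttled = 1 };
          struct cfs_rq_sketch rq = { .runtime_remaining = 4000000,
                                      .expires = 42 };
          bool kick = return_runtime(&rq, &pool, 5000000ULL /* 5ms slice */);

          printf("pool=%llu local=%lld kick=%d\n",
                 (unsigned long long)pool.runtime,
                 (long long)rq.runtime_remaining, kick);
          return 0;
  }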
Signed-off-by: Paul Turner Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/20110721184758.306848658@google.com Signed-off-by: Ingo Molnar --- kernel/sched.c | 15 +++++- kernel/sched_fair.c | 108 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 122 insertions(+), 1 deletion(-) diff --git a/kernel/sched.c b/kernel/sched.c index 35c91859f8a..6baade0d764 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -259,7 +259,7 @@ struct cfs_bandwidth { u64 runtime_expires; int idle, timer_active; - struct hrtimer period_timer; + struct hrtimer period_timer, slack_timer; struct list_head throttled_cfs_rq; /* statistics */ @@ -421,6 +421,16 @@ static inline struct cfs_bandwidth *tg_cfs_bandwidth(struct task_group *tg) static inline u64 default_cfs_period(void); static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun); +static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b); + +static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer) +{ + struct cfs_bandwidth *cfs_b = + container_of(timer, struct cfs_bandwidth, slack_timer); + do_sched_cfs_slack_timer(cfs_b); + + return HRTIMER_NORESTART; +} static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer) { @@ -453,6 +463,8 @@ static void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b) INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq); hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); cfs_b->period_timer.function = sched_cfs_period_timer; + hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + cfs_b->slack_timer.function = sched_cfs_slack_timer; } static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) @@ -488,6 +500,7 @@ static void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b) static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b) { hrtimer_cancel(&cfs_b->period_timer); + hrtimer_cancel(&cfs_b->slack_timer); } #else static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index d201f28c1de..1ca2cd44d64 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -1052,6 +1052,8 @@ static void clear_buddies(struct cfs_rq *cfs_rq, struct sched_entity *se) __clear_buddies_skip(se); } +static void return_cfs_rq_runtime(struct cfs_rq *cfs_rq); + static void dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) { @@ -1090,6 +1092,9 @@ dequeue_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int flags) if (!(flags & DEQUEUE_SLEEP)) se->vruntime -= cfs_rq->min_vruntime; + /* return excess runtime on last dequeue */ + return_cfs_rq_runtime(cfs_rq); + update_min_vruntime(cfs_rq); update_cfs_shares(cfs_rq); } @@ -1674,6 +1679,108 @@ out_unlock: return idle; } +/* a cfs_rq won't donate quota below this amount */ +static const u64 min_cfs_rq_runtime = 1 * NSEC_PER_MSEC; +/* minimum remaining period time to redistribute slack quota */ +static const u64 min_bandwidth_expiration = 2 * NSEC_PER_MSEC; +/* how long we wait to gather additional slack before distributing */ +static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC; + +/* are we near the end of the current quota period? */ +static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire) +{ + struct hrtimer *refresh_timer = &cfs_b->period_timer; + u64 remaining; + + /* if the call-back is running a quota refresh is already occurring */ + if (hrtimer_callback_running(refresh_timer)) + return 1; + + /* is a quota refresh about to occur? 
*/ + remaining = ktime_to_ns(hrtimer_expires_remaining(refresh_timer)); + if (remaining < min_expire) + return 1; + + return 0; +} + +static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b) +{ + u64 min_left = cfs_bandwidth_slack_period + min_bandwidth_expiration; + + /* if there's a quota refresh soon don't bother with slack */ + if (runtime_refresh_within(cfs_b, min_left)) + return; + + start_bandwidth_timer(&cfs_b->slack_timer, + ns_to_ktime(cfs_bandwidth_slack_period)); +} + +/* we know any runtime found here is valid as update_curr() precedes return */ +static void __return_cfs_rq_runtime(struct cfs_rq *cfs_rq) +{ + struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg); + s64 slack_runtime = cfs_rq->runtime_remaining - min_cfs_rq_runtime; + + if (slack_runtime <= 0) + return; + + raw_spin_lock(&cfs_b->lock); + if (cfs_b->quota != RUNTIME_INF && + cfs_rq->runtime_expires == cfs_b->runtime_expires) { + cfs_b->runtime += slack_runtime; + + /* we are under rq->lock, defer unthrottling using a timer */ + if (cfs_b->runtime > sched_cfs_bandwidth_slice() && + !list_empty(&cfs_b->throttled_cfs_rq)) + start_cfs_slack_bandwidth(cfs_b); + } + raw_spin_unlock(&cfs_b->lock); + + /* even if it's not valid for return we don't want to try again */ + cfs_rq->runtime_remaining -= slack_runtime; +} + +static __always_inline void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) +{ + if (!cfs_rq->runtime_enabled || !cfs_rq->nr_running) + return; + + __return_cfs_rq_runtime(cfs_rq); +} + +/* + * This is done with a timer (instead of inline with bandwidth return) since + * it's necessary to juggle rq->locks to unthrottle their respective cfs_rqs. + */ +static void do_sched_cfs_slack_timer(struct cfs_bandwidth *cfs_b) +{ + u64 runtime = 0, slice = sched_cfs_bandwidth_slice(); + u64 expires; + + /* confirm we're still not at a refresh boundary */ + if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) + return; + + raw_spin_lock(&cfs_b->lock); + if (cfs_b->quota != RUNTIME_INF && cfs_b->runtime > slice) { + runtime = cfs_b->runtime; + cfs_b->runtime = 0; + } + expires = cfs_b->runtime_expires; + raw_spin_unlock(&cfs_b->lock); + + if (!runtime) + return; + + runtime = distribute_cfs_runtime(cfs_b, runtime, expires); + + raw_spin_lock(&cfs_b->lock); + if (expires == cfs_b->runtime_expires) + cfs_b->runtime = runtime; + raw_spin_unlock(&cfs_b->lock); +} + /* * When a group wakes up we want to make sure that its quota is not already * expired/exceeded, otherwise it may be allowed to steal additional ticks of @@ -1715,6 +1822,7 @@ static void account_cfs_rq_runtime(struct cfs_rq *cfs_rq, unsigned long delta_exec) {} static void check_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} static void check_enqueue_throttle(struct cfs_rq *cfs_rq) {} +static void return_cfs_rq_runtime(struct cfs_rq *cfs_rq) {} static inline int cfs_rq_throttled(struct cfs_rq *cfs_rq) { From 88ebc08ea9f721d1345d5414288a308ea42ac458 Mon Sep 17 00:00:00 2001 From: Bharata B Rao Date: Thu, 21 Jul 2011 09:43:43 -0700 Subject: [PATCH 28/46] sched: Add documentation for bandwidth control Basic description of usage and effect for CFS Bandwidth Control. 
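[ As a usage illustration only (hypothetical userspace helper, not part
  of this patch): reading back the three cpu.stat fields the document
  below describes, assuming each line of the file is a "name value"
  pair: ]

	#include <stdio.h>
	#include <string.h>

	struct bwc_stats {
		unsigned long long nr_periods;
		unsigned long long nr_throttled;
		unsigned long long throttled_time;	/* nanoseconds */
	};

	static int read_cpu_stat(const char *path, struct bwc_stats *s)
	{
		char key[32];
		unsigned long long val;
		FILE *f = fopen(path, "r");

		if (!f)
			return -1;

		while (fscanf(f, "%31s %llu", key, &val) == 2) {
			if (!strcmp(key, "nr_periods"))
				s->nr_periods = val;
			else if (!strcmp(key, "nr_throttled"))
				s->nr_throttled = val;
			else if (!strcmp(key, "throttled_time"))
				s->throttled_time = val;
		}

		fclose(f);
		return 0;
	}
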
Signed-off-by: Bharata B Rao Signed-off-by: Paul Turner Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/20110721184758.498036116@google.com Signed-off-by: Ingo Molnar --- Documentation/scheduler/sched-bwc.txt | 122 ++++++++++++++++++++++++++ 1 file changed, 122 insertions(+) create mode 100644 Documentation/scheduler/sched-bwc.txt diff --git a/Documentation/scheduler/sched-bwc.txt b/Documentation/scheduler/sched-bwc.txt new file mode 100644 index 00000000000..f6b1873f68a --- /dev/null +++ b/Documentation/scheduler/sched-bwc.txt @@ -0,0 +1,122 @@ +CFS Bandwidth Control +===================== + +[ This document only discusses CPU bandwidth control for SCHED_NORMAL. + The SCHED_RT case is covered in Documentation/scheduler/sched-rt-group.txt ] + +CFS bandwidth control is a CONFIG_FAIR_GROUP_SCHED extension which allows the +specification of the maximum CPU bandwidth available to a group or hierarchy. + +The bandwidth allowed for a group is specified using a quota and period. Within +each given "period" (microseconds), a group is allowed to consume only up to +"quota" microseconds of CPU time. When the CPU bandwidth consumption of a +group exceeds this limit (for that period), the tasks belonging to its +hierarchy will be throttled and are not allowed to run again until the next +period. + +A group's unused runtime is globally tracked, being refreshed with quota units +above at each period boundary. As threads consume this bandwidth it is +transferred to cpu-local "silos" on a demand basis. The amount transferred +within each of these updates is tunable and described as the "slice". + +Management +---------- +Quota and period are managed within the cpu subsystem via cgroupfs. + +cpu.cfs_quota_us: the total available run-time within a period (in microseconds) +cpu.cfs_period_us: the length of a period (in microseconds) +cpu.stat: exports throttling statistics [explained further below] + +The default values are: + cpu.cfs_period_us=100ms + cpu.cfs_quota=-1 + +A value of -1 for cpu.cfs_quota_us indicates that the group does not have any +bandwidth restriction in place, such a group is described as an unconstrained +bandwidth group. This represents the traditional work-conserving behavior for +CFS. + +Writing any (valid) positive value(s) will enact the specified bandwidth limit. +The minimum quota allowed for the quota or period is 1ms. There is also an +upper bound on the period length of 1s. Additional restrictions exist when +bandwidth limits are used in a hierarchical fashion, these are explained in +more detail below. + +Writing any negative value to cpu.cfs_quota_us will remove the bandwidth limit +and return the group to an unconstrained state once more. + +Any updates to a group's bandwidth specification will result in it becoming +unthrottled if it is in a constrained state. + +System wide settings +-------------------- +For efficiency run-time is transferred between the global pool and CPU local +"silos" in a batch fashion. This greatly reduces global accounting pressure +on large systems. The amount transferred each time such an update is required +is described as the "slice". + +This is tunable via procfs: + /proc/sys/kernel/sched_cfs_bandwidth_slice_us (default=5ms) + +Larger slice values will reduce transfer overheads, while smaller values allow +for more fine-grained consumption. + +Statistics +---------- +A group's bandwidth statistics are exported via 3 fields in cpu.stat. + +cpu.stat: +- nr_periods: Number of enforcement intervals that have elapsed. 
+- nr_throttled: Number of times the group has been throttled/limited. +- throttled_time: The total time duration (in nanoseconds) for which entities + of the group have been throttled. + +This interface is read-only. + +Hierarchical considerations +--------------------------- +The interface enforces that an individual entity's bandwidth is always +attainable, that is: max(c_i) <= C. However, over-subscription in the +aggregate case is explicitly allowed to enable work-conserving semantics +within a hierarchy. + e.g. \Sum (c_i) may exceed C +[ Where C is the parent's bandwidth, and c_i its children ] + + +There are two ways in which a group may become throttled: + a. it fully consumes its own quota within a period + b. a parent's quota is fully consumed within its period + +In case b) above, even though the child may have runtime remaining it will not +be allowed to until the parent's runtime is refreshed. + +Examples +-------- +1. Limit a group to 1 CPU worth of runtime. + + If period is 250ms and quota is also 250ms, the group will get + 1 CPU worth of runtime every 250ms. + + # echo 250000 > cpu.cfs_quota_us /* quota = 250ms */ + # echo 250000 > cpu.cfs_period_us /* period = 250ms */ + +2. Limit a group to 2 CPUs worth of runtime on a multi-CPU machine. + + With 500ms period and 1000ms quota, the group can get 2 CPUs worth of + runtime every 500ms. + + # echo 1000000 > cpu.cfs_quota_us /* quota = 1000ms */ + # echo 500000 > cpu.cfs_period_us /* period = 500ms */ + + The larger period here allows for increased burst capacity. + +3. Limit a group to 20% of 1 CPU. + + With 50ms period, 10ms quota will be equivalent to 20% of 1 CPU. + + # echo 10000 > cpu.cfs_quota_us /* quota = 10ms */ + # echo 50000 > cpu.cfs_period_us /* period = 50ms */ + + By using a small period here we are ensuring a consistent latency + response at the expense of burst capacity. + From f4cfb33ed980085b14a8f376281edb2e270f0244 Mon Sep 17 00:00:00 2001 From: Wang Xingchao Date: Fri, 16 Sep 2011 13:35:52 -0400 Subject: [PATCH 29/46] sched: Remove redundant test in check_preempt_tick() The caller already checks for nr_running > 1, therefore we don't have to do so again. 
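[ For reference, the preemption decision that remains after this
  cleanup, as a stand-alone sketch with plain integer arguments (the
  real code reads these values from the current and leftmost
  sched_entity): ]

	/* vdiff is curr->vruntime minus the leftmost entity's vruntime */
	static int tick_should_preempt(long long delta_exec,
				       long long ideal_runtime,
				       long long min_granularity,
				       long long vdiff)
	{
		if (delta_exec > ideal_runtime)
			return 1;
		if (delta_exec < min_granularity)
			return 0;
		if (vdiff < 0)
			return 0;
		return vdiff > ideal_runtime;
	}
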
Signed-off-by: Wang Xingchao Reviewed-by: Paul Turner Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1316194552-12019-1-git-send-email-xingchao.wang@intel.com Signed-off-by: Ingo Molnar --- kernel/sched_fair.c | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 1ca2cd44d64..fef0bfde7c8 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -1106,6 +1106,8 @@ static void check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) { unsigned long ideal_runtime, delta_exec; + struct sched_entity *se; + s64 delta; ideal_runtime = sched_slice(cfs_rq, curr); delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; @@ -1127,16 +1129,14 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) if (delta_exec < sysctl_sched_min_granularity) return; - if (cfs_rq->nr_running > 1) { - struct sched_entity *se = __pick_first_entity(cfs_rq); - s64 delta = curr->vruntime - se->vruntime; + se = __pick_first_entity(cfs_rq); + delta = curr->vruntime - se->vruntime; - if (delta < 0) - return; + if (delta < 0) + return; - if (delta > ideal_runtime) - resched_task(rq_of(cfs_rq)->curr); - } + if (delta > ideal_runtime) + resched_task(rq_of(cfs_rq)->curr); } static void From 557ab425429a5123d37f412ce3e6d6137cb621f8 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Fri, 16 Sep 2011 11:16:43 +0200 Subject: [PATCH 30/46] sched, tracing: Show PREEMPT_ACTIVE state in trace_sched_switch We had need to see the difference between scheduling a runnable task and a runnable task being involuntarily preempted. No app should rely on the old string output (the binary trace event record format is not changed). Signed-off-by: Peter Zijlstra Cc: Thomas Gleixner Cc: Steven Rostedt Link: http://lkml.kernel.org/r/1316164603.10174.11.camel@twins Signed-off-by: Ingo Molnar --- include/trace/events/sched.h | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h index f6334782a59..959ff18b63b 100644 --- a/include/trace/events/sched.h +++ b/include/trace/events/sched.h @@ -100,7 +100,7 @@ static inline long __trace_sched_switch_state(struct task_struct *p) * For all intents and purposes a preempted task is a running task. */ if (task_thread_info(p)->preempt_count & PREEMPT_ACTIVE) - state = TASK_RUNNING; + state = TASK_RUNNING | TASK_STATE_MAX; #endif return state; @@ -137,13 +137,14 @@ TRACE_EVENT(sched_switch, __entry->next_prio = next->prio; ), - TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s ==> next_comm=%s next_pid=%d next_prio=%d", + TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d", __entry->prev_comm, __entry->prev_pid, __entry->prev_prio, - __entry->prev_state ? - __print_flags(__entry->prev_state, "|", + __entry->prev_state & (TASK_STATE_MAX-1) ? + __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|", { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" }, { 16, "Z" }, { 32, "X" }, { 64, "x" }, { 128, "W" }) : "R", + __entry->prev_state & TASK_STATE_MAX ? 
"+" : "", __entry->next_comm, __entry->next_pid, __entry->next_prio) ); From 1230db8e1543c0471dd165727d34647ab098cc1e Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Thu, 8 Sep 2011 14:00:42 +0800 Subject: [PATCH 31/46] llist: Make some llist functions inline Because llist code will be used in performance critical scheduler code path, make llist_add() and llist_del_all() inline to avoid function calling overhead and related 'glue' overhead. Signed-off-by: Huang Ying Acked-by: Mathieu Desnoyers Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1315461646-1379-2-git-send-email-ying.huang@intel.com Signed-off-by: Ingo Molnar --- drivers/acpi/apei/Kconfig | 1 - include/linux/llist.h | 64 +++++++++++++++++++++++++++++++++++---- lib/Kconfig | 3 -- lib/Makefile | 4 +-- lib/llist.c | 40 ------------------------ 5 files changed, 59 insertions(+), 53 deletions(-) diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig index e3f47872ec2..f0c1ce95a0e 100644 --- a/drivers/acpi/apei/Kconfig +++ b/drivers/acpi/apei/Kconfig @@ -14,7 +14,6 @@ config ACPI_APEI_GHES depends on ACPI_APEI && X86 select ACPI_HED select IRQ_WORK - select LLIST select GENERIC_ALLOCATOR help Generic Hardware Error Source provides a way to report diff --git a/include/linux/llist.h b/include/linux/llist.h index aa0c8b5b3cd..3eccdfd6609 100644 --- a/include/linux/llist.h +++ b/include/linux/llist.h @@ -37,8 +37,28 @@ * architectures that don't have NMI-safe cmpxchg implementation, the * list can NOT be used in NMI handler. So code uses the list in NMI * handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG. + * + * Copyright 2010,2011 Intel Corp. + * Author: Huang Ying + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation; + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include +#include +#include + struct llist_head { struct llist_node *first; }; @@ -113,14 +133,46 @@ static inline void init_llist_head(struct llist_head *list) * test whether the list is empty without deleting something from the * list. 
*/ -static inline int llist_empty(const struct llist_head *head) +static inline bool llist_empty(const struct llist_head *head) { return ACCESS_ONCE(head->first) == NULL; } -void llist_add(struct llist_node *new, struct llist_head *head); -void llist_add_batch(struct llist_node *new_first, struct llist_node *new_last, - struct llist_head *head); -struct llist_node *llist_del_first(struct llist_head *head); -struct llist_node *llist_del_all(struct llist_head *head); +/** + * llist_add - add a new entry + * @new: new entry to be added + * @head: the head for your lock-less list + */ +static inline void llist_add(struct llist_node *new, struct llist_head *head) +{ + struct llist_node *entry, *old_entry; + +#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG + BUG_ON(in_nmi()); +#endif + + entry = head->first; + do { + old_entry = entry; + new->next = entry; + cpu_relax(); + } while ((entry = cmpxchg(&head->first, old_entry, new)) != old_entry); +} + +/** + * llist_del_all - delete all entries from lock-less list + * @head: the head of lock-less list to delete all entries + * + * If list is empty, return NULL, otherwise, delete all entries and + * return the pointer to the first entry. The order of entries + * deleted is from the newest to the oldest added one. + */ +static inline struct llist_node *llist_del_all(struct llist_head *head) +{ +#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG + BUG_ON(in_nmi()); +#endif + + return xchg(&head->first, NULL); +} #endif /* LLIST_H */ diff --git a/lib/Kconfig b/lib/Kconfig index 6c695ff9cab..32f3e5ae2be 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -276,7 +276,4 @@ config CORDIC so its calculations are in fixed point. Modules can select this when they require this function. Module will be called cordic. -config LLIST - bool - endmenu diff --git a/lib/Makefile b/lib/Makefile index 3f5bc6d903e..a4da283f5dc 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -22,7 +22,7 @@ lib-y += kobject.o kref.o klist.o obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \ - bsearch.o find_last_bit.o find_next_bit.o + bsearch.o find_last_bit.o find_next_bit.o llist.o obj-y += kstrtox.o obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o @@ -115,8 +115,6 @@ obj-$(CONFIG_CPU_RMAP) += cpu_rmap.o obj-$(CONFIG_CORDIC) += cordic.o -obj-$(CONFIG_LLIST) += llist.o - hostprogs-y := gen_crc32table clean-files := crc32table.h diff --git a/lib/llist.c b/lib/llist.c index da445724fa1..3e3fa9139c4 100644 --- a/lib/llist.c +++ b/lib/llist.c @@ -29,28 +29,6 @@ #include -/** - * llist_add - add a new entry - * @new: new entry to be added - * @head: the head for your lock-less list - */ -void llist_add(struct llist_node *new, struct llist_head *head) -{ - struct llist_node *entry, *old_entry; - -#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG - BUG_ON(in_nmi()); -#endif - - entry = head->first; - do { - old_entry = entry; - new->next = entry; - cpu_relax(); - } while ((entry = cmpxchg(&head->first, old_entry, new)) != old_entry); -} -EXPORT_SYMBOL_GPL(llist_add); - /** * llist_add_batch - add several linked entries in batch * @new_first: first entry in batch to be added @@ -109,21 +87,3 @@ struct llist_node *llist_del_first(struct llist_head *head) return entry; } EXPORT_SYMBOL_GPL(llist_del_first); - -/** - * llist_del_all - delete all entries from lock-less list - * @head: the head of lock-less list to delete all entries - * - * If list is empty, return NULL, otherwise, 
delete all entries and - * return the pointer to the first entry. The order of entries - * deleted is from the newest to the oldest added one. - */ -struct llist_node *llist_del_all(struct llist_head *head) -{ -#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG - BUG_ON(in_nmi()); -#endif - - return xchg(&head->first, NULL); -} -EXPORT_SYMBOL_GPL(llist_del_all); From 2c30245c65e8ebc3080b75ce65572ab8140bad0b Mon Sep 17 00:00:00 2001 From: Ingo Molnar Date: Tue, 4 Oct 2011 12:43:11 +0200 Subject: [PATCH 32/46] llist: Remove the platform-dependent NMI checks Remove the nmi() checks spread around the code. in_nmi() is not available on every architecture and it's a pretty obscure and ugly check in any case. Cc: Huang Ying Cc: Mathieu Desnoyers Cc: Peter Zijlstra Link: http://lkml.kernel.org/r/1315461646-1379-3-git-send-email-ying.huang@intel.com Signed-off-by: Ingo Molnar --- include/linux/llist.h | 12 ++---------- lib/llist.c | 12 ++---------- 2 files changed, 4 insertions(+), 20 deletions(-) diff --git a/include/linux/llist.h b/include/linux/llist.h index 3eccdfd6609..65fca1cbf51 100644 --- a/include/linux/llist.h +++ b/include/linux/llist.h @@ -35,8 +35,8 @@ * * The basic atomic operation of this list is cmpxchg on long. On * architectures that don't have NMI-safe cmpxchg implementation, the - * list can NOT be used in NMI handler. So code uses the list in NMI - * handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG. + * list can NOT be used in NMI handlers. So code that uses the list in + * an NMI handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG. * * Copyright 2010,2011 Intel Corp. * Author: Huang Ying @@ -147,10 +147,6 @@ static inline void llist_add(struct llist_node *new, struct llist_head *head) { struct llist_node *entry, *old_entry; -#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG - BUG_ON(in_nmi()); -#endif - entry = head->first; do { old_entry = entry; @@ -169,10 +165,6 @@ static inline void llist_add(struct llist_node *new, struct llist_head *head) */ static inline struct llist_node *llist_del_all(struct llist_head *head) { -#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG - BUG_ON(in_nmi()); -#endif - return xchg(&head->first, NULL); } #endif /* LLIST_H */ diff --git a/lib/llist.c b/lib/llist.c index 3e3fa9139c4..b445f2c8596 100644 --- a/lib/llist.c +++ b/lib/llist.c @@ -3,8 +3,8 @@ * * The basic atomic operation of this list is cmpxchg on long. On * architectures that don't have NMI-safe cmpxchg implementation, the - * list can NOT be used in NMI handler. So code uses the list in NMI - * handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG. + * list can NOT be used in NMI handlers. So code that uses the list in + * an NMI handler should depend on CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG. * * Copyright 2010,2011 Intel Corp. * Author: Huang Ying @@ -40,10 +40,6 @@ void llist_add_batch(struct llist_node *new_first, struct llist_node *new_last, { struct llist_node *entry, *old_entry; -#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG - BUG_ON(in_nmi()); -#endif - entry = head->first; do { old_entry = entry; @@ -71,10 +67,6 @@ struct llist_node *llist_del_first(struct llist_head *head) { struct llist_node *entry, *old_entry, *next; -#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG - BUG_ON(in_nmi()); -#endif - entry = head->first; do { if (entry == NULL) From a3127336b71f6833d1483c856dce91fe558dc3a9 Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Thu, 8 Sep 2011 14:00:44 +0800 Subject: [PATCH 33/46] llist: Move cpu_relax() to after the cmpxchg() If in llist_add()/etc. 
functions the first cmpxchg() call succeeds, it is not necessary to use cpu_relax() before the cmpxchg(). So cpu_relax() in a busy loop involving cmpxchg() should go after cmpxchg() instead of before that. This patch fixes this for all involved llist functions. Signed-off-by: Huang Ying Acked-by: Mathieu Desnoyers Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1315461646-1379-4-git-send-email-ying.huang@intel.com Signed-off-by: Ingo Molnar --- include/linux/llist.h | 7 +++++-- lib/llist.c | 14 ++++++++++---- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/include/linux/llist.h b/include/linux/llist.h index 65fca1cbf51..ca91875286b 100644 --- a/include/linux/llist.h +++ b/include/linux/llist.h @@ -148,11 +148,14 @@ static inline void llist_add(struct llist_node *new, struct llist_head *head) struct llist_node *entry, *old_entry; entry = head->first; - do { + for (;;) { old_entry = entry; new->next = entry; + entry = cmpxchg(&head->first, old_entry, new); + if (entry == old_entry) + break; cpu_relax(); - } while ((entry = cmpxchg(&head->first, old_entry, new)) != old_entry); + } } /** diff --git a/lib/llist.c b/lib/llist.c index b445f2c8596..6c69f1d14c4 100644 --- a/lib/llist.c +++ b/lib/llist.c @@ -41,11 +41,14 @@ void llist_add_batch(struct llist_node *new_first, struct llist_node *new_last, struct llist_node *entry, *old_entry; entry = head->first; - do { + for (;;) { old_entry = entry; new_last->next = entry; + entry = cmpxchg(&head->first, old_entry, new_first); + if (entry == old_entry) + break; cpu_relax(); - } while ((entry = cmpxchg(&head->first, old_entry, new_first)) != old_entry); + } } EXPORT_SYMBOL_GPL(llist_add_batch); @@ -68,13 +71,16 @@ struct llist_node *llist_del_first(struct llist_head *head) struct llist_node *entry, *old_entry, *next; entry = head->first; - do { + for (;;) { if (entry == NULL) return NULL; old_entry = entry; next = entry->next; + entry = cmpxchg(&head->first, old_entry, next); + if (entry == old_entry) + break; cpu_relax(); - } while ((entry = cmpxchg(&head->first, old_entry, next)) != old_entry); + } return entry; } From 781f7fd916fc77a862e20063ed3aeedf173234f9 Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Thu, 8 Sep 2011 14:00:45 +0800 Subject: [PATCH 34/46] llist: Return whether list is empty before adding in llist_add() Extend the llist_add*() functions to return a success indicator, this allows us in the scheduler code to send an IPI if the queue was empty. ( There's no effect on existing users, because the list_add_xxx() functions are inline, thus this will be optimized out by the compiler if not used by callers. ) Signed-off-by: Huang Ying Cc: Mathieu Desnoyers Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1315461646-1379-5-git-send-email-ying.huang@intel.com Signed-off-by: Ingo Molnar --- include/linux/llist.h | 6 +++++- lib/llist.c | 6 +++++- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/include/linux/llist.h b/include/linux/llist.h index ca91875286b..27bbdf5ddf8 100644 --- a/include/linux/llist.h +++ b/include/linux/llist.h @@ -142,8 +142,10 @@ static inline bool llist_empty(const struct llist_head *head) * llist_add - add a new entry * @new: new entry to be added * @head: the head for your lock-less list + * + * Return whether list is empty before adding. 
*/ -static inline void llist_add(struct llist_node *new, struct llist_head *head) +static inline bool llist_add(struct llist_node *new, struct llist_head *head) { struct llist_node *entry, *old_entry; @@ -156,6 +158,8 @@ static inline void llist_add(struct llist_node *new, struct llist_head *head) break; cpu_relax(); } + + return old_entry == NULL; } /** diff --git a/lib/llist.c b/lib/llist.c index 6c69f1d14c4..878985c4d19 100644 --- a/lib/llist.c +++ b/lib/llist.c @@ -34,8 +34,10 @@ * @new_first: first entry in batch to be added * @new_last: last entry in batch to be added * @head: the head for your lock-less list + * + * Return whether list is empty before adding. */ -void llist_add_batch(struct llist_node *new_first, struct llist_node *new_last, +bool llist_add_batch(struct llist_node *new_first, struct llist_node *new_last, struct llist_head *head) { struct llist_node *entry, *old_entry; @@ -49,6 +51,8 @@ void llist_add_batch(struct llist_node *new_first, struct llist_node *new_last, break; cpu_relax(); } + + return old_entry == NULL; } EXPORT_SYMBOL_GPL(llist_add_batch); From 38aaf8090d34b623b7919d8c933f6e938c9bf44b Mon Sep 17 00:00:00 2001 From: Huang Ying Date: Thu, 8 Sep 2011 14:00:46 +0800 Subject: [PATCH 35/46] irq_work: Use llist in the struct irq_work logic Use llist in irq_work instead of the lock-less linked list implementation in irq_work to avoid the code duplication. Signed-off-by: Huang Ying Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1315461646-1379-6-git-send-email-ying.huang@intel.com Signed-off-by: Ingo Molnar --- include/linux/irq_work.h | 15 ++++--- kernel/irq_work.c | 91 +++++++++++++++------------------------- 2 files changed, 42 insertions(+), 64 deletions(-) diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h index 4fa09d4d0b7..6a9e8f5399e 100644 --- a/include/linux/irq_work.h +++ b/include/linux/irq_work.h @@ -1,20 +1,23 @@ #ifndef _LINUX_IRQ_WORK_H #define _LINUX_IRQ_WORK_H +#include + struct irq_work { - struct irq_work *next; + unsigned long flags; + struct llist_node llnode; void (*func)(struct irq_work *); }; static inline -void init_irq_work(struct irq_work *entry, void (*func)(struct irq_work *)) +void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *)) { - entry->next = NULL; - entry->func = func; + work->flags = 0; + work->func = func; } -bool irq_work_queue(struct irq_work *entry); +bool irq_work_queue(struct irq_work *work); void irq_work_run(void); -void irq_work_sync(struct irq_work *entry); +void irq_work_sync(struct irq_work *work); #endif /* _LINUX_IRQ_WORK_H */ diff --git a/kernel/irq_work.c b/kernel/irq_work.c index c58fa7da8ae..6f0a4310def 100644 --- a/kernel/irq_work.c +++ b/kernel/irq_work.c @@ -17,54 +17,34 @@ * claimed NULL, 3 -> {pending} : claimed to be enqueued * pending next, 3 -> {busy} : queued, pending callback * busy NULL, 2 -> {free, claimed} : callback in progress, can be claimed - * - * We use the lower two bits of the next pointer to keep PENDING and BUSY - * flags. 
*/ #define IRQ_WORK_PENDING 1UL #define IRQ_WORK_BUSY 2UL #define IRQ_WORK_FLAGS 3UL -static inline bool irq_work_is_set(struct irq_work *entry, int flags) -{ - return (unsigned long)entry->next & flags; -} - -static inline struct irq_work *irq_work_next(struct irq_work *entry) -{ - unsigned long next = (unsigned long)entry->next; - next &= ~IRQ_WORK_FLAGS; - return (struct irq_work *)next; -} - -static inline struct irq_work *next_flags(struct irq_work *entry, int flags) -{ - unsigned long next = (unsigned long)entry; - next |= flags; - return (struct irq_work *)next; -} - -static DEFINE_PER_CPU(struct irq_work *, irq_work_list); +static DEFINE_PER_CPU(struct llist_head, irq_work_list); /* * Claim the entry so that no one else will poke at it. */ -static bool irq_work_claim(struct irq_work *entry) +static bool irq_work_claim(struct irq_work *work) { - struct irq_work *next, *nflags; + unsigned long flags, nflags; - do { - next = entry->next; - if ((unsigned long)next & IRQ_WORK_PENDING) + for (;;) { + flags = work->flags; + if (flags & IRQ_WORK_PENDING) return false; - nflags = next_flags(next, IRQ_WORK_FLAGS); - } while (cmpxchg(&entry->next, next, nflags) != next); + nflags = flags | IRQ_WORK_FLAGS; + if (cmpxchg(&work->flags, flags, nflags) == flags) + break; + cpu_relax(); + } return true; } - void __weak arch_irq_work_raise(void) { /* @@ -75,20 +55,15 @@ void __weak arch_irq_work_raise(void) /* * Queue the entry and raise the IPI if needed. */ -static void __irq_work_queue(struct irq_work *entry) +static void __irq_work_queue(struct irq_work *work) { - struct irq_work *next; + bool empty; preempt_disable(); - do { - next = __this_cpu_read(irq_work_list); - /* Can assign non-atomic because we keep the flags set. */ - entry->next = next_flags(next, IRQ_WORK_FLAGS); - } while (this_cpu_cmpxchg(irq_work_list, next, entry) != next); - + empty = llist_add(&work->llnode, &__get_cpu_var(irq_work_list)); /* The list was empty, raise self-interrupt to start processing. */ - if (!irq_work_next(entry)) + if (empty) arch_irq_work_raise(); preempt_enable(); @@ -100,16 +75,16 @@ static void __irq_work_queue(struct irq_work *entry) * * Can be re-enqueued while the callback is still in progress. */ -bool irq_work_queue(struct irq_work *entry) +bool irq_work_queue(struct irq_work *work) { - if (!irq_work_claim(entry)) { + if (!irq_work_claim(work)) { /* * Already enqueued, can't do! */ return false; } - __irq_work_queue(entry); + __irq_work_queue(work); return true; } EXPORT_SYMBOL_GPL(irq_work_queue); @@ -120,34 +95,34 @@ EXPORT_SYMBOL_GPL(irq_work_queue); */ void irq_work_run(void) { - struct irq_work *list; + struct irq_work *work; + struct llist_head *this_list; + struct llist_node *llnode; - if (this_cpu_read(irq_work_list) == NULL) + this_list = &__get_cpu_var(irq_work_list); + if (llist_empty(this_list)) return; BUG_ON(!in_irq()); BUG_ON(!irqs_disabled()); - list = this_cpu_xchg(irq_work_list, NULL); + llnode = llist_del_all(this_list); + while (llnode != NULL) { + work = llist_entry(llnode, struct irq_work, llnode); - while (list != NULL) { - struct irq_work *entry = list; - - list = irq_work_next(list); + llnode = llnode->next; /* - * Clear the PENDING bit, after this point the @entry + * Clear the PENDING bit, after this point the @work * can be re-used. */ - entry->next = next_flags(NULL, IRQ_WORK_BUSY); - entry->func(entry); + work->flags = IRQ_WORK_BUSY; + work->func(work); /* * Clear the BUSY bit and return to the free state if * no-one else claimed it meanwhile. 
*/ - (void)cmpxchg(&entry->next, - next_flags(NULL, IRQ_WORK_BUSY), - NULL); + (void)cmpxchg(&work->flags, IRQ_WORK_BUSY, 0); } } EXPORT_SYMBOL_GPL(irq_work_run); @@ -156,11 +131,11 @@ EXPORT_SYMBOL_GPL(irq_work_run); * Synchronize against the irq_work @entry, ensures the entry is not * currently in use. */ -void irq_work_sync(struct irq_work *entry) +void irq_work_sync(struct irq_work *work) { WARN_ON_ONCE(irqs_disabled()); - while (irq_work_is_set(entry, IRQ_WORK_BUSY)) + while (work->flags & IRQ_WORK_BUSY) cpu_relax(); } EXPORT_SYMBOL_GPL(irq_work_sync); From 924f8f5af31423529cc3940cb2ae9fee736b7517 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 12 Sep 2011 13:12:28 +0200 Subject: [PATCH 36/46] llist: Add llist_next() So we don't have to expose the struct list_node member. Cc: Huang Ying Cc: Andrew Morton Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/1315836348.26517.41.camel@twins Signed-off-by: Ingo Molnar --- include/linux/llist.h | 5 +++++ kernel/irq_work.c | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/include/linux/llist.h b/include/linux/llist.h index 27bbdf5ddf8..e2e96d04ee4 100644 --- a/include/linux/llist.h +++ b/include/linux/llist.h @@ -138,6 +138,11 @@ static inline bool llist_empty(const struct llist_head *head) return ACCESS_ONCE(head->first) == NULL; } +static inline struct llist_node *llist_next(struct llist_node *node) +{ + return node->next; +} + /** * llist_add - add a new entry * @new: new entry to be added diff --git a/kernel/irq_work.c b/kernel/irq_work.c index 6f0a4310def..0e2cde4f380 100644 --- a/kernel/irq_work.c +++ b/kernel/irq_work.c @@ -110,7 +110,7 @@ void irq_work_run(void) while (llnode != NULL) { work = llist_entry(llnode, struct irq_work, llnode); - llnode = llnode->next; + llnode = llist_next(llnode); /* * Clear the PENDING bit, after this point the @work From fa14ff4accfb24e59d2473f3d864d6648d80563b Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 12 Sep 2011 13:06:17 +0200 Subject: [PATCH 37/46] sched: Convert to struct llist Use the generic llist primitives. We had a private lockless list implementation in the scheduler in the wake-list code, now that we have a generic llist implementation that provides all required operations, switch to it. This patch is not expected to change any behavior. 
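[ As a usage sketch (hypothetical consumer, following the same pattern
  as the conversion below; "my_item" and "my_list" are made-up names):
  producers push entries lock-lessly with llist_add(), a single
  consumer drains them in one go with llist_del_all(): ]

	#include <linux/llist.h>

	struct my_item {
		int payload;
		struct llist_node llnode;
	};

	static struct llist_head my_list;	/* init_llist_head() at setup */

	/* any context; returns true if the list was empty before the add,
	 * i.e. the caller should kick (IPI/wake) the consumer */
	static bool my_push(struct my_item *item)
	{
		return llist_add(&item->llnode, &my_list);
	}

	/* single consumer: grab everything at once, then walk the batch */
	static void my_drain(void (*handle)(struct my_item *))
	{
		struct llist_node *node = llist_del_all(&my_list);

		while (node) {
			struct my_item *item =
				llist_entry(node, struct my_item, llnode);

			node = llist_next(node);	/* before handle() may free it */
			handle(item);
		}
	}
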
Signed-off-by: Peter Zijlstra Cc: Huang Ying Cc: Andrew Morton Link: http://lkml.kernel.org/r/1315836353.26517.42.camel@twins Signed-off-by: Ingo Molnar --- include/linux/sched.h | 3 ++- kernel/sched.c | 48 +++++++++---------------------------------- 2 files changed, 12 insertions(+), 39 deletions(-) diff --git a/include/linux/sched.h b/include/linux/sched.h index 9fda2888a6a..fc3e8911818 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -90,6 +90,7 @@ struct sched_param { #include #include #include +#include #include @@ -1225,7 +1226,7 @@ struct task_struct { unsigned int ptrace; #ifdef CONFIG_SMP - struct task_struct *wake_entry; + struct llist_node wake_entry; int on_cpu; #endif int on_rq; diff --git a/kernel/sched.c b/kernel/sched.c index c5cf15e1eb5..1874c741831 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -702,7 +702,7 @@ struct rq { #endif #ifdef CONFIG_SMP - struct task_struct *wake_list; + struct llist_head wake_list; #endif }; @@ -2698,42 +2698,26 @@ static int ttwu_remote(struct task_struct *p, int wake_flags) } #ifdef CONFIG_SMP -static void sched_ttwu_do_pending(struct task_struct *list) +static void sched_ttwu_pending(void) { struct rq *rq = this_rq(); + struct llist_node *llist = llist_del_all(&rq->wake_list); + struct task_struct *p; raw_spin_lock(&rq->lock); - while (list) { - struct task_struct *p = list; - list = list->wake_entry; + while (llist) { + p = llist_entry(llist, struct task_struct, wake_entry); + llist = llist_next(llist); ttwu_do_activate(rq, p, 0); } raw_spin_unlock(&rq->lock); } -#ifdef CONFIG_HOTPLUG_CPU - -static void sched_ttwu_pending(void) -{ - struct rq *rq = this_rq(); - struct task_struct *list = xchg(&rq->wake_list, NULL); - - if (!list) - return; - - sched_ttwu_do_pending(list); -} - -#endif /* CONFIG_HOTPLUG_CPU */ - void scheduler_ipi(void) { - struct rq *rq = this_rq(); - struct task_struct *list = xchg(&rq->wake_list, NULL); - - if (!list) + if (llist_empty(&this_rq()->wake_list)) return; /* @@ -2750,25 +2734,13 @@ void scheduler_ipi(void) * somewhat pessimize the simple resched case. */ irq_enter(); - sched_ttwu_do_pending(list); + sched_ttwu_pending(); irq_exit(); } static void ttwu_queue_remote(struct task_struct *p, int cpu) { - struct rq *rq = cpu_rq(cpu); - struct task_struct *next = rq->wake_list; - - for (;;) { - struct task_struct *old = next; - - p->wake_entry = next; - next = cmpxchg(&rq->wake_list, old, p); - if (next == old) - break; - } - - if (!next) + if (llist_add(&p->wake_entry, &cpu_rq(cpu)->wake_list)) smp_send_reschedule(cpu); } From f0f1d32f931b705c4ee5dd374074d34edf3eae14 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Mon, 12 Sep 2011 15:50:49 +0200 Subject: [PATCH 38/46] llist: Remove cpu_relax() usage in cmpxchg loops Initial benchmarks show they're a net loss: $ for i in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor ; do echo performance > $i; done $ echo 4096 32000 64 128 > /proc/sys/kernel/sem $ ./sembench -t 2048 -w 1900 -o 0 Pre: run time 30 seconds 778936 worker burns per second run time 30 seconds 912190 worker burns per second run time 30 seconds 817506 worker burns per second run time 30 seconds 830870 worker burns per second run time 30 seconds 845056 worker burns per second Post: run time 30 seconds 905920 worker burns per second run time 30 seconds 849046 worker burns per second run time 30 seconds 886286 worker burns per second run time 30 seconds 822320 worker burns per second run time 30 seconds 900283 worker burns per second So about 4% faster. (!) 
cpu_relax() stalls the pipeline, therefore, when used in a tight loop it has the following benefits: - allows SMT siblings to have a go; - reduces pressure on the CPU interconnect. However, cmpxchg loops are unfair and thus have unbounded completion time, therefore we should avoid getting in such heavily contended situations where the above benefits make any difference. A typical cmpxchg loop should not go round more than a handfull of times at worst, therefore adding extra delays just slows things down. Since the llist primitives are new, there aren't any bad users yet, and we should avoid growing them. Heavily contended sites should generally be better off using the ticket locks for serialization since they provide bounded completion times (fifo-fair over the cpus). Signed-off-by: Peter Zijlstra Cc: Huang Ying Cc: Andrew Morton Link: http://lkml.kernel.org/r/1315836358.26517.43.camel@twins Signed-off-by: Ingo Molnar --- include/linux/llist.h | 1 - lib/llist.c | 2 -- 2 files changed, 3 deletions(-) diff --git a/include/linux/llist.h b/include/linux/llist.h index e2e96d04ee4..837fb4ae66f 100644 --- a/include/linux/llist.h +++ b/include/linux/llist.h @@ -161,7 +161,6 @@ static inline bool llist_add(struct llist_node *new, struct llist_head *head) entry = cmpxchg(&head->first, old_entry, new); if (entry == old_entry) break; - cpu_relax(); } return old_entry == NULL; diff --git a/lib/llist.c b/lib/llist.c index 878985c4d19..700cff77a38 100644 --- a/lib/llist.c +++ b/lib/llist.c @@ -49,7 +49,6 @@ bool llist_add_batch(struct llist_node *new_first, struct llist_node *new_last, entry = cmpxchg(&head->first, old_entry, new_first); if (entry == old_entry) break; - cpu_relax(); } return old_entry == NULL; @@ -83,7 +82,6 @@ struct llist_node *llist_del_first(struct llist_head *head) entry = cmpxchg(&head->first, old_entry, next); if (entry == old_entry) break; - cpu_relax(); } return entry; From 908a3283728d92df36e0c7cd63304fd35e93a8a9 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Thu, 15 Sep 2011 15:32:06 +0200 Subject: [PATCH 39/46] sched: Fix idle_cpu() On -rt we observed hackbench waking all 400 tasks to a single cpu. This is because of select_idle_sibling()'s interaction with the new ipi based wakeup scheme. The existing idle_cpu() test only checks to see if the current task on that cpu is the idle task, it does not take already queued tasks into account, nor does it take queued to be woken tasks into account. If the remote wakeup IPIs come hard enough, there won't be time to schedule away from the idle task, and would thus keep thinking the cpu was in fact idle, regardless of the fact that there were already several hundred tasks runnable. We couldn't reproduce on mainline, but there's no reason it couldn't happen. 
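[ In sketch form, the three conditions the updated idle_cpu() below has
  to check (stand-alone model with plain fields, not the kernel
  structures): ]

	struct cpu_model {
		int curr_is_idle_task;		/* rq->curr == rq->idle */
		unsigned int nr_running;	/* tasks already queued on the rq */
		int wake_list_empty;		/* no remote wakeups pending */
	};

	static int model_idle_cpu(const struct cpu_model *c)
	{
		return c->curr_is_idle_task &&
		       !c->nr_running &&
		       c->wake_list_empty;
	}
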
Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/n/tip-3o30p18b2paswpc9ohy2gltp@git.kernel.org Signed-off-by: Ingo Molnar --- kernel/sched.c | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/kernel/sched.c b/kernel/sched.c index 1874c741831..4cdc91cf48f 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -5138,7 +5138,20 @@ EXPORT_SYMBOL(task_nice); */ int idle_cpu(int cpu) { - return cpu_curr(cpu) == cpu_rq(cpu)->idle; + struct rq *rq = cpu_rq(cpu); + + if (rq->curr != rq->idle) + return 0; + + if (rq->nr_running) + return 0; + +#ifdef CONFIG_SMP + if (!llist_empty(&rq->wake_list)) + return 0; +#endif + + return 1; } /** From ca38062e57e97791c2f62e3dbd06caf3ebb5721c Mon Sep 17 00:00:00 2001 From: Suresh Siddha Date: Mon, 3 Oct 2011 15:09:00 -0700 Subject: [PATCH 40/46] sched: Use resched IPI to kick off the nohz idle balance Current use of smp call function to kick the nohz idle balance can deadlock in this scenario. 1. cpu-A did a generic_exec_single() to cpu-B and after queuing its call single data (csd) to the call single queue, cpu-A took a timer interrupt. Actual IPI to cpu-B to process the call single queue is not yet sent. 2. As part of the timer interrupt handler, cpu-A decided to kick cpu-B for the idle load balancing (sets cpu-B's rq->nohz_balance_kick to 1) and __smp_call_function_single() with nowait will queue the csd to the cpu-B's queue. But the generic_exec_single() won't send an IPI to cpu-B as the call single queue was not empty. 3. cpu-A is busy with lot of interrupts 4. Meanwhile cpu-B is entering and exiting idle and noticed that it has it's rq->nohz_balance_kick set to '1'. So it will go ahead and do the idle load balancer and clear its rq->nohz_balance_kick. 5. At this point, csd queued as part of the step-2 above is still locked and waiting to be serviced on cpu-B. 6. cpu-A is still busy with interrupt load and now it got another timer interrupt and as part of it decided to kick cpu-B for another idle load balancing (as it finds cpu-B's rq->nohz_balance_kick cleared in step-4 above) and does __smp_call_function_single() with the same csd that is still locked. 7. And we get a deadlock waiting for the csd_lock() in the __smp_call_function_single(). Main issue here is that cpu-B can service the idle load balancer kick request from cpu-A even with out receiving the IPI and this lead to doing multiple __smp_call_function_single() on the same csd leading to deadlock. To kick a cpu, scheduler already has the reschedule vector reserved. Use that mechanism (kick_process()) instead of using the generic smp call function mechanism to kick off the nohz idle load balancing and avoid the deadlock. [ This issue is present from 2.6.35+ kernels, but marking it -stable only from v3.0+ as the proposed fix depends on the scheduler_ipi() that is introduced recently. 
] Reported-by: Prarit Bhargava Signed-off-by: Suresh Siddha Cc: stable@kernel.org # v3.0+ Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/20111003220934.834943260@sbsiddha-desk.sc.intel.com Signed-off-by: Ingo Molnar --- kernel/sched.c | 21 +++++++++++++++++++-- kernel/sched_fair.c | 29 +++++++++-------------------- 2 files changed, 28 insertions(+), 22 deletions(-) diff --git a/kernel/sched.c b/kernel/sched.c index 4cdc91cf48f..9e49af00ae3 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -1404,6 +1404,18 @@ void wake_up_idle_cpu(int cpu) smp_send_reschedule(cpu); } +static inline bool got_nohz_idle_kick(void) +{ + return idle_cpu(smp_processor_id()) && this_rq()->nohz_balance_kick; +} + +#else /* CONFIG_NO_HZ */ + +static inline bool got_nohz_idle_kick(void) +{ + return false; +} + #endif /* CONFIG_NO_HZ */ static u64 sched_avg_period(void) @@ -2717,7 +2729,7 @@ static void sched_ttwu_pending(void) void scheduler_ipi(void) { - if (llist_empty(&this_rq()->wake_list)) + if (llist_empty(&this_rq()->wake_list) && !got_nohz_idle_kick()) return; /* @@ -2735,6 +2747,12 @@ void scheduler_ipi(void) */ irq_enter(); sched_ttwu_pending(); + + /* + * Check if someone kicked us for doing the nohz idle load balance. + */ + if (unlikely(got_nohz_idle_kick() && !need_resched())) + raise_softirq_irqoff(SCHED_SOFTIRQ); irq_exit(); } @@ -8288,7 +8306,6 @@ void __init sched_init(void) rq_attach_root(rq, &def_root_domain); #ifdef CONFIG_NO_HZ rq->nohz_balance_kick = 0; - init_sched_softirq_csd(&per_cpu(remote_sched_softirq_cb, i)); #endif #endif init_rq_hrtick(rq); diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index fef0bfde7c8..6c5fa109922 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -4269,22 +4269,6 @@ out_unlock: } #ifdef CONFIG_NO_HZ - -static DEFINE_PER_CPU(struct call_single_data, remote_sched_softirq_cb); - -static void trigger_sched_softirq(void *data) -{ - raise_softirq_irqoff(SCHED_SOFTIRQ); -} - -static inline void init_sched_softirq_csd(struct call_single_data *csd) -{ - csd->func = trigger_sched_softirq; - csd->info = NULL; - csd->flags = 0; - csd->priv = 0; -} - /* * idle load balancing details * - One of the idle CPUs nominates itself as idle load_balancer, while @@ -4450,11 +4434,16 @@ static void nohz_balancer_kick(int cpu) } if (!cpu_rq(ilb_cpu)->nohz_balance_kick) { - struct call_single_data *cp; - cpu_rq(ilb_cpu)->nohz_balance_kick = 1; - cp = &per_cpu(remote_sched_softirq_cb, cpu); - __smp_call_function_single(ilb_cpu, cp, 0); + + smp_mb(); + /* + * Use smp_send_reschedule() instead of resched_cpu(). + * This way we generate a sched IPI on the target cpu which + * is idle. And the softirq performing nohz idle load balance + * will be run before returning from the IPI. + */ + smp_send_reschedule(ilb_cpu); } return; } From 6eb57e0d65ebd99a71d435dc96d83e725752eef8 Mon Sep 17 00:00:00 2001 From: Suresh Siddha Date: Mon, 3 Oct 2011 15:09:01 -0700 Subject: [PATCH 41/46] sched: Request for idle balance during nohz idle load balance rq's idle_at_tick is set to idle/busy during the timer tick depending on the cpu was idle or not. This will be used later in the load balance that will be done in the softirq context (which is a process context in -RT kernels). For nohz kernels, for the cpu doing nohz idle load balance on behalf of all the idle cpu's, its rq->idle_at_tick might have a stale value (which is recorded when it got the timer tick presumably when it is busy). 
As the nohz idle load balancing is also being done at the same place as the regular load balancing, nohz idle load balancing was bailing out when it sees rq's idle_at_tick not set. Thus leading to poor system utilization. Rename rq's idle_at_tick to idle_balance and set it when someone requests for nohz idle balance on an idle cpu. Reported-by: Srivatsa Vaddagiri Signed-off-by: Suresh Siddha Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/20111003220934.892350549@sbsiddha-desk.sc.intel.com Signed-off-by: Ingo Molnar --- kernel/sched.c | 8 +++++--- kernel/sched_fair.c | 4 ++-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/kernel/sched.c b/kernel/sched.c index 9e49af00ae3..7bc9b0e84eb 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -644,7 +644,7 @@ struct rq { unsigned long cpu_power; - unsigned char idle_at_tick; + unsigned char idle_balance; /* For active balancing */ int post_schedule; int active_balance; @@ -2751,8 +2751,10 @@ void scheduler_ipi(void) /* * Check if someone kicked us for doing the nohz idle load balance. */ - if (unlikely(got_nohz_idle_kick() && !need_resched())) + if (unlikely(got_nohz_idle_kick() && !need_resched())) { + this_rq()->idle_balance = 1; raise_softirq_irqoff(SCHED_SOFTIRQ); + } irq_exit(); } @@ -4247,7 +4249,7 @@ void scheduler_tick(void) perf_event_task_tick(); #ifdef CONFIG_SMP - rq->idle_at_tick = idle_cpu(cpu); + rq->idle_balance = idle_cpu(cpu); trigger_load_balance(rq, cpu); #endif } diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 6c5fa109922..506db0966eb 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -4676,7 +4676,7 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu) if (time_before(now, nohz.next_balance)) return 0; - if (rq->idle_at_tick) + if (idle_cpu(cpu)) return 0; first_pick_cpu = atomic_read(&nohz.first_pick_cpu); @@ -4712,7 +4712,7 @@ static void run_rebalance_domains(struct softirq_action *h) { int this_cpu = smp_processor_id(); struct rq *this_rq = cpu_rq(this_cpu); - enum cpu_idle_type idle = this_rq->idle_at_tick ? + enum cpu_idle_type idle = this_rq->idle_balance ? CPU_IDLE : CPU_NOT_IDLE; rebalance_domains(this_cpu, idle); From fa17b507f142d37aeac322a95f6f7c6375f25601 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 16 Jun 2011 12:23:22 +0200 Subject: [PATCH 42/46] sched: Wrap scheduler p->cpus_allowed access This task is preparatory for the migrate_disable() implementation, but stands on its own and provides a cleanup. It currently only converts those sites required for task-placement. Kosaki-san once mentioned replacing cpus_allowed with a proper cpumask_t instead of the NR_CPUS sized array it currently is, that would also require something like this. Signed-off-by: Peter Zijlstra Acked-by: Thomas Gleixner Cc: KOSAKI Motohiro Link: http://lkml.kernel.org/n/tip-e42skvaddos99psip0vce41o@git.kernel.org Signed-off-by: Ingo Molnar --- kernel/sched.c | 8 ++++---- kernel/sched_fair.c | 12 ++++++------ kernel/sched_rt.c | 4 ++-- lib/smp_processor_id.c | 2 +- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/kernel/sched.c b/kernel/sched.c index 7bc9b0e84eb..45174ca5c8e 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -2544,11 +2544,11 @@ static int select_fallback_rq(int cpu, struct task_struct *p) /* Look for allowed, online CPU in same node. */ for_each_cpu_and(dest_cpu, nodemask, cpu_active_mask) - if (cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) + if (cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p))) return dest_cpu; /* Any allowed, online CPU? 
*/ - dest_cpu = cpumask_any_and(&p->cpus_allowed, cpu_active_mask); + dest_cpu = cpumask_any_and(tsk_cpus_allowed(p), cpu_active_mask); if (dest_cpu < nr_cpu_ids) return dest_cpu; @@ -2585,7 +2585,7 @@ int select_task_rq(struct task_struct *p, int sd_flags, int wake_flags) * [ this allows ->select_task() to simply return task_cpu(p) and * not worry about this generic constraint ] */ - if (unlikely(!cpumask_test_cpu(cpu, &p->cpus_allowed) || + if (unlikely(!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) || !cpu_online(cpu))) cpu = select_fallback_rq(task_cpu(p), p); @@ -6262,7 +6262,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu) if (task_cpu(p) != src_cpu) goto done; /* Affinity changed (again). */ - if (!cpumask_test_cpu(dest_cpu, &p->cpus_allowed)) + if (!cpumask_test_cpu(dest_cpu, tsk_cpus_allowed(p))) goto fail; /* diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index 506db0966eb..5c9e67923b7 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -2183,7 +2183,7 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p, /* Skip over this group if it has no CPUs allowed */ if (!cpumask_intersects(sched_group_cpus(group), - &p->cpus_allowed)) + tsk_cpus_allowed(p))) continue; local_group = cpumask_test_cpu(this_cpu, @@ -2229,7 +2229,7 @@ find_idlest_cpu(struct sched_group *group, struct task_struct *p, int this_cpu) int i; /* Traverse only the allowed CPUs */ - for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) { + for_each_cpu_and(i, sched_group_cpus(group), tsk_cpus_allowed(p)) { load = weighted_cpuload(i); if (load < min_load || (load == min_load && i == this_cpu)) { @@ -2273,7 +2273,7 @@ static int select_idle_sibling(struct task_struct *p, int target) if (!(sd->flags & SD_SHARE_PKG_RESOURCES)) break; - for_each_cpu_and(i, sched_domain_span(sd), &p->cpus_allowed) { + for_each_cpu_and(i, sched_domain_span(sd), tsk_cpus_allowed(p)) { if (idle_cpu(i)) { target = i; break; @@ -2316,7 +2316,7 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags) int sync = wake_flags & WF_SYNC; if (sd_flag & SD_BALANCE_WAKE) { - if (cpumask_test_cpu(cpu, &p->cpus_allowed)) + if (cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) want_affine = 1; new_cpu = prev_cpu; } @@ -2697,7 +2697,7 @@ int can_migrate_task(struct task_struct *p, struct rq *rq, int this_cpu, * 2) cannot be migrated to this CPU due to cpus_allowed, or * 3) are cache-hot on their current CPU. 
*/ - if (!cpumask_test_cpu(this_cpu, &p->cpus_allowed)) { + if (!cpumask_test_cpu(this_cpu, tsk_cpus_allowed(p))) { schedstat_inc(p, se.statistics.nr_failed_migrations_affine); return 0; } @@ -4087,7 +4087,7 @@ redo: * moved to this_cpu */ if (!cpumask_test_cpu(this_cpu, - &busiest->curr->cpus_allowed)) { + tsk_cpus_allowed(busiest->curr))) { raw_spin_unlock_irqrestore(&busiest->lock, flags); all_pinned = 1; diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 0cc188cf766..57a10842afa 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -1179,7 +1179,7 @@ static void deactivate_task(struct rq *rq, struct task_struct *p, int sleep); static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu) { if (!task_running(rq, p) && - (cpu < 0 || cpumask_test_cpu(cpu, &p->cpus_allowed)) && + (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) && (p->rt.nr_cpus_allowed > 1)) return 1; return 0; @@ -1324,7 +1324,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq) */ if (unlikely(task_rq(task) != rq || !cpumask_test_cpu(lowest_rq->cpu, - &task->cpus_allowed) || + tsk_cpus_allowed(task)) || task_running(rq, task) || !task->on_rq)) { diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c index 4689cb073da..503f087382a 100644 --- a/lib/smp_processor_id.c +++ b/lib/smp_processor_id.c @@ -22,7 +22,7 @@ notrace unsigned int debug_smp_processor_id(void) * Kernel threads bound to a single CPU can safely use * smp_processor_id(): */ - if (cpumask_equal(¤t->cpus_allowed, cpumask_of(this_cpu))) + if (cpumask_equal(tsk_cpus_allowed(current), cpumask_of(this_cpu))) goto out; /* From 4939602a2441306008c6dca38216b741d4e09a42 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Sat, 25 Jun 2011 15:45:46 +0200 Subject: [PATCH 43/46] sched: Unify the ->cpus_allowed mask copy Currently every sched_class::set_cpus_allowed() implementation has to copy the cpumask into task_struct::cpus_allowed, this is pointless, put this copy in the generic code. Signed-off-by: Peter Zijlstra Acked-by: Thomas Gleixner Link: http://lkml.kernel.org/n/tip-jhl5s9fckd9ptw1fzbqqlrd3@git.kernel.org Signed-off-by: Ingo Molnar --- kernel/sched.c | 7 +++---- kernel/sched_rt.c | 3 --- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/kernel/sched.c b/kernel/sched.c index 45174ca5c8e..ce9a9e7db11 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -6161,10 +6161,9 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask) { if (p->sched_class && p->sched_class->set_cpus_allowed) p->sched_class->set_cpus_allowed(p, new_mask); - else { - cpumask_copy(&p->cpus_allowed, new_mask); - p->rt.nr_cpus_allowed = cpumask_weight(new_mask); - } + + cpumask_copy(&p->cpus_allowed, new_mask); + p->rt.nr_cpus_allowed = cpumask_weight(new_mask); } /* diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 57a10842afa..3023fd1ef36 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -1608,9 +1608,6 @@ static void set_cpus_allowed_rt(struct task_struct *p, update_rt_migration(&rq->rt); } - - cpumask_copy(&p->cpus_allowed, new_mask); - p->rt.nr_cpus_allowed = weight; } /* Assumes rq->lock is held */ From 1c83437e80186832a9a48dbb6b8868d28e40e562 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Wed, 5 Oct 2011 13:32:34 +0200 Subject: [PATCH 44/46] sched: Warn on rt throttling The default rt-throttling is a source of never ending questions. Warn once when we go into throttling so folks have that info in dmesg. 
Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra Link: http://lkml.kernel.org/r/alpine.LFD.2.02.1110051331480.18778@ionos Signed-off-by: Ingo Molnar --- kernel/sched_rt.c | 1 + 1 file changed, 1 insertion(+) diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c index 3023fd1ef36..056cbd2e2a2 100644 --- a/kernel/sched_rt.c +++ b/kernel/sched_rt.c @@ -655,6 +655,7 @@ static int sched_rt_runtime_exceeded(struct rt_rq *rt_rq) if (rt_rq->rt_time > runtime) { rt_rq->rt_throttled = 1; + printk_once(KERN_WARNING "sched: RT throttling activated\n"); if (rt_rq_throttled(rt_rq)) { sched_rt_rq_dequeue(rt_rq); return 1; From 510f5acc4f4fb07f3f075900dc468d6e380beff6 Mon Sep 17 00:00:00 2001 From: Thomas Gleixner Date: Sun, 17 Jul 2011 20:47:54 +0200 Subject: [PATCH 45/46] sched: Don't use tasklist_lock for debug prints Avoid taking locks from debug prints, this avoids latencies on -rt, and improves reliability of the debug code. Signed-off-by: Thomas Gleixner Signed-off-by: Peter Zijlstra Signed-off-by: Ingo Molnar --- kernel/sched.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/kernel/sched.c b/kernel/sched.c index ce9a9e7db11..24637c78200 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -6021,7 +6021,7 @@ void show_state_filter(unsigned long state_filter) printk(KERN_INFO " task PC stack pid father\n"); #endif - read_lock(&tasklist_lock); + rcu_read_lock(); do_each_thread(g, p) { /* * reset the NMI-timeout, listing all files on a slow @@ -6037,7 +6037,7 @@ void show_state_filter(unsigned long state_filter) #ifdef CONFIG_SCHED_DEBUG sysrq_sched_debug_show(); #endif - read_unlock(&tasklist_lock); + rcu_read_unlock(); /* * Only show locks if all tasks are dumped: */ From 540f41edc15473ca3b2876de72646546ae101374 Mon Sep 17 00:00:00 2001 From: Stephen Rothwell Date: Wed, 5 Oct 2011 17:25:28 +1100 Subject: [PATCH 46/46] llist: Add back llist_add_batch() and llist_del_first() prototypes Commit 1230db8e1543 ("llist: Make some llist functions inline") has deleted the definitions, causing problems for (not upstream yet) code that tries to make use of them. Signed-off-by: Stephen Rothwell Acked-by: Peter Zijlstra Cc: Huang Ying Cc: David Miller Link: http://lkml.kernel.org/r/20111005172528.0d0a8afc65acef7ace22a24e@canb.auug.org.au Signed-off-by: Ingo Molnar --- include/linux/llist.h | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/include/linux/llist.h b/include/linux/llist.h index 837fb4ae66f..7287734e08d 100644 --- a/include/linux/llist.h +++ b/include/linux/llist.h @@ -178,4 +178,10 @@ static inline struct llist_node *llist_del_all(struct llist_head *head) { return xchg(&head->first, NULL); } + +extern bool llist_add_batch(struct llist_node *new_first, + struct llist_node *new_last, + struct llist_head *head); +extern struct llist_node *llist_del_first(struct llist_head *head); + #endif /* LLIST_H */