sched: Unthrottle rt runqueues in __disable_runtime()
author Peter Boonstoppel <pboonstoppel@nvidia.com>
Thu, 9 Aug 2012 22:34:47 +0000 (15:34 -0700)
committer Ben Hutchings <ben@decadent.org.uk>
Sat, 15 Feb 2014 19:20:18 +0000 (19:20 +0000)
commit a4c96ae319b8047f62dedbe1eac79e321c185749 upstream.

migrate_tasks() uses _pick_next_task_rt() to get tasks from the
real-time runqueues to be migrated. When rt_rq is throttled,
_pick_next_task_rt() won't return anything, in which case
migrate_tasks() can't move all threads over and gets stuck in an
infinite loop.
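
A condensed sketch of the two code paths involved (3.2-era scheduler,
abbreviated, not verbatim) shows the livelock:

    /* kernel/sched_rt.c */
    static struct task_struct *_pick_next_task_rt(struct rq *rq)
    {
            struct rt_rq *rt_rq = &rq->rt;

            if (!rt_rq->rt_nr_running)
                    return NULL;
            if (rt_rq_throttled(rt_rq))
                    return NULL;    /* throttled: nothing is returned... */
            /* ... normal pick ... */
    }

    /* kernel/sched.c, the picking loop in migrate_tasks() */
    for ( ; ; ) {
            /* ...but the throttled tasks still count in nr_running,
             * so this exit condition never becomes true */
            if (rq->nr_running == 1)
                    break;
            /* with the rt (and stop) classes returning nothing, only
             * the idle task is left to pick; nr_running never drops */
            next = pick_next_task(rq);
            ...
    }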

Instead, unthrottle rt runqueues before migrating tasks.

Additionally, move unthrottle_offline_cfs_rqs() to rq_offline_fair().
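
Both unthrottle paths run before the migration loop: in the CPU_DYING
notifier, migration_call() takes the runqueue offline first and only
then migrates its tasks, so hooking the unthrottling into the classes'
rq_offline callbacks is sufficient. Roughly (3.2-era kernel/sched.c,
condensed sketch):

    case CPU_DYING:
            ...
            if (rq->rd)
                    set_rq_offline(rq); /* -> rq_offline_rt()   -> __disable_runtime()
                                         * -> rq_offline_fair() -> unthrottle_offline_cfs_rqs() */
            migrate_tasks(cpu);         /* every queued task is pickable again */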

Signed-off-by: Peter Boonstoppel <pboonstoppel@nvidia.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Turner <pjt@google.com>
Link: http://lkml.kernel.org/r/5FBF8E85CA34454794F0F7ECBA79798F379D3648B7@HQMAIL04.nvidia.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
[ lizf: backported to 3.4: adjust context ]
Signed-off-by: Li Zefan <lizefan@huawei.com>
[bwh: Backported to 3.2:
 - Adjust filenames
 - unthrottle_offline_cfs_rqs() is already static, but defined in sched.c
   after including sched_fair.c, so add a forward declaration
 - unthrottle_offline_cfs_rqs() also needs to be defined for all CONFIG_SMP
   configurations now (see the condensed layout sketch below)]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
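
For reference, the arrangement this backport produces in kernel/sched.c
looks roughly like this (condensed sketch, not the literal result):

    #ifdef CONFIG_SMP
    static void unthrottle_offline_cfs_rqs(struct rq *rq); /* fwd decl for sched_fair.c */
    #endif

    #include "sched_fair.c"     /* rq_offline_fair() now calls it */

    #ifdef CONFIG_HOTPLUG_CPU
    /* ... the real definition lives here, under CONFIG_CFS_BANDWIDTH ... */
    #endif /* CONFIG_HOTPLUG_CPU */

    #if !defined(CONFIG_HOTPLUG_CPU) || !defined(CONFIG_CFS_BANDWIDTH)
    static void unthrottle_offline_cfs_rqs(struct rq *rq) {}
    #endif
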
kernel/sched.c
kernel/sched_fair.c
kernel/sched_rt.c

diff --git a/kernel/sched.c b/kernel/sched.c
index d93369a977d18d492f26afaeb60bcedd62b3a15a..ea85b0d2bbf2cf8c725d504fc48494abb9f35cb8 100644
@@ -2189,6 +2189,10 @@ static int irqtime_account_si_update(void)
 
 #endif
 
+#ifdef CONFIG_SMP
+static void unthrottle_offline_cfs_rqs(struct rq *rq);
+#endif
+
 #include "sched_idletask.c"
 #include "sched_fair.c"
 #include "sched_rt.c"
@@ -6566,8 +6570,6 @@ static void unthrottle_offline_cfs_rqs(struct rq *rq)
                        unthrottle_cfs_rq(cfs_rq);
        }
 }
-#else
-static void unthrottle_offline_cfs_rqs(struct rq *rq) {}
 #endif
 
 /*
@@ -6595,9 +6597,6 @@ static void migrate_tasks(unsigned int dead_cpu)
         */
        rq->stop = NULL;
 
-       /* Ensure any throttled groups are reachable by pick_next_task */
-       unthrottle_offline_cfs_rqs(rq);
-
        for ( ; ; ) {
                /*
                 * There's this thread running, bail when that's the only
@@ -6624,6 +6623,10 @@ static void migrate_tasks(unsigned int dead_cpu)
 
 #endif /* CONFIG_HOTPLUG_CPU */
 
+#if !defined(CONFIG_HOTPLUG_CPU) || !defined(CONFIG_CFS_BANDWIDTH)
+static void unthrottle_offline_cfs_rqs(struct rq *rq) {}
+#endif
+
 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
 
 static struct ctl_table sd_ctl_dir[] = {
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 5b9e456ea98ceb189a22b4af94c222ddcb133b44..37f3f3925d3d38c6419a7c70b05f8c754ef4cc07 100644
@@ -4848,6 +4848,9 @@ static void rq_online_fair(struct rq *rq)
 static void rq_offline_fair(struct rq *rq)
 {
        update_sysctl();
+
+       /* Ensure any throttled groups are reachable by pick_next_task */
+       unthrottle_offline_cfs_rqs(rq);
 }
 
 #else  /* CONFIG_SMP */
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index d0fff81393770765775d21cab78e8e7036bab33a..2992f93b68cb2c0deddc28240bd108c47b94b2be 100644
@@ -509,6 +509,7 @@ balanced:
                 * runtime - in which case borrowing doesn't make sense.
                 */
                rt_rq->rt_runtime = RUNTIME_INF;
+               rt_rq->rt_throttled = 0;
                raw_spin_unlock(&rt_rq->rt_runtime_lock);
                raw_spin_unlock(&rt_b->rt_runtime_lock);
        }