Revert "ANDROID: sched/rt: Add support for rt sync wakeups"
This reverts commit 3a9495889492ae5132ea995723bf5ed6b045d8fa.
It conflicts with scheduler changes going into 6.3-rc1, so it must be
removed. If it is still needed, it can be brought back after 6.3-rc1 is
merged into android-mainline.
Bug: 157906395
Cc: J. Avila <elavila@google.com>
Cc: Stephen Dickey <quic_dickey@quicinc.com>
Cc: Lee Jones <joneslee@google.com>
Change-Id: I9870034226407bc811db9c36802f0446289f13f0
Signed-off-by: Greg Kroah-Hartman <gregkh@google.com>
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c634290..5b9fba6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3690,9 +3690,6 @@ ttwu_do_activate(struct rq *rq, struct task_struct *p, int wake_flags,
 {
 	int en_flags = ENQUEUE_WAKEUP | ENQUEUE_NOCLOCK;
 
-	if (wake_flags & WF_SYNC)
-		en_flags |= ENQUEUE_WAKEUP_SYNC;
-
 	lockdep_assert_rq_held(rq);
 
 	if (p->sched_contributes_to_load)
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 8e546d4..af98697 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1395,27 +1395,6 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se, unsigned int flags)
 	enqueue_top_rt_rq(&rq->rt);
 }
 
-#ifdef CONFIG_SMP
-static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p,
-					bool sync)
-{
-	/*
-	 * If the waker is CFS, then an RT sync wakeup would preempt the waker
-	 * and force it to run for a likely small time after the RT wakee is
-	 * done. So, only honor RT sync wakeups from RT wakers.
-	 */
-	return sync && task_has_rt_policy(rq->curr) &&
-		p->prio <= rq->rt.highest_prio.next &&
-		rq->rt.rt_nr_running <= 2;
-}
-#else
-static inline bool should_honor_rt_sync(struct rq *rq, struct task_struct *p,
-					bool sync)
-{
-	return 0;
-}
-#endif
-
 /*
  * Adding/removing a task to/from a priority array:
  */
@@ -1423,15 +1402,13 @@ static void
 enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
 {
 	struct sched_rt_entity *rt_se = &p->rt;
-	bool sync = !!(flags & ENQUEUE_WAKEUP_SYNC);
 
 	if (flags & ENQUEUE_WAKEUP)
 		rt_se->timeout = 0;
 
 	enqueue_rt_entity(rt_se, flags);
 
-	if (!task_current(rq, p) && p->nr_cpus_allowed > 1 &&
-	    !should_honor_rt_sync(rq, p, sync))
+	if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
 }
 
@@ -1508,12 +1485,9 @@ select_task_rq_rt(struct task_struct *p, int cpu, int flags)
 {
 	struct task_struct *curr;
 	struct rq *rq;
-	struct rq *this_cpu_rq;
 	bool test;
 	int target_cpu = -1;
 	bool may_not_preempt;
-	bool sync = !!(flags & WF_SYNC);
-	int this_cpu;
 
 	trace_android_rvh_select_task_rq_rt(p, cpu, flags & 0xF,
 					    flags, &target_cpu);
@@ -1528,8 +1502,6 @@ select_task_rq_rt(struct task_struct *p, int cpu, int flags)
 
 	rcu_read_lock();
 	curr = READ_ONCE(rq->curr); /* unlocked access */
-	this_cpu = smp_processor_id();
-	this_cpu_rq = cpu_rq(this_cpu);
 
 	/*
 	 * If the current task on @p's runqueue is a softirq task,
@@ -1567,15 +1539,6 @@ select_task_rq_rt(struct task_struct *p, int cpu, int flags)
 	       (unlikely(rt_task(curr)) &&
 	        (curr->nr_cpus_allowed < 2 || curr->prio <= p->prio))));
 
-	/*
-	 * Respect the sync flag as long as the task can run on this CPU.
-	 */
-	if (should_honor_rt_sync(this_cpu_rq, p, sync) &&
-	    cpumask_test_cpu(this_cpu, p->cpus_ptr)) {
-		cpu = this_cpu;
-		goto out_unlock;
-	}
-
 	if (test || !rt_task_fits_capacity(p, cpu)) {
 		int target = find_lowest_rq(p);
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index b11da4b..d10fa53 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2162,8 +2162,6 @@ extern const u32 sched_prio_to_wmult[40];
 #define ENQUEUE_MIGRATED	0x00
 #endif
 
-#define ENQUEUE_WAKEUP_SYNC	0x80
-
 #define RETRY_TASK		((void *)-1UL)
 
 struct sched_class {