From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Chris Redpath <chris.redpath@arm.com>
Date: Wed, 27 Mar 2019 17:15:17 +0000
Subject: NOUPSTREAM: ANDROID: sched: Unconditionally honor sync flag for
energy-aware wakeups

Since we don't do energy-aware wakeups when we are overutilized,
unconditionally honoring the sync flag in the energy-aware path does
not prevent the wake_wide() mechanics from overruling the flag as
normal while the system is overutilized.
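
For illustration, the behaviour added here boils down to the sketch
below. It is condensed from the diff; the sync derivation shown is the
one select_task_rq_fair() already uses upstream and is quoted only for
context, it is not changed by this patch:

	/*
	 * A sync wakeup hints that the waker is about to sleep, so its
	 * CPU is expected to have room for the wakee almost immediately.
	 */
	int sync = (wake_flags & WF_SYNC) && !(current->flags & PF_EXITING);

	/*
	 * In find_energy_efficient_cpu(), once we know the system is not
	 * overutilized: if the waker's CPU runs nothing but the waker and
	 * is allowed for p, place p there and skip the perf-domain scan.
	 */
	cpu = smp_processor_id();
	if (sync && cpu_rq(cpu)->nr_running == 1 &&
	    cpumask_test_cpu(cpu, p->cpus_ptr)) {
		rcu_read_unlock();
		return cpu;
	}

The nr_running == 1 check means the shortcut is only taken when the
waker is the sole runnable task on its CPU, so honoring the hint can
never stack the wakee behind unrelated work.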
This patch is based upon previous work to build EAS for Android
products.

The sync-hint code is taken from commit 4a5e890ec60d ("sched/fair:
add tunable to force selection at cpu granularity"), written by
Juri Lelli <juri.lelli@arm.com>.
[CPNOTE: 29/06/21] Lee: Binder related - may regress upstream workloads
Bug: 120440300
Change-Id: I4b3d79141fc8e53dc51cd63ac11096c2e3cb10f5
Signed-off-by: Chris Redpath <chris.redpath@arm.com>
(cherry picked from commit f1ec666a62dec1083ed52fe1ddef093b84373aaf)
[ Moved the feature to find_energy_efficient_cpu() and removed the
sysctl knob ]
Signed-off-by: Quentin Perret <quentin.perret@arm.com>
---
kernel/sched/fair.c | 11 +++++++++--
1 file changed, 9 insertions(+), 2 deletions(-)
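
[ Illustration, not part of the patch: the fast path is only reachable
  from wakers that issue sync wakeups. A minimal, hypothetical waker is
  sketched below; `dev' and its waitqueue are invented for this sketch.
  wake_up_interruptible_sync() expands to __wake_up_sync(), which
  passes WF_SYNC down to try_to_wake_up(). ]

	/*
	 * Waker side: signal that we are about to sleep, e.g. a
	 * binder-style transaction handing off to its reply thread.
	 */
	wake_up_interruptible_sync(&dev->read_wait);	/* sets WF_SYNC */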
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6747,7 +6747,7 @@ compute_energy(struct task_struct *p, int dst_cpu, struct perf_domain *pd)
  * other use-cases too. So, until someone finds a better way to solve this,
  * let's keep things simple by re-using the existing slow path.
  */
-static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
+static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sync)
 {
 	unsigned long prev_delta = ULONG_MAX, best_delta = ULONG_MAX;
 	struct root_domain *rd = cpu_rq(smp_processor_id())->rd;
@@ -6761,6 +6761,13 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu)
 	if (!pd || READ_ONCE(rd->overutilized))
 		goto unlock;
 
+	cpu = smp_processor_id();
+	if (sync && cpu_rq(cpu)->nr_running == 1 &&
+	    cpumask_test_cpu(cpu, p->cpus_ptr)) {
+		rcu_read_unlock();
+		return cpu;
+	}
+
 	/*
 	 * Energy-aware wake-up happens on the lowest sched_domain starting
 	 * from sd_asym_cpucapacity spanning over this_cpu and prev_cpu.
@@ -6891,7 +6898,7 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
 	record_wakee(p);
 
 	if (sched_energy_enabled()) {
-		new_cpu = find_energy_efficient_cpu(p, prev_cpu);
+		new_cpu = find_energy_efficient_cpu(p, prev_cpu, sync);
 		if (new_cpu >= 0)
 			return new_cpu;
 		new_cpu = prev_cpu;