From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Quentin Perret <qperret@google.com>
Date: Tue, 24 Nov 2020 15:33:49 +0000
Subject: NOUPSTREAM: ANDROID: sched: Track wake_q length

Some partners have value-adds based on aosp/540066, which cannot be
carried in ACK in its entirety as it no longer makes sense as-is (the
select_idle_capacity() rework upstream solved the issue differently).
It seems those partners do not actually need the wake-wide tweaks;
they only need access to the wake_q length for wake-up balancing.

To support this, add minimal tracking to the wake_q infrastructure in
the core kernel: record the current wake_q length in a wake_q_count
field in task_struct directly, rather than littering all sched classes
with an additional sibling_count_hint argument to the select_task_rq
callbacks.

Modules needing the wake_q length can read p->wake_q_count in the
wake-up path; it is non-zero only while the task is being woken via
wake_up_q().
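For illustration only (not part of this patch), a vendor hook sitting
in the wake-up path could consume the count along these lines; the
helper name and the threshold are hypothetical, and only the
wake_q_count field itself is what this patch provides:

	#include <linux/sched.h>

	/*
	 * Sketch of a vendor-side consumer. Only p->wake_q_count comes
	 * from this patch; the helper and the "> 1" policy are made up.
	 */
	static inline bool bulk_wakeup_hint(struct task_struct *p)
	{
		/* Non-zero only while p is being woken from wake_up_q(). */
		return p->wake_q_count > 1;
	}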
[CPNOTE: 30/06/21] Lee: Vendor specific reference counting
Bug: 173981591
Signed-off-by: Quentin Perret <qperret@google.com>
Change-Id: I9a98167face92e70aba847d9f04d0c216065478c
---
include/linux/sched.h | 1 +
include/linux/sched/wake_q.h | 2 ++
kernel/sched/core.c | 3 +++
3 files changed, 6 insertions(+)

diff --git a/include/linux/sched.h b/include/linux/sched.h
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1109,6 +1109,7 @@ struct task_struct {
raw_spinlock_t pi_lock;

struct wake_q_node wake_q;
+ int wake_q_count;

#ifdef CONFIG_RT_MUTEXES
/* PI waiters blocked on a rt_mutex held by this task: */
diff --git a/include/linux/sched/wake_q.h b/include/linux/sched/wake_q.h
--- a/include/linux/sched/wake_q.h
+++ b/include/linux/sched/wake_q.h
@@ -38,6 +38,7 @@
struct wake_q_head {
struct wake_q_node *first;
struct wake_q_node **lastp;
+ int count;
};

#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
@@ -52,6 +53,7 @@ static inline void wake_q_init(struct wake_q_head *head)
{
head->first = WAKE_Q_TAIL;
head->lastp = &head->first;
+ head->count = 0;
}

static inline bool wake_q_empty(struct wake_q_head *head)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -890,6 +890,7 @@ static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
*/
*head->lastp = node;
head->lastp = &node->next;
+ head->count++;
return true;
}

@@ -945,12 +946,14 @@ void wake_up_q(struct wake_q_head *head)
/* Task can safely be re-inserted now: */
node = node->next;
task->wake_q.next = NULL;
+ task->wake_q_count = head->count;
/*
* wake_up_process() executes a full barrier, which pairs with
* the queueing in wake_q_add() so as not to miss wakeups.
*/
wake_up_process(task);
+ task->wake_q_count = 0;
put_task_struct(task);
}
}