aosp/android-mainline: update series

up to a9715862824a4 ("ANDROID: dm-user: Remove bio recount in I/O path")

Refresh the dm-user patch to drop the extra bio_get()/bio_put()
reference counting from the I/O path, extend the scheduler vendor-hooks
patch with the newly added restricted hooks and their exports, and
rebase context in the drivers-base and core-kernel export patches.
Series status is reset to Untested.

Signed-off-by: Lee Jones <joneslee@google.com>
Change-Id: Ic13281c72f9b2db51c4f17626e14a3253c700398
diff --git a/android-mainline/ANDROID-dm-dm-user-New-target-that-proxies-BIOs-to-userspace.patch b/android-mainline/ANDROID-dm-dm-user-New-target-that-proxies-BIOs-to-userspace.patch
index dea7d11..a068df3 100644
--- a/android-mainline/ANDROID-dm-dm-user-New-target-that-proxies-BIOs-to-userspace.patch
+++ b/android-mainline/ANDROID-dm-dm-user-New-target-that-proxies-BIOs-to-userspace.patch
@@ -27,9 +27,9 @@
 ---
  drivers/md/Kconfig           |   14 +
  drivers/md/Makefile          |    1 +
- drivers/md/dm-user.c         | 1289 ++++++++++++++++++++++++++++++++++
+ drivers/md/dm-user.c         | 1286 ++++++++++++++++++++++++++++++++++
  include/uapi/linux/dm-user.h |   68 ++
- 4 files changed, 1372 insertions(+)
+ 4 files changed, 1369 insertions(+)
  create mode 100644 drivers/md/dm-user.c
  create mode 100644 include/uapi/linux/dm-user.h
 
@@ -70,7 +70,7 @@
 new file mode 100644
 --- /dev/null
 +++ b/drivers/md/dm-user.c
-@@ -0,0 +1,1289 @@
+@@ -0,0 +1,1286 @@
 +// SPDX-License-Identifier: GPL-2.0+
 +/*
 + * Copyright (C) 2020 Google, Inc
@@ -261,7 +261,6 @@
 +{
 +	m->bio->bi_status = BLK_STS_IOERR;
 +	bio_endio(m->bio);
-+	bio_put(m->bio);
 +	mempool_free(m, pool);
 +}
 +
@@ -1058,7 +1057,6 @@
 +	 */
 +	WARN_ON(bio_size(c->cur_from_user->bio) != 0);
 +	bio_endio(c->cur_from_user->bio);
-+	bio_put(c->cur_from_user->bio);
 +
 +	/*
 +	 * We don't actually need to take the target lock here, as all
@@ -1296,7 +1294,6 @@
 +		return DM_MAPIO_REQUEUE;
 +	}
 +
-+	bio_get(bio);
 +	entry->msg.type = bio_type_to_user_type(bio);
 +	entry->msg.flags = bio_flags_to_user_flags(bio);
 +	entry->msg.sector = bio->bi_iter.bi_sector;
diff --git a/android-mainline/NOUPSTREAM-ANDROID-Add-vendor-hooks-to-the-scheduler.patch b/android-mainline/NOUPSTREAM-ANDROID-Add-vendor-hooks-to-the-scheduler.patch
index 889b780..cffa1fe 100644
--- a/android-mainline/NOUPSTREAM-ANDROID-Add-vendor-hooks-to-the-scheduler.patch
+++ b/android-mainline/NOUPSTREAM-ANDROID-Add-vendor-hooks-to-the-scheduler.patch
@@ -80,22 +80,23 @@
 Signed-off-by: Lee Jones <lee.jones@linaro.org>
 Signed-off-by: Lee Jones <joneslee@google.com>
 ---
- drivers/android/vendor_hooks.c     |  44 +++++
- drivers/base/arch_topology.c       |   4 +
+ drivers/android/vendor_hooks.c     |  44 ++++
+ drivers/base/arch_topology.c       |   9 +-
  drivers/cpufreq/cpufreq.c          |   8 +
  include/linux/sched.h              |   3 +
- include/trace/hooks/sched.h        | 247 +++++++++++++++++++++++++++++
- include/trace/hooks/vendor_hooks.h |   9 ++
+ include/trace/hooks/sched.h        | 316 +++++++++++++++++++++++++++++
+ include/trace/hooks/vendor_hooks.h |   9 +
  init/init_task.c                   |   2 +
  kernel/fork.c                      |   6 +
  kernel/sched/Makefile              |   1 +
- kernel/sched/core.c                |  89 ++++++++++-
- kernel/sched/fair.c                | 115 ++++++++++++--
+ kernel/sched/core.c                | 108 +++++++++-
+ kernel/sched/cputime.c             |  11 +-
+ kernel/sched/fair.c                | 122 ++++++++++-
  kernel/sched/rt.c                  |  35 +++-
  kernel/sched/sched.h               |   3 +
- kernel/sched/topology.c            |  11 +-
- kernel/sched/vendor_hooks.c        |  67 ++++++++
- 15 files changed, 624 insertions(+), 20 deletions(-)
+ kernel/sched/topology.c            |  12 +-
+ kernel/sched/vendor_hooks.c        |  84 ++++++++
+ 16 files changed, 748 insertions(+), 25 deletions(-)
  create mode 100644 include/trace/hooks/sched.h
  create mode 100644 kernel/sched/vendor_hooks.c
 
@@ -174,7 +175,20 @@
  static DEFINE_PER_CPU(struct scale_freq_data __rcu *, sft_data);
  static struct cpumask scale_freq_counters_mask;
  static bool scale_freq_invariant;
-@@ -254,6 +257,7 @@ static void update_topology_flags_workfn(struct work_struct *work)
+@@ -201,8 +204,11 @@ void topology_update_thermal_pressure(const struct cpumask *cpus,
+ 
+ 	trace_thermal_pressure_update(cpu, th_pressure);
+ 
+-	for_each_cpu(cpu, cpus)
++	for_each_cpu(cpu, cpus) {
+ 		WRITE_ONCE(per_cpu(thermal_pressure, cpu), th_pressure);
++		trace_android_rvh_update_thermal_stats(cpu);
++	}
++
+ }
+ EXPORT_SYMBOL_GPL(topology_update_thermal_pressure);
+ 
+@@ -254,6 +260,7 @@ static void update_topology_flags_workfn(struct work_struct *work)
  {
  	update_topology = 1;
  	rebuild_sched_domains();
@@ -224,7 +238,7 @@
 new file mode 100644
 --- /dev/null
 +++ b/include/trace/hooks/sched.h
-@@ -0,0 +1,247 @@
+@@ -0,0 +1,316 @@
 +/* SPDX-License-Identifier: GPL-2.0 */
 +#undef TRACE_SYSTEM
 +#define TRACE_SYSTEM sched
@@ -352,6 +366,71 @@
 +	TP_PROTO(struct task_struct *p, struct cpumask *in_mask),
 +	TP_ARGS(p, in_mask), 1);
 +
++DECLARE_RESTRICTED_HOOK(android_rvh_set_task_cpu,
++	TP_PROTO(struct task_struct *p, unsigned int new_cpu),
++	TP_ARGS(p, new_cpu), 1);
++
++DECLARE_RESTRICTED_HOOK(android_rvh_try_to_wake_up,
++	TP_PROTO(struct task_struct *p),
++	TP_ARGS(p), 1);
++
++DECLARE_RESTRICTED_HOOK(android_rvh_try_to_wake_up_success,
++	TP_PROTO(struct task_struct *p),
++	TP_ARGS(p), 1);
++
++DECLARE_RESTRICTED_HOOK(android_rvh_sched_fork,
++	TP_PROTO(struct task_struct *p),
++	TP_ARGS(p), 1);
++
++DECLARE_RESTRICTED_HOOK(android_rvh_wake_up_new_task,
++	TP_PROTO(struct task_struct *p),
++	TP_ARGS(p), 1);
++
++DECLARE_RESTRICTED_HOOK(android_rvh_new_task_stats,
++	TP_PROTO(struct task_struct *p),
++	TP_ARGS(p), 1);
++
++DECLARE_RESTRICTED_HOOK(android_rvh_flush_task,
++	TP_PROTO(struct task_struct *prev),
++	TP_ARGS(prev), 1);
++
++DECLARE_RESTRICTED_HOOK(android_rvh_tick_entry,
++	TP_PROTO(struct rq *rq),
++	TP_ARGS(rq), 1);
++
++DECLARE_RESTRICTED_HOOK(android_rvh_schedule,
++	TP_PROTO(struct task_struct *prev, struct task_struct *next, struct rq *rq),
++	TP_ARGS(prev, next, rq), 1);
++
++DECLARE_RESTRICTED_HOOK(android_rvh_sched_cpu_starting,
++	TP_PROTO(int cpu),
++	TP_ARGS(cpu), 1);
++
++DECLARE_RESTRICTED_HOOK(android_rvh_sched_cpu_dying,
++	TP_PROTO(int cpu),
++	TP_ARGS(cpu), 1);
++
++DECLARE_RESTRICTED_HOOK(android_rvh_account_irq,
++	TP_PROTO(struct task_struct *curr, int cpu, s64 delta, bool start),
++	TP_ARGS(curr, cpu, delta, start), 1);
++
++struct sched_entity;
++DECLARE_RESTRICTED_HOOK(android_rvh_place_entity,
++	TP_PROTO(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial, u64 *vruntime),
++	TP_ARGS(cfs_rq, se, initial, vruntime), 1);
++
++DECLARE_RESTRICTED_HOOK(android_rvh_build_perf_domains,
++	TP_PROTO(bool *eas_check),
++	TP_ARGS(eas_check), 1);
++
++DECLARE_RESTRICTED_HOOK(android_rvh_update_cpu_capacity,
++	TP_PROTO(int cpu, unsigned long *capacity),
++	TP_ARGS(cpu, capacity), 1);
++
++DECLARE_RESTRICTED_HOOK(android_rvh_update_misfit_status,
++	TP_PROTO(struct task_struct *p, struct rq *rq, bool *need_update),
++	TP_ARGS(p, rq, need_update), 1);
++
 +DECLARE_RESTRICTED_HOOK(android_rvh_sched_fork_init,
 +	TP_PROTO(struct task_struct *p),
 +	TP_ARGS(p), 1);
@@ -467,6 +546,10 @@
 +	TP_PROTO(void *unused),
 +	TP_ARGS(unused));
 +
++DECLARE_RESTRICTED_HOOK(android_rvh_update_thermal_stats,
++		TP_PROTO(int cpu),
++		TP_ARGS(cpu), 1);
++
 +/* macro versions of hooks are no longer required */
 +
 +#endif /* _TRACE_HOOK_SCHED_H */
@@ -684,7 +767,15 @@
  	if (dest_cpu >= nr_cpu_ids) {
  		ret = -EINVAL;
  		goto out;
-@@ -3409,7 +3451,11 @@ int select_fallback_rq(int cpu, struct task_struct *p)
+@@ -3127,6 +3169,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
+ 		p->se.nr_migrations++;
+ 		rseq_migrate(p);
+ 		perf_event_task_migrate(p);
++		trace_android_rvh_set_task_cpu(p, new_cpu);
+ 	}
+ 
+ 	__set_task_cpu(p, new_cpu);
+@@ -3409,7 +3452,11 @@ int select_fallback_rq(int cpu, struct task_struct *p)
  	int nid = cpu_to_node(cpu);
  	const struct cpumask *nodemask = NULL;
  	enum { cpuset, possible, fail } state = cpuset;
@@ -697,7 +788,7 @@
  
  	/*
  	 * If the node that the CPU is on has been offlined, cpu_to_node()
-@@ -3846,7 +3892,12 @@ static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
+@@ -3846,7 +3893,12 @@ static inline bool ttwu_queue_cond(struct task_struct *p, int cpu)
  
  static bool ttwu_queue_wakelist(struct task_struct *p, int cpu, int wake_flags)
  {
@@ -711,7 +802,28 @@
  		sched_clock_cpu(cpu); /* Sync clocks across CPUs */
  		__ttwu_queue_wakelist(p, cpu, wake_flags);
  		return true;
-@@ -4347,6 +4398,8 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
+@@ -4185,6 +4237,8 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+ 	 */
+ 	smp_cond_load_acquire(&p->on_cpu, !VAL);
+ 
++	trace_android_rvh_try_to_wake_up(p);
++
+ 	cpu = select_task_rq(p, p->wake_cpu, wake_flags | WF_TTWU);
+ 	if (task_cpu(p) != cpu) {
+ 		if (p->in_iowait) {
+@@ -4204,8 +4258,10 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
+ unlock:
+ 	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+ out:
+-	if (success)
++	if (success) {
++		trace_android_rvh_try_to_wake_up_success(p);
+ 		ttwu_stat(p, task_cpu(p), wake_flags);
++	}
+ 	preempt_enable();
+ 
+ 	return success;
+@@ -4347,6 +4403,8 @@ static void __sched_fork(unsigned long clone_flags, struct task_struct *p)
  	p->se.cfs_rq			= NULL;
  #endif
  
@@ -720,7 +832,16 @@
  #ifdef CONFIG_SCHEDSTATS
  	/* Even if schedstat is disabled, there should not be garbage */
  	memset(&p->stats, 0, sizeof(p->stats));
-@@ -4566,6 +4619,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
+@@ -4554,6 +4612,8 @@ late_initcall(sched_core_sysctl_init);
+  */
+ int sched_fork(unsigned long clone_flags, struct task_struct *p)
+ {
++	trace_android_rvh_sched_fork(p);
++
+ 	__sched_fork(clone_flags, p);
+ 	/*
+ 	 * We mark the process as NEW here. This guarantees that
+@@ -4566,6 +4626,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
  	 * Make sure we do not leak PI boosting priority to the child.
  	 */
  	p->prio = current->normal_prio;
@@ -728,7 +849,7 @@
  
  	uclamp_fork(p);
  
-@@ -4598,6 +4652,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
+@@ -4598,6 +4659,7 @@ int sched_fork(unsigned long clone_flags, struct task_struct *p)
  		p->sched_class = &fair_sched_class;
  
  	init_entity_runnable_average(&p->se);
@@ -736,7 +857,33 @@
  
  
  #ifdef CONFIG_SCHED_INFO
-@@ -5314,6 +5369,11 @@ void sched_exec(void)
+@@ -4677,6 +4739,8 @@ void wake_up_new_task(struct task_struct *p)
+ 	struct rq_flags rf;
+ 	struct rq *rq;
+ 
++	trace_android_rvh_wake_up_new_task(p);
++
+ 	raw_spin_lock_irqsave(&p->pi_lock, rf.flags);
+ 	WRITE_ONCE(p->__state, TASK_RUNNING);
+ #ifdef CONFIG_SMP
+@@ -4695,6 +4759,7 @@ void wake_up_new_task(struct task_struct *p)
+ 	rq = __task_rq_lock(p, &rf);
+ 	update_rq_clock(rq);
+ 	post_init_entity_util_avg(p);
++	trace_android_rvh_new_task_stats(p);
+ 
+ 	activate_task(rq, p, ENQUEUE_NOCLOCK);
+ 	trace_sched_wakeup_new(p);
+@@ -5109,6 +5174,8 @@ static struct rq *finish_task_switch(struct task_struct *prev)
+ 		if (prev->sched_class->task_dead)
+ 			prev->sched_class->task_dead(prev);
+ 
++		trace_android_rvh_flush_task(prev);
++
+ 		/* Task is done with its stack. */
+ 		put_task_stack(prev);
+ 
+@@ -5314,6 +5381,11 @@ void sched_exec(void)
  	struct task_struct *p = current;
  	unsigned long flags;
  	int dest_cpu;
@@ -748,15 +895,16 @@
  
  	raw_spin_lock_irqsave(&p->pi_lock, flags);
  	dest_cpu = p->sched_class->select_task_rq(p, task_cpu(p), WF_EXEC);
-@@ -5468,6 +5528,7 @@ void scheduler_tick(void)
+@@ -5468,6 +5540,8 @@ void scheduler_tick(void)
  	rq_lock(rq, &rf);
  
  	update_rq_clock(rq);
++	trace_android_rvh_tick_entry(rq);
 +
  	thermal_pressure = arch_scale_thermal_pressure(cpu_of(rq));
  	update_thermal_load_avg(rq_clock_thermal(rq), rq, thermal_pressure);
  	curr->sched_class->task_tick(rq, curr, 0);
-@@ -5487,6 +5548,8 @@ void scheduler_tick(void)
+@@ -5487,6 +5561,8 @@ void scheduler_tick(void)
  	rq->idle_balance = idle_cpu(cpu);
  	trigger_load_balance(rq);
  #endif
@@ -765,7 +913,7 @@
  }
  
  #ifdef CONFIG_NO_HZ_FULL
-@@ -5743,6 +5806,8 @@ static noinline void __schedule_bug(struct task_struct *prev)
+@@ -5743,6 +5819,8 @@ static noinline void __schedule_bug(struct task_struct *prev)
  	if (panic_on_warn)
  		panic("scheduling while atomic\n");
  
@@ -774,7 +922,15 @@
  	dump_stack();
  	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
  }
-@@ -6887,6 +6952,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
+@@ -6482,6 +6560,7 @@ static void __sched notrace __schedule(unsigned int sched_mode)
+ 	rq->last_seen_need_resched_ns = 0;
+ #endif
+ 
++	trace_android_rvh_schedule(prev, next, rq);
+ 	if (likely(prev != next)) {
+ 		rq->nr_switches++;
+ 		/*
+@@ -6887,6 +6966,7 @@ void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task)
  	struct rq_flags rf;
  	struct rq *rq;
  
@@ -782,7 +938,7 @@
  	/* XXX used to be waiter->prio, not waiter->task->prio */
  	prio = __rt_effective_prio(pi_task, p->normal_prio);
  
-@@ -7005,12 +7071,13 @@ static inline int rt_effective_prio(struct task_struct *p, int prio)
+@@ -7005,12 +7085,13 @@ static inline int rt_effective_prio(struct task_struct *p, int prio)
  
  void set_user_nice(struct task_struct *p, long nice)
  {
@@ -798,7 +954,7 @@
  		return;
  	/*
  	 * We have to be careful, if called from sys_setpriority(),
-@@ -8163,6 +8230,8 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
+@@ -8163,6 +8244,8 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
  		goto out_put_task;
  
  	retval = __sched_setaffinity(p, in_mask);
@@ -807,7 +963,7 @@
  out_put_task:
  	put_task_struct(p);
  	return retval;
-@@ -8222,6 +8291,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
+@@ -8222,6 +8305,7 @@ long sched_getaffinity(pid_t pid, struct cpumask *mask)
  
  	raw_spin_lock_irqsave(&p->pi_lock, flags);
  	cpumask_and(mask, &p->cpus_mask, cpu_active_mask);
@@ -815,7 +971,7 @@
  	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
  
  out_unlock:
-@@ -8277,6 +8347,8 @@ static void do_sched_yield(void)
+@@ -8277,6 +8361,8 @@ static void do_sched_yield(void)
  	schedstat_inc(rq->yld_count);
  	current->sched_class->yield_task(rq);
  
@@ -824,7 +980,24 @@
  	preempt_disable();
  	rq_unlock_irq(rq, &rf);
  	sched_preempt_enable_no_resched();
-@@ -9902,6 +9974,8 @@ void __might_resched(const char *file, int line, unsigned int offsets)
+@@ -9486,6 +9572,7 @@ int sched_cpu_starting(unsigned int cpu)
+ 	sched_core_cpu_starting(cpu);
+ 	sched_rq_cpu_starting(cpu);
+ 	sched_tick_start(cpu);
++	trace_android_rvh_sched_cpu_starting(cpu);
+ 	return 0;
+ }
+ 
+@@ -9559,6 +9646,8 @@ int sched_cpu_dying(unsigned int cpu)
+ 	}
+ 	rq_unlock_irqrestore(rq, &rf);
+ 
++	trace_android_rvh_sched_cpu_dying(cpu);
++
+ 	calc_load_migrate(rq);
+ 	update_max_interval();
+ 	hrtick_clear(rq);
+@@ -9902,6 +9991,8 @@ void __might_resched(const char *file, int line, unsigned int offsets)
  	print_preempt_disable_ip(offsets & MIGHT_RESCHED_PREEMPT_MASK,
  				 preempt_disable_ip);
  
@@ -833,7 +1006,7 @@
  	dump_stack();
  	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
  }
-@@ -10281,6 +10355,7 @@ static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
+@@ -10281,6 +10372,7 @@ static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
  	mutex_unlock(&uclamp_mutex);
  #endif
  
@@ -841,7 +1014,7 @@
  	return 0;
  }
  
-@@ -10322,6 +10397,8 @@ static void cpu_cgroup_attach(struct cgroup_taskset *tset)
+@@ -10322,6 +10414,8 @@ static void cpu_cgroup_attach(struct cgroup_taskset *tset)
  
  	cgroup_taskset_for_each(task, css, tset)
  		sched_move_task(task);
@@ -850,6 +1023,43 @@
  }
  
  #ifdef CONFIG_UCLAMP_TASK_GROUP
+diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
+--- a/kernel/sched/cputime.c
++++ b/kernel/sched/cputime.c
+@@ -3,6 +3,7 @@
+  * Simple CPU accounting cgroup controller
+  */
+ #include <linux/cpufreq_times.h>
++#include <trace/hooks/sched.h>
+ 
+ #ifdef CONFIG_IRQ_TIME_ACCOUNTING
+ 
+@@ -53,6 +54,7 @@ void irqtime_account_irq(struct task_struct *curr, unsigned int offset)
+ 	unsigned int pc;
+ 	s64 delta;
+ 	int cpu;
++	bool irq_start = true;
+ 
+ 	if (!sched_clock_irqtime)
+ 		return;
+@@ -68,10 +70,15 @@ void irqtime_account_irq(struct task_struct *curr, unsigned int offset)
+ 	 * in that case, so as not to confuse scheduler with a special task
+ 	 * that do not consume any time, but still wants to run.
+ 	 */
+-	if (pc & HARDIRQ_MASK)
++	if (pc & HARDIRQ_MASK) {
+ 		irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);
+-	else if ((pc & SOFTIRQ_OFFSET) && curr != this_cpu_ksoftirqd())
++		irq_start = false;
++	} else if ((pc & SOFTIRQ_OFFSET) && curr != this_cpu_ksoftirqd()) {
+ 		irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);
++		irq_start = false;
++	}
++
++	trace_android_rvh_account_irq(curr, cpu, delta, irq_start);
+ }
+ 
+ static u64 irqtime_tick_accounted(u64 maxtime)
 diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
 --- a/kernel/sched/fair.c
 +++ b/kernel/sched/fair.c
@@ -888,7 +1098,27 @@
  
  	if (!sched_feat(UTIL_EST))
  		return;
-@@ -4752,9 +4761,14 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+@@ -4434,7 +4443,10 @@ static inline int task_fits_capacity(struct task_struct *p,
+ 
+ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
+ {
+-	if (!sched_asym_cpucap_active())
++	bool need_update = true;
++
++	trace_android_rvh_update_misfit_status(p, rq, &need_update);
++	if (!sched_asym_cpucap_active() || !need_update)
+ 		return;
+ 
+ 	if (!p || p->nr_cpus_allowed == 1) {
+@@ -4544,6 +4556,7 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
+ 
+ 	/* ensure we never gain time by being placed backwards. */
+ 	se->vruntime = max_vruntime(se->vruntime, vruntime);
++	trace_android_rvh_place_entity(cfs_rq, se, initial, &vruntime);
+ }
+ 
+ static void check_enqueue_throttle(struct cfs_rq *cfs_rq);
+@@ -4752,9 +4765,14 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
  	unsigned long ideal_runtime, delta_exec;
  	struct sched_entity *se;
  	s64 delta;
@@ -903,7 +1133,7 @@
  	if (delta_exec > ideal_runtime) {
  		resched_curr(rq_of(cfs_rq));
  		/*
-@@ -4783,8 +4797,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+@@ -4783,8 +4801,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
  		resched_curr(rq_of(cfs_rq));
  }
  
@@ -913,7 +1143,7 @@
  {
  	clear_buddies(cfs_rq, se);
  
-@@ -4835,7 +4848,11 @@ static struct sched_entity *
+@@ -4835,7 +4852,11 @@ static struct sched_entity *
  pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
  {
  	struct sched_entity *left = __pick_first_entity(cfs_rq);
@@ -926,7 +1156,7 @@
  
  	/*
  	 * If curr is set we have to see if its left of the leftmost entity
-@@ -4877,6 +4894,7 @@ pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+@@ -4877,6 +4898,7 @@ pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr)
  		se = cfs_rq->last;
  	}
  
@@ -934,7 +1164,7 @@
  	return se;
  }
  
-@@ -4939,6 +4957,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
+@@ -4939,6 +4961,7 @@ entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued)
  
  	if (cfs_rq->nr_running > 1)
  		check_preempt_tick(cfs_rq, curr);
@@ -942,7 +1172,7 @@
  }
  
  
-@@ -5862,6 +5881,12 @@ static inline void hrtick_update(struct rq *rq)
+@@ -5862,6 +5885,12 @@ static inline void hrtick_update(struct rq *rq)
  #ifdef CONFIG_SMP
  static inline bool cpu_overutilized(int cpu)
  {
@@ -955,7 +1185,7 @@
  	return !fits_capacity(cpu_util_cfs(cpu), capacity_of(cpu));
  }
  
-@@ -5949,6 +5974,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+@@ -5949,6 +5978,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
  		flags = ENQUEUE_WAKEUP;
  	}
  
@@ -963,7 +1193,7 @@
  	for_each_sched_entity(se) {
  		cfs_rq = cfs_rq_of(se);
  
-@@ -6039,6 +6065,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
+@@ -6039,6 +6069,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
  		flags |= DEQUEUE_SLEEP;
  	}
  
@@ -971,7 +1201,7 @@
  	for_each_sched_entity(se) {
  		cfs_rq = cfs_rq_of(se);
  
-@@ -7050,6 +7077,8 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sy
+@@ -7050,6 +7081,8 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sy
  	struct perf_domain *pd;
  	struct energy_env eenv;
  
@@ -980,7 +1210,7 @@
  	rcu_read_lock();
  	pd = rcu_dereference(rd->pd);
  	if (!pd || READ_ONCE(rd->overutilized))
-@@ -7075,7 +7104,6 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sy
+@@ -7075,7 +7108,6 @@ static int find_energy_efficient_cpu(struct task_struct *p, int prev_cpu, int sy
  
  	target = prev_cpu;
  
@@ -988,7 +1218,7 @@
  	if (!task_util_est(p))
  		goto unlock;
  
-@@ -7202,9 +7230,18 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
+@@ -7202,9 +7234,18 @@ select_task_rq_fair(struct task_struct *p, int prev_cpu, int wake_flags)
  	int cpu = smp_processor_id();
  	int new_cpu = prev_cpu;
  	int want_affine = 0;
@@ -1007,7 +1237,7 @@
  	/*
  	 * required for stable ->cpus_allowed
  	 */
-@@ -7409,9 +7446,14 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -7409,9 +7450,14 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
  	int scale = cfs_rq->nr_running >= sched_nr_latency;
  	int next_buddy_marked = 0;
  	int cse_is_idle, pse_is_idle;
@@ -1022,7 +1252,7 @@
  
  	/*
  	 * This is possible from callers such as attach_tasks(), in which we
-@@ -7468,6 +7510,13 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
+@@ -7468,6 +7514,13 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int wake_
  		return;
  
  	update_curr(cfs_rq_of(se));
@@ -1036,7 +1266,7 @@
  	if (wakeup_preempt_entity(se, pse) == 1) {
  		/*
  		 * Bias pick_next to pick the sched entity that is
-@@ -7535,9 +7584,10 @@ struct task_struct *
+@@ -7535,9 +7588,10 @@ struct task_struct *
  pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
  {
  	struct cfs_rq *cfs_rq = &rq->cfs;
@@ -1049,7 +1279,7 @@
  
  again:
  	if (!sched_fair_runnable(rq))
-@@ -7591,7 +7641,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
+@@ -7591,7 +7645,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
  	} while (cfs_rq);
  
  	p = task_of(se);
@@ -1058,7 +1288,7 @@
  	/*
  	 * Since we haven't yet done put_prev_entity and if the selected task
  	 * is a different task than we started out with, try and touch the
-@@ -7624,6 +7674,10 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
+@@ -7624,6 +7678,10 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf
  	if (prev)
  		put_prev_task(rq, prev);
  
@@ -1069,7 +1299,7 @@
  	do {
  		se = pick_next_entity(cfs_rq, NULL);
  		set_next_entity(cfs_rq, se);
-@@ -7945,6 +7999,7 @@ struct lb_env {
+@@ -7945,6 +8003,7 @@ struct lb_env {
  	enum fbq_type		fbq_type;
  	enum migration_type	migration_type;
  	struct list_head	tasks;
@@ -1077,7 +1307,7 @@
  };
  
  /*
-@@ -8059,9 +8114,14 @@ static
+@@ -8059,9 +8118,14 @@ static
  int can_migrate_task(struct task_struct *p, struct lb_env *env)
  {
  	int tsk_cache_hot;
@@ -1092,7 +1322,7 @@
  	/*
  	 * We do not migrate tasks that are:
  	 * 1) throttled_lb_pair, or
-@@ -8149,8 +8209,20 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
+@@ -8149,8 +8213,20 @@ int can_migrate_task(struct task_struct *p, struct lb_env *env)
   */
  static void detach_task(struct task_struct *p, struct lb_env *env)
  {
@@ -1113,7 +1343,15 @@
  	deactivate_task(env->src_rq, p, DEQUEUE_NOCLOCK);
  	set_task_cpu(p, env->dst_cpu);
  }
-@@ -9908,8 +9980,12 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
+@@ -8681,6 +8757,7 @@ static void update_cpu_capacity(struct sched_domain *sd, int cpu)
+ 	if (!capacity)
+ 		capacity = 1;
+ 
++	trace_android_rvh_update_cpu_capacity(cpu, &capacity);
+ 	cpu_rq(cpu)->cpu_capacity = capacity;
+ 	trace_sched_cpu_capacity_tp(cpu_rq(cpu));
+ 
+@@ -9908,8 +9985,12 @@ static struct sched_group *find_busiest_group(struct lb_env *env)
  
  	if (sched_energy_enabled()) {
  		struct root_domain *rd = env->dst_rq->rd;
@@ -1127,7 +1365,7 @@
  			goto out_balanced;
  	}
  
-@@ -10028,7 +10104,12 @@ static struct rq *find_busiest_queue(struct lb_env *env,
+@@ -10028,7 +10109,12 @@ static struct rq *find_busiest_queue(struct lb_env *env,
  	struct rq *busiest = NULL, *rq;
  	unsigned long busiest_util = 0, busiest_load = 0, busiest_capacity = 1;
  	unsigned int busiest_nr = 0;
@@ -1141,7 +1379,7 @@
  
  	for_each_cpu_and(i, sched_group_span(group), env->cpus) {
  		unsigned long capacity, load, util;
-@@ -10330,6 +10411,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
+@@ -10330,6 +10416,7 @@ static int load_balance(int this_cpu, struct rq *this_rq,
  
  more_balance:
  		rq_lock_irqsave(busiest, &rf);
@@ -1149,7 +1387,7 @@
  		update_rq_clock(busiest);
  
  		/*
-@@ -10625,6 +10707,7 @@ static int active_load_balance_cpu_stop(void *data)
+@@ -10625,6 +10712,7 @@ static int active_load_balance_cpu_stop(void *data)
  			.src_rq		= busiest_rq,
  			.idle		= CPU_IDLE,
  			.flags		= LBF_ACTIVE_LB,
@@ -1157,7 +1395,7 @@
  		};
  
  		schedstat_inc(sd->alb_count);
-@@ -10706,6 +10789,10 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
+@@ -10706,6 +10794,10 @@ static void rebalance_domains(struct rq *rq, enum cpu_idle_type idle)
  	int need_serialize, need_decay = 0;
  	u64 max_cost = 0;
  
@@ -1168,7 +1406,7 @@
  	rcu_read_lock();
  	for_each_domain(cpu, sd) {
  		/*
-@@ -10856,6 +10943,7 @@ static void nohz_balancer_kick(struct rq *rq)
+@@ -10856,6 +10948,7 @@ static void nohz_balancer_kick(struct rq *rq)
  	struct sched_domain *sd;
  	int nr_busy, i, cpu = rq->cpu;
  	unsigned int flags = 0;
@@ -1176,7 +1414,7 @@
  
  	if (unlikely(rq->idle_balance))
  		return;
-@@ -10880,6 +10968,10 @@ static void nohz_balancer_kick(struct rq *rq)
+@@ -10880,6 +10973,10 @@ static void nohz_balancer_kick(struct rq *rq)
  	if (time_before(now, nohz.next_balance))
  		goto out;
  
@@ -1187,7 +1425,7 @@
  	if (rq->nr_running >= 2) {
  		flags = NOHZ_STATS_KICK | NOHZ_BALANCE_KICK;
  		goto out;
-@@ -11285,6 +11377,11 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
+@@ -11285,6 +11382,11 @@ static int newidle_balance(struct rq *this_rq, struct rq_flags *rf)
  	u64 t0, t1, curr_cost = 0;
  	struct sched_domain *sd;
  	int pulled_task = 0;
@@ -1345,7 +1583,7 @@
  DEFINE_MUTEX(sched_domains_mutex);
  
  /* Protected by sched_domains_mutex: */
-@@ -373,12 +375,16 @@ static bool build_perf_domains(const struct cpumask *cpu_map)
+@@ -373,12 +375,17 @@ static bool build_perf_domains(const struct cpumask *cpu_map)
  	struct perf_domain *pd = NULL, *tmp;
  	int cpu = cpumask_first(cpu_map);
  	struct root_domain *rd = cpu_rq(cpu)->rd;
@@ -1360,11 +1598,12 @@
 +	 * EAS is enabled for asymmetric CPU capacity topologies.
 +	 * Allow vendor to override if desired.
 +	 */
++	trace_android_rvh_build_perf_domains(&eas_check);
 +	if (!per_cpu(sd_asym_cpucapacity, cpu) && !eas_check) {
  		if (sched_debug()) {
  			pr_info("rd %*pbl: CPUs do not have asymmetric capacities\n",
  					cpumask_pr_args(cpu_map));
-@@ -2371,6 +2377,7 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
+@@ -2371,6 +2378,7 @@ build_sched_domains(const struct cpumask *cpu_map, struct sched_domain_attr *att
  		pr_info("root domain span: %*pbl (max cpu_capacity = %lu)\n",
  			cpumask_pr_args(cpu_map), rq->rd->max_cpu_capacity);
  	}
@@ -1376,7 +1615,7 @@
 new file mode 100644
 --- /dev/null
 +++ b/kernel/sched/vendor_hooks.c
-@@ -0,0 +1,67 @@
+@@ -0,0 +1,84 @@
 +// SPDX-License-Identifier: GPL-2.0-only
 +/* vendor_hook.c
 + *
@@ -1434,6 +1673,22 @@
 +EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_dequeue_task_fair);
 +EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_util_est_update);
 +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_setscheduler_uclamp);
++EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_set_task_cpu);
++EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_try_to_wake_up);
++EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_try_to_wake_up_success);
++EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_fork);
++EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_wake_up_new_task);
++EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_new_task_stats);
++EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_flush_task);
++EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_tick_entry);
++EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_schedule);
++EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_cpu_starting);
++EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_cpu_dying);
++EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_account_irq);
++EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_place_entity);
++EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_build_perf_domains);
++EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_cpu_capacity);
++EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_misfit_status);
 +EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_rto_next_cpu);
 +EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_is_cpu_allowed);
 +EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_get_nohz_timer_target);
@@ -1444,3 +1699,4 @@
 +EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_schedule_bug);
 +EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_sched_exec);
 +EXPORT_TRACEPOINT_SYMBOL_GPL(android_vh_update_topology_flags_workfn);
++EXPORT_TRACEPOINT_SYMBOL_GPL(android_rvh_update_thermal_stats);
diff --git a/android-mainline/NOUPSTREAM-ANDROID-base-Drivers-Base-related-exports.patch b/android-mainline/NOUPSTREAM-ANDROID-base-Drivers-Base-related-exports.patch
index 27d9437..afb9d73 100644
--- a/android-mainline/NOUPSTREAM-ANDROID-base-Drivers-Base-related-exports.patch
+++ b/android-mainline/NOUPSTREAM-ANDROID-base-Drivers-Base-related-exports.patch
@@ -35,7 +35,7 @@
  
  /**
   * topology_update_thermal_pressure() - Update thermal pressure for CPUs
-@@ -243,6 +244,8 @@ static int register_cpu_capacity_sysctl(void)
+@@ -246,6 +247,8 @@ static int register_cpu_capacity_sysctl(void)
  subsys_initcall(register_cpu_capacity_sysctl);
  
  static int update_topology;
@@ -44,7 +44,7 @@
  
  int topology_update_cpu_topology(void)
  {
-@@ -257,6 +260,7 @@ static void update_topology_flags_workfn(struct work_struct *work)
+@@ -260,6 +263,7 @@ static void update_topology_flags_workfn(struct work_struct *work)
  {
  	update_topology = 1;
  	rebuild_sched_domains();
diff --git a/android-mainline/NOUPSTREAM-ANDROID-kernel-Core-Kernel-Exports.patch b/android-mainline/NOUPSTREAM-ANDROID-kernel-Core-Kernel-Exports.patch
index 319c8a8..4efd9d2 100644
--- a/android-mainline/NOUPSTREAM-ANDROID-kernel-Core-Kernel-Exports.patch
+++ b/android-mainline/NOUPSTREAM-ANDROID-kernel-Core-Kernel-Exports.patch
@@ -386,7 +386,7 @@
  
  #ifdef CONFIG_SMP
  
-@@ -3173,8 +3188,8 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
+@@ -3174,8 +3189,8 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
  
  	__set_task_cpu(p, new_cpu);
  }
@@ -396,7 +396,7 @@
  static void __migrate_swap_task(struct task_struct *p, int cpu)
  {
  	if (task_on_rq_queued(p)) {
-@@ -3289,7 +3304,7 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p,
+@@ -3290,7 +3305,7 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p,
  out:
  	return ret;
  }
@@ -405,7 +405,7 @@
  
  /*
   * wait_task_inactive - wait for a thread to unschedule.
-@@ -3841,6 +3856,7 @@ void wake_up_if_idle(int cpu)
+@@ -3842,6 +3857,7 @@ void wake_up_if_idle(int cpu)
  out:
  	rcu_read_unlock();
  }
@@ -413,7 +413,7 @@
  
  bool cpus_share_cache(int this_cpu, int that_cpu)
  {
-@@ -4923,6 +4939,7 @@ struct balance_callback balance_push_callback = {
+@@ -4933,6 +4949,7 @@ struct balance_callback balance_push_callback = {
  	.next = NULL,
  	.func = balance_push,
  };
@@ -421,7 +421,7 @@
  
  static inline struct balance_callback *
  __splice_balance_callbacks(struct rq *rq, bool split)
-@@ -4954,10 +4971,11 @@ static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
+@@ -4964,10 +4981,11 @@ static inline struct balance_callback *splice_balance_callbacks(struct rq *rq)
  	return __splice_balance_callbacks(rq, true);
  }
  
@@ -434,7 +434,7 @@
  
  static inline void balance_callbacks(struct rq *rq, struct balance_callback *head)
  {
-@@ -7242,6 +7260,7 @@ int available_idle_cpu(int cpu)
+@@ -7256,6 +7274,7 @@ int available_idle_cpu(int cpu)
  
  	return 1;
  }
@@ -442,7 +442,7 @@
  
  /**
   * idle_task - return the idle task for a given CPU.
-@@ -7739,11 +7758,13 @@ int sched_setscheduler(struct task_struct *p, int policy,
+@@ -7753,11 +7772,13 @@ int sched_setscheduler(struct task_struct *p, int policy,
  {
  	return _sched_setscheduler(p, policy, param, true);
  }
@@ -456,7 +456,7 @@
  
  int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
  {
-@@ -7769,6 +7790,7 @@ int sched_setscheduler_nocheck(struct task_struct *p, int policy,
+@@ -7783,6 +7804,7 @@ int sched_setscheduler_nocheck(struct task_struct *p, int policy,
  {
  	return _sched_setscheduler(p, policy, param, false);
  }
@@ -464,7 +464,7 @@
  
  /*
   * SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
-@@ -9691,7 +9713,9 @@ int in_sched_functions(unsigned long addr)
+@@ -9708,7 +9730,9 @@ int in_sched_functions(unsigned long addr)
   * Every task in system belongs to this group at bootup.
   */
  struct task_group root_task_group;
@@ -496,7 +496,7 @@
 diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
 --- a/kernel/sched/cputime.c
 +++ b/kernel/sched/cputime.c
-@@ -18,6 +18,7 @@
+@@ -19,6 +19,7 @@
   * compromise in place of having locks on each irq in account_system_time.
   */
  DEFINE_PER_CPU(struct irqtime, cpu_irqtime);
@@ -504,7 +504,7 @@
  
  static int sched_clock_irqtime;
  
-@@ -479,6 +480,7 @@ void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
+@@ -486,6 +487,7 @@ void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
  	*ut = cputime.utime;
  	*st = cputime.stime;
  }
@@ -512,7 +512,7 @@
  
  #else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE: */
  
-@@ -649,6 +651,8 @@ void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
+@@ -656,6 +658,8 @@ void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
  	thread_group_cputime(p, &cputime);
  	cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
  }
@@ -555,7 +555,7 @@
  static unsigned int normalized_sysctl_sched_latency	= 6000000ULL;
  
  /*
-@@ -4797,7 +4798,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
+@@ -4801,7 +4802,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
  		resched_curr(rq_of(cfs_rq));
  }
  
@@ -564,7 +564,7 @@
  {
  	clear_buddies(cfs_rq, se);
  
-@@ -4833,6 +4834,7 @@ static void set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+@@ -4837,6 +4838,7 @@ static void set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
  
  	se->prev_sum_exec_runtime = se->sum_exec_runtime;
  }
@@ -572,7 +572,7 @@
  
  static int
  wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
-@@ -7919,7 +7921,8 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
+@@ -7923,7 +7925,8 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
   *      rewrite all of this once again.]
   */
  
diff --git a/android-mainline/series b/android-mainline/series
index 69c0ffe..61a8d7d 100644
--- a/android-mainline/series
+++ b/android-mainline/series
@@ -2,8 +2,8 @@
 # android-mainline patches
 #
 # Applies onto upstream 8e5423e991e8c Linux v6.1-rc3-72-g8e5423e991e8c
-# Matches android-mainline a4c2687cd943e ("ANDROID: kleaf: //common:all_headers should use linux_includes.")
-# Status: Tested
+# Matches android-mainline a9715862824a4 ("ANDROID: dm-user: Remove bio recount in I/O path")
+# Status: Untested
 #
 Revert-sched-core-Prevent-race-condition-between-cpuset-and-__sched_setscheduler.patch
 Revert-drm-virtio-fix-DRM_FORMAT_-handling.patch