From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
From: Elliot Berman <quic_eberman@quicinc.com>
Date: Tue, 28 Sep 2021 08:48:55 -0700
Subject: NOUPSTREAM: ANDROID: kernel: Core Kernel Exports
[CPNOTE: 30/09/21] Lee: GKI vendor exports
Squash:
NOUPSTREAM: ANDROID: GKI: time: Export for nsec_to_clock_t
NOUPSTREAM: ANDROID: tick/nohz: export tick_nohz_get_sleep_length()
NOUPSTREAM: ANDROID: GKI: pid: Export for find_task_by_vpid
NOUPSTREAM: ANDROID: Export perf_event_read_local function
NOUPSTREAM: ANDROID: PM: sleep: export device_pm_callback_start/end trace event
NOUPSTREAM: ANDROID: perf: Export clock_set_rate tracepoint
NOUPSTREAM: Revert "sched: Remove sched_setscheduler*() EXPORTs"
NOUPSTREAM: ANDROID: Export some scheduler APIs for vendor modules
NOUPSTREAM: ANDROID: GKI: core: Export for runqueues
NOUPSTREAM: ANDROID: GKI: cputime: Export for thread_group_cputime_adjusted
NOUPSTREAM: ANDROID: sched: export wake_up_if_idle()
NOUPSTREAM: ANDROID: softirq: Export irq_handler_entry tracepoint
NOUPSTREAM: ANDROID: sched: Export few scheduler symbols for vendor modules
NOUPSTREAM: ANDROID: Sched: Export sched_feat_keys symbol needed by vendor modules
NOUPSTREAM: ANDROID: sched/core: Export symbols needed by modules
NOUPSTREAM: ANDROID: sched: Export symbols for vendor EAS wakeup path function
NOUPSTREAM: ANDROID: Sched: Export scheduler symbols needed by vendor modules
NOUPSTREAM: ANDROID: cgroup: Export functions used by modules
NOUPSTREAM: ANDROID: sched: Export key used by module
NOUPSTREAM: ANDROID: sched: Export sched_switch tracepoint symbol
NOUPSTREAM: ANDROID: sched: time: Export symbols needed for schedutil module
NOUPSTREAM: ANDROID: dma: Rework so dev_get_cma_area() can be called from a module
NOUPSTREAM: ANDROID: irqdesc: Export symbol for vendor modules
ANDROID: sched: Export set_next_entity
ANDROID: sched/fair: export sysctl_sched_latency symbol
ANDROID: sched: Add export symbol resched_curr
ANDROID: sched: Add export symbols for sched features
ANDROID: sched: Export max_load_balance_interval
ANDROID: printk: Export symbol for scheduler vendor hook
ANDROID: sched: Export rq lock symbols
ANDROID: sched: Export symbol for scheduler vendor module
Bug: 143964928
Bug: 158067689
Bug: 169136276
Bug: 158067689
Bug: 151972133
Bug: 158528747
Bug: 167449286
Bug: 166776693
Bug: 163613349
Bug: 155241766
Bug: 158067689
Bug: 158067689
Bug: 169136276
Bug: 175806230
Bug: 173468448
Bug: 173559623
Bug: 174219212
Bug: 170507310
Bug: 173725277
Bug: 192873984
Bug: 175045928
Bug: 175448875
Bug: 175806230
Bug: 170511085
Bug: 155218010
Bug: 177393442
Bug: 199823932
Bug: 183674818
Bug: 175448877
Bug: 176077958
Bug: 177050087
Bug: 180125905
Bug: 203530090
Bug: 203580415
Change-Id: I9667d55296a0c73115a7a55d27e406b1291104ef
Signed-off-by: Lee Jones <lee.jones@linaro.org>
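For illustration only (not part of the patch): a minimal sketch of the kind of
vendor module these exports are intended to enable. It uses two of the symbols
exported below, kthread_bind_mask() and sched_setscheduler_nocheck(); the
module name and the worker's behaviour are hypothetical.

  #include <linux/kthread.h>
  #include <linux/module.h>
  #include <linux/sched.h>
  #include <uapi/linux/sched/types.h>	/* struct sched_param */

  static struct task_struct *worker;

  /* Hypothetical worker; just sleeps until the module is unloaded. */
  static int vendor_worker_fn(void *unused)
  {
  	while (!kthread_should_stop())
  		schedule_timeout_interruptible(HZ);
  	return 0;
  }

  static int __init vendor_init(void)
  {
  	struct sched_param sp = { .sched_priority = 1 };

  	worker = kthread_create(vendor_worker_fn, NULL, "vendor_worker");
  	if (IS_ERR(worker))
  		return PTR_ERR(worker);

  	kthread_bind_mask(worker, cpumask_of(0));		/* exported by this patch */
  	sched_setscheduler_nocheck(worker, SCHED_FIFO, &sp);	/* exported by this patch */
  	wake_up_process(worker);
  	return 0;
  }

  static void __exit vendor_exit(void)
  {
  	kthread_stop(worker);
  }

  module_init(vendor_init);
  module_exit(vendor_exit);
  MODULE_LICENSE("GPL");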
ANDROID: sched: Add parameter to android_rvh_set_iowait
While overriding sugov behavior, it is also necessary to know the
runqueue on which the task is going to be enqueued.
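A minimal sketch (hypothetical, not part of the patch) of a vendor module
attaching a probe to the reworked hook. The registration helper name follows
the usual register_trace_android_rvh_<name>() vendor-hook convention, and the
probe takes the registration cookie first, followed by the TP_PROTO arguments,
so it now also receives the target runqueue:

  #include <linux/module.h>
  #include <linux/sched.h>
  #include <trace/hooks/sched.h>

  static void vendor_set_iowait(void *data, struct task_struct *p,
  				struct rq *rq, int *should_iowait_boost)
  {
  	/*
  	 * rq identifies the runqueue @p is about to be enqueued on, so a
  	 * vendor governor can key its boost decision on the target CPU
  	 * rather than on the task alone.  rq is only passed through here.
  	 */
  	if (p->in_iowait)
  		*should_iowait_boost = 1;
  }

  static int __init vendor_hook_init(void)
  {
  	/* Restricted vendor hooks can be registered but never unregistered. */
  	return register_trace_android_rvh_set_iowait(vendor_set_iowait, NULL);
  }
  module_init(vendor_hook_init);
  MODULE_LICENSE("GPL");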
Bug: 171598214
Change-Id: I1491a2be34a6e615c5d4bc68e095647744870233
Signed-off-by: Shaleen Agrawal <shalagra@codeaurora.org>
---
include/trace/hooks/sched.h | 4 ++--
kernel/cgroup/cgroup.c | 3 +++
kernel/dma/contiguous.c | 1 +
kernel/events/core.c | 1 +
kernel/fork.c | 3 +++
kernel/irq/irqdesc.c | 4 ++--
kernel/irq_work.c | 1 +
kernel/kthread.c | 1 +
kernel/pid.c | 1 +
kernel/printk/printk.c | 1 +
kernel/reboot.c | 1 +
kernel/sched/core.c | 26 ++++++++++++++++++++++++--
kernel/sched/cpufreq.c | 1 +
kernel/sched/cpupri.c | 1 +
kernel/sched/cputime.c | 4 ++++
kernel/sched/debug.c | 4 +++-
kernel/sched/fair.c | 9 ++++++---
kernel/sched/pelt.c | 1 +
kernel/sched/rt.c | 1 +
kernel/sched/sched.h | 8 ++++++--
kernel/softirq.c | 3 +++
kernel/stop_machine.c | 1 +
kernel/time/tick-sched.c | 2 ++
kernel/time/time.c | 1 +
kernel/trace/power-traces.c | 4 +++-
25 files changed, 74 insertions(+), 13 deletions(-)
diff --git a/include/trace/hooks/sched.h b/include/trace/hooks/sched.h
--- a/include/trace/hooks/sched.h
+++ b/include/trace/hooks/sched.h
@@ -109,8 +109,8 @@ DECLARE_HOOK(android_vh_set_sugov_sched_attr,
TP_PROTO(struct sched_attr *attr),
TP_ARGS(attr));
DECLARE_RESTRICTED_HOOK(android_rvh_set_iowait,
- TP_PROTO(struct task_struct *p, int *should_iowait_boost),
- TP_ARGS(p, should_iowait_boost), 1);
+ TP_PROTO(struct task_struct *p, struct rq *rq, int *should_iowait_boost),
+ TP_ARGS(p, rq, should_iowait_boost), 1);
struct sugov_policy;
DECLARE_RESTRICTED_HOOK(android_rvh_set_sugov_update,
TP_PROTO(struct sugov_policy *sg_policy, unsigned int next_freq, bool *should_update),
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -2399,6 +2399,7 @@ struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
return cgroup_taskset_next(tset, dst_cssp);
}
+EXPORT_SYMBOL_GPL(cgroup_taskset_first);
/**
* cgroup_taskset_next - iterate to the next task in taskset
@@ -2445,6 +2446,7 @@ struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
return NULL;
}
+EXPORT_SYMBOL_GPL(cgroup_taskset_next);
/**
* cgroup_migrate_execute - migrate a taskset
@@ -4333,6 +4335,7 @@ struct cgroup_subsys_state *css_next_child(struct cgroup_subsys_state *pos,
return next;
return NULL;
}
+EXPORT_SYMBOL_GPL(css_next_child);
/**
* css_next_descendant_pre - find the next descendant for pre-order walk
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -58,6 +58,7 @@
#endif
struct cma *dma_contiguous_default_area;
+EXPORT_SYMBOL_GPL(dma_contiguous_default_area);
/*
* Default global CMA area size can be defined in kernel's .config.
diff --git a/kernel/events/core.c b/kernel/events/core.c
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -4487,6 +4487,7 @@ int perf_event_read_local(struct perf_event *event, u64 *value,
return ret;
}
+EXPORT_SYMBOL_GPL(perf_event_read_local);
static int perf_event_read(struct perf_event *event, bool group)
{
diff --git a/kernel/fork.c b/kernel/fork.c
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -121,6 +121,8 @@
*/
#define MAX_THREADS FUTEX_TID_MASK
+EXPORT_TRACEPOINT_SYMBOL_GPL(task_newtask);
+
/*
* Protected counters by write_lock_irq(&tasklist_lock)
*/
@@ -141,6 +143,7 @@ static const char * const resident_page_types[] = {
DEFINE_PER_CPU(unsigned long, process_counts) = 0;
__cacheline_aligned DEFINE_RWLOCK(tasklist_lock); /* outer */
+EXPORT_SYMBOL_GPL(tasklist_lock);
#ifdef CONFIG_PROVE_RCU
int lockdep_tasklist_lock_is_held(void)
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -352,9 +352,7 @@ struct irq_desc *irq_to_desc(unsigned int irq)
{
return radix_tree_lookup(&irq_desc_tree, irq);
}
-#ifdef CONFIG_KVM_BOOK3S_64_HV_MODULE
EXPORT_SYMBOL_GPL(irq_to_desc);
-#endif
static void delete_irq_desc(unsigned int irq)
{
@@ -885,6 +883,7 @@ unsigned int kstat_irqs_cpu(unsigned int irq, int cpu)
return desc && desc->kstat_irqs ?
*per_cpu_ptr(desc->kstat_irqs, cpu) : 0;
}
+EXPORT_SYMBOL_GPL(kstat_irqs_cpu);
static bool irq_is_nmi(struct irq_desc *desc)
{
@@ -942,3 +941,4 @@ void __irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class
}
EXPORT_SYMBOL_GPL(__irq_set_lockdep_class);
#endif
+EXPORT_SYMBOL_GPL(kstat_irqs_usr);
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -170,6 +170,7 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
return true;
#endif /* CONFIG_SMP */
}
+EXPORT_SYMBOL_GPL(irq_work_queue_on);
bool irq_work_needs_cpu(void)
{
diff --git a/kernel/kthread.c b/kernel/kthread.c
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -482,6 +482,7 @@ void kthread_bind_mask(struct task_struct *p, const struct cpumask *mask)
{
__kthread_bind_mask(p, mask, TASK_UNINTERRUPTIBLE);
}
+EXPORT_SYMBOL_GPL(kthread_bind_mask);
/**
* kthread_bind - bind a just-created kthread to a cpu.
diff --git a/kernel/pid.c b/kernel/pid.c
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -421,6 +421,7 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
{
return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
}
+EXPORT_SYMBOL_GPL(find_task_by_vpid);
struct task_struct *find_get_task_by_vpid(pid_t nr)
{
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -3287,6 +3287,7 @@ int _printk_deferred(const char *fmt, ...)
return r;
}
+EXPORT_SYMBOL_GPL(_printk_deferred);
/*
* printk rate limiting, lifted from the networking subsystem.
diff --git a/kernel/reboot.c b/kernel/reboot.c
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -35,6 +35,7 @@ EXPORT_SYMBOL(cad_pid);
enum reboot_mode reboot_mode DEFAULT_REBOOT_MODE;
EXPORT_SYMBOL_GPL(reboot_mode);
enum reboot_mode panic_reboot_mode = REBOOT_UNDEFINED;
+EXPORT_SYMBOL_GPL(panic_reboot_mode);
/*
* This variable is used privately to keep track of whether or not
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -44,8 +44,10 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(sched_overutilized_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_cfs_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_util_est_se_tp);
EXPORT_TRACEPOINT_SYMBOL_GPL(sched_update_nr_running_tp);
+EXPORT_TRACEPOINT_SYMBOL_GPL(sched_switch);
DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
+EXPORT_SYMBOL_GPL(runqueues);
#ifdef CONFIG_SCHED_DEBUG
/*
@@ -60,6 +62,7 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
const_debug unsigned int sysctl_sched_features =
#include "features.h"
0;
+EXPORT_SYMBOL_GPL(sysctl_sched_features);
#undef SCHED_FEAT
/*
@@ -495,6 +498,7 @@ void raw_spin_rq_lock_nested(struct rq *rq, int subclass)
raw_spin_unlock(lock);
}
}
+EXPORT_SYMBOL_GPL(raw_spin_rq_lock_nested);
bool raw_spin_rq_trylock(struct rq *rq)
{
@@ -524,6 +528,7 @@ void raw_spin_rq_unlock(struct rq *rq)
{
raw_spin_unlock(rq_lockp(rq));
}
+EXPORT_SYMBOL_GPL(raw_spin_rq_unlock);
#ifdef CONFIG_SMP
/*
@@ -542,6 +547,7 @@ void double_rq_lock(struct rq *rq1, struct rq *rq2)
raw_spin_rq_lock_nested(rq2, SINGLE_DEPTH_NESTING);
}
+EXPORT_SYMBOL_GPL(double_rq_lock);
#endif
/*
@@ -567,6 +573,7 @@ struct rq *__task_rq_lock(struct task_struct *p, struct rq_flags *rf)
cpu_relax();
}
}
+EXPORT_SYMBOL_GPL(__task_rq_lock);
/*
* task_rq_lock - lock p->pi_lock and lock the rq @p resides on.
@@ -689,6 +696,7 @@ void update_rq_clock(struct rq *rq)
rq->clock += delta;
update_rq_clock_task(rq, delta);
}
+EXPORT_SYMBOL_GPL(update_rq_clock);
#ifdef CONFIG_SCHED_HRTICK
/*
@@ -991,6 +999,7 @@ void resched_curr(struct rq *rq)
else
trace_sched_wake_idle_without_ipi(cpu);
}
+EXPORT_SYMBOL_GPL(resched_curr);
void resched_cpu(int cpu)
{
@@ -1293,6 +1302,7 @@ static struct uclamp_se uclamp_default[UCLAMP_CNT];
* * An admin modifying the cgroup cpu.uclamp.{min, max}
*/
DEFINE_STATIC_KEY_FALSE(sched_uclamp_used);
+EXPORT_SYMBOL_GPL(sched_uclamp_used);
/* Integer rounded range for each bucket */
#define UCLAMP_BUCKET_DELTA DIV_ROUND_CLOSEST(SCHED_CAPACITY_SCALE, UCLAMP_BUCKETS)
@@ -1486,6 +1496,7 @@ unsigned long uclamp_eff_value(struct task_struct *p, enum uclamp_id clamp_id)
return (unsigned long)uc_eff.value;
}
+EXPORT_SYMBOL_GPL(uclamp_eff_value);
/*
* When a task is enqueued on a rq, the clamp bucket currently defined by the
@@ -2035,6 +2046,7 @@ void activate_task(struct rq *rq, struct task_struct *p, int flags)
p->on_rq = TASK_ON_RQ_QUEUED;
}
+EXPORT_SYMBOL_GPL(activate_task);
void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
{
@@ -2042,6 +2054,7 @@ void deactivate_task(struct rq *rq, struct task_struct *p, int flags)
dequeue_task(rq, p, flags);
}
+EXPORT_SYMBOL_GPL(deactivate_task);
static inline int __normal_prio(int policy, int rt_prio, int nice)
{
@@ -2134,6 +2147,7 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr))
rq_clock_skip_update(rq);
}
+EXPORT_SYMBOL_GPL(check_preempt_curr);
#ifdef CONFIG_SMP
@@ -3086,8 +3100,8 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
__set_task_cpu(p, new_cpu);
}
+EXPORT_SYMBOL_GPL(set_task_cpu);
-#ifdef CONFIG_NUMA_BALANCING
static void __migrate_swap_task(struct task_struct *p, int cpu)
{
if (task_on_rq_queued(p)) {
@@ -3202,7 +3216,7 @@ int migrate_swap(struct task_struct *cur, struct task_struct *p,
out:
return ret;
}
-#endif /* CONFIG_NUMA_BALANCING */
+EXPORT_SYMBOL_GPL(migrate_swap);
/*
* wait_task_inactive - wait for a thread to unschedule.
@@ -3750,6 +3764,7 @@ void wake_up_if_idle(int cpu)
out:
rcu_read_unlock();
}
+EXPORT_SYMBOL_GPL(wake_up_if_idle);
bool cpus_share_cache(int this_cpu, int that_cpu)
{
@@ -4706,6 +4721,7 @@ struct callback_head balance_push_callback = {
.next = NULL,
.func = (void (*)(struct callback_head *))balance_push,
};
+EXPORT_SYMBOL_GPL(balance_push_callback);
static inline struct callback_head *splice_balance_callbacks(struct rq *rq)
{
@@ -7068,6 +7084,7 @@ int available_idle_cpu(int cpu)
return 1;
}
+EXPORT_SYMBOL_GPL(available_idle_cpu);
/**
* idle_task - return the idle task for a given CPU.
@@ -7552,11 +7569,13 @@ int sched_setscheduler(struct task_struct *p, int policy,
{
return _sched_setscheduler(p, policy, param, true);
}
+EXPORT_SYMBOL_GPL(sched_setscheduler);
int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
{
return __sched_setscheduler(p, attr, true, true);
}
+EXPORT_SYMBOL_GPL(sched_setattr);
int sched_setattr_nocheck(struct task_struct *p, const struct sched_attr *attr)
{
@@ -7582,6 +7601,7 @@ int sched_setscheduler_nocheck(struct task_struct *p, int policy,
{
return _sched_setscheduler(p, policy, param, false);
}
+EXPORT_SYMBOL_GPL(sched_setscheduler_nocheck);
/*
* SCHED_FIFO is a broken scheduler model; that is, it is fundamentally
@@ -9309,7 +9329,9 @@ int in_sched_functions(unsigned long addr)
* Every task in system belongs to this group at bootup.
*/
struct task_group root_task_group;
+EXPORT_SYMBOL_GPL(root_task_group);
LIST_HEAD(task_groups);
+EXPORT_SYMBOL_GPL(task_groups);
/* Cacheline aligned slab cache for task_group */
static struct kmem_cache *task_group_cache __read_mostly;
diff --git a/kernel/sched/cpufreq.c b/kernel/sched/cpufreq.c
--- a/kernel/sched/cpufreq.c
+++ b/kernel/sched/cpufreq.c
@@ -75,3 +75,4 @@ bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy)
(policy->dvfs_possible_from_any_cpu &&
rcu_dereference_sched(*this_cpu_ptr(&cpufreq_update_util_data)));
}
+EXPORT_SYMBOL_GPL(cpufreq_this_cpu_can_update);
diff --git a/kernel/sched/cpupri.c b/kernel/sched/cpupri.c
--- a/kernel/sched/cpupri.c
+++ b/kernel/sched/cpupri.c
@@ -237,6 +237,7 @@ int cpupri_find_fitness(struct cpupri *cp, struct task_struct *p,
return 0;
}
+EXPORT_SYMBOL_GPL(cpupri_find_fitness);
/**
* cpupri_set - update the CPU priority setting
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -20,6 +20,7 @@
* compromise in place of having locks on each irq in account_system_time.
*/
DEFINE_PER_CPU(struct irqtime, cpu_irqtime);
+EXPORT_PER_CPU_SYMBOL_GPL(cpu_irqtime);
static int sched_clock_irqtime;
@@ -468,6 +469,7 @@ void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
*ut = cputime.utime;
*st = cputime.stime;
}
+EXPORT_SYMBOL_GPL(thread_group_cputime_adjusted);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE: */
@@ -638,6 +640,8 @@ void thread_group_cputime_adjusted(struct task_struct *p, u64 *ut, u64 *st)
thread_group_cputime(p, &cputime);
cputime_adjust(&cputime, &p->signal->prev_cputime, ut, st);
}
+EXPORT_SYMBOL_GPL(thread_group_cputime_adjusted);
+
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -48,9 +48,10 @@ static unsigned long nsec_low(unsigned long long nsec)
#define SCHED_FEAT(name, enabled) \
#name ,
-static const char * const sched_feat_names[] = {
+const char * const sched_feat_names[] = {
#include "features.h"
};
+EXPORT_SYMBOL_GPL(sched_feat_names);
#undef SCHED_FEAT
@@ -79,6 +80,7 @@ static int sched_feat_show(struct seq_file *m, void *v)
struct static_key sched_feat_keys[__SCHED_FEAT_NR] = {
#include "features.h"
};
+EXPORT_SYMBOL_GPL(sched_feat_keys);
#undef SCHED_FEAT
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -38,6 +38,7 @@
* (default: 6ms * (1 + ilog(ncpus)), units: nanoseconds)
*/
unsigned int sysctl_sched_latency = 6000000ULL;
+EXPORT_SYMBOL_GPL(sysctl_sched_latency);
static unsigned int normalized_sysctl_sched_latency = 6000000ULL;
/*
@@ -4433,7 +4434,7 @@ check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr)
resched_curr(rq_of(cfs_rq));
}
-static void set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
+void set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
{
clear_buddies(cfs_rq, se);
@@ -4469,6 +4470,7 @@ static void set_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *se)
se->prev_sum_exec_runtime = se->sum_exec_runtime;
}
+EXPORT_SYMBOL_GPL(set_next_entity);
static int
wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se);
@@ -5598,7 +5600,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
* passed.
*/
should_iowait_boost = p->in_iowait;
- trace_android_rvh_set_iowait(p, &should_iowait_boost);
+ trace_android_rvh_set_iowait(p, rq, &should_iowait_boost);
if (should_iowait_boost)
cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
@@ -7658,7 +7660,8 @@ static bool yield_to_task_fair(struct rq *rq, struct task_struct *p)
* rewrite all of this once again.]
*/
-static unsigned long __read_mostly max_load_balance_interval = HZ/10;
+unsigned long __read_mostly max_load_balance_interval = HZ/10;
+EXPORT_SYMBOL_GPL(max_load_balance_interval);
enum fbq_type { regular, remote, all };
diff --git a/kernel/sched/pelt.c b/kernel/sched/pelt.c
--- a/kernel/sched/pelt.c
+++ b/kernel/sched/pelt.c
@@ -306,6 +306,7 @@ int __update_load_avg_blocked_se(u64 now, struct sched_entity *se)
return 0;
}
+EXPORT_SYMBOL_GPL(__update_load_avg_blocked_se);
int __update_load_avg_se(u64 now, struct cfs_rq *cfs_rq, struct sched_entity *se)
{
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1577,6 +1577,7 @@ task_may_not_preempt(struct task_struct *task, int cpu)
(task == cpu_ksoftirqd ||
task_thread_info(task)->preempt_count & SOFTIRQ_MASK));
}
+EXPORT_SYMBOL_GPL(task_may_not_preempt);
#endif /* CONFIG_RT_SOFTINT_OPTIMIZATION */
static int
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1694,8 +1694,6 @@ enum numa_faults_stats {
};
extern void sched_setnuma(struct task_struct *p, int node);
extern int migrate_task_to(struct task_struct *p, int cpu);
-extern int migrate_swap(struct task_struct *p, struct task_struct *t,
- int cpu, int scpu);
extern void init_numa_balancing(unsigned long clone_flags, struct task_struct *p);
#else
static inline void
@@ -1706,6 +1704,9 @@ init_numa_balancing(unsigned long clone_flags, struct task_struct *p)
#ifdef CONFIG_SMP
+extern int migrate_swap(struct task_struct *p, struct task_struct *t,
+ int cpu, int scpu);
+
static inline void
queue_balance_callback(struct rq *rq,
struct callback_head *head,
@@ -1967,6 +1968,8 @@ static __always_inline bool static_branch_##name(struct static_key *key) \
#undef SCHED_FEAT
extern struct static_key sched_feat_keys[__SCHED_FEAT_NR];
+extern const char * const sched_feat_names[__SCHED_FEAT_NR];
+
#define sched_feat(x) (static_branch_##x(&sched_feat_keys[__SCHED_FEAT_##x]))
#else /* !CONFIG_JUMP_LABEL */
@@ -2268,6 +2271,7 @@ static inline struct task_struct *get_push_task(struct rq *rq)
extern int push_cpu_stop(void *arg);
+extern unsigned long __read_mostly max_load_balance_interval;
#endif
#ifdef CONFIG_CPU_IDLE
diff --git a/kernel/softirq.c b/kernel/softirq.c
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -33,6 +33,8 @@
#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
+EXPORT_TRACEPOINT_SYMBOL_GPL(irq_handler_entry);
+
/*
- No shared variables, all the data are CPU local.
- If a softirq needs serialization, let it serialize itself
@@ -59,6 +61,7 @@ EXPORT_PER_CPU_SYMBOL(irq_stat);
static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
+EXPORT_PER_CPU_SYMBOL_GPL(ksoftirqd);
/*
* active_softirqs -- per cpu, a mask of softirqs that are being handled,
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -387,6 +387,7 @@ bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
*work_buf = (struct cpu_stop_work){ .fn = fn, .arg = arg, .caller = _RET_IP_, };
return cpu_stop_queue_work(cpu, work_buf);
}
+EXPORT_SYMBOL_GPL(stop_one_cpu_nowait);
static bool queue_stop_cpus_work(const struct cpumask *cpumask,
cpu_stop_fn_t fn, void *arg,
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -1205,6 +1205,7 @@ ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
return ktime_sub(next_event, now);
}
+EXPORT_SYMBOL_GPL(tick_nohz_get_sleep_length);
/**
* tick_nohz_get_idle_calls_cpu - return the current idle calls counter value
@@ -1218,6 +1219,7 @@ unsigned long tick_nohz_get_idle_calls_cpu(int cpu)
return ts->idle_calls;
}
+EXPORT_SYMBOL_GPL(tick_nohz_get_idle_calls_cpu);
/**
* tick_nohz_get_idle_calls - return the current idle calls counter value
diff --git a/kernel/time/time.c b/kernel/time/time.c
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -686,6 +686,7 @@ u64 nsec_to_clock_t(u64 x)
return div_u64(x * 9, (9ull * NSEC_PER_SEC + (USER_HZ / 2)) / USER_HZ);
#endif
}
+EXPORT_SYMBOL_GPL(nsec_to_clock_t);
u64 jiffies64_to_nsecs(u64 j)
{
diff --git a/kernel/trace/power-traces.c b/kernel/trace/power-traces.c
--- a/kernel/trace/power-traces.c
+++ b/kernel/trace/power-traces.c
@@ -18,4 +18,6 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(suspend_resume);
EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_idle);
EXPORT_TRACEPOINT_SYMBOL_GPL(cpu_frequency);
EXPORT_TRACEPOINT_SYMBOL_GPL(powernv_throttle);
-
+EXPORT_TRACEPOINT_SYMBOL_GPL(device_pm_callback_start);
+EXPORT_TRACEPOINT_SYMBOL_GPL(device_pm_callback_end);
+EXPORT_TRACEPOINT_SYMBOL_GPL(clock_set_rate);