Merge branch 'android-msm-bluecross-4.9-pi-qpr1' into android-msm-bluecross-4.9-pi-qpr2

APR 2019.3

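The core.c hunk below makes perf_event_exit_cpu_context() quiesce the per-CPU
multiplexing hrtimer before the context is torn down, so the timer cannot keep
running (or migrate to another CPU) after its CPU goes offline; the
perf_event.h hunk only moves the __percpu annotation next to the pointer type.
As a rough illustration of the new hotplug-exit step only, the helper name
quiesce_mux_hrtimer() is invented here and does not exist in the tree:

static void quiesce_mux_hrtimer(struct pmu *pmu, int cpu)
{
	struct perf_cpu_context *cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
	unsigned long flags;

	/* Software PMUs never arm the multiplexing hrtimer. */
	if (pmu->task_ctx_nr == perf_sw_context)
		return;

	raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
	hrtimer_cancel(&cpuctx->hrtimer);	/* waits for a running handler to finish */
	cpuctx->hrtimer_active = 0;		/* allow re-arming if the CPU comes back */
	raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags);
}
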
Bug: 123654997
Change-Id: I26ece033765f9baf56f69a8e516a628bf3ba3c25
Signed-off-by: Maggie White <maggiewhite@google.com>
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 47c5b39..d32e7b8 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -266,7 +266,7 @@
 	int				capabilities;
 
 	int * __percpu			pmu_disable_count;
-	struct perf_cpu_context * __percpu pmu_cpu_context;
+	struct perf_cpu_context __percpu *pmu_cpu_context;
 	atomic_t			exclusive_cnt; /* < 0: cpu; > 0: tsk */
 	int				task_ctx_nr;
 	int				hrtimer_interval_ms;
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 9762b42..04043c7 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -11206,13 +11206,25 @@
 
 static void perf_event_exit_cpu_context(int cpu)
 {
+	struct perf_cpu_context *cpuctx;
 	struct perf_event_context *ctx;
+	unsigned long flags;
 	struct pmu *pmu;
 	int idx;
 
 	idx = srcu_read_lock(&pmus_srcu);
 	list_for_each_entry_rcu(pmu, &pmus, entry) {
-		ctx = &per_cpu_ptr(pmu->pmu_cpu_context, cpu)->ctx;
+		cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
+		ctx = &cpuctx->ctx;
+
+		/* Cancel the mux hrtimer to avoid CPU migration */
+		if (pmu->task_ctx_nr != perf_sw_context) {
+			raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
+			hrtimer_cancel(&cpuctx->hrtimer);
+			cpuctx->hrtimer_active = 0;
+			raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock,
+							flags);
+		}
 
 		mutex_lock(&ctx->mutex);
 		smp_call_function_single(cpu, __perf_event_exit_context, ctx, 1);