Snap for 6198741 from 8acde7e93168f176f5e5f409be22c534a0b3d890 to sdk-release

Change-Id: I74add8358a7c6480f9abaf4d17c38ff78c519ebb
diff --git a/time_in_state.c b/time_in_state.c
index 618ebd4..0b5c4b2 100644
--- a/time_in_state.c
+++ b/time_in_state.c
@@ -15,17 +15,21 @@
  */
 
 #include <bpf_helpers.h>
+#include <bpf_timeinstate.h>
 
-typedef struct {
-    uint32_t uid;
-    uint32_t freq;
-} time_key;
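+// Per-UID time (ns) spent at each CPU frequency index; frequencies are bucketed
+// FREQS_PER_ENTRY indices per map entry (key.bucket selects the bucket).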
+DEFINE_BPF_MAP(uid_time_in_state_map, PERCPU_HASH, time_key_t, tis_val_t, 1024)
 
-DEFINE_BPF_MAP(uid_times_map, PERCPU_HASH, time_key, uint64_t, 10240)
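+// Per-UID time (ns) spent while a given number of CPUs were active, tracked both
+// system-wide (.active) and within the task's cpufreq policy (.policy),
+// CPUS_PER_ENTRY counts per map entry.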
+DEFINE_BPF_MAP(uid_concurrent_times_map, PERCPU_HASH, time_key_t, concurrent_val_t, 1024)
+
 DEFINE_BPF_MAP(cpu_last_update_map, PERCPU_ARRAY, uint32_t, uint64_t, 1)
 
-/* Assume max of 1024 CPUs */
-DEFINE_BPF_MAP(cpu_freq_map, ARRAY, uint32_t, uint32_t, 1024)
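+// cpu_policy_map: CPU number -> cpufreq policy (cluster) id.
+// policy_freq_idx_map: policy id -> 1-based index of the policy's current frequency.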
+DEFINE_BPF_MAP(cpu_policy_map, ARRAY, uint32_t, uint32_t, 1024)
+DEFINE_BPF_MAP(policy_freq_idx_map, ARRAY, uint32_t, uint8_t, 1024)
+
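+// (policy, frequency) -> 1-based frequency index (0 is reserved for "uninitialized");
+// only read here, so it is expected to be populated from userspace before tracking starts.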
+DEFINE_BPF_MAP(freq_to_idx_map, HASH, freq_idx_key_t, uint8_t, 2048)
+
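+// Number of currently active (non-idle) CPUs, system-wide and per cpufreq policy.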
+DEFINE_BPF_MAP(nr_active_map, ARRAY, uint32_t, uint32_t, 1)
+DEFINE_BPF_MAP(policy_nr_active_map, ARRAY, uint32_t, uint32_t, 1024)
 
 struct switch_args {
     unsigned long long ignore;
@@ -46,18 +50,74 @@
     uint64_t old_last = *last;
     uint64_t time = bpf_ktime_get_ns();
     *last = time;
+
+    uint32_t* active = bpf_nr_active_map_lookup_elem(&zero);
+    if (!active) return 0;
+
     uint32_t cpu = bpf_get_smp_processor_id();
-    uint32_t* freq = bpf_cpu_freq_map_lookup_elem(&cpu);
-    if (args->prev_pid && old_last && freq && *freq) {
-        uint32_t uid = bpf_get_current_uid_gid();
-        time_key key = {.uid = uid, .freq = *freq};
-        uint64_t* tot_time = bpf_uid_times_map_lookup_elem(&key);
-        uint64_t delta = time - old_last;
-        if (!tot_time)
-            bpf_uid_times_map_update_elem(&key, &delta, BPF_ANY);
-        else
-            *tot_time += delta;
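+    // Resolve this CPU's cpufreq policy and that policy's active-CPU counter.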
+    uint32_t* policyp = bpf_cpu_policy_map_lookup_elem(&cpu);
+    if (!policyp) return 0;
+    uint32_t policy = *policyp;
+
+    uint32_t* policy_active = bpf_policy_nr_active_map_lookup_elem(&policy);
+    if (!policy_active) return 0;
+
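+    // Snapshot the counts before they are adjusted below; subtracting 1 gives zero-based
+    // indices into the concurrent-times arrays.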
+    uint32_t nactive = *active - 1;
+    uint32_t policy_nactive = *policy_active - 1;
+
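+    // This CPU becomes active when it leaves idle (prev_pid == 0), or when it is seen for
+    // the first time while about to run a real task.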
+    if (!args->prev_pid || (!old_last && args->next_pid)) {
+        __sync_fetch_and_add(active, 1);
+        __sync_fetch_and_add(policy_active, 1);
     }
+
+    // Return here in 2 scenarios:
+    // 1) prev_pid == 0, so we're exiting idle. No UID stats need updating, and active CPUs can't be
+    //    decreasing.
+    // 2) old_last == 0, so this is the first time we've seen this CPU. Any delta will be invalid,
+    //    and our active CPU counts don't include this CPU yet so we shouldn't decrement them even
+    //    if we're going idle.
+    if (!args->prev_pid || !old_last) return 0;
+
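+    // next_pid == 0 means this CPU is entering idle: remove it from the active counts.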
+    if (!args->next_pid) {
+        __sync_fetch_and_add(active, -1);
+        __sync_fetch_and_add(policy_active, -1);
+    }
+
+    uint8_t* freq_idxp = bpf_policy_freq_idx_map_lookup_elem(&policy);
+    if (!freq_idxp || !*freq_idxp) return 0;
+    // freq_to_idx_map uses 1 as its minimum index so that *freq_idxp == 0 only when uninitialized
+    uint8_t freq_idx = *freq_idxp - 1;
+
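+    // Charge the time since the last switch on this CPU to the outgoing task's UID, under
+    // the bucket that contains the current frequency index.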
+    uint32_t uid = bpf_get_current_uid_gid();
+    time_key_t key = {.uid = uid, .bucket = freq_idx / FREQS_PER_ENTRY};
+    tis_val_t* val = bpf_uid_time_in_state_map_lookup_elem(&key);
+    if (!val) {
+        tis_val_t zero_val = {.ar = {0}};
+        bpf_uid_time_in_state_map_update_elem(&key, &zero_val, BPF_NOEXIST);
+        val = bpf_uid_time_in_state_map_lookup_elem(&key);
+    }
+    uint64_t delta = time - old_last;
+    if (val) val->ar[freq_idx % FREQS_PER_ENTRY] += delta;
+
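+    // Also charge the delta against the number of concurrently active CPUs, system-wide.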
+    key.bucket = nactive / CPUS_PER_ENTRY;
+    concurrent_val_t* ct = bpf_uid_concurrent_times_map_lookup_elem(&key);
+    if (!ct) {
+        concurrent_val_t zero_val = {.active = {0}, .policy = {0}};
+        bpf_uid_concurrent_times_map_update_elem(&key, &zero_val, BPF_NOEXIST);
+        ct = bpf_uid_concurrent_times_map_lookup_elem(&key);
+    }
+    if (ct) ct->active[nactive % CPUS_PER_ENTRY] += delta;
+
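+    // The per-policy active count may fall in a different bucket than the system-wide one;
+    // only look up (or create) a second entry if it does.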
+    if (policy_nactive / CPUS_PER_ENTRY != key.bucket) {
+        key.bucket = policy_nactive / CPUS_PER_ENTRY;
+        ct = bpf_uid_concurrent_times_map_lookup_elem(&key);
+        if (!ct) {
+            concurrent_val_t zero_val = {.active = {0}, .policy = {0}};
+            bpf_uid_concurrent_times_map_update_elem(&key, &zero_val, BPF_NOEXIST);
+            ct = bpf_uid_concurrent_times_map_lookup_elem(&key);
+        }
+    }
+    if (ct) ct->policy[policy_nactive % CPUS_PER_ENTRY] += delta;
     return 0;
 }
 
@@ -71,7 +131,14 @@
 int tp_cpufreq(struct cpufreq_args* args) {
     uint32_t cpu = args->cpu_id;
     unsigned int new = args->state;
-    bpf_cpu_freq_map_update_elem(&cpu, &new, BPF_ANY);
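+    // Record the policy's new frequency as its 1-based index for use at the next sched_switch.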
+    uint32_t* policyp = bpf_cpu_policy_map_lookup_elem(&cpu);
+    if (!policyp) return 0;
+    uint32_t policy = *policyp;
+    freq_idx_key_t key = {.policy = policy, .freq = new};
+    uint8_t* idxp = bpf_freq_to_idx_map_lookup_elem(&key);
+    if (!idxp) return 0;
+    uint8_t idx = *idxp;
+    bpf_policy_freq_idx_map_update_elem(&policy, &idx, BPF_ANY);
     return 0;
 }