| // SPDX-License-Identifier: GPL-2.0 |
| // Copyright (c) 2019 Facebook |
| #include <vmlinux.h> |
| #include <bpf/bpf_core_read.h> |
| #include <bpf/bpf_helpers.h> |
| #include <bpf/bpf_tracing.h> |
| #include "runqslower.h" |
| #include "core_fixes.bpf.h" |
| |
/* Scheduler state value for a runnable task (mirrors the kernel's TASK_RUNNING). */
#define TASK_RUNNING 0

/* Tunables: set by the user-space loader before the skeleton is loaded. */
const volatile __u64 min_us = 0;	/* only emit events with delay > min_us (0 = report all) */
const volatile pid_t targ_pid = 0;	/* trace only this thread id (0 = any) */
const volatile pid_t targ_tgid = 0;	/* trace only this process/tgid (0 = any) */
| |
/* pid (tid) -> enqueue timestamp (ns), written on wakeup/preemption,
 * consumed on the subsequent sched_switch into that task. */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 10240);
	__type(key, u32);
	__type(value, u64);
} start SEC(".maps");
| |
/* Perf buffer used to stream struct event records to user space. */
struct {
	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
	__uint(key_size, sizeof(u32));
	__uint(value_size, sizeof(u32));
} events SEC(".maps");
| |
| /* record enqueue timestamp */ |
| static int trace_enqueue(u32 tgid, u32 pid) |
| { |
| u64 ts; |
| |
| if (!pid) |
| return 0; |
| if (targ_tgid && targ_tgid != tgid) |
| return 0; |
| if (targ_pid && targ_pid != pid) |
| return 0; |
| |
| ts = bpf_ktime_get_ns(); |
| bpf_map_update_elem(&start, &pid, &ts, 0); |
| return 0; |
| } |
| |
| static int handle_switch(void *ctx, struct task_struct *prev, struct task_struct *next) |
| { |
| struct event event = {}; |
| u64 *tsp, delta_us; |
| u32 pid; |
| |
| /* ivcsw: treat like an enqueue event and store timestamp */ |
| if (get_task_state(prev) == TASK_RUNNING) |
| trace_enqueue(BPF_CORE_READ(prev, tgid), BPF_CORE_READ(prev, pid)); |
| |
| pid = BPF_CORE_READ(next, pid); |
| |
| /* fetch timestamp and calculate delta */ |
| tsp = bpf_map_lookup_elem(&start, &pid); |
| if (!tsp) |
| return 0; /* missed enqueue */ |
| |
| delta_us = (bpf_ktime_get_ns() - *tsp) / 1000; |
| if (min_us && delta_us <= min_us) |
| return 0; |
| |
| event.pid = pid; |
| event.prev_pid = BPF_CORE_READ(prev, pid); |
| event.delta_us = delta_us; |
| bpf_probe_read_kernel_str(&event.task, sizeof(event.task), next->comm); |
| bpf_probe_read_kernel_str(&event.prev_task, sizeof(event.prev_task), prev->comm); |
| |
| /* output */ |
| bpf_perf_event_output(ctx, &events, BPF_F_CURRENT_CPU, |
| &event, sizeof(event)); |
| |
| bpf_map_delete_elem(&start, &pid); |
| return 0; |
| } |
| |
| SEC("tp_btf/sched_wakeup") |
| int BPF_PROG(sched_wakeup, struct task_struct *p) |
| { |
| return trace_enqueue(p->tgid, p->pid); |
| } |
| |
| SEC("tp_btf/sched_wakeup_new") |
| int BPF_PROG(sched_wakeup_new, struct task_struct *p) |
| { |
| return trace_enqueue(p->tgid, p->pid); |
| } |
| |
/* BTF-typed sched_switch entry point; ctx comes from the BPF_PROG macro. */
SEC("tp_btf/sched_switch")
int BPF_PROG(sched_switch, bool preempt, struct task_struct *prev, struct task_struct *next)
{
	return handle_switch(ctx, prev, next);
}
| |
| SEC("raw_tp/sched_wakeup") |
| int BPF_PROG(handle_sched_wakeup, struct task_struct *p) |
| { |
| return trace_enqueue(BPF_CORE_READ(p, tgid), BPF_CORE_READ(p, pid)); |
| } |
| |
| SEC("raw_tp/sched_wakeup_new") |
| int BPF_PROG(handle_sched_wakeup_new, struct task_struct *p) |
| { |
| return trace_enqueue(BPF_CORE_READ(p, tgid), BPF_CORE_READ(p, pid)); |
| } |
| |
/* raw_tp sched_switch entry point; ctx comes from the BPF_PROG macro. */
SEC("raw_tp/sched_switch")
int BPF_PROG(handle_sched_switch, bool preempt, struct task_struct *prev, struct task_struct *next)
{
	return handle_switch(ctx, prev, next);
}
| |
/* License string the kernel checks when loading the program. */
char LICENSE[] SEC("license") = "GPL";