bpf: support COMPAT_BPF_XXX flags for old kernels

1. In the upstream kernel, the BPF_XXX flags were converted to enum types
   and are now exported in vmlinux.h.
2. Use the COMPAT_BPF_XXX definitions so the programs also build against
   older kernel versions.

Signed-off-by: Tonghao Zhang <tonghao@bamaicloud.com>
Tonghao Zhang 2025-07-06 07:27:01 -04:00
parent c74b061c83
commit 1c5155e15a
16 changed files with 39 additions and 57 deletions
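Background for the rename: a vmlinux.h generated from BTF on a newer kernel already carries these constants as members of anonymous enums, so any local definition with the same names fails to compile. A minimal sketch of the collision and the workaround, mirroring the bpf_common.h change below (illustrative only, not code from this tree):

/* On a new kernel, vmlinux.h (dumped from BTF) already contains:
 *
 *     enum { BPF_F_INDEX_MASK = ..., BPF_F_CURRENT_CPU = ..., };
 *
 * so keeping the old local copy in bpf_common.h:
 *
 *     enum { BPF_F_INDEX_MASK = 0xffffffffULL,
 *            BPF_F_CURRENT_CPU = BPF_F_INDEX_MASK, };
 *
 * fails with a "redefinition of enumerator" error. A macro under a
 * different name compiles against both old and new vmlinux.h:
 */
#define COMPAT_BPF_F_CURRENT_CPU 0xffffffffULL /* same value as BPF_F_CURRENT_CPU */
#define COMPAT_TASK_COMM_LEN 16                /* same value as TASK_COMM_LEN */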

View File

@@ -48,7 +48,7 @@ bpf_cgroup_event_class_prog(struct bpf_raw_tracepoint_args *ctx, u64 type)
bpf_probe_read(&data.css, sizeof(u64) * CGROUP_SUBSYS_COUNT,
BPF_CORE_READ(cgrp, subsys));
-bpf_perf_event_output(ctx, &cgroup_perf_events, BPF_F_CURRENT_CPU,
+bpf_perf_event_output(ctx, &cgroup_perf_events, COMPAT_BPF_F_CURRENT_CPU,
&data, sizeof(data));
return 0;
}

View File

@@ -50,7 +50,7 @@ int bpf_cgroup_clone_children_read_prog(struct pt_regs *ctx)
BPF_CORE_READ(cgrp, subsys));
/* output */
-bpf_perf_event_output(ctx, &cgroup_perf_events, BPF_F_CURRENT_CPU,
+bpf_perf_event_output(ctx, &cgroup_perf_events, COMPAT_BPF_F_CURRENT_CPU,
&data, sizeof(data));
return 0;
}

View File

@@ -28,7 +28,7 @@ struct perf_event_t {
u32 sk_max_ack_backlog;
u8 state;
u8 type;
-char comm[TASK_COMM_LEN];
+char comm[COMPAT_TASK_COMM_LEN];
};
/* format: /sys/kernel/debug/tracing/events/skb/kfree_skb/format */
@@ -182,10 +182,10 @@ int bpf_kfree_skb_prog(struct kfree_skb_args *ctx)
data->sk_max_ack_backlog = 0; // ignore sk_max_ack_backlog in dropwatch case.
// output
-bpf_perf_event_output(ctx, &perf_events, BPF_F_CURRENT_CPU, data, sizeof(*data));
+bpf_perf_event_output(ctx, &perf_events, COMPAT_BPF_F_CURRENT_CPU, data, sizeof(*data));
// clean
-bpf_map_update_elem(&dropwatch_stackmap, &stackmap_key, &zero_data, BPF_EXIST);
+bpf_map_update_elem(&dropwatch_stackmap, &stackmap_key, &zero_data, COMPAT_BPF_EXIST);
return 0;
}
@@ -223,10 +223,10 @@ static int fill_overflow_event(void *ctx, u8 type, struct sock *sk, struct sk_bu
data->sk_max_ack_backlog = BPF_CORE_READ(sk, sk_max_ack_backlog);
// output
-bpf_perf_event_output(ctx, &perf_events, BPF_F_CURRENT_CPU, data, sizeof(*data));
+bpf_perf_event_output(ctx, &perf_events, COMPAT_BPF_F_CURRENT_CPU, data, sizeof(*data));
// clean
-bpf_map_update_elem(&dropwatch_stackmap, &stackmap_key, &zero_data, BPF_EXIST);
+bpf_map_update_elem(&dropwatch_stackmap, &stackmap_key, &zero_data, COMPAT_BPF_EXIST);
return 0;
}

View File

@@ -19,12 +19,12 @@ struct {
struct hungtask_info {
int32_t pid;
-char comm[TASK_COMM_LEN];
+char comm[COMPAT_TASK_COMM_LEN];
};
struct tracepoint_args {
unsigned long pad;
-char comm[TASK_COMM_LEN];
+char comm[COMPAT_TASK_COMM_LEN];
int pid;
};
@@ -37,7 +37,7 @@ int tracepoint_sched_process_hang(struct tracepoint_args *ctx)
return 0;
info.pid = ctx->pid;
// custom defined struct can't use BPF_CORE_READ_STR_INTO()
-bpf_probe_read_str(&info.comm, TASK_COMM_LEN, ctx->comm);
-bpf_perf_event_output(ctx, &hungtask_perf_events, BPF_F_CURRENT_CPU, &info, sizeof(info));
+bpf_probe_read_str(&info.comm, COMPAT_TASK_COMM_LEN, ctx->comm);
+bpf_perf_event_output(ctx, &hungtask_perf_events, COMPAT_BPF_F_CURRENT_CPU, &info, sizeof(info));
return 0;
}

View File

@@ -5,28 +5,10 @@
#define NULL ((void *)0)
#endif
-/* BPF_FUNC_perf_event_output, BPF_FUNC_perf_event_read and
- * BPF_FUNC_perf_event_read_value flags.
- */
-enum {
-BPF_F_INDEX_MASK = 0xffffffffULL,
-BPF_F_CURRENT_CPU = BPF_F_INDEX_MASK,
-/* BPF_FUNC_perf_event_output for sk_buff input context. */
-BPF_F_CTXLEN_MASK = (0xfffffULL << 32),
-};
+/* define COMPAT_XXX for compat old kernel vmlinux.h */
+#define COMPAT_BPF_F_CURRENT_CPU 0xffffffffULL
-/* flags for both BPF_FUNC_get_stackid and BPF_FUNC_get_stack. */
-enum {
-BPF_F_SKIP_FIELD_MASK = 0xffULL,
-BPF_F_USER_STACK = (1ULL << 8),
-/* flags used by BPF_FUNC_get_stackid only. */
-BPF_F_FAST_STACK_CMP = (1ULL << 9),
-BPF_F_REUSE_STACKID = (1ULL << 10),
-/* flags used by BPF_FUNC_get_stack only. */
-BPF_F_USER_BUILD_ID = (1ULL << 11),
-};
-#define TASK_COMM_LEN 16
+#define COMPAT_TASK_COMM_LEN 16
#define PATH_MAX 4096 /* # chars in a path name including nul */
/* include/uapi/linux/perf_event.h */
@@ -34,9 +16,9 @@ enum {
#define PERF_MIN_STACK_DEPTH 16
/* flags for BPF_MAP_UPDATE_ELEM command */
-#define BPF_ANY 0 /* create new element or update existing */
-#define BPF_NOEXIST 1 /* create new element if it didn't exist */
-#define BPF_EXIST 2 /* update existing element */
-#define BPF_F_LOCK 4 /* spin_lock-ed map_lookup/map_update */
+#define COMPAT_BPF_ANY 0 /* create new element or update existing */
+#define COMPAT_BPF_NOEXIST 1 /* create new element if it didn't exist */
+#define COMPAT_BPF_EXIST 2 /* update existing element */
+#define COMPAT_BPF_F_LOCK 4 /* spin_lock-ed map_lookup/map_update */
#endif /* __BPF_COMMON_H__ */

View File

@@ -23,7 +23,7 @@ static __always_inline void func_trace_begain(u64 id)
.id = id,
};
-bpf_map_update_elem(&func_trace_map, &id, &entry, BPF_ANY);
+bpf_map_update_elem(&func_trace_map, &id, &entry, COMPAT_BPF_ANY);
}
static __always_inline struct trace_entry_ctx *func_trace_end(u64 id)

View File

@@ -17,7 +17,7 @@ int ad_disable(struct pt_regs *ctx)
// ko module and CO-RE relocation is not supported directly at old
// kernel
u64 nothing = 0;
-bpf_perf_event_output(ctx, &ad_event_map, BPF_F_CURRENT_CPU, &nothing,
+bpf_perf_event_output(ctx, &ad_event_map, COMPAT_BPF_F_CURRENT_CPU, &nothing,
sizeof(nothing));
return 0;
}

View File

@@ -38,7 +38,7 @@ int tracepoint_vmscan_mm_vmscan_memcg_reclaim_begin(struct pt_regs *ctx)
.directstall_count = 1,
};
bpf_map_update_elem(&mem_cgroup_map, &mm_subsys, &new_metrics,
-BPF_ANY);
+COMPAT_BPF_ANY);
return 0;
}

View File

@@ -34,7 +34,7 @@ update_metric_map(u64 free_delta_ns, u64 compact_delta_ns)
.compaction_stat = compact_delta_ns,
};
bpf_map_update_elem(&mm_free_compact_map, &key, &new_metrics,
-BPF_ANY);
+COMPAT_BPF_ANY);
return;
}

View File

@@ -19,7 +19,7 @@ struct {
} reclaim_perf_events SEC(".maps");
struct reclaim_entry {
-char comm[TASK_COMM_LEN];
+char comm[COMPAT_TASK_COMM_LEN];
u64 delta_time;
u64 css;
u64 pid;
@@ -55,7 +55,7 @@ int kretprobe_try_to_free_pages(struct pt_regs *ctx)
bpf_get_current_comm(data.comm, sizeof(data.comm));
bpf_perf_event_output(ctx, &reclaim_perf_events,
-BPF_F_CURRENT_CPU, &data,
+COMPAT_BPF_F_CURRENT_CPU, &data,
sizeof(struct reclaim_entry));
}

View File

@@ -53,7 +53,7 @@ void probe_softirq_raise(struct tp_softirq *ctx)
nr = ctx->vec;
now = bpf_ktime_get_ns();
-bpf_map_update_elem(&silat_map, &nr, &now, BPF_ANY);
+bpf_map_update_elem(&silat_map, &nr, &now, COMPAT_BPF_ANY);
}
static void

View File

@@ -30,7 +30,7 @@ struct skb_copy_datagram_iovec_args {
};
struct perf_event_t {
-char comm[TASK_COMM_LEN];
+char comm[COMPAT_TASK_COMM_LEN];
u64 latency;
u64 tgid_pid;
u64 pkt_len;
@@ -106,7 +106,7 @@ static inline void fill_and_output_event(void *ctx, struct sk_buff *skb, struct
event.state = _mix->state;
event.where = _mix->where;
-bpf_perf_event_output(ctx, &net_recv_lat_event_map, BPF_F_CURRENT_CPU, &event, sizeof(struct perf_event_t));
+bpf_perf_event_output(ctx, &net_recv_lat_event_map, COMPAT_BPF_F_CURRENT_CPU, &event, sizeof(struct perf_event_t));
}
SEC("tracepoint/net/netif_receive_skb")

View File

@@ -17,8 +17,8 @@ struct {
} oom_perf_events SEC(".maps");
struct oom_info {
-char trigger_comm[TASK_COMM_LEN];
-char victim_comm[TASK_COMM_LEN];
+char trigger_comm[COMPAT_TASK_COMM_LEN];
+char victim_comm[COMPAT_TASK_COMM_LEN];
u32 trigger_pid;
u32 victim_pid;
u64 trigger_memcg_css;
@@ -49,6 +49,6 @@ int kprobe_oom_kill_process(struct pt_regs *ctx)
info.victim_memcg_css = (u64)BPF_CORE_READ(victim_task, cgroups, subsys[4]);
info.trigger_memcg_css = (u64)BPF_CORE_READ(trigger_task, cgroups, subsys[4]);
-bpf_perf_event_output(ctx, &oom_perf_events, BPF_F_CURRENT_CPU, &info, sizeof(info));
+bpf_perf_event_output(ctx, &oom_perf_events, COMPAT_BPF_F_CURRENT_CPU, &info, sizeof(info));
return 0;
}

View File

@@ -76,7 +76,7 @@ static int trace_enqueue(u32 pid)
return 0;
ts = bpf_ktime_get_ns();
-bpf_map_update_elem(&latency, &pid, &ts, BPF_ANY);
+bpf_map_update_elem(&latency, &pid, &ts, COMPAT_BPF_ANY);
return 0;
}
@@ -142,7 +142,7 @@ int sched_switch_entry(struct bpf_raw_tracepoint_args *ctx)
if (state == TASK_RUNNING) {
if (prev_pid != 0) {
now = bpf_ktime_get_ns();
-bpf_map_update_elem(&latency, &prev_pid, &now, BPF_ANY);
+bpf_map_update_elem(&latency, &prev_pid, &now, COMPAT_BPF_ANY);
}
is_voluntary = 0;
} else {
@@ -160,7 +160,7 @@ int sched_switch_entry(struct bpf_raw_tracepoint_args *ctx)
.g_nlat_03 = 0,
.g_nlat_04 = 0,
};
-bpf_map_update_elem(&cpu_host_metric, &g_key, &g_new_stat, BPF_NOEXIST);
+bpf_map_update_elem(&cpu_host_metric, &g_key, &g_new_stat, COMPAT_BPF_NOEXIST);
g_entry = bpf_map_lookup_elem(&cpu_host_metric, &g_key);
if (!g_entry)
return 0;
@@ -180,7 +180,7 @@ int sched_switch_entry(struct bpf_raw_tracepoint_args *ctx)
.nlat_03 = 0,
.nlat_04 = 0,
};
-bpf_map_update_elem(&cpu_tg_metric, &key, &new_stat, BPF_NOEXIST);
+bpf_map_update_elem(&cpu_tg_metric, &key, &new_stat, COMPAT_BPF_NOEXIST);
entry = bpf_map_lookup_elem(&cpu_tg_metric, &key);
if (!entry)
return 0;
@@ -229,7 +229,7 @@ int sched_switch_entry(struct bpf_raw_tracepoint_args *ctx)
.nlat_03 = 0,
.nlat_04 = 0,
};
-bpf_map_update_elem(&cpu_tg_metric, &key, &new_stat, BPF_NOEXIST);
+bpf_map_update_elem(&cpu_tg_metric, &key, &new_stat, COMPAT_BPF_NOEXIST);
entry = bpf_map_lookup_elem(&cpu_tg_metric, &key);
if (!entry)
return 0;

View File

@@ -28,7 +28,7 @@ struct report_event {
s64 stack_size;
u64 now;
u64 stall_time;
-char comm[TASK_COMM_LEN];
+char comm[COMPAT_TASK_COMM_LEN];
u32 pid;
u32 cpu;
};
@@ -108,7 +108,7 @@ void probe_scheduler_tick(struct pt_regs *ctx)
event->cpu = bpf_get_smp_processor_id();
event->stack_size = bpf_get_stack(ctx, event->stack, sizeof(event->stack), 0);
-bpf_perf_event_output(ctx, &irqoff_event_map, BPF_F_CURRENT_CPU,
+bpf_perf_event_output(ctx, &irqoff_event_map, COMPAT_BPF_F_CURRENT_CPU,
event, sizeof(struct report_event));
}

View File

@@ -20,7 +20,7 @@ struct {
struct softlockup_info {
u32 cpu;
u32 pid;
-char comm[TASK_COMM_LEN];
+char comm[COMPAT_TASK_COMM_LEN];
};
SEC("kprobe/watchdog_timer_fn+442")
@@ -35,6 +35,6 @@ int kprobe_watchdog_timer_fn(struct pt_regs *ctx)
task = (struct task_struct *)bpf_get_current_task();
info.pid = bpf_get_current_pid_tgid() & 0xffffffffUL;
BPF_CORE_READ_STR_INTO(&info.comm, task, comm);
-bpf_perf_event_output(ctx, &softlockup_perf_events, BPF_F_CURRENT_CPU, &info, sizeof(info));
+bpf_perf_event_output(ctx, &softlockup_perf_events, COMPAT_BPF_F_CURRENT_CPU, &info, sizeof(info));
return 0;
}