bpf: use more stable API interface
Use the kernel's built-in tracepoint structures, which are a more stable API interface, instead of hand-written argument structs.

Signed-off-by: Tonghao Zhang <tonghao@bamaicloud.com>
commit 9c5bab33bf
parent 7cd0e9b218
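For context, the argument structs removed below were hand-copied from the tracepoint format files under /sys/kernel/debug/tracing/events/.../format, which can drift between kernel versions. The replacement trace_event_raw_* types are the kernel's own generated event structs, normally pulled into a BPF program through a BTF-derived vmlinux.h (for example `bpftool btf dump file /sys/kernel/btf/vmlinux format c > vmlinux.h`). Below is a minimal sketch of the pattern, assuming a libbpf-style build with vmlinux.h available; the handler name and the printed fields are illustrative only, and the exact members of trace_event_raw_kfree_skb depend on the running kernel.

    /*
     * Sketch only: a tracepoint handler that takes the kernel-generated
     * context struct rather than a locally defined argument layout.
     * trace_event_raw_kfree_skb comes from vmlinux.h; skbaddr/location/
     * protocol follow the kfree_skb tracepoint format, but the full set
     * of members varies by kernel version.
     */
    #include "vmlinux.h"
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_core_read.h>

    SEC("tracepoint/skb/kfree_skb")
    int handle_kfree_skb(struct trace_event_raw_kfree_skb *ctx)
    {
            struct sk_buff *skb = (struct sk_buff *)ctx->skbaddr;
            __u16 protocol = 0;

            /* CO-RE read stays valid if sk_buff's layout shifts between kernels. */
            BPF_CORE_READ_INTO(&protocol, skb, protocol);

            bpf_printk("kfree_skb: skb=%p proto=0x%x", skb, protocol);
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";

Because vmlinux.h records are marked with preserve_access_index, field offsets in the generated structs are resolved against the target kernel's BTF at load time, which is what makes this interface more stable than a hard-coded layout.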
@@ -32,15 +32,6 @@ struct perf_event_t {
 	char comm[COMPAT_TASK_COMM_LEN];
 };
 
-/* format: /sys/kernel/debug/tracing/events/skb/kfree_skb/format */
-struct kfree_skb_args {
-	unsigned long long pad;
-
-	void *skbaddr;
-	void *location;
-	u16 protocol;
-};
-
 struct {
 	__uint(type, BPF_MAP_TYPE_PERF_EVENT_ARRAY);
 	__uint(key_size, sizeof(int));
@@ -116,7 +107,7 @@ static void sk_get_type_and_protocol(struct sock *sk, u16 *protocol, u16 *type)
 }
 
 SEC("tracepoint/skb/kfree_skb")
-int bpf_kfree_skb_prog(struct kfree_skb_args *ctx)
+int bpf_kfree_skb_prog(struct trace_event_raw_kfree_skb *ctx)
 {
 	struct sk_buff *skb = ctx->skbaddr;
 	struct perf_event_t *data = NULL;
@@ -23,14 +23,8 @@ struct hungtask_info {
 	char comm[COMPAT_TASK_COMM_LEN];
 };
 
-struct tracepoint_args {
-	unsigned long pad;
-	char comm[COMPAT_TASK_COMM_LEN];
-	int pid;
-};
-
 SEC("tracepoint/sched/sched_process_hang")
-int tracepoint_sched_process_hang(struct tracepoint_args *ctx)
+int tracepoint_sched_process_hang(struct trace_event_raw_sched_process_hang *ctx)
 {
 	struct hungtask_info info = {};
 
@@ -21,16 +21,6 @@ volatile const long long to_user_copy = 115 * 1000 * 1000; // 115ms
 
 BPF_RATELIMIT(rate, 1, 100);
 
-struct netif_receive_skb_args {
-	struct trace_entry entry;
-	struct sk_buff *skb;
-};
-
-struct skb_copy_datagram_iovec_args {
-	struct trace_entry entry;
-	struct sk_buff *skb;
-};
-
 struct perf_event_t {
 	char comm[COMPAT_TASK_COMM_LEN];
 	u64 latency;
@@ -115,9 +105,9 @@ fill_and_output_event(void *ctx, struct sk_buff *skb, struct mix *_mix)
 }
 
 SEC("tracepoint/net/netif_receive_skb")
-int netif_receive_skb_prog(struct netif_receive_skb_args *args)
+int netif_receive_skb_prog(struct trace_event_raw_net_dev_template *args)
 {
-	struct sk_buff *skb = args->skb;
+	struct sk_buff *skb = (struct sk_buff *)args->skbaddr;
 	struct iphdr ip_hdr;
 	u64 delta;
 
@@ -159,9 +149,10 @@ int tcp_v4_rcv_prog(struct pt_regs *ctx)
 }
 
 SEC("tracepoint/skb/skb_copy_datagram_iovec")
-int skb_copy_datagram_iovec_prog(struct skb_copy_datagram_iovec_args *args)
+int skb_copy_datagram_iovec_prog(
+		struct trace_event_raw_skb_copy_datagram_iovec *args)
 {
-	struct sk_buff *skb = args->skb;
+	struct sk_buff *skb = (struct sk_buff *)args->skbaddr;
 	struct iphdr ip_hdr;
 	u64 delta;
 
@@ -91,17 +91,8 @@ static int trace_enqueue(u32 pid)
 	return 0;
 }
 
-struct sched_wakeup_new_args {
-	unsigned long long pad;
-	char comm[16];
-	int pid;
-	int prio;
-	int success;
-	int target_cpu;
-};
-
 SEC("tracepoint/sched/sched_wakeup_new")
-int sched_wakeup_new_entry(struct sched_wakeup_new_args *ctx)
+int sched_wakeup_new_entry(struct trace_event_raw_sched_wakeup_template *ctx)
 {
 	return trace_enqueue(ctx->pid);
 }
@@ -116,7 +107,7 @@ struct sched_wakeup_args {
 };
 
 SEC("tracepoint/sched/sched_wakeup")
-int sched_wakeup_entry(struct sched_wakeup_new_args *ctx)
+int sched_wakeup_entry(struct trace_event_raw_sched_wakeup_template *ctx)
 {
 	return trace_enqueue(ctx->pid);
 }
@@ -14,11 +14,6 @@ enum lat_zone {
 	LAT_ZONE_MAX,
 };
 
-struct tp_softirq {
-	unsigned long long pad;
-	unsigned int vec;
-};
-
 struct softirq_lat {
 	u64 timestamp;
 	u64 total_latency[LAT_ZONE_MAX];
@@ -32,7 +27,7 @@ struct {
 } softirq_percpu_lats SEC(".maps");
 
 SEC("tracepoint/irq/softirq_raise")
-int probe_softirq_raise(struct tp_softirq *ctx)
+int probe_softirq_raise(struct trace_event_raw_softirq *ctx)
 {
 	struct softirq_lat lat = {
 		.timestamp = bpf_ktime_get_ns(),
@@ -47,7 +42,7 @@ int probe_softirq_raise(struct tp_softirq *ctx)
 }
 
 SEC("tracepoint/irq/softirq_entry")
-int probe_softirq_entry(struct tp_softirq *ctx)
+int probe_softirq_entry(struct trace_event_raw_softirq *ctx)
 {
 	struct softirq_lat *lat;
 	u32 vec = ctx->vec;
@@ -120,14 +120,8 @@ void probe_scheduler_tick(struct pt_regs *ctx)
 	ts->soft_ts = now;
 }
 
-struct tp_tick_stop {
-	unsigned long pad;
-	int success;
-	int dependency;
-};
-
 SEC("tracepoint/timer/tick_stop")
-void probe_tick_stop(struct tp_tick_stop *ctx)
+void probe_tick_stop(struct trace_event_raw_tick_stop *ctx)
 {
 	struct timer_softirq_run_ts *ts;
 	int key = 0;